generic: fix Qcom SNAND driver and move to backports directory
Author:     Christian Marangi <ansuelsmth@gmail.com>
AuthorDate: Fri, 7 Feb 2025 20:12:07 +0000 (21:12 +0100)
Commit:     Robert Marko <robimarko@gmail.com>
CommitDate: Sat, 8 Feb 2025 09:41:27 +0000 (10:41 +0100)
Add a patch to fix the Qcom SNAND driver and move the SNAND patches to
the backports directory, as they are shared between the qualcommax and
qualcommbe targets.

Fixes: #17897
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Link: https://github.com/openwrt/openwrt/pull/17900
Signed-off-by: Robert Marko <robimarko@gmail.com>
18 files changed:
target/linux/generic/backport-6.6/413-01-v6.14-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch [new file with mode: 0644]
target/linux/generic/backport-6.6/413-02-v6.14-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch [new file with mode: 0644]
target/linux/generic/backport-6.6/413-03-v6.14-mtd-nand-Add-qpic_common-API-file.patch [new file with mode: 0644]
target/linux/generic/backport-6.6/413-04-v6.14-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch [new file with mode: 0644]
target/linux/generic/backport-6.6/414-v6.14-mtd-rawnand-qcom-fix-broken-config-in-qcom_param_pag.patch [new file with mode: 0644]
target/linux/generic/backport-6.6/415-v6.14-mtd-rawnand-qcom-Fix-build-issue-on-x86-architecture.patch [new file with mode: 0644]
target/linux/generic/hack-6.6/430-mtk-bmt-support.patch
target/linux/qualcommax/patches-6.6/0402-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch [deleted file]
target/linux/qualcommax/patches-6.6/0403-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch [deleted file]
target/linux/qualcommax/patches-6.6/0404-mtd-nand-Add-qpic_common-API-file.patch [deleted file]
target/linux/qualcommax/patches-6.6/0405-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch [deleted file]
target/linux/qualcommax/patches-6.6/0406-spi-spi-qpic-add-driver-for-QCOM-SPI-NAND-flash-Interface.patch
target/linux/qualcommax/patches-6.6/0408-spi-spi-qpic-fix-compilation-issues.patch [deleted file]
target/linux/qualcommbe/patches-6.6/016-01-v6.14-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch [deleted file]
target/linux/qualcommbe/patches-6.6/016-02-v6.14-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch [deleted file]
target/linux/qualcommbe/patches-6.6/016-03-v6.14-mtd-nand-Add-qpic_common-API-file.patch [deleted file]
target/linux/qualcommbe/patches-6.6/016-04-v6.14-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch [deleted file]
target/linux/qualcommbe/patches-6.6/017-v6.14-mtd-rawnand-qcom-Fix-build-issue-on-x86-architecture.patch [deleted file]

diff --git a/target/linux/generic/backport-6.6/413-01-v6.14-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch b/target/linux/generic/backport-6.6/413-01-v6.14-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch
new file mode 100644 (file)
index 0000000..8c5457a
--- /dev/null
@@ -0,0 +1,1013 @@
+From 8c52932da5e6756fa66f52f0720da283fba13aa6 Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Wed, 20 Nov 2024 14:45:00 +0530
+Subject: [PATCH 1/4] mtd: rawnand: qcom: cleanup qcom_nandc driver
+
+Perform a global cleanup of the Qualcomm NAND
+controller driver with the following improvements:
+
+- Remove register value indirection API
+
+- Remove set_reg() API
+
+- Convert read_loc_first & read_loc_last macro to functions
+
+- Rename multiple variables
+
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+---
+ drivers/mtd/nand/raw/qcom_nandc.c | 516 ++++++++++++++----------------
+ 1 file changed, 234 insertions(+), 282 deletions(-)
+
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -189,17 +189,6 @@
+ #define       ECC_BCH_4BIT    BIT(2)
+ #define       ECC_BCH_8BIT    BIT(3)
+-#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc)   \
+-nandc_set_reg(chip, reg,                      \
+-            ((cw_offset) << READ_LOCATION_OFFSET) |           \
+-            ((read_size) << READ_LOCATION_SIZE) |                     \
+-            ((is_last_read_loc) << READ_LOCATION_LAST))
+-
+-#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc)    \
+-nandc_set_reg(chip, reg,                      \
+-            ((cw_offset) << READ_LOCATION_OFFSET) |           \
+-            ((read_size) << READ_LOCATION_SIZE) |                     \
+-            ((is_last_read_loc) << READ_LOCATION_LAST))
+ /*
+  * Returns the actual register address for all NAND_DEV_ registers
+  * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
+@@ -257,8 +246,6 @@ nandc_set_reg(chip, reg,                   \
+  * @tx_sgl_start - start index in data sgl for tx.
+  * @rx_sgl_pos - current index in data sgl for rx.
+  * @rx_sgl_start - start index in data sgl for rx.
+- * @wait_second_completion - wait for second DMA desc completion before making
+- *                         the NAND transfer completion.
+  */
+ struct bam_transaction {
+       struct bam_cmd_element *bam_ce;
+@@ -275,7 +262,6 @@ struct bam_transaction {
+       u32 tx_sgl_start;
+       u32 rx_sgl_pos;
+       u32 rx_sgl_start;
+-      bool wait_second_completion;
+ };
+ /*
+@@ -471,9 +457,9 @@ struct qcom_op {
+       unsigned int data_instr_idx;
+       unsigned int rdy_timeout_ms;
+       unsigned int rdy_delay_ns;
+-      u32 addr1_reg;
+-      u32 addr2_reg;
+-      u32 cmd_reg;
++      __le32 addr1_reg;
++      __le32 addr2_reg;
++      __le32 cmd_reg;
+       u8 flag;
+ };
+@@ -549,17 +535,17 @@ struct qcom_nand_host {
+  * among different NAND controllers.
+  * @ecc_modes - ecc mode for NAND
+  * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+- * @is_bam - whether NAND controller is using BAM
+- * @is_qpic - whether NAND CTRL is part of qpic IP
+- * @qpic_v2 - flag to indicate QPIC IP version 2
++ * @supports_bam - whether NAND controller is using Bus Access Manager (BAM)
++ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
++ * @qpic_version2 - flag to indicate QPIC IP version 2
+  * @use_codeword_fixup - whether NAND has different layout for boot partitions
+  */
+ struct qcom_nandc_props {
+       u32 ecc_modes;
+       u32 dev_cmd_reg_start;
+-      bool is_bam;
+-      bool is_qpic;
+-      bool qpic_v2;
++      bool supports_bam;
++      bool nandc_part_of_qpic;
++      bool qpic_version2;
+       bool use_codeword_fixup;
+ };
+@@ -613,19 +599,11 @@ static void clear_bam_transaction(struct
+ {
+       struct bam_transaction *bam_txn = nandc->bam_txn;
+-      if (!nandc->props->is_bam)
++      if (!nandc->props->supports_bam)
+               return;
+-      bam_txn->bam_ce_pos = 0;
+-      bam_txn->bam_ce_start = 0;
+-      bam_txn->cmd_sgl_pos = 0;
+-      bam_txn->cmd_sgl_start = 0;
+-      bam_txn->tx_sgl_pos = 0;
+-      bam_txn->tx_sgl_start = 0;
+-      bam_txn->rx_sgl_pos = 0;
+-      bam_txn->rx_sgl_start = 0;
++      memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
+       bam_txn->last_data_desc = NULL;
+-      bam_txn->wait_second_completion = false;
+       sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
+                     QPIC_PER_CW_CMD_SGL);
+@@ -640,46 +618,35 @@ static void qpic_bam_dma_done(void *data
+ {
+       struct bam_transaction *bam_txn = data;
+-      /*
+-       * In case of data transfer with NAND, 2 callbacks will be generated.
+-       * One for command channel and another one for data channel.
+-       * If current transaction has data descriptors
+-       * (i.e. wait_second_completion is true), then set this to false
+-       * and wait for second DMA descriptor completion.
+-       */
+-      if (bam_txn->wait_second_completion)
+-              bam_txn->wait_second_completion = false;
+-      else
+-              complete(&bam_txn->txn_done);
++      complete(&bam_txn->txn_done);
+ }
+-static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
++static struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+ {
+       return container_of(chip, struct qcom_nand_host, chip);
+ }
+-static inline struct qcom_nand_controller *
++static struct qcom_nand_controller *
+ get_qcom_nand_controller(struct nand_chip *chip)
+ {
+       return container_of(chip->controller, struct qcom_nand_controller,
+                           controller);
+ }
+-static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
++static u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
+ {
+       return ioread32(nandc->base + offset);
+ }
+-static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
+-                             u32 val)
++static void nandc_write(struct qcom_nand_controller *nandc, int offset,
++                      u32 val)
+ {
+       iowrite32(val, nandc->base + offset);
+ }
+-static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
+-                                        bool is_cpu)
++static void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+ {
+-      if (!nandc->props->is_bam)
++      if (!nandc->props->supports_bam)
+               return;
+       if (is_cpu)
+@@ -694,93 +661,90 @@ static inline void nandc_read_buffer_syn
+                                          DMA_FROM_DEVICE);
+ }
+-static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
+-{
+-      switch (offset) {
+-      case NAND_FLASH_CMD:
+-              return &regs->cmd;
+-      case NAND_ADDR0:
+-              return &regs->addr0;
+-      case NAND_ADDR1:
+-              return &regs->addr1;
+-      case NAND_FLASH_CHIP_SELECT:
+-              return &regs->chip_sel;
+-      case NAND_EXEC_CMD:
+-              return &regs->exec;
+-      case NAND_FLASH_STATUS:
+-              return &regs->clrflashstatus;
+-      case NAND_DEV0_CFG0:
+-              return &regs->cfg0;
+-      case NAND_DEV0_CFG1:
+-              return &regs->cfg1;
+-      case NAND_DEV0_ECC_CFG:
+-              return &regs->ecc_bch_cfg;
+-      case NAND_READ_STATUS:
+-              return &regs->clrreadstatus;
+-      case NAND_DEV_CMD1:
+-              return &regs->cmd1;
+-      case NAND_DEV_CMD1_RESTORE:
+-              return &regs->orig_cmd1;
+-      case NAND_DEV_CMD_VLD:
+-              return &regs->vld;
+-      case NAND_DEV_CMD_VLD_RESTORE:
+-              return &regs->orig_vld;
+-      case NAND_EBI2_ECC_BUF_CFG:
+-              return &regs->ecc_buf_cfg;
+-      case NAND_READ_LOCATION_0:
+-              return &regs->read_location0;
+-      case NAND_READ_LOCATION_1:
+-              return &regs->read_location1;
+-      case NAND_READ_LOCATION_2:
+-              return &regs->read_location2;
+-      case NAND_READ_LOCATION_3:
+-              return &regs->read_location3;
+-      case NAND_READ_LOCATION_LAST_CW_0:
+-              return &regs->read_location_last0;
+-      case NAND_READ_LOCATION_LAST_CW_1:
+-              return &regs->read_location_last1;
+-      case NAND_READ_LOCATION_LAST_CW_2:
+-              return &regs->read_location_last2;
+-      case NAND_READ_LOCATION_LAST_CW_3:
+-              return &regs->read_location_last3;
+-      default:
+-              return NULL;
+-      }
+-}
+-
+-static void nandc_set_reg(struct nand_chip *chip, int offset,
+-                        u32 val)
+-{
+-      struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+-      struct nandc_regs *regs = nandc->regs;
+-      __le32 *reg;
+-
+-      reg = offset_to_nandc_reg(regs, offset);
+-
+-      if (reg)
+-              *reg = cpu_to_le32(val);
+-}
+-
+-/* Helper to check the code word, whether it is last cw or not */
++/* Helper to check whether this is the last CW or not */
+ static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
+ {
+       return cw == (ecc->steps - 1);
+ }
++/**
++ * nandc_set_read_loc_first() - to set read location first register
++ * @chip:             NAND Private Flash Chip Data
++ * @reg_base:         location register base
++ * @cw_offset:                code word offset
++ * @read_size:                code word read length
++ * @is_last_read_loc: is this the last read location
++ *
++ * This function will set location register value
++ */
++static void nandc_set_read_loc_first(struct nand_chip *chip,
++                                   int reg_base, u32 cw_offset,
++                                   u32 read_size, u32 is_last_read_loc)
++{
++      struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
++      __le32 locreg_val;
++      u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
++                ((read_size) << READ_LOCATION_SIZE) |
++                ((is_last_read_loc) << READ_LOCATION_LAST));
++
++      locreg_val = cpu_to_le32(val);
++
++      if (reg_base == NAND_READ_LOCATION_0)
++              nandc->regs->read_location0 = locreg_val;
++      else if (reg_base == NAND_READ_LOCATION_1)
++              nandc->regs->read_location1 = locreg_val;
++      else if (reg_base == NAND_READ_LOCATION_2)
++              nandc->regs->read_location2 = locreg_val;
++      else if (reg_base == NAND_READ_LOCATION_3)
++              nandc->regs->read_location3 = locreg_val;
++}
++
++/**
++ * nandc_set_read_loc_last - to set read location last register
++ * @chip:             NAND Private Flash Chip Data
++ * @reg_base:         location register base
++ * @cw_offset:                code word offset
++ * @read_size:                code word read length
++ * @is_last_read_loc: is this the last read location
++ *
++ * This function will set location last register value
++ */
++static void nandc_set_read_loc_last(struct nand_chip *chip,
++                                  int reg_base, u32 cw_offset,
++                                  u32 read_size, u32 is_last_read_loc)
++{
++      struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
++      __le32 locreg_val;
++      u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
++                ((read_size) << READ_LOCATION_SIZE) |
++                ((is_last_read_loc) << READ_LOCATION_LAST));
++
++      locreg_val = cpu_to_le32(val);
++
++      if (reg_base == NAND_READ_LOCATION_LAST_CW_0)
++              nandc->regs->read_location_last0 = locreg_val;
++      else if (reg_base == NAND_READ_LOCATION_LAST_CW_1)
++              nandc->regs->read_location_last1 = locreg_val;
++      else if (reg_base == NAND_READ_LOCATION_LAST_CW_2)
++              nandc->regs->read_location_last2 = locreg_val;
++      else if (reg_base == NAND_READ_LOCATION_LAST_CW_3)
++              nandc->regs->read_location_last3 = locreg_val;
++}
++
+ /* helper to configure location register values */
+ static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
+-                             int cw_offset, int read_size, int is_last_read_loc)
++                             u32 cw_offset, u32 read_size, u32 is_last_read_loc)
+ {
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+       struct nand_ecc_ctrl *ecc = &chip->ecc;
+       int reg_base = NAND_READ_LOCATION_0;
+-      if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
++      if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
+               reg_base = NAND_READ_LOCATION_LAST_CW_0;
+       reg_base += reg * 4;
+-      if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
++      if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
+               return nandc_set_read_loc_last(chip, reg_base, cw_offset,
+                               read_size, is_last_read_loc);
+       else
+@@ -792,12 +756,13 @@ static void nandc_set_read_loc(struct na
+ static void set_address(struct qcom_nand_host *host, u16 column, int page)
+ {
+       struct nand_chip *chip = &host->chip;
++      struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+       if (chip->options & NAND_BUSWIDTH_16)
+               column >>= 1;
+-      nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
+-      nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
++      nandc->regs->addr0 = cpu_to_le32(page << 16 | column);
++      nandc->regs->addr1 = cpu_to_le32(page >> 16 & 0xff);
+ }
+ /*
+@@ -811,41 +776,43 @@ static void set_address(struct qcom_nand
+ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
+ {
+       struct nand_chip *chip = &host->chip;
+-      u32 cmd, cfg0, cfg1, ecc_bch_cfg;
++      __le32 cmd, cfg0, cfg1, ecc_bch_cfg;
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+       if (read) {
+               if (host->use_ecc)
+-                      cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
++                      cmd = cpu_to_le32(OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE);
+               else
+-                      cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
++                      cmd = cpu_to_le32(OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
+       } else {
+-              cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
++              cmd = cpu_to_le32(OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE);
+       }
+       if (host->use_ecc) {
+-              cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
+-                              (num_cw - 1) << CW_PER_PAGE;
++              cfg0 = cpu_to_le32((host->cfg0 & ~(7U << CW_PER_PAGE)) |
++                              (num_cw - 1) << CW_PER_PAGE);
+-              cfg1 = host->cfg1;
+-              ecc_bch_cfg = host->ecc_bch_cfg;
++              cfg1 = cpu_to_le32(host->cfg1);
++              ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg);
+       } else {
+-              cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+-                              (num_cw - 1) << CW_PER_PAGE;
++              cfg0 = cpu_to_le32((host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
++                              (num_cw - 1) << CW_PER_PAGE);
+-              cfg1 = host->cfg1_raw;
+-              ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
++              cfg1 = cpu_to_le32(host->cfg1_raw);
++              ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
+       }
+-      nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
+-      nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
+-      nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
+-      nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
+-      if (!nandc->props->qpic_v2)
+-              nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
+-      nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
+-      nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
+-      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
++      nandc->regs->cmd = cmd;
++      nandc->regs->cfg0 = cfg0;
++      nandc->regs->cfg1 = cfg1;
++      nandc->regs->ecc_bch_cfg = ecc_bch_cfg;
++
++      if (!nandc->props->qpic_version2)
++              nandc->regs->ecc_buf_cfg = cpu_to_le32(host->ecc_buf_cfg);
++
++      nandc->regs->clrflashstatus = cpu_to_le32(host->clrflashstatus);
++      nandc->regs->clrreadstatus = cpu_to_le32(host->clrreadstatus);
++      nandc->regs->exec = cpu_to_le32(1);
+       if (read)
+               nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
+@@ -1121,7 +1088,7 @@ static int read_reg_dma(struct qcom_nand
+       if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
+               first = dev_cmd_reg_addr(nandc, first);
+-      if (nandc->props->is_bam)
++      if (nandc->props->supports_bam)
+               return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+                                            num_regs, flags);
+@@ -1136,25 +1103,16 @@ static int read_reg_dma(struct qcom_nand
+  * write_reg_dma:     prepares a descriptor to write a given number of
+  *                    contiguous registers
+  *
++ * @vaddr:            contiguous memory from where register value will
++ *                    be written
+  * @first:            offset of the first register in the contiguous block
+  * @num_regs:         number of registers to write
+  * @flags:            flags to control DMA descriptor preparation
+  */
+-static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
+-                       int num_regs, unsigned int flags)
++static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
++                       int first, int num_regs, unsigned int flags)
+ {
+       bool flow_control = false;
+-      struct nandc_regs *regs = nandc->regs;
+-      void *vaddr;
+-
+-      vaddr = offset_to_nandc_reg(regs, first);
+-
+-      if (first == NAND_ERASED_CW_DETECT_CFG) {
+-              if (flags & NAND_ERASED_CW_SET)
+-                      vaddr = &regs->erased_cw_detect_cfg_set;
+-              else
+-                      vaddr = &regs->erased_cw_detect_cfg_clr;
+-      }
+       if (first == NAND_EXEC_CMD)
+               flags |= NAND_BAM_NWD;
+@@ -1165,7 +1123,7 @@ static int write_reg_dma(struct qcom_nan
+       if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
+               first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+-      if (nandc->props->is_bam)
++      if (nandc->props->supports_bam)
+               return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+                                            num_regs, flags);
+@@ -1188,7 +1146,7 @@ static int write_reg_dma(struct qcom_nan
+ static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+                        const u8 *vaddr, int size, unsigned int flags)
+ {
+-      if (nandc->props->is_bam)
++      if (nandc->props->supports_bam)
+               return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+       return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+@@ -1206,7 +1164,7 @@ static int read_data_dma(struct qcom_nan
+ static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+                         const u8 *vaddr, int size, unsigned int flags)
+ {
+-      if (nandc->props->is_bam)
++      if (nandc->props->supports_bam)
+               return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+       return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+@@ -1220,13 +1178,14 @@ static void config_nand_page_read(struct
+ {
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+-      write_reg_dma(nandc, NAND_ADDR0, 2, 0);
+-      write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
+-      if (!nandc->props->qpic_v2)
+-              write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+-      write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
+-      write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
+-                    NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
++      write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++      if (!nandc->props->qpic_version2)
++              write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
++      write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
++                    NAND_ERASED_CW_DETECT_CFG, 1, 0);
++      write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
++                    NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+ }
+ /*
+@@ -1239,16 +1198,16 @@ config_nand_cw_read(struct nand_chip *ch
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+       struct nand_ecc_ctrl *ecc = &chip->ecc;
+-      int reg = NAND_READ_LOCATION_0;
++      __le32 *reg = &nandc->regs->read_location0;
+-      if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
+-              reg = NAND_READ_LOCATION_LAST_CW_0;
++      if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
++              reg = &nandc->regs->read_location_last0;
+-      if (nandc->props->is_bam)
+-              write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);
++      if (nandc->props->supports_bam)
++              write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+       if (use_ecc) {
+               read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+@@ -1279,10 +1238,10 @@ static void config_nand_page_write(struc
+ {
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+-      write_reg_dma(nandc, NAND_ADDR0, 2, 0);
+-      write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
+-      if (!nandc->props->qpic_v2)
+-              write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
++      write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
++      write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++      if (!nandc->props->qpic_version2)
++              write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
+                             NAND_BAM_NEXT_SGL);
+ }
+@@ -1294,13 +1253,13 @@ static void config_nand_cw_write(struct
+ {
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+-      write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+       read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
+-      write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
++      write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
+ }
+ /* helpers to submit/free our list of dma descriptors */
+@@ -1311,7 +1270,7 @@ static int submit_descs(struct qcom_nand
+       struct bam_transaction *bam_txn = nandc->bam_txn;
+       int ret = 0;
+-      if (nandc->props->is_bam) {
++      if (nandc->props->supports_bam) {
+               if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+                       ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+                       if (ret)
+@@ -1336,14 +1295,9 @@ static int submit_descs(struct qcom_nand
+       list_for_each_entry(desc, &nandc->desc_list, node)
+               cookie = dmaengine_submit(desc->dma_desc);
+-      if (nandc->props->is_bam) {
++      if (nandc->props->supports_bam) {
+               bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
+               bam_txn->last_cmd_desc->callback_param = bam_txn;
+-              if (bam_txn->last_data_desc) {
+-                      bam_txn->last_data_desc->callback = qpic_bam_dma_done;
+-                      bam_txn->last_data_desc->callback_param = bam_txn;
+-                      bam_txn->wait_second_completion = true;
+-              }
+               dma_async_issue_pending(nandc->tx_chan);
+               dma_async_issue_pending(nandc->rx_chan);
+@@ -1365,7 +1319,7 @@ err_unmap_free_desc:
+       list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+               list_del(&desc->node);
+-              if (nandc->props->is_bam)
++              if (nandc->props->supports_bam)
+                       dma_unmap_sg(nandc->dev, desc->bam_sgl,
+                                    desc->sgl_cnt, desc->dir);
+               else
+@@ -1382,7 +1336,7 @@ err_unmap_free_desc:
+ static void clear_read_regs(struct qcom_nand_controller *nandc)
+ {
+       nandc->reg_read_pos = 0;
+-      nandc_read_buffer_sync(nandc, false);
++      nandc_dev_to_mem(nandc, false);
+ }
+ /*
+@@ -1446,7 +1400,7 @@ static int check_flash_errors(struct qco
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+       int i;
+-      nandc_read_buffer_sync(nandc, true);
++      nandc_dev_to_mem(nandc, true);
+       for (i = 0; i < cw_cnt; i++) {
+               u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
+@@ -1476,7 +1430,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *
+       clear_read_regs(nandc);
+       host->use_ecc = false;
+-      if (nandc->props->qpic_v2)
++      if (nandc->props->qpic_version2)
+               raw_cw = ecc->steps - 1;
+       clear_bam_transaction(nandc);
+@@ -1497,7 +1451,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *
+               oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+       }
+-      if (nandc->props->is_bam) {
++      if (nandc->props->supports_bam) {
+               nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
+               read_loc += data_size1;
+@@ -1621,7 +1575,7 @@ static int parse_read_errors(struct qcom
+       u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
+       buf = (struct read_stats *)nandc->reg_read_buf;
+-      nandc_read_buffer_sync(nandc, true);
++      nandc_dev_to_mem(nandc, true);
+       for (i = 0; i < ecc->steps; i++, buf++) {
+               u32 flash, buffer, erased_cw;
+@@ -1734,7 +1688,7 @@ static int read_page_ecc(struct qcom_nan
+                       oob_size = host->ecc_bytes_hw + host->spare_bytes;
+               }
+-              if (nandc->props->is_bam) {
++              if (nandc->props->supports_bam) {
+                       if (data_buf && oob_buf) {
+                               nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
+                               nandc_set_read_loc(chip, i, 1, data_size,
+@@ -2455,14 +2409,14 @@ static int qcom_nand_attach_chip(struct
+       mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
+       /* Free the initially allocated BAM transaction for reading the ONFI params */
+-      if (nandc->props->is_bam)
++      if (nandc->props->supports_bam)
+               free_bam_transaction(nandc);
+       nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
+                                    cwperpage);
+       /* Now allocate the BAM transaction based on updated max_cwperpage */
+-      if (nandc->props->is_bam) {
++      if (nandc->props->supports_bam) {
+               nandc->bam_txn = alloc_bam_transaction(nandc);
+               if (!nandc->bam_txn) {
+                       dev_err(nandc->dev,
+@@ -2522,7 +2476,7 @@ static int qcom_nand_attach_chip(struct
+                               | ecc_mode << ECC_MODE
+                               | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
+-      if (!nandc->props->qpic_v2)
++      if (!nandc->props->qpic_version2)
+               host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+       host->clrflashstatus = FS_READY_BSY_N;
+@@ -2556,7 +2510,7 @@ static int qcom_op_cmd_mapping(struct na
+               cmd = OP_FETCH_ID;
+               break;
+       case NAND_CMD_PARAM:
+-              if (nandc->props->qpic_v2)
++              if (nandc->props->qpic_version2)
+                       cmd = OP_PAGE_READ_ONFI_READ;
+               else
+                       cmd = OP_PAGE_READ;
+@@ -2609,7 +2563,7 @@ static int qcom_parse_instructions(struc
+                       if (ret < 0)
+                               return ret;
+-                      q_op->cmd_reg = ret;
++                      q_op->cmd_reg = cpu_to_le32(ret);
+                       q_op->rdy_delay_ns = instr->delay_ns;
+                       break;
+@@ -2619,10 +2573,10 @@ static int qcom_parse_instructions(struc
+                       addrs = &instr->ctx.addr.addrs[offset];
+                       for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+-                              q_op->addr1_reg |= addrs[i] << (i * 8);
++                              q_op->addr1_reg |= cpu_to_le32(addrs[i] << (i * 8));
+                       if (naddrs > 4)
+-                              q_op->addr2_reg |= addrs[4];
++                              q_op->addr2_reg |= cpu_to_le32(addrs[4]);
+                       q_op->rdy_delay_ns = instr->delay_ns;
+                       break;
+@@ -2663,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nan
+       unsigned long start = jiffies + msecs_to_jiffies(time_ms);
+       u32 flash;
+-      nandc_read_buffer_sync(nandc, true);
++      nandc_dev_to_mem(nandc, true);
+       do {
+               flash = le32_to_cpu(nandc->reg_read_buf[0]);
+@@ -2706,11 +2660,11 @@ static int qcom_read_status_exec(struct
+       clear_read_regs(nandc);
+       clear_bam_transaction(nandc);
+-      nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+-      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
++      nandc->regs->cmd = q_op.cmd_reg;
++      nandc->regs->exec = cpu_to_le32(1);
+-      write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+       read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+       ret = submit_descs(nandc);
+@@ -2719,7 +2673,7 @@ static int qcom_read_status_exec(struct
+               goto err_out;
+       }
+-      nandc_read_buffer_sync(nandc, true);
++      nandc_dev_to_mem(nandc, true);
+       for (i = 0; i < num_cw; i++) {
+               flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
+@@ -2763,16 +2717,14 @@ static int qcom_read_id_type_exec(struct
+       clear_read_regs(nandc);
+       clear_bam_transaction(nandc);
+-      nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+-      nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
+-      nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
+-      nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
+-                    nandc->props->is_bam ? 0 : DM_EN);
++      nandc->regs->cmd = q_op.cmd_reg;
++      nandc->regs->addr0 = q_op.addr1_reg;
++      nandc->regs->addr1 = q_op.addr2_reg;
++      nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
++      nandc->regs->exec = cpu_to_le32(1);
+-      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+-
+-      write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+       read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+@@ -2786,7 +2738,7 @@ static int qcom_read_id_type_exec(struct
+       op_id = q_op.data_instr_idx;
+       len = nand_subop_get_data_len(subop, op_id);
+-      nandc_read_buffer_sync(nandc, true);
++      nandc_dev_to_mem(nandc, true);
+       memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
+ err_out:
+@@ -2807,15 +2759,14 @@ static int qcom_misc_cmd_type_exec(struc
+       if (q_op.flag == OP_PROGRAM_PAGE) {
+               goto wait_rdy;
+-      } else if (q_op.cmd_reg == OP_BLOCK_ERASE) {
+-              q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
+-              nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
+-              nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
+-              nandc_set_reg(chip, NAND_DEV0_CFG0,
+-                            host->cfg0_raw & ~(7 << CW_PER_PAGE));
+-              nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
++      } else if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE)) {
++              q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
++              nandc->regs->addr0 = q_op.addr1_reg;
++              nandc->regs->addr1 = q_op.addr2_reg;
++              nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~(7 << CW_PER_PAGE));
++              nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw);
+               instrs = 3;
+-      } else if (q_op.cmd_reg != OP_RESET_DEVICE) {
++      } else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) {
+               return 0;
+       }
+@@ -2826,14 +2777,14 @@ static int qcom_misc_cmd_type_exec(struc
+       clear_read_regs(nandc);
+       clear_bam_transaction(nandc);
+-      nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+-      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
++      nandc->regs->cmd = q_op.cmd_reg;
++      nandc->regs->exec = cpu_to_le32(1);
+-      write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+-      if (q_op.cmd_reg == OP_BLOCK_ERASE)
+-              write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
++      if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
++              write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+       read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+       ret = submit_descs(nandc);
+@@ -2864,7 +2815,7 @@ static int qcom_param_page_type_exec(str
+       if (ret)
+               return ret;
+-      q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
++      q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
+       nandc->buf_count = 0;
+       nandc->buf_start = 0;
+@@ -2872,38 +2823,38 @@ static int qcom_param_page_type_exec(str
+       clear_read_regs(nandc);
+       clear_bam_transaction(nandc);
+-      nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
++      nandc->regs->cmd = q_op.cmd_reg;
++      nandc->regs->addr0 = 0;
++      nandc->regs->addr1 = 0;
++
++      nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE |
++                                      512 << UD_SIZE_BYTES |
++                                      5 << NUM_ADDR_CYCLES |
++                                      0 << SPARE_SIZE_BYTES);
++
++      nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES |
++                                      0 << CS_ACTIVE_BSY |
++                                      17 << BAD_BLOCK_BYTE_NUM |
++                                      1 << BAD_BLOCK_IN_SPARE_AREA |
++                                      2 << WR_RD_BSY_GAP |
++                                      0 << WIDE_FLASH |
++                                      1 << DEV0_CFG1_ECC_DISABLE);
+-      nandc_set_reg(chip, NAND_ADDR0, 0);
+-      nandc_set_reg(chip, NAND_ADDR1, 0);
+-      nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
+-                                      | 512 << UD_SIZE_BYTES
+-                                      | 5 << NUM_ADDR_CYCLES
+-                                      | 0 << SPARE_SIZE_BYTES);
+-      nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
+-                                      | 0 << CS_ACTIVE_BSY
+-                                      | 17 << BAD_BLOCK_BYTE_NUM
+-                                      | 1 << BAD_BLOCK_IN_SPARE_AREA
+-                                      | 2 << WR_RD_BSY_GAP
+-                                      | 0 << WIDE_FLASH
+-                                      | 1 << DEV0_CFG1_ECC_DISABLE);
+-      if (!nandc->props->qpic_v2)
+-              nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
++      if (!nandc->props->qpic_version2)
++              nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
+       /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
+-      if (!nandc->props->qpic_v2) {
+-              nandc_set_reg(chip, NAND_DEV_CMD_VLD,
+-                            (nandc->vld & ~READ_START_VLD));
+-              nandc_set_reg(chip, NAND_DEV_CMD1,
+-                            (nandc->cmd1 & ~(0xFF << READ_ADDR))
+-                            | NAND_CMD_PARAM << READ_ADDR);
++      if (!nandc->props->qpic_version2) {
++              nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD));
++              nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~(0xFF << READ_ADDR))
++                                  | NAND_CMD_PARAM << READ_ADDR);
+       }
+-      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+-
+-      if (!nandc->props->qpic_v2) {
+-              nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
+-              nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
++      nandc->regs->exec = cpu_to_le32(1);
++
++      if (!nandc->props->qpic_version2) {
++              nandc->regs->orig_cmd1 = cpu_to_le32(nandc->cmd1);
++              nandc->regs->orig_vld = cpu_to_le32(nandc->vld);
+       }
+       instr = q_op.data_instr;
+@@ -2912,9 +2863,9 @@ static int qcom_param_page_type_exec(str
+       nandc_set_read_loc(chip, 0, 0, 0, len, 1);
+-      if (!nandc->props->qpic_v2) {
+-              write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
+-              write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
++      if (!nandc->props->qpic_version2) {
++              write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
++              write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+       }
+       nandc->buf_count = len;
+@@ -2926,9 +2877,10 @@ static int qcom_param_page_type_exec(str
+                     nandc->buf_count, 0);
+       /* restore CMD1 and VLD regs */
+-      if (!nandc->props->qpic_v2) {
+-              write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
+-              write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
++      if (!nandc->props->qpic_version2) {
++              write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
++              write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
++                            NAND_BAM_NEXT_SGL);
+       }
+       ret = submit_descs(nandc);
+@@ -3017,7 +2969,7 @@ static const struct nand_controller_ops
+ static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+ {
+-      if (nandc->props->is_bam) {
++      if (nandc->props->supports_bam) {
+               if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+                       dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+                                        MAX_REG_RD *
+@@ -3070,7 +3022,7 @@ static int qcom_nandc_alloc(struct qcom_
+       if (!nandc->reg_read_buf)
+               return -ENOMEM;
+-      if (nandc->props->is_bam) {
++      if (nandc->props->supports_bam) {
+               nandc->reg_read_dma =
+                       dma_map_single(nandc->dev, nandc->reg_read_buf,
+                                      MAX_REG_RD *
+@@ -3151,15 +3103,15 @@ static int qcom_nandc_setup(struct qcom_
+       u32 nand_ctrl;
+       /* kill onenand */
+-      if (!nandc->props->is_qpic)
++      if (!nandc->props->nandc_part_of_qpic)
+               nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+-      if (!nandc->props->qpic_v2)
++      if (!nandc->props->qpic_version2)
+               nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
+                           NAND_DEV_CMD_VLD_VAL);
+       /* enable ADM or BAM DMA */
+-      if (nandc->props->is_bam) {
++      if (nandc->props->supports_bam) {
+               nand_ctrl = nandc_read(nandc, NAND_CTRL);
+               /*
+@@ -3176,7 +3128,7 @@ static int qcom_nandc_setup(struct qcom_
+       }
+       /* save the original values of these registers */
+-      if (!nandc->props->qpic_v2) {
++      if (!nandc->props->qpic_version2) {
+               nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
+               nandc->vld = NAND_DEV_CMD_VLD_VAL;
+       }
+@@ -3349,7 +3301,7 @@ static int qcom_nandc_parse_dt(struct pl
+       struct device_node *np = nandc->dev->of_node;
+       int ret;
+-      if (!nandc->props->is_bam) {
++      if (!nandc->props->supports_bam) {
+               ret = of_property_read_u32(np, "qcom,cmd-crci",
+                                          &nandc->cmd_crci);
+               if (ret) {
+@@ -3474,30 +3426,30 @@ static void qcom_nandc_remove(struct pla
+ static const struct qcom_nandc_props ipq806x_nandc_props = {
+       .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
+-      .is_bam = false,
++      .supports_bam = false,
+       .use_codeword_fixup = true,
+       .dev_cmd_reg_start = 0x0,
+ };
+ static const struct qcom_nandc_props ipq4019_nandc_props = {
+       .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+-      .is_bam = true,
+-      .is_qpic = true,
++      .supports_bam = true,
++      .nandc_part_of_qpic = true,
+       .dev_cmd_reg_start = 0x0,
+ };
+ static const struct qcom_nandc_props ipq8074_nandc_props = {
+       .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+-      .is_bam = true,
+-      .is_qpic = true,
++      .supports_bam = true,
++      .nandc_part_of_qpic = true,
+       .dev_cmd_reg_start = 0x7000,
+ };
+ static const struct qcom_nandc_props sdx55_nandc_props = {
+       .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+-      .is_bam = true,
+-      .is_qpic = true,
+-      .qpic_v2 = true,
++      .supports_bam = true,
++      .nandc_part_of_qpic = true,
++      .qpic_version2 = true,
+       .dev_cmd_reg_start = 0x7000,
+ };
diff --git a/target/linux/generic/backport-6.6/413-02-v6.14-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch b/target/linux/generic/backport-6.6/413-02-v6.14-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch
new file mode 100644 (file)
index 0000000..078a56c
--- /dev/null
@@ -0,0 +1,880 @@
+From 1d479f5b345e0c3650fec4dddeef9fc6fab30c8b Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Wed, 20 Nov 2024 14:45:01 +0530
+Subject: [PATCH 2/4] mtd: rawnand: qcom: Add qcom prefix to common api
+
+Add qcom prefix to all the api which will be commonly
+used by spi nand driver and raw nand driver.
+
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+---
+ drivers/mtd/nand/raw/qcom_nandc.c | 320 +++++++++++++++---------------
+ 1 file changed, 160 insertions(+), 160 deletions(-)
+
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -53,7 +53,7 @@
+ #define       NAND_READ_LOCATION_LAST_CW_2    0xf48
+ #define       NAND_READ_LOCATION_LAST_CW_3    0xf4c
+-/* dummy register offsets, used by write_reg_dma */
++/* dummy register offsets, used by qcom_write_reg_dma */
+ #define       NAND_DEV_CMD1_RESTORE           0xdead
+ #define       NAND_DEV_CMD_VLD_RESTORE        0xbeef
+@@ -211,7 +211,7 @@
+ /*
+  * Flags used in DMA descriptor preparation helper functions
+- * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
++ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
+  */
+ /* Don't set the EOT in current tx BAM sgl */
+ #define NAND_BAM_NO_EOT                       BIT(0)
+@@ -550,7 +550,7 @@ struct qcom_nandc_props {
+ };
+ /* Frees the BAM transaction memory */
+-static void free_bam_transaction(struct qcom_nand_controller *nandc)
++static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
+ {
+       struct bam_transaction *bam_txn = nandc->bam_txn;
+@@ -559,7 +559,7 @@ static void free_bam_transaction(struct
+ /* Allocates and Initializes the BAM transaction */
+ static struct bam_transaction *
+-alloc_bam_transaction(struct qcom_nand_controller *nandc)
++qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
+ {
+       struct bam_transaction *bam_txn;
+       size_t bam_txn_size;
+@@ -595,7 +595,7 @@ alloc_bam_transaction(struct qcom_nand_c
+ }
+ /* Clears the BAM transaction indexes */
+-static void clear_bam_transaction(struct qcom_nand_controller *nandc)
++static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
+ {
+       struct bam_transaction *bam_txn = nandc->bam_txn;
+@@ -614,7 +614,7 @@ static void clear_bam_transaction(struct
+ }
+ /* Callback for DMA descriptor completion */
+-static void qpic_bam_dma_done(void *data)
++static void qcom_qpic_bam_dma_done(void *data)
+ {
+       struct bam_transaction *bam_txn = data;
+@@ -644,7 +644,7 @@ static void nandc_write(struct qcom_nand
+       iowrite32(val, nandc->base + offset);
+ }
+-static void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
++static void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+ {
+       if (!nandc->props->supports_bam)
+               return;
+@@ -824,9 +824,9 @@ static void update_rw_regs(struct qcom_n
+  * for BAM. This descriptor will be added in the NAND DMA descriptor queue
+  * which will be submitted to DMA engine.
+  */
+-static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+-                                struct dma_chan *chan,
+-                                unsigned long flags)
++static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
++                                     struct dma_chan *chan,
++                                     unsigned long flags)
+ {
+       struct desc_info *desc;
+       struct scatterlist *sgl;
+@@ -903,9 +903,9 @@ static int prepare_bam_async_desc(struct
+  * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
+  * after the current command element.
+  */
+-static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+-                               int reg_off, const void *vaddr,
+-                               int size, unsigned int flags)
++static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
++                                    int reg_off, const void *vaddr,
++                                    int size, unsigned int flags)
+ {
+       int bam_ce_size;
+       int i, ret;
+@@ -943,9 +943,9 @@ static int prep_bam_dma_desc_cmd(struct
+               bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+               if (flags & NAND_BAM_NWD) {
+-                      ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+-                                                   DMA_PREP_FENCE |
+-                                                   DMA_PREP_CMD);
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
++                                                        DMA_PREP_FENCE |
++                                                        DMA_PREP_CMD);
+                       if (ret)
+                               return ret;
+               }
+@@ -958,9 +958,8 @@ static int prep_bam_dma_desc_cmd(struct
+  * Prepares the data descriptor for BAM DMA which will be used for NAND
+  * data reads and writes.
+  */
+-static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+-                                const void *vaddr,
+-                                int size, unsigned int flags)
++static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
++                                     const void *vaddr, int size, unsigned int flags)
+ {
+       int ret;
+       struct bam_transaction *bam_txn = nandc->bam_txn;
+@@ -979,8 +978,8 @@ static int prep_bam_dma_desc_data(struct
+                * is not set, form the DMA descriptor
+                */
+               if (!(flags & NAND_BAM_NO_EOT)) {
+-                      ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
+-                                                   DMA_PREP_INTERRUPT);
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
++                                                        DMA_PREP_INTERRUPT);
+                       if (ret)
+                               return ret;
+               }
+@@ -989,9 +988,9 @@ static int prep_bam_dma_desc_data(struct
+       return 0;
+ }
+-static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+-                           int reg_off, const void *vaddr, int size,
+-                           bool flow_control)
++static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
++                                int reg_off, const void *vaddr, int size,
++                                bool flow_control)
+ {
+       struct desc_info *desc;
+       struct dma_async_tx_descriptor *dma_desc;
+@@ -1069,15 +1068,15 @@ err:
+ }
+ /*
+- * read_reg_dma:      prepares a descriptor to read a given number of
++ * qcom_read_reg_dma: prepares a descriptor to read a given number of
+  *                    contiguous registers to the reg_read_buf pointer
+  *
+  * @first:            offset of the first register in the contiguous block
+  * @num_regs:         number of registers to read
+  * @flags:            flags to control DMA descriptor preparation
+  */
+-static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
+-                      int num_regs, unsigned int flags)
++static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
++                           int num_regs, unsigned int flags)
+ {
+       bool flow_control = false;
+       void *vaddr;
+@@ -1089,18 +1088,18 @@ static int read_reg_dma(struct qcom_nand
+               first = dev_cmd_reg_addr(nandc, first);
+       if (nandc->props->supports_bam)
+-              return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
++              return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+                                            num_regs, flags);
+       if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+               flow_control = true;
+-      return prep_adm_dma_desc(nandc, true, first, vaddr,
++      return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
+                                num_regs * sizeof(u32), flow_control);
+ }
+ /*
+- * write_reg_dma:     prepares a descriptor to write a given number of
++ * qcom_write_reg_dma:        prepares a descriptor to write a given number of
+  *                    contiguous registers
+  *
+  * @vaddr:            contiguous memory from where register value will
+@@ -1109,8 +1108,8 @@ static int read_reg_dma(struct qcom_nand
+  * @num_regs:         number of registers to write
+  * @flags:            flags to control DMA descriptor preparation
+  */
+-static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
+-                       int first, int num_regs, unsigned int flags)
++static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
++                            int first, int num_regs, unsigned int flags)
+ {
+       bool flow_control = false;
+@@ -1124,18 +1123,18 @@ static int write_reg_dma(struct qcom_nan
+               first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+       if (nandc->props->supports_bam)
+-              return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
++              return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+                                            num_regs, flags);
+       if (first == NAND_FLASH_CMD)
+               flow_control = true;
+-      return prep_adm_dma_desc(nandc, false, first, vaddr,
++      return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
+                                num_regs * sizeof(u32), flow_control);
+ }
+ /*
+- * read_data_dma:     prepares a DMA descriptor to transfer data from the
++ * qcom_read_data_dma:        prepares a DMA descriptor to transfer data from the
+  *                    controller's internal buffer to the buffer 'vaddr'
+  *
+  * @reg_off:          offset within the controller's data buffer
+@@ -1143,17 +1142,17 @@ static int write_reg_dma(struct qcom_nan
+  * @size:             DMA transaction size in bytes
+  * @flags:            flags to control DMA descriptor preparation
+  */
+-static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+-                       const u8 *vaddr, int size, unsigned int flags)
++static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
++                            const u8 *vaddr, int size, unsigned int flags)
+ {
+       if (nandc->props->supports_bam)
+-              return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
++              return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+-      return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
++      return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+ }
+ /*
+- * write_data_dma:    prepares a DMA descriptor to transfer data from
++ * qcom_write_data_dma:       prepares a DMA descriptor to transfer data from
+  *                    'vaddr' to the controller's internal buffer
+  *
+  * @reg_off:          offset within the controller's data buffer
+@@ -1161,13 +1160,13 @@ static int read_data_dma(struct qcom_nan
+  * @size:             DMA transaction size in bytes
+  * @flags:            flags to control DMA descriptor preparation
+  */
+-static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+-                        const u8 *vaddr, int size, unsigned int flags)
++static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
++                             const u8 *vaddr, int size, unsigned int flags)
+ {
+       if (nandc->props->supports_bam)
+-              return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
++              return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+-      return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
++      return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+ }
+ /*
+@@ -1178,14 +1177,14 @@ static void config_nand_page_read(struct
+ {
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+-      write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+-      write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++      qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
++      qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+       if (!nandc->props->qpic_version2)
+-              write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+-      write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
+-                    NAND_ERASED_CW_DETECT_CFG, 1, 0);
+-      write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
+-                    NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
++              qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
++      qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
++                         NAND_ERASED_CW_DETECT_CFG, 1, 0);
++      qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
++                         NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+ }
+ /*
+@@ -1204,17 +1203,17 @@ config_nand_cw_read(struct nand_chip *ch
+               reg = &nandc->regs->read_location_last0;
+       if (nandc->props->supports_bam)
+-              write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
++              qcom_write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+       if (use_ecc) {
+-              read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+-              read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+-                           NAND_BAM_NEXT_SGL);
++              qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
++              qcom_read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
++                                NAND_BAM_NEXT_SGL);
+       } else {
+-              read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++              qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+       }
+ }
+@@ -1238,11 +1237,11 @@ static void config_nand_page_write(struc
+ {
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+-      write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+-      write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++      qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
++      qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+       if (!nandc->props->qpic_version2)
+-              write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
+-                            NAND_BAM_NEXT_SGL);
++              qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
++                                 NAND_BAM_NEXT_SGL);
+ }
+ /*
+@@ -1253,17 +1252,18 @@ static void config_nand_cw_write(struct
+ {
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+-      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+-      read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++      qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
+-      write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
++      qcom_write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
++                         NAND_BAM_NEXT_SGL);
+ }
+ /* helpers to submit/free our list of dma descriptors */
+-static int submit_descs(struct qcom_nand_controller *nandc)
++static int qcom_submit_descs(struct qcom_nand_controller *nandc)
+ {
+       struct desc_info *desc, *n;
+       dma_cookie_t cookie = 0;
+@@ -1272,21 +1272,21 @@ static int submit_descs(struct qcom_nand
+       if (nandc->props->supports_bam) {
+               if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+-                      ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+                       if (ret)
+                               goto err_unmap_free_desc;
+               }
+               if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
+-                      ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
+-                                                 DMA_PREP_INTERRUPT);
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
++                                                        DMA_PREP_INTERRUPT);
+                       if (ret)
+                               goto err_unmap_free_desc;
+               }
+               if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
+-                      ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+-                                                 DMA_PREP_CMD);
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
++                                                        DMA_PREP_CMD);
+                       if (ret)
+                               goto err_unmap_free_desc;
+               }
+@@ -1296,7 +1296,7 @@ static int submit_descs(struct qcom_nand
+               cookie = dmaengine_submit(desc->dma_desc);
+       if (nandc->props->supports_bam) {
+-              bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
++              bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
+               bam_txn->last_cmd_desc->callback_param = bam_txn;
+               dma_async_issue_pending(nandc->tx_chan);
+@@ -1314,7 +1314,7 @@ static int submit_descs(struct qcom_nand
+ err_unmap_free_desc:
+       /*
+        * Unmap the dma sg_list and free the desc allocated by both
+-       * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
++       * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
+        */
+       list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+               list_del(&desc->node);
+@@ -1333,10 +1333,10 @@ err_unmap_free_desc:
+ }
+ /* reset the register read buffer for next NAND operation */
+-static void clear_read_regs(struct qcom_nand_controller *nandc)
++static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
+ {
+       nandc->reg_read_pos = 0;
+-      nandc_dev_to_mem(nandc, false);
++      qcom_nandc_dev_to_mem(nandc, false);
+ }
+ /*
+@@ -1400,7 +1400,7 @@ static int check_flash_errors(struct qco
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+       int i;
+-      nandc_dev_to_mem(nandc, true);
++      qcom_nandc_dev_to_mem(nandc, true);
+       for (i = 0; i < cw_cnt; i++) {
+               u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
+@@ -1427,13 +1427,13 @@ qcom_nandc_read_cw_raw(struct mtd_info *
+       nand_read_page_op(chip, page, 0, NULL, 0);
+       nandc->buf_count = 0;
+       nandc->buf_start = 0;
+-      clear_read_regs(nandc);
++      qcom_clear_read_regs(nandc);
+       host->use_ecc = false;
+       if (nandc->props->qpic_version2)
+               raw_cw = ecc->steps - 1;
+-      clear_bam_transaction(nandc);
++      qcom_clear_bam_transaction(nandc);
+       set_address(host, host->cw_size * cw, page);
+       update_rw_regs(host, 1, true, raw_cw);
+       config_nand_page_read(chip);
+@@ -1466,18 +1466,18 @@ qcom_nandc_read_cw_raw(struct mtd_info *
+       config_nand_cw_read(chip, false, raw_cw);
+-      read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
++      qcom_read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
+       reg_off += data_size1;
+-      read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
++      qcom_read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
+       reg_off += oob_size1;
+-      read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
++      qcom_read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
+       reg_off += data_size2;
+-      read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
++      qcom_read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
+               return ret;
+@@ -1575,7 +1575,7 @@ static int parse_read_errors(struct qcom
+       u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
+       buf = (struct read_stats *)nandc->reg_read_buf;
+-      nandc_dev_to_mem(nandc, true);
++      qcom_nandc_dev_to_mem(nandc, true);
+       for (i = 0; i < ecc->steps; i++, buf++) {
+               u32 flash, buffer, erased_cw;
+@@ -1704,8 +1704,8 @@ static int read_page_ecc(struct qcom_nan
+               config_nand_cw_read(chip, true, i);
+               if (data_buf)
+-                      read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
+-                                    data_size, 0);
++                      qcom_read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
++                                         data_size, 0);
+               /*
+                * when ecc is enabled, the controller doesn't read the real
+@@ -1720,8 +1720,8 @@ static int read_page_ecc(struct qcom_nan
+                       for (j = 0; j < host->bbm_size; j++)
+                               *oob_buf++ = 0xff;
+-                      read_data_dma(nandc, FLASH_BUF_ACC + data_size,
+-                                    oob_buf, oob_size, 0);
++                      qcom_read_data_dma(nandc, FLASH_BUF_ACC + data_size,
++                                         oob_buf, oob_size, 0);
+               }
+               if (data_buf)
+@@ -1730,7 +1730,7 @@ static int read_page_ecc(struct qcom_nan
+                       oob_buf += oob_size;
+       }
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure to read page/oob\n");
+               return ret;
+@@ -1751,7 +1751,7 @@ static int copy_last_cw(struct qcom_nand
+       int size;
+       int ret;
+-      clear_read_regs(nandc);
++      qcom_clear_read_regs(nandc);
+       size = host->use_ecc ? host->cw_data : host->cw_size;
+@@ -1763,9 +1763,9 @@ static int copy_last_cw(struct qcom_nand
+       config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
+-      read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
++      qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret)
+               dev_err(nandc->dev, "failed to copy last codeword\n");
+@@ -1851,14 +1851,14 @@ static int qcom_nandc_read_page(struct n
+       nandc->buf_count = 0;
+       nandc->buf_start = 0;
+       host->use_ecc = true;
+-      clear_read_regs(nandc);
++      qcom_clear_read_regs(nandc);
+       set_address(host, 0, page);
+       update_rw_regs(host, ecc->steps, true, 0);
+       data_buf = buf;
+       oob_buf = oob_required ? chip->oob_poi : NULL;
+-      clear_bam_transaction(nandc);
++      qcom_clear_bam_transaction(nandc);
+       return read_page_ecc(host, data_buf, oob_buf, page);
+ }
+@@ -1899,8 +1899,8 @@ static int qcom_nandc_read_oob(struct na
+       if (host->nr_boot_partitions)
+               qcom_nandc_codeword_fixup(host, page);
+-      clear_read_regs(nandc);
+-      clear_bam_transaction(nandc);
++      qcom_clear_read_regs(nandc);
++      qcom_clear_bam_transaction(nandc);
+       host->use_ecc = true;
+       set_address(host, 0, page);
+@@ -1927,8 +1927,8 @@ static int qcom_nandc_write_page(struct
+       set_address(host, 0, page);
+       nandc->buf_count = 0;
+       nandc->buf_start = 0;
+-      clear_read_regs(nandc);
+-      clear_bam_transaction(nandc);
++      qcom_clear_read_regs(nandc);
++      qcom_clear_bam_transaction(nandc);
+       data_buf = (u8 *)buf;
+       oob_buf = chip->oob_poi;
+@@ -1949,8 +1949,8 @@ static int qcom_nandc_write_page(struct
+                       oob_size = ecc->bytes;
+               }
+-              write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
+-                             i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
++              qcom_write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
++                                  i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
+               /*
+                * when ECC is enabled, we don't really need to write anything
+@@ -1962,8 +1962,8 @@ static int qcom_nandc_write_page(struct
+               if (qcom_nandc_is_last_cw(ecc, i)) {
+                       oob_buf += host->bbm_size;
+-                      write_data_dma(nandc, FLASH_BUF_ACC + data_size,
+-                                     oob_buf, oob_size, 0);
++                      qcom_write_data_dma(nandc, FLASH_BUF_ACC + data_size,
++                                          oob_buf, oob_size, 0);
+               }
+               config_nand_cw_write(chip);
+@@ -1972,7 +1972,7 @@ static int qcom_nandc_write_page(struct
+               oob_buf += oob_size;
+       }
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure to write page\n");
+               return ret;
+@@ -1997,8 +1997,8 @@ static int qcom_nandc_write_page_raw(str
+               qcom_nandc_codeword_fixup(host, page);
+       nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+-      clear_read_regs(nandc);
+-      clear_bam_transaction(nandc);
++      qcom_clear_read_regs(nandc);
++      qcom_clear_bam_transaction(nandc);
+       data_buf = (u8 *)buf;
+       oob_buf = chip->oob_poi;
+@@ -2024,28 +2024,28 @@ static int qcom_nandc_write_page_raw(str
+                       oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+               }
+-              write_data_dma(nandc, reg_off, data_buf, data_size1,
+-                             NAND_BAM_NO_EOT);
++              qcom_write_data_dma(nandc, reg_off, data_buf, data_size1,
++                                  NAND_BAM_NO_EOT);
+               reg_off += data_size1;
+               data_buf += data_size1;
+-              write_data_dma(nandc, reg_off, oob_buf, oob_size1,
+-                             NAND_BAM_NO_EOT);
++              qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size1,
++                                  NAND_BAM_NO_EOT);
+               reg_off += oob_size1;
+               oob_buf += oob_size1;
+-              write_data_dma(nandc, reg_off, data_buf, data_size2,
+-                             NAND_BAM_NO_EOT);
++              qcom_write_data_dma(nandc, reg_off, data_buf, data_size2,
++                                  NAND_BAM_NO_EOT);
+               reg_off += data_size2;
+               data_buf += data_size2;
+-              write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
++              qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
+               oob_buf += oob_size2;
+               config_nand_cw_write(chip);
+       }
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure to write raw page\n");
+               return ret;
+@@ -2075,7 +2075,7 @@ static int qcom_nandc_write_oob(struct n
+               qcom_nandc_codeword_fixup(host, page);
+       host->use_ecc = true;
+-      clear_bam_transaction(nandc);
++      qcom_clear_bam_transaction(nandc);
+       /* calculate the data and oob size for the last codeword/step */
+       data_size = ecc->size - ((ecc->steps - 1) << 2);
+@@ -2090,11 +2090,11 @@ static int qcom_nandc_write_oob(struct n
+       update_rw_regs(host, 1, false, 0);
+       config_nand_page_write(chip);
+-      write_data_dma(nandc, FLASH_BUF_ACC,
+-                     nandc->data_buffer, data_size + oob_size, 0);
++      qcom_write_data_dma(nandc, FLASH_BUF_ACC,
++                          nandc->data_buffer, data_size + oob_size, 0);
+       config_nand_cw_write(chip);
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure to write oob\n");
+               return ret;
+@@ -2121,7 +2121,7 @@ static int qcom_nandc_block_bad(struct n
+        */
+       host->use_ecc = false;
+-      clear_bam_transaction(nandc);
++      qcom_clear_bam_transaction(nandc);
+       ret = copy_last_cw(host, page);
+       if (ret)
+               goto err;
+@@ -2148,8 +2148,8 @@ static int qcom_nandc_block_markbad(stru
+       struct nand_ecc_ctrl *ecc = &chip->ecc;
+       int page, ret;
+-      clear_read_regs(nandc);
+-      clear_bam_transaction(nandc);
++      qcom_clear_read_regs(nandc);
++      qcom_clear_bam_transaction(nandc);
+       /*
+        * to mark the BBM as bad, we flash the entire last codeword with 0s.
+@@ -2166,11 +2166,11 @@ static int qcom_nandc_block_markbad(stru
+       update_rw_regs(host, 1, false, ecc->steps - 1);
+       config_nand_page_write(chip);
+-      write_data_dma(nandc, FLASH_BUF_ACC,
+-                     nandc->data_buffer, host->cw_size, 0);
++      qcom_write_data_dma(nandc, FLASH_BUF_ACC,
++                          nandc->data_buffer, host->cw_size, 0);
+       config_nand_cw_write(chip);
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure to update BBM\n");
+               return ret;
+@@ -2410,14 +2410,14 @@ static int qcom_nand_attach_chip(struct
+       mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
+       /* Free the initially allocated BAM transaction for reading the ONFI params */
+       if (nandc->props->supports_bam)
+-              free_bam_transaction(nandc);
++              qcom_free_bam_transaction(nandc);
+       nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
+                                    cwperpage);
+       /* Now allocate the BAM transaction based on updated max_cwperpage */
+       if (nandc->props->supports_bam) {
+-              nandc->bam_txn = alloc_bam_transaction(nandc);
++              nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
+               if (!nandc->bam_txn) {
+                       dev_err(nandc->dev,
+                               "failed to allocate bam transaction\n");
+@@ -2617,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nan
+       unsigned long start = jiffies + msecs_to_jiffies(time_ms);
+       u32 flash;
+-      nandc_dev_to_mem(nandc, true);
++      qcom_nandc_dev_to_mem(nandc, true);
+       do {
+               flash = le32_to_cpu(nandc->reg_read_buf[0]);
+@@ -2657,23 +2657,23 @@ static int qcom_read_status_exec(struct
+       nandc->buf_start = 0;
+       host->use_ecc = false;
+-      clear_read_regs(nandc);
+-      clear_bam_transaction(nandc);
++      qcom_clear_read_regs(nandc);
++      qcom_clear_bam_transaction(nandc);
+       nandc->regs->cmd = q_op.cmd_reg;
+       nandc->regs->exec = cpu_to_le32(1);
+-      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+-      read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure in submitting status descriptor\n");
+               goto err_out;
+       }
+-      nandc_dev_to_mem(nandc, true);
++      qcom_nandc_dev_to_mem(nandc, true);
+       for (i = 0; i < num_cw; i++) {
+               flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
+@@ -2714,8 +2714,8 @@ static int qcom_read_id_type_exec(struct
+       nandc->buf_start = 0;
+       host->use_ecc = false;
+-      clear_read_regs(nandc);
+-      clear_bam_transaction(nandc);
++      qcom_clear_read_regs(nandc);
++      qcom_clear_bam_transaction(nandc);
+       nandc->regs->cmd = q_op.cmd_reg;
+       nandc->regs->addr0 = q_op.addr1_reg;
+@@ -2723,12 +2723,12 @@ static int qcom_read_id_type_exec(struct
+       nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
+       nandc->regs->exec = cpu_to_le32(1);
+-      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+-      read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
++      qcom_read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure in submitting read id descriptor\n");
+               goto err_out;
+@@ -2738,7 +2738,7 @@ static int qcom_read_id_type_exec(struct
+       op_id = q_op.data_instr_idx;
+       len = nand_subop_get_data_len(subop, op_id);
+-      nandc_dev_to_mem(nandc, true);
++      qcom_nandc_dev_to_mem(nandc, true);
+       memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
+ err_out:
+@@ -2774,20 +2774,20 @@ static int qcom_misc_cmd_type_exec(struc
+       nandc->buf_start = 0;
+       host->use_ecc = false;
+-      clear_read_regs(nandc);
+-      clear_bam_transaction(nandc);
++      qcom_clear_read_regs(nandc);
++      qcom_clear_bam_transaction(nandc);
+       nandc->regs->cmd = q_op.cmd_reg;
+       nandc->regs->exec = cpu_to_le32(1);
+-      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+       if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
+-              write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
++              qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+-      read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure in submitting misc descriptor\n");
+               goto err_out;
+@@ -2820,8 +2820,8 @@ static int qcom_param_page_type_exec(str
+       nandc->buf_count = 0;
+       nandc->buf_start = 0;
+       host->use_ecc = false;
+-      clear_read_regs(nandc);
+-      clear_bam_transaction(nandc);
++      qcom_clear_read_regs(nandc);
++      qcom_clear_bam_transaction(nandc);
+       nandc->regs->cmd = q_op.cmd_reg;
+       nandc->regs->addr0 = 0;
+@@ -2864,8 +2864,8 @@ static int qcom_param_page_type_exec(str
+       nandc_set_read_loc(chip, 0, 0, 0, len, 1);
+       if (!nandc->props->qpic_version2) {
+-              write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
+-              write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
++              qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
++              qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+       }
+       nandc->buf_count = len;
+@@ -2873,17 +2873,17 @@ static int qcom_param_page_type_exec(str
+       config_nand_single_cw_page_read(chip, false, 0);
+-      read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+-                    nandc->buf_count, 0);
++      qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
++                         nandc->buf_count, 0);
+       /* restore CMD1 and VLD regs */
+       if (!nandc->props->qpic_version2) {
+-              write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
+-              write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
+-                            NAND_BAM_NEXT_SGL);
++              qcom_write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
++              qcom_write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
++                                 NAND_BAM_NEXT_SGL);
+       }
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure in submitting param page descriptor\n");
+               goto err_out;
+@@ -3067,7 +3067,7 @@ static int qcom_nandc_alloc(struct qcom_
+                * maximum codeword size
+                */
+               nandc->max_cwperpage = 1;
+-              nandc->bam_txn = alloc_bam_transaction(nandc);
++              nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
+               if (!nandc->bam_txn) {
+                       dev_err(nandc->dev,
+                               "failed to allocate bam transaction\n");
diff --git a/target/linux/generic/backport-6.6/413-03-v6.14-mtd-nand-Add-qpic_common-API-file.patch b/target/linux/generic/backport-6.6/413-03-v6.14-mtd-nand-Add-qpic_common-API-file.patch
new file mode 100644 (file)
index 0000000..2621146
--- /dev/null
@@ -0,0 +1,2436 @@
+From fdf3ee5c6e5278dab4f60b998b47ed2d510bf80f Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Wed, 20 Nov 2024 14:45:02 +0530
+Subject: [PATCH 3/4] mtd: nand: Add qpic_common API file
+
+Add qpic_common.c file which holds all the common
+qpic APIs which will be used by both qpic raw nand
+driver and qpic spi nand driver.
+
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+---
+ drivers/mtd/nand/Makefile            |    2 +-
+ drivers/mtd/nand/qpic_common.c       |  759 ++++++++++++++++++
+ drivers/mtd/nand/raw/qcom_nandc.c    | 1092 +-------------------------
+ include/linux/mtd/nand-qpic-common.h |  468 +++++++++++
+ 4 files changed, 1240 insertions(+), 1081 deletions(-)
+ create mode 100644 drivers/mtd/nand/qpic_common.c
+ create mode 100644 include/linux/mtd/nand-qpic-common.h
+
+--- a/drivers/mtd/nand/Makefile
++++ b/drivers/mtd/nand/Makefile
+@@ -3,7 +3,7 @@
+ nandcore-objs := core.o bbt.o
+ obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
+ obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
+-
++obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
+ obj-y += onenand/
+ obj-y += raw/
+ obj-y += spi/
+--- /dev/null
++++ b/drivers/mtd/nand/qpic_common.c
+@@ -0,0 +1,759 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
++ */
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/dma/qcom_adm.h>
++#include <linux/dma/qcom_bam_dma.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/mtd/nand-qpic-common.h>
++
++/**
++ * qcom_free_bam_transaction() - Frees the BAM transaction memory
++ * @nandc: qpic nand controller
++ *
++ * This function frees the bam transaction memory
++ */
++void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
++{
++      struct bam_transaction *bam_txn = nandc->bam_txn;
++
++      kfree(bam_txn);
++}
++EXPORT_SYMBOL(qcom_free_bam_transaction);
++
++/**
++ * qcom_alloc_bam_transaction() - allocate BAM transaction
++ * @nandc: qpic nand controller
++ *
++ * This function will allocate and initialize the BAM transaction structure
++ */
++struct bam_transaction *
++qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
++{
++      struct bam_transaction *bam_txn;
++      size_t bam_txn_size;
++      unsigned int num_cw = nandc->max_cwperpage;
++      void *bam_txn_buf;
++
++      bam_txn_size =
++              sizeof(*bam_txn) + num_cw *
++              ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
++              (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
++              (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
++
++      bam_txn_buf = kzalloc(bam_txn_size, GFP_KERNEL);
++      if (!bam_txn_buf)
++              return NULL;
++
++      bam_txn = bam_txn_buf;
++      bam_txn_buf += sizeof(*bam_txn);
++
++      bam_txn->bam_ce = bam_txn_buf;
++      bam_txn_buf +=
++              sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
++
++      bam_txn->cmd_sgl = bam_txn_buf;
++      bam_txn_buf +=
++              sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
++
++      bam_txn->data_sgl = bam_txn_buf;
++
++      init_completion(&bam_txn->txn_done);
++
++      return bam_txn;
++}
++EXPORT_SYMBOL(qcom_alloc_bam_transaction);
++
++/**
++ * qcom_clear_bam_transaction() - Clears the BAM transaction
++ * @nandc: qpic nand controller
++ *
++ * This function will clear the BAM transaction indexes.
++ */
++void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
++{
++      struct bam_transaction *bam_txn = nandc->bam_txn;
++
++      if (!nandc->props->supports_bam)
++              return;
++
++      memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
++      bam_txn->last_data_desc = NULL;
++
++      sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
++                    QPIC_PER_CW_CMD_SGL);
++      sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
++                    QPIC_PER_CW_DATA_SGL);
++
++      reinit_completion(&bam_txn->txn_done);
++}
++EXPORT_SYMBOL(qcom_clear_bam_transaction);
++
++/**
++ * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion
++ * @data: data pointer
++ *
++ * This function is a callback for DMA descriptor completion
++ */
++void qcom_qpic_bam_dma_done(void *data)
++{
++      struct bam_transaction *bam_txn = data;
++
++      complete(&bam_txn->txn_done);
++}
++EXPORT_SYMBOL(qcom_qpic_bam_dma_done);
++
++/**
++ * qcom_nandc_dev_to_mem() - DMA sync for the CPU or the device
++ * @nandc: qpic nand controller
++ * @is_cpu: cpu or device
++ *
++ * This function will perform a DMA sync for the CPU or the device
++ */
++inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
++{
++      if (!nandc->props->supports_bam)
++              return;
++
++      if (is_cpu)
++              dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
++                                      MAX_REG_RD *
++                                      sizeof(*nandc->reg_read_buf),
++                                      DMA_FROM_DEVICE);
++      else
++              dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
++                                         MAX_REG_RD *
++                                         sizeof(*nandc->reg_read_buf),
++                                         DMA_FROM_DEVICE);
++}
++EXPORT_SYMBOL(qcom_nandc_dev_to_mem);
++
++/**
++ * qcom_prepare_bam_async_desc() - Prepare DMA descriptor
++ * @nandc: qpic nand controller
++ * @chan: dma channel
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function maps the scatter gather list for DMA transfer and forms the
++ * DMA descriptor for BAM. This descriptor will be added to the NAND DMA
++ * descriptor queue which will be submitted to the DMA engine.
++ */
++int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
++                              struct dma_chan *chan, unsigned long flags)
++{
++      struct desc_info *desc;
++      struct scatterlist *sgl;
++      unsigned int sgl_cnt;
++      int ret;
++      struct bam_transaction *bam_txn = nandc->bam_txn;
++      enum dma_transfer_direction dir_eng;
++      struct dma_async_tx_descriptor *dma_desc;
++
++      desc = kzalloc(sizeof(*desc), GFP_KERNEL);
++      if (!desc)
++              return -ENOMEM;
++
++      if (chan == nandc->cmd_chan) {
++              sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
++              sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
++              bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
++              dir_eng = DMA_MEM_TO_DEV;
++              desc->dir = DMA_TO_DEVICE;
++      } else if (chan == nandc->tx_chan) {
++              sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
++              sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
++              bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
++              dir_eng = DMA_MEM_TO_DEV;
++              desc->dir = DMA_TO_DEVICE;
++      } else {
++              sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
++              sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
++              bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
++              dir_eng = DMA_DEV_TO_MEM;
++              desc->dir = DMA_FROM_DEVICE;
++      }
++
++      sg_mark_end(sgl + sgl_cnt - 1);
++      ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
++      if (ret == 0) {
++              dev_err(nandc->dev, "failure in mapping desc\n");
++              kfree(desc);
++              return -ENOMEM;
++      }
++
++      desc->sgl_cnt = sgl_cnt;
++      desc->bam_sgl = sgl;
++
++      dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
++                                         flags);
++
++      if (!dma_desc) {
++              dev_err(nandc->dev, "failure in prep desc\n");
++              dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
++              kfree(desc);
++              return -EINVAL;
++      }
++
++      desc->dma_desc = dma_desc;
++
++      /* update last data/command descriptor */
++      if (chan == nandc->cmd_chan)
++              bam_txn->last_cmd_desc = dma_desc;
++      else
++              bam_txn->last_data_desc = dma_desc;
++
++      list_add_tail(&desc->node, &nandc->desc_list);
++
++      return 0;
++}
++EXPORT_SYMBOL(qcom_prepare_bam_async_desc);
++
++/**
++ * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA
++ * @nandc: qpic nand controller
++ * @read: read or write type
++ * @reg_off: offset within the controller's data buffer
++ * @vaddr: virtual address of the buffer we want to write to
++ * @size: DMA transaction size in bytes
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function prepares the command descriptor for BAM DMA
++ * which will be used for NAND register reads and writes.
++ */
++int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
++                             int reg_off, const void *vaddr,
++                             int size, unsigned int flags)
++{
++      int bam_ce_size;
++      int i, ret;
++      struct bam_cmd_element *bam_ce_buffer;
++      struct bam_transaction *bam_txn = nandc->bam_txn;
++
++      bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
++
++      /* fill the command desc */
++      for (i = 0; i < size; i++) {
++              if (read)
++                      bam_prep_ce(&bam_ce_buffer[i],
++                                  nandc_reg_phys(nandc, reg_off + 4 * i),
++                                  BAM_READ_COMMAND,
++                                  reg_buf_dma_addr(nandc,
++                                                   (__le32 *)vaddr + i));
++              else
++                      bam_prep_ce_le32(&bam_ce_buffer[i],
++                                       nandc_reg_phys(nandc, reg_off + 4 * i),
++                                       BAM_WRITE_COMMAND,
++                                       *((__le32 *)vaddr + i));
++      }
++
++      bam_txn->bam_ce_pos += size;
++
++      /* use the separate sgl after this command */
++      if (flags & NAND_BAM_NEXT_SGL) {
++              bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
++              bam_ce_size = (bam_txn->bam_ce_pos -
++                              bam_txn->bam_ce_start) *
++                              sizeof(struct bam_cmd_element);
++              sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
++                         bam_ce_buffer, bam_ce_size);
++              bam_txn->cmd_sgl_pos++;
++              bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
++
++              if (flags & NAND_BAM_NWD) {
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
++                                                        DMA_PREP_FENCE | DMA_PREP_CMD);
++                      if (ret)
++                              return ret;
++              }
++      }
++
++      return 0;
++}
++EXPORT_SYMBOL(qcom_prep_bam_dma_desc_cmd);
++
++/**
++ * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA
++ * @nandc: qpic nand controller
++ * @read: read or write type
++ * @vaddr: virtual address of the buffer we want to write to
++ * @size: DMA transaction size in bytes
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function prepares the data descriptor for BAM DMA which
++ * will be used for NAND data reads and writes.
++ */
++int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
++                              const void *vaddr, int size, unsigned int flags)
++{
++      int ret;
++      struct bam_transaction *bam_txn = nandc->bam_txn;
++
++      if (read) {
++              sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
++                         vaddr, size);
++              bam_txn->rx_sgl_pos++;
++      } else {
++              sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
++                         vaddr, size);
++              bam_txn->tx_sgl_pos++;
++
++              /*
++               * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
++               * is not set, form the DMA descriptor
++               */
++              if (!(flags & NAND_BAM_NO_EOT)) {
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
++                                                        DMA_PREP_INTERRUPT);
++                      if (ret)
++                              return ret;
++              }
++      }
++
++      return 0;
++}
++EXPORT_SYMBOL(qcom_prep_bam_dma_desc_data);
++
++/**
++ * qcom_prep_adm_dma_desc() - Prepare descriptor for adma
++ * @nandc: qpic nand controller
++ * @read: read or write type
++ * @reg_off: offset within the controller's data buffer
++ * @vaddr: virtual address of the buffer we want to write to
++ * @size: adm dma transaction size in bytes
++ * @flow_control: flow controller
++ *
++ * This function will prepare a descriptor for adma
++ */
++int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
++                         int reg_off, const void *vaddr, int size,
++                         bool flow_control)
++{
++      struct qcom_adm_peripheral_config periph_conf = {};
++      struct dma_async_tx_descriptor *dma_desc;
++      struct dma_slave_config slave_conf = {0};
++      enum dma_transfer_direction dir_eng;
++      struct desc_info *desc;
++      struct scatterlist *sgl;
++      int ret;
++
++      desc = kzalloc(sizeof(*desc), GFP_KERNEL);
++      if (!desc)
++              return -ENOMEM;
++
++      sgl = &desc->adm_sgl;
++
++      sg_init_one(sgl, vaddr, size);
++
++      if (read) {
++              dir_eng = DMA_DEV_TO_MEM;
++              desc->dir = DMA_FROM_DEVICE;
++      } else {
++              dir_eng = DMA_MEM_TO_DEV;
++              desc->dir = DMA_TO_DEVICE;
++      }
++
++      ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
++      if (!ret) {
++              ret = -ENOMEM;
++              goto err;
++      }
++
++      slave_conf.device_fc = flow_control;
++      if (read) {
++              slave_conf.src_maxburst = 16;
++              slave_conf.src_addr = nandc->base_dma + reg_off;
++              if (nandc->data_crci) {
++                      periph_conf.crci = nandc->data_crci;
++                      slave_conf.peripheral_config = &periph_conf;
++                      slave_conf.peripheral_size = sizeof(periph_conf);
++              }
++      } else {
++              slave_conf.dst_maxburst = 16;
++              slave_conf.dst_addr = nandc->base_dma + reg_off;
++              if (nandc->cmd_crci) {
++                      periph_conf.crci = nandc->cmd_crci;
++                      slave_conf.peripheral_config = &periph_conf;
++                      slave_conf.peripheral_size = sizeof(periph_conf);
++              }
++      }
++
++      ret = dmaengine_slave_config(nandc->chan, &slave_conf);
++      if (ret) {
++              dev_err(nandc->dev, "failed to configure dma channel\n");
++              goto err;
++      }
++
++      dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
++      if (!dma_desc) {
++              dev_err(nandc->dev, "failed to prepare desc\n");
++              ret = -EINVAL;
++              goto err;
++      }
++
++      desc->dma_desc = dma_desc;
++
++      list_add_tail(&desc->node, &nandc->desc_list);
++
++      return 0;
++err:
++      kfree(desc);
++
++      return ret;
++}
++EXPORT_SYMBOL(qcom_prep_adm_dma_desc);
++
++/**
++ * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer
++ * @nandc: qpic nand controller
++ * @first: offset of the first register in the contiguous block
++ * @num_regs: number of registers to read
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function prepares a descriptor to read a given number of
++ * contiguous registers to the reg_read_buf pointer.
++ */
++int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
++                    int num_regs, unsigned int flags)
++{
++      bool flow_control = false;
++      void *vaddr;
++
++      vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
++      nandc->reg_read_pos += num_regs;
++
++      if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
++              first = dev_cmd_reg_addr(nandc, first);
++
++      if (nandc->props->supports_bam)
++              return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
++                                           num_regs, flags);
++
++      if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
++              flow_control = true;
++
++      return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
++                                    num_regs * sizeof(u32), flow_control);
++}
++EXPORT_SYMBOL(qcom_read_reg_dma);
++
++/**
++ * qcom_write_reg_dma() - write a given number of registers
++ * @nandc: qpic nand controller
++ * @vaddr: contiguous memory from where register value will
++ *       be written
++ * @first: offset of the first register in the contiguous block
++ * @num_regs: number of registers to write
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function prepares a descriptor to write a given number of
++ * contiguous registers
++ */
++int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
++                     int first, int num_regs, unsigned int flags)
++{
++      bool flow_control = false;
++
++      if (first == NAND_EXEC_CMD)
++              flags |= NAND_BAM_NWD;
++
++      if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
++              first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
++
++      if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
++              first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
++
++      if (nandc->props->supports_bam)
++              return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
++                                                num_regs, flags);
++
++      if (first == NAND_FLASH_CMD)
++              flow_control = true;
++
++      return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
++                                    num_regs * sizeof(u32), flow_control);
++}
++EXPORT_SYMBOL(qcom_write_reg_dma);
++
++/**
++ * qcom_read_data_dma() - transfer data
++ * @nandc: qpic nand controller
++ * @reg_off: offset within the controller's data buffer
++ * @vaddr: virtual address of the buffer we want to write to
++ * @size: DMA transaction size in bytes
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function prepares a DMA descriptor to transfer data from the
++ * controller's internal buffer to the buffer 'vaddr'
++ */
++int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
++                     const u8 *vaddr, int size, unsigned int flags)
++{
++      if (nandc->props->supports_bam)
++              return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
++
++      return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
++}
++EXPORT_SYMBOL(qcom_read_data_dma);
++
++/**
++ * qcom_write_data_dma() - transfer data
++ * @nandc: qpic nand controller
++ * @reg_off: offset within the controller's data buffer
++ * @vaddr: virtual address of the buffer we want to read from
++ * @size: DMA transaction size in bytes
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function prepares a DMA descriptor to transfer data from
++ * 'vaddr' to the controller's internal buffer
++ */
++int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
++                      const u8 *vaddr, int size, unsigned int flags)
++{
++      if (nandc->props->supports_bam)
++              return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
++
++      return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
++}
++EXPORT_SYMBOL(qcom_write_data_dma);
++
++/**
++ * qcom_submit_descs() - submit dma descriptor
++ * @nandc: qpic nand controller
++ *
++ * This function will submit all the prepared DMA descriptors,
++ * both cmd and data descriptors
++ */
++int qcom_submit_descs(struct qcom_nand_controller *nandc)
++{
++      struct desc_info *desc, *n;
++      dma_cookie_t cookie = 0;
++      struct bam_transaction *bam_txn = nandc->bam_txn;
++      int ret = 0;
++
++      if (nandc->props->supports_bam) {
++              if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
++                      if (ret)
++                              goto err_unmap_free_desc;
++              }
++
++              if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
++                                                        DMA_PREP_INTERRUPT);
++                      if (ret)
++                              goto err_unmap_free_desc;
++              }
++
++              if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
++                                                        DMA_PREP_CMD);
++                      if (ret)
++                              goto err_unmap_free_desc;
++              }
++      }
++
++      list_for_each_entry(desc, &nandc->desc_list, node)
++              cookie = dmaengine_submit(desc->dma_desc);
++
++      if (nandc->props->supports_bam) {
++              bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
++              bam_txn->last_cmd_desc->callback_param = bam_txn;
++
++              dma_async_issue_pending(nandc->tx_chan);
++              dma_async_issue_pending(nandc->rx_chan);
++              dma_async_issue_pending(nandc->cmd_chan);
++
++              if (!wait_for_completion_timeout(&bam_txn->txn_done,
++                                               QPIC_NAND_COMPLETION_TIMEOUT))
++                      ret = -ETIMEDOUT;
++      } else {
++              if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
++                      ret = -ETIMEDOUT;
++      }
++
++err_unmap_free_desc:
++      /*
++       * Unmap the dma sg_list and free the desc allocated by both
++       * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
++       */
++      list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
++              list_del(&desc->node);
++
++              if (nandc->props->supports_bam)
++                      dma_unmap_sg(nandc->dev, desc->bam_sgl,
++                                   desc->sgl_cnt, desc->dir);
++              else
++                      dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
++                                   desc->dir);
++
++              kfree(desc);
++      }
++
++      return ret;
++}
++EXPORT_SYMBOL(qcom_submit_descs);
++
++/**
++ * qcom_clear_read_regs() - reset the read register buffer
++ * @nandc: qpic nand controller
++ *
++ * This function resets the register read buffer for the next NAND operation
++ */
++void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
++{
++      nandc->reg_read_pos = 0;
++      qcom_nandc_dev_to_mem(nandc, false);
++}
++EXPORT_SYMBOL(qcom_clear_read_regs);
++
++/**
++ * qcom_nandc_unalloc() - unallocate qpic nand controller
++ * @nandc: qpic nand controller
++ *
++ * This function will unallocate memory allocated for qpic nand controller
++ */
++void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
++{
++      if (nandc->props->supports_bam) {
++              if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
++                      dma_unmap_single(nandc->dev, nandc->reg_read_dma,
++                                       MAX_REG_RD *
++                                       sizeof(*nandc->reg_read_buf),
++                                       DMA_FROM_DEVICE);
++
++              if (nandc->tx_chan)
++                      dma_release_channel(nandc->tx_chan);
++
++              if (nandc->rx_chan)
++                      dma_release_channel(nandc->rx_chan);
++
++              if (nandc->cmd_chan)
++                      dma_release_channel(nandc->cmd_chan);
++      } else {
++              if (nandc->chan)
++                      dma_release_channel(nandc->chan);
++      }
++}
++EXPORT_SYMBOL(qcom_nandc_unalloc);
++
++/**
++ * qcom_nandc_alloc() - Allocate qpic nand controller
++ * @nandc: qpic nand controller
++ *
++ * This function will allocate memory for qpic nand controller
++ */
++int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
++{
++      int ret;
++
++      ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
++      if (ret) {
++              dev_err(nandc->dev, "failed to set DMA mask\n");
++              return ret;
++      }
++
++      /*
++       * we use the internal buffer for reading ONFI params, reading small
++ * data like ID and status, and performing read-copy-write operations
++       * when writing to a codeword partially. 532 is the maximum possible
++       * size of a codeword for our nand controller
++       */
++      nandc->buf_size = 532;
++
++      nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
++      if (!nandc->data_buffer)
++              return -ENOMEM;
++
++      nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
++      if (!nandc->regs)
++              return -ENOMEM;
++
++      nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
++                                         sizeof(*nandc->reg_read_buf),
++                                         GFP_KERNEL);
++      if (!nandc->reg_read_buf)
++              return -ENOMEM;
++
++      if (nandc->props->supports_bam) {
++              nandc->reg_read_dma =
++                      dma_map_single(nandc->dev, nandc->reg_read_buf,
++                                     MAX_REG_RD *
++                                     sizeof(*nandc->reg_read_buf),
++                                     DMA_FROM_DEVICE);
++              if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
++                      dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
++                      return -EIO;
++              }
++
++              nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
++              if (IS_ERR(nandc->tx_chan)) {
++                      ret = PTR_ERR(nandc->tx_chan);
++                      nandc->tx_chan = NULL;
++                      dev_err_probe(nandc->dev, ret,
++                                    "tx DMA channel request failed\n");
++                      goto unalloc;
++              }
++
++              nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
++              if (IS_ERR(nandc->rx_chan)) {
++                      ret = PTR_ERR(nandc->rx_chan);
++                      nandc->rx_chan = NULL;
++                      dev_err_probe(nandc->dev, ret,
++                                    "rx DMA channel request failed\n");
++                      goto unalloc;
++              }
++
++              nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
++              if (IS_ERR(nandc->cmd_chan)) {
++                      ret = PTR_ERR(nandc->cmd_chan);
++                      nandc->cmd_chan = NULL;
++                      dev_err_probe(nandc->dev, ret,
++                                    "cmd DMA channel request failed\n");
++                      goto unalloc;
++              }
++
++              /*
++               * Initially allocate BAM transaction to read ONFI param page.
++               * After detecting all the devices, this BAM transaction will
++               * be freed and the next BAM transaction will be allocated with
++               * maximum codeword size
++               */
++              nandc->max_cwperpage = 1;
++              nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
++              if (!nandc->bam_txn) {
++                      dev_err(nandc->dev,
++                              "failed to allocate bam transaction\n");
++                      ret = -ENOMEM;
++                      goto unalloc;
++              }
++      } else {
++              nandc->chan = dma_request_chan(nandc->dev, "rxtx");
++              if (IS_ERR(nandc->chan)) {
++                      ret = PTR_ERR(nandc->chan);
++                      nandc->chan = NULL;
++                      dev_err_probe(nandc->dev, ret,
++                                    "rxtx DMA channel request failed\n");
++                      return ret;
++              }
++      }
++
++      INIT_LIST_HEAD(&nandc->desc_list);
++      INIT_LIST_HEAD(&nandc->host_list);
++
++      return 0;
++unalloc:
++      qcom_nandc_unalloc(nandc);
++      return ret;
++}
++EXPORT_SYMBOL(qcom_nandc_alloc);
++
++MODULE_DESCRIPTION("QPIC controller common api");
++MODULE_LICENSE("GPL");
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -15,417 +15,7 @@
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+-
+-/* NANDc reg offsets */
+-#define       NAND_FLASH_CMD                  0x00
+-#define       NAND_ADDR0                      0x04
+-#define       NAND_ADDR1                      0x08
+-#define       NAND_FLASH_CHIP_SELECT          0x0c
+-#define       NAND_EXEC_CMD                   0x10
+-#define       NAND_FLASH_STATUS               0x14
+-#define       NAND_BUFFER_STATUS              0x18
+-#define       NAND_DEV0_CFG0                  0x20
+-#define       NAND_DEV0_CFG1                  0x24
+-#define       NAND_DEV0_ECC_CFG               0x28
+-#define       NAND_AUTO_STATUS_EN             0x2c
+-#define       NAND_DEV1_CFG0                  0x30
+-#define       NAND_DEV1_CFG1                  0x34
+-#define       NAND_READ_ID                    0x40
+-#define       NAND_READ_STATUS                0x44
+-#define       NAND_DEV_CMD0                   0xa0
+-#define       NAND_DEV_CMD1                   0xa4
+-#define       NAND_DEV_CMD2                   0xa8
+-#define       NAND_DEV_CMD_VLD                0xac
+-#define       SFLASHC_BURST_CFG               0xe0
+-#define       NAND_ERASED_CW_DETECT_CFG       0xe8
+-#define       NAND_ERASED_CW_DETECT_STATUS    0xec
+-#define       NAND_EBI2_ECC_BUF_CFG           0xf0
+-#define       FLASH_BUF_ACC                   0x100
+-
+-#define       NAND_CTRL                       0xf00
+-#define       NAND_VERSION                    0xf08
+-#define       NAND_READ_LOCATION_0            0xf20
+-#define       NAND_READ_LOCATION_1            0xf24
+-#define       NAND_READ_LOCATION_2            0xf28
+-#define       NAND_READ_LOCATION_3            0xf2c
+-#define       NAND_READ_LOCATION_LAST_CW_0    0xf40
+-#define       NAND_READ_LOCATION_LAST_CW_1    0xf44
+-#define       NAND_READ_LOCATION_LAST_CW_2    0xf48
+-#define       NAND_READ_LOCATION_LAST_CW_3    0xf4c
+-
+-/* dummy register offsets, used by qcom_write_reg_dma */
+-#define       NAND_DEV_CMD1_RESTORE           0xdead
+-#define       NAND_DEV_CMD_VLD_RESTORE        0xbeef
+-
+-/* NAND_FLASH_CMD bits */
+-#define       PAGE_ACC                        BIT(4)
+-#define       LAST_PAGE                       BIT(5)
+-
+-/* NAND_FLASH_CHIP_SELECT bits */
+-#define       NAND_DEV_SEL                    0
+-#define       DM_EN                           BIT(2)
+-
+-/* NAND_FLASH_STATUS bits */
+-#define       FS_OP_ERR                       BIT(4)
+-#define       FS_READY_BSY_N                  BIT(5)
+-#define       FS_MPU_ERR                      BIT(8)
+-#define       FS_DEVICE_STS_ERR               BIT(16)
+-#define       FS_DEVICE_WP                    BIT(23)
+-
+-/* NAND_BUFFER_STATUS bits */
+-#define       BS_UNCORRECTABLE_BIT            BIT(8)
+-#define       BS_CORRECTABLE_ERR_MSK          0x1f
+-
+-/* NAND_DEVn_CFG0 bits */
+-#define       DISABLE_STATUS_AFTER_WRITE      4
+-#define       CW_PER_PAGE                     6
+-#define       UD_SIZE_BYTES                   9
+-#define       UD_SIZE_BYTES_MASK              GENMASK(18, 9)
+-#define       ECC_PARITY_SIZE_BYTES_RS        19
+-#define       SPARE_SIZE_BYTES                23
+-#define       SPARE_SIZE_BYTES_MASK           GENMASK(26, 23)
+-#define       NUM_ADDR_CYCLES                 27
+-#define       STATUS_BFR_READ                 30
+-#define       SET_RD_MODE_AFTER_STATUS        31
+-
+-/* NAND_DEVn_CFG0 bits */
+-#define       DEV0_CFG1_ECC_DISABLE           0
+-#define       WIDE_FLASH                      1
+-#define       NAND_RECOVERY_CYCLES            2
+-#define       CS_ACTIVE_BSY                   5
+-#define       BAD_BLOCK_BYTE_NUM              6
+-#define       BAD_BLOCK_IN_SPARE_AREA         16
+-#define       WR_RD_BSY_GAP                   17
+-#define       ENABLE_BCH_ECC                  27
+-
+-/* NAND_DEV0_ECC_CFG bits */
+-#define       ECC_CFG_ECC_DISABLE             0
+-#define       ECC_SW_RESET                    1
+-#define       ECC_MODE                        4
+-#define       ECC_PARITY_SIZE_BYTES_BCH       8
+-#define       ECC_NUM_DATA_BYTES              16
+-#define       ECC_NUM_DATA_BYTES_MASK         GENMASK(25, 16)
+-#define       ECC_FORCE_CLK_OPEN              30
+-
+-/* NAND_DEV_CMD1 bits */
+-#define       READ_ADDR                       0
+-
+-/* NAND_DEV_CMD_VLD bits */
+-#define       READ_START_VLD                  BIT(0)
+-#define       READ_STOP_VLD                   BIT(1)
+-#define       WRITE_START_VLD                 BIT(2)
+-#define       ERASE_START_VLD                 BIT(3)
+-#define       SEQ_READ_START_VLD              BIT(4)
+-
+-/* NAND_EBI2_ECC_BUF_CFG bits */
+-#define       NUM_STEPS                       0
+-
+-/* NAND_ERASED_CW_DETECT_CFG bits */
+-#define       ERASED_CW_ECC_MASK              1
+-#define       AUTO_DETECT_RES                 0
+-#define       MASK_ECC                        BIT(ERASED_CW_ECC_MASK)
+-#define       RESET_ERASED_DET                BIT(AUTO_DETECT_RES)
+-#define       ACTIVE_ERASED_DET               (0 << AUTO_DETECT_RES)
+-#define       CLR_ERASED_PAGE_DET             (RESET_ERASED_DET | MASK_ECC)
+-#define       SET_ERASED_PAGE_DET             (ACTIVE_ERASED_DET | MASK_ECC)
+-
+-/* NAND_ERASED_CW_DETECT_STATUS bits */
+-#define       PAGE_ALL_ERASED                 BIT(7)
+-#define       CODEWORD_ALL_ERASED             BIT(6)
+-#define       PAGE_ERASED                     BIT(5)
+-#define       CODEWORD_ERASED                 BIT(4)
+-#define       ERASED_PAGE                     (PAGE_ALL_ERASED | PAGE_ERASED)
+-#define       ERASED_CW                       (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
+-
+-/* NAND_READ_LOCATION_n bits */
+-#define READ_LOCATION_OFFSET          0
+-#define READ_LOCATION_SIZE            16
+-#define READ_LOCATION_LAST            31
+-
+-/* Version Mask */
+-#define       NAND_VERSION_MAJOR_MASK         0xf0000000
+-#define       NAND_VERSION_MAJOR_SHIFT        28
+-#define       NAND_VERSION_MINOR_MASK         0x0fff0000
+-#define       NAND_VERSION_MINOR_SHIFT        16
+-
+-/* NAND OP_CMDs */
+-#define       OP_PAGE_READ                    0x2
+-#define       OP_PAGE_READ_WITH_ECC           0x3
+-#define       OP_PAGE_READ_WITH_ECC_SPARE     0x4
+-#define       OP_PAGE_READ_ONFI_READ          0x5
+-#define       OP_PROGRAM_PAGE                 0x6
+-#define       OP_PAGE_PROGRAM_WITH_ECC        0x7
+-#define       OP_PROGRAM_PAGE_SPARE           0x9
+-#define       OP_BLOCK_ERASE                  0xa
+-#define       OP_CHECK_STATUS                 0xc
+-#define       OP_FETCH_ID                     0xb
+-#define       OP_RESET_DEVICE                 0xd
+-
+-/* Default Value for NAND_DEV_CMD_VLD */
+-#define NAND_DEV_CMD_VLD_VAL          (READ_START_VLD | WRITE_START_VLD | \
+-                                       ERASE_START_VLD | SEQ_READ_START_VLD)
+-
+-/* NAND_CTRL bits */
+-#define       BAM_MODE_EN                     BIT(0)
+-
+-/*
+- * the NAND controller performs reads/writes with ECC in 516 byte chunks.
+- * the driver calls the chunks 'step' or 'codeword' interchangeably
+- */
+-#define       NANDC_STEP_SIZE                 512
+-
+-/*
+- * the largest page size we support is 8K, this will have 16 steps/codewords
+- * of 512 bytes each
+- */
+-#define       MAX_NUM_STEPS                   (SZ_8K / NANDC_STEP_SIZE)
+-
+-/* we read at most 3 registers per codeword scan */
+-#define       MAX_REG_RD                      (3 * MAX_NUM_STEPS)
+-
+-/* ECC modes supported by the controller */
+-#define       ECC_NONE        BIT(0)
+-#define       ECC_RS_4BIT     BIT(1)
+-#define       ECC_BCH_4BIT    BIT(2)
+-#define       ECC_BCH_8BIT    BIT(3)
+-
+-/*
+- * Returns the actual register address for all NAND_DEV_ registers
+- * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
+- */
+-#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
+-
+-/* Returns the NAND register physical address */
+-#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
+-
+-/* Returns the dma address for reg read buffer */
+-#define reg_buf_dma_addr(chip, vaddr) \
+-      ((chip)->reg_read_dma + \
+-      ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
+-
+-#define QPIC_PER_CW_CMD_ELEMENTS      32
+-#define QPIC_PER_CW_CMD_SGL           32
+-#define QPIC_PER_CW_DATA_SGL          8
+-
+-#define QPIC_NAND_COMPLETION_TIMEOUT  msecs_to_jiffies(2000)
+-
+-/*
+- * Flags used in DMA descriptor preparation helper functions
+- * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
+- */
+-/* Don't set the EOT in current tx BAM sgl */
+-#define NAND_BAM_NO_EOT                       BIT(0)
+-/* Set the NWD flag in current BAM sgl */
+-#define NAND_BAM_NWD                  BIT(1)
+-/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
+-#define NAND_BAM_NEXT_SGL             BIT(2)
+-/*
+- * Erased codeword status is being used two times in single transfer so this
+- * flag will determine the current value of erased codeword status register
+- */
+-#define NAND_ERASED_CW_SET            BIT(4)
+-
+-#define MAX_ADDRESS_CYCLE             5
+-
+-/*
+- * This data type corresponds to the BAM transaction which will be used for all
+- * NAND transfers.
+- * @bam_ce - the array of BAM command elements
+- * @cmd_sgl - sgl for NAND BAM command pipe
+- * @data_sgl - sgl for NAND BAM consumer/producer pipe
+- * @last_data_desc - last DMA desc in data channel (tx/rx).
+- * @last_cmd_desc - last DMA desc in command channel.
+- * @txn_done - completion for NAND transfer.
+- * @bam_ce_pos - the index in bam_ce which is available for next sgl
+- * @bam_ce_start - the index in bam_ce which marks the start position ce
+- *               for current sgl. It will be used for size calculation
+- *               for current sgl
+- * @cmd_sgl_pos - current index in command sgl.
+- * @cmd_sgl_start - start index in command sgl.
+- * @tx_sgl_pos - current index in data sgl for tx.
+- * @tx_sgl_start - start index in data sgl for tx.
+- * @rx_sgl_pos - current index in data sgl for rx.
+- * @rx_sgl_start - start index in data sgl for rx.
+- */
+-struct bam_transaction {
+-      struct bam_cmd_element *bam_ce;
+-      struct scatterlist *cmd_sgl;
+-      struct scatterlist *data_sgl;
+-      struct dma_async_tx_descriptor *last_data_desc;
+-      struct dma_async_tx_descriptor *last_cmd_desc;
+-      struct completion txn_done;
+-      u32 bam_ce_pos;
+-      u32 bam_ce_start;
+-      u32 cmd_sgl_pos;
+-      u32 cmd_sgl_start;
+-      u32 tx_sgl_pos;
+-      u32 tx_sgl_start;
+-      u32 rx_sgl_pos;
+-      u32 rx_sgl_start;
+-};
+-
+-/*
+- * This data type corresponds to the nand dma descriptor
+- * @dma_desc - low level DMA engine descriptor
+- * @list - list for desc_info
+- *
+- * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
+- *          ADM
+- * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
+- * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
+- * @dir - DMA transfer direction
+- */
+-struct desc_info {
+-      struct dma_async_tx_descriptor *dma_desc;
+-      struct list_head node;
+-
+-      union {
+-              struct scatterlist adm_sgl;
+-              struct {
+-                      struct scatterlist *bam_sgl;
+-                      int sgl_cnt;
+-              };
+-      };
+-      enum dma_data_direction dir;
+-};
+-
+-/*
+- * holds the current register values that we want to write. acts as a contiguous
+- * chunk of memory which we use to write the controller registers through DMA.
+- */
+-struct nandc_regs {
+-      __le32 cmd;
+-      __le32 addr0;
+-      __le32 addr1;
+-      __le32 chip_sel;
+-      __le32 exec;
+-
+-      __le32 cfg0;
+-      __le32 cfg1;
+-      __le32 ecc_bch_cfg;
+-
+-      __le32 clrflashstatus;
+-      __le32 clrreadstatus;
+-
+-      __le32 cmd1;
+-      __le32 vld;
+-
+-      __le32 orig_cmd1;
+-      __le32 orig_vld;
+-
+-      __le32 ecc_buf_cfg;
+-      __le32 read_location0;
+-      __le32 read_location1;
+-      __le32 read_location2;
+-      __le32 read_location3;
+-      __le32 read_location_last0;
+-      __le32 read_location_last1;
+-      __le32 read_location_last2;
+-      __le32 read_location_last3;
+-
+-      __le32 erased_cw_detect_cfg_clr;
+-      __le32 erased_cw_detect_cfg_set;
+-};
+-
+-/*
+- * NAND controller data struct
+- *
+- * @dev:                      parent device
+- *
+- * @base:                     MMIO base
+- *
+- * @core_clk:                 controller clock
+- * @aon_clk:                  another controller clock
+- *
+- * @regs:                     a contiguous chunk of memory for DMA register
+- *                            writes. contains the register values to be
+- *                            written to controller
+- *
+- * @props:                    properties of current NAND controller,
+- *                            initialized via DT match data
+- *
+- * @controller:                       base controller structure
+- * @host_list:                        list containing all the chips attached to the
+- *                            controller
+- *
+- * @chan:                     dma channel
+- * @cmd_crci:                 ADM DMA CRCI for command flow control
+- * @data_crci:                        ADM DMA CRCI for data flow control
+- *
+- * @desc_list:                        DMA descriptor list (list of desc_infos)
+- *
+- * @data_buffer:              our local DMA buffer for page read/writes,
+- *                            used when we can't use the buffer provided
+- *                            by upper layers directly
+- * @reg_read_buf:             local buffer for reading back registers via DMA
+- *
+- * @base_phys:                        physical base address of controller registers
+- * @base_dma:                 dma base address of controller registers
+- * @reg_read_dma:             contains dma address for register read buffer
+- *
+- * @buf_size/count/start:     markers for chip->legacy.read_buf/write_buf
+- *                            functions
+- * @max_cwperpage:            maximum QPIC codewords required. calculated
+- *                            from all connected NAND devices pagesize
+- *
+- * @reg_read_pos:             marker for data read in reg_read_buf
+- *
+- * @cmd1/vld:                 some fixed controller register values
+- *
+- * @exec_opwrite:             flag to select correct number of code word
+- *                            while reading status
+- */
+-struct qcom_nand_controller {
+-      struct device *dev;
+-
+-      void __iomem *base;
+-
+-      struct clk *core_clk;
+-      struct clk *aon_clk;
+-
+-      struct nandc_regs *regs;
+-      struct bam_transaction *bam_txn;
+-
+-      const struct qcom_nandc_props *props;
+-
+-      struct nand_controller controller;
+-      struct list_head host_list;
+-
+-      union {
+-              /* will be used only by QPIC for BAM DMA */
+-              struct {
+-                      struct dma_chan *tx_chan;
+-                      struct dma_chan *rx_chan;
+-                      struct dma_chan *cmd_chan;
+-              };
+-
+-              /* will be used only by EBI2 for ADM DMA */
+-              struct {
+-                      struct dma_chan *chan;
+-                      unsigned int cmd_crci;
+-                      unsigned int data_crci;
+-              };
+-      };
+-
+-      struct list_head desc_list;
+-
+-      u8              *data_buffer;
+-      __le32          *reg_read_buf;
+-
+-      phys_addr_t base_phys;
+-      dma_addr_t base_dma;
+-      dma_addr_t reg_read_dma;
+-
+-      int             buf_size;
+-      int             buf_count;
+-      int             buf_start;
+-      unsigned int    max_cwperpage;
+-
+-      int reg_read_pos;
+-
+-      u32 cmd1, vld;
+-      bool exec_opwrite;
+-};
++#include <linux/mtd/nand-qpic-common.h>
+ /*
+  * NAND special boot partitions
+@@ -530,97 +120,6 @@ struct qcom_nand_host {
+       bool bch_enabled;
+ };
+-/*
+- * This data type corresponds to the NAND controller properties which varies
+- * among different NAND controllers.
+- * @ecc_modes - ecc mode for NAND
+- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+- * @supports_bam - whether NAND controller is using Bus Access Manager (BAM)
+- * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
+- * @qpic_version2 - flag to indicate QPIC IP version 2
+- * @use_codeword_fixup - whether NAND has different layout for boot partitions
+- */
+-struct qcom_nandc_props {
+-      u32 ecc_modes;
+-      u32 dev_cmd_reg_start;
+-      bool supports_bam;
+-      bool nandc_part_of_qpic;
+-      bool qpic_version2;
+-      bool use_codeword_fixup;
+-};
+-
+-/* Frees the BAM transaction memory */
+-static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
+-{
+-      struct bam_transaction *bam_txn = nandc->bam_txn;
+-
+-      devm_kfree(nandc->dev, bam_txn);
+-}
+-
+-/* Allocates and Initializes the BAM transaction */
+-static struct bam_transaction *
+-qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
+-{
+-      struct bam_transaction *bam_txn;
+-      size_t bam_txn_size;
+-      unsigned int num_cw = nandc->max_cwperpage;
+-      void *bam_txn_buf;
+-
+-      bam_txn_size =
+-              sizeof(*bam_txn) + num_cw *
+-              ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
+-              (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
+-              (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
+-
+-      bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
+-      if (!bam_txn_buf)
+-              return NULL;
+-
+-      bam_txn = bam_txn_buf;
+-      bam_txn_buf += sizeof(*bam_txn);
+-
+-      bam_txn->bam_ce = bam_txn_buf;
+-      bam_txn_buf +=
+-              sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+-
+-      bam_txn->cmd_sgl = bam_txn_buf;
+-      bam_txn_buf +=
+-              sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
+-
+-      bam_txn->data_sgl = bam_txn_buf;
+-
+-      init_completion(&bam_txn->txn_done);
+-
+-      return bam_txn;
+-}
+-
+-/* Clears the BAM transaction indexes */
+-static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
+-{
+-      struct bam_transaction *bam_txn = nandc->bam_txn;
+-
+-      if (!nandc->props->supports_bam)
+-              return;
+-
+-      memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
+-      bam_txn->last_data_desc = NULL;
+-
+-      sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
+-                    QPIC_PER_CW_CMD_SGL);
+-      sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
+-                    QPIC_PER_CW_DATA_SGL);
+-
+-      reinit_completion(&bam_txn->txn_done);
+-}
+-
+-/* Callback for DMA descriptor completion */
+-static void qcom_qpic_bam_dma_done(void *data)
+-{
+-      struct bam_transaction *bam_txn = data;
+-
+-      complete(&bam_txn->txn_done);
+-}
+-
+ static struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+ {
+       return container_of(chip, struct qcom_nand_host, chip);
+@@ -629,8 +128,8 @@ static struct qcom_nand_host *to_qcom_na
+ static struct qcom_nand_controller *
+ get_qcom_nand_controller(struct nand_chip *chip)
+ {
+-      return container_of(chip->controller, struct qcom_nand_controller,
+-                          controller);
++      return (struct qcom_nand_controller *)
++              ((u8 *)chip->controller - sizeof(struct qcom_nand_controller));
+ }
+ static u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
+@@ -644,23 +143,6 @@ static void nandc_write(struct qcom_nand
+       iowrite32(val, nandc->base + offset);
+ }
+-static void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+-{
+-      if (!nandc->props->supports_bam)
+-              return;
+-
+-      if (is_cpu)
+-              dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
+-                                      MAX_REG_RD *
+-                                      sizeof(*nandc->reg_read_buf),
+-                                      DMA_FROM_DEVICE);
+-      else
+-              dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
+-                                         MAX_REG_RD *
+-                                         sizeof(*nandc->reg_read_buf),
+-                                         DMA_FROM_DEVICE);
+-}
+-
+ /* Helper to check whether this is the last CW or not */
+ static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
+ {
+@@ -820,356 +302,6 @@ static void update_rw_regs(struct qcom_n
+ }
+ /*
+- * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
+- * for BAM. This descriptor will be added in the NAND DMA descriptor queue
+- * which will be submitted to DMA engine.
+- */
+-static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+-                                     struct dma_chan *chan,
+-                                     unsigned long flags)
+-{
+-      struct desc_info *desc;
+-      struct scatterlist *sgl;
+-      unsigned int sgl_cnt;
+-      int ret;
+-      struct bam_transaction *bam_txn = nandc->bam_txn;
+-      enum dma_transfer_direction dir_eng;
+-      struct dma_async_tx_descriptor *dma_desc;
+-
+-      desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+-      if (!desc)
+-              return -ENOMEM;
+-
+-      if (chan == nandc->cmd_chan) {
+-              sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
+-              sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
+-              bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
+-              dir_eng = DMA_MEM_TO_DEV;
+-              desc->dir = DMA_TO_DEVICE;
+-      } else if (chan == nandc->tx_chan) {
+-              sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
+-              sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
+-              bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
+-              dir_eng = DMA_MEM_TO_DEV;
+-              desc->dir = DMA_TO_DEVICE;
+-      } else {
+-              sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
+-              sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
+-              bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
+-              dir_eng = DMA_DEV_TO_MEM;
+-              desc->dir = DMA_FROM_DEVICE;
+-      }
+-
+-      sg_mark_end(sgl + sgl_cnt - 1);
+-      ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+-      if (ret == 0) {
+-              dev_err(nandc->dev, "failure in mapping desc\n");
+-              kfree(desc);
+-              return -ENOMEM;
+-      }
+-
+-      desc->sgl_cnt = sgl_cnt;
+-      desc->bam_sgl = sgl;
+-
+-      dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
+-                                         flags);
+-
+-      if (!dma_desc) {
+-              dev_err(nandc->dev, "failure in prep desc\n");
+-              dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+-              kfree(desc);
+-              return -EINVAL;
+-      }
+-
+-      desc->dma_desc = dma_desc;
+-
+-      /* update last data/command descriptor */
+-      if (chan == nandc->cmd_chan)
+-              bam_txn->last_cmd_desc = dma_desc;
+-      else
+-              bam_txn->last_data_desc = dma_desc;
+-
+-      list_add_tail(&desc->node, &nandc->desc_list);
+-
+-      return 0;
+-}
+-
+-/*
+- * Prepares the command descriptor for BAM DMA which will be used for NAND
+- * register reads and writes. The command descriptor requires the command
+- * to be formed in command element type so this function uses the command
+- * element from bam transaction ce array and fills the same with required
+- * data. A single SGL can contain multiple command elements so
+- * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
+- * after the current command element.
+- */
+-static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+-                                    int reg_off, const void *vaddr,
+-                                    int size, unsigned int flags)
+-{
+-      int bam_ce_size;
+-      int i, ret;
+-      struct bam_cmd_element *bam_ce_buffer;
+-      struct bam_transaction *bam_txn = nandc->bam_txn;
+-
+-      bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
+-
+-      /* fill the command desc */
+-      for (i = 0; i < size; i++) {
+-              if (read)
+-                      bam_prep_ce(&bam_ce_buffer[i],
+-                                  nandc_reg_phys(nandc, reg_off + 4 * i),
+-                                  BAM_READ_COMMAND,
+-                                  reg_buf_dma_addr(nandc,
+-                                                   (__le32 *)vaddr + i));
+-              else
+-                      bam_prep_ce_le32(&bam_ce_buffer[i],
+-                                       nandc_reg_phys(nandc, reg_off + 4 * i),
+-                                       BAM_WRITE_COMMAND,
+-                                       *((__le32 *)vaddr + i));
+-      }
+-
+-      bam_txn->bam_ce_pos += size;
+-
+-      /* use the separate sgl after this command */
+-      if (flags & NAND_BAM_NEXT_SGL) {
+-              bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
+-              bam_ce_size = (bam_txn->bam_ce_pos -
+-                              bam_txn->bam_ce_start) *
+-                              sizeof(struct bam_cmd_element);
+-              sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
+-                         bam_ce_buffer, bam_ce_size);
+-              bam_txn->cmd_sgl_pos++;
+-              bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+-
+-              if (flags & NAND_BAM_NWD) {
+-                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+-                                                        DMA_PREP_FENCE |
+-                                                        DMA_PREP_CMD);
+-                      if (ret)
+-                              return ret;
+-              }
+-      }
+-
+-      return 0;
+-}
+-
+-/*
+- * Prepares the data descriptor for BAM DMA which will be used for NAND
+- * data reads and writes.
+- */
+-static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+-                                     const void *vaddr, int size, unsigned int flags)
+-{
+-      int ret;
+-      struct bam_transaction *bam_txn = nandc->bam_txn;
+-
+-      if (read) {
+-              sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
+-                         vaddr, size);
+-              bam_txn->rx_sgl_pos++;
+-      } else {
+-              sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
+-                         vaddr, size);
+-              bam_txn->tx_sgl_pos++;
+-
+-              /*
+-               * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
+-               * is not set, form the DMA descriptor
+-               */
+-              if (!(flags & NAND_BAM_NO_EOT)) {
+-                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+-                                                        DMA_PREP_INTERRUPT);
+-                      if (ret)
+-                              return ret;
+-              }
+-      }
+-
+-      return 0;
+-}
+-
+-static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+-                                int reg_off, const void *vaddr, int size,
+-                                bool flow_control)
+-{
+-      struct desc_info *desc;
+-      struct dma_async_tx_descriptor *dma_desc;
+-      struct scatterlist *sgl;
+-      struct dma_slave_config slave_conf;
+-      struct qcom_adm_peripheral_config periph_conf = {};
+-      enum dma_transfer_direction dir_eng;
+-      int ret;
+-
+-      desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+-      if (!desc)
+-              return -ENOMEM;
+-
+-      sgl = &desc->adm_sgl;
+-
+-      sg_init_one(sgl, vaddr, size);
+-
+-      if (read) {
+-              dir_eng = DMA_DEV_TO_MEM;
+-              desc->dir = DMA_FROM_DEVICE;
+-      } else {
+-              dir_eng = DMA_MEM_TO_DEV;
+-              desc->dir = DMA_TO_DEVICE;
+-      }
+-
+-      ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
+-      if (ret == 0) {
+-              ret = -ENOMEM;
+-              goto err;
+-      }
+-
+-      memset(&slave_conf, 0x00, sizeof(slave_conf));
+-
+-      slave_conf.device_fc = flow_control;
+-      if (read) {
+-              slave_conf.src_maxburst = 16;
+-              slave_conf.src_addr = nandc->base_dma + reg_off;
+-              if (nandc->data_crci) {
+-                      periph_conf.crci = nandc->data_crci;
+-                      slave_conf.peripheral_config = &periph_conf;
+-                      slave_conf.peripheral_size = sizeof(periph_conf);
+-              }
+-      } else {
+-              slave_conf.dst_maxburst = 16;
+-              slave_conf.dst_addr = nandc->base_dma + reg_off;
+-              if (nandc->cmd_crci) {
+-                      periph_conf.crci = nandc->cmd_crci;
+-                      slave_conf.peripheral_config = &periph_conf;
+-                      slave_conf.peripheral_size = sizeof(periph_conf);
+-              }
+-      }
+-
+-      ret = dmaengine_slave_config(nandc->chan, &slave_conf);
+-      if (ret) {
+-              dev_err(nandc->dev, "failed to configure dma channel\n");
+-              goto err;
+-      }
+-
+-      dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
+-      if (!dma_desc) {
+-              dev_err(nandc->dev, "failed to prepare desc\n");
+-              ret = -EINVAL;
+-              goto err;
+-      }
+-
+-      desc->dma_desc = dma_desc;
+-
+-      list_add_tail(&desc->node, &nandc->desc_list);
+-
+-      return 0;
+-err:
+-      kfree(desc);
+-
+-      return ret;
+-}
+-
+-/*
+- * qcom_read_reg_dma: prepares a descriptor to read a given number of
+- *                    contiguous registers to the reg_read_buf pointer
+- *
+- * @first:            offset of the first register in the contiguous block
+- * @num_regs:         number of registers to read
+- * @flags:            flags to control DMA descriptor preparation
+- */
+-static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
+-                           int num_regs, unsigned int flags)
+-{
+-      bool flow_control = false;
+-      void *vaddr;
+-
+-      vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+-      nandc->reg_read_pos += num_regs;
+-
+-      if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
+-              first = dev_cmd_reg_addr(nandc, first);
+-
+-      if (nandc->props->supports_bam)
+-              return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+-                                           num_regs, flags);
+-
+-      if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+-              flow_control = true;
+-
+-      return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
+-                               num_regs * sizeof(u32), flow_control);
+-}
+-
+-/*
+- * qcom_write_reg_dma:        prepares a descriptor to write a given number of
+- *                    contiguous registers
+- *
+- * @vaddr:            contiguous memory from where register value will
+- *                    be written
+- * @first:            offset of the first register in the contiguous block
+- * @num_regs:         number of registers to write
+- * @flags:            flags to control DMA descriptor preparation
+- */
+-static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
+-                            int first, int num_regs, unsigned int flags)
+-{
+-      bool flow_control = false;
+-
+-      if (first == NAND_EXEC_CMD)
+-              flags |= NAND_BAM_NWD;
+-
+-      if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
+-              first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
+-
+-      if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
+-              first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+-
+-      if (nandc->props->supports_bam)
+-              return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+-                                           num_regs, flags);
+-
+-      if (first == NAND_FLASH_CMD)
+-              flow_control = true;
+-
+-      return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
+-                               num_regs * sizeof(u32), flow_control);
+-}
+-
+-/*
+- * qcom_read_data_dma:        prepares a DMA descriptor to transfer data from the
+- *                    controller's internal buffer to the buffer 'vaddr'
+- *
+- * @reg_off:          offset within the controller's data buffer
+- * @vaddr:            virtual address of the buffer we want to write to
+- * @size:             DMA transaction size in bytes
+- * @flags:            flags to control DMA descriptor preparation
+- */
+-static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+-                            const u8 *vaddr, int size, unsigned int flags)
+-{
+-      if (nandc->props->supports_bam)
+-              return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+-
+-      return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+-}
+-
+-/*
+- * qcom_write_data_dma:       prepares a DMA descriptor to transfer data from
+- *                    'vaddr' to the controller's internal buffer
+- *
+- * @reg_off:          offset within the controller's data buffer
+- * @vaddr:            virtual address of the buffer we want to read from
+- * @size:             DMA transaction size in bytes
+- * @flags:            flags to control DMA descriptor preparation
+- */
+-static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+-                             const u8 *vaddr, int size, unsigned int flags)
+-{
+-      if (nandc->props->supports_bam)
+-              return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+-
+-      return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+-}
+-
+-/*
+  * Helper to prepare DMA descriptors for configuring registers
+  * before reading a NAND page.
+  */
+@@ -1262,83 +394,6 @@ static void config_nand_cw_write(struct
+                          NAND_BAM_NEXT_SGL);
+ }
+-/* helpers to submit/free our list of dma descriptors */
+-static int qcom_submit_descs(struct qcom_nand_controller *nandc)
+-{
+-      struct desc_info *desc, *n;
+-      dma_cookie_t cookie = 0;
+-      struct bam_transaction *bam_txn = nandc->bam_txn;
+-      int ret = 0;
+-
+-      if (nandc->props->supports_bam) {
+-              if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+-                      ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+-                      if (ret)
+-                              goto err_unmap_free_desc;
+-              }
+-
+-              if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
+-                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+-                                                        DMA_PREP_INTERRUPT);
+-                      if (ret)
+-                              goto err_unmap_free_desc;
+-              }
+-
+-              if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
+-                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+-                                                        DMA_PREP_CMD);
+-                      if (ret)
+-                              goto err_unmap_free_desc;
+-              }
+-      }
+-
+-      list_for_each_entry(desc, &nandc->desc_list, node)
+-              cookie = dmaengine_submit(desc->dma_desc);
+-
+-      if (nandc->props->supports_bam) {
+-              bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
+-              bam_txn->last_cmd_desc->callback_param = bam_txn;
+-
+-              dma_async_issue_pending(nandc->tx_chan);
+-              dma_async_issue_pending(nandc->rx_chan);
+-              dma_async_issue_pending(nandc->cmd_chan);
+-
+-              if (!wait_for_completion_timeout(&bam_txn->txn_done,
+-                                               QPIC_NAND_COMPLETION_TIMEOUT))
+-                      ret = -ETIMEDOUT;
+-      } else {
+-              if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
+-                      ret = -ETIMEDOUT;
+-      }
+-
+-err_unmap_free_desc:
+-      /*
+-       * Unmap the dma sg_list and free the desc allocated by both
+-       * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
+-       */
+-      list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+-              list_del(&desc->node);
+-
+-              if (nandc->props->supports_bam)
+-                      dma_unmap_sg(nandc->dev, desc->bam_sgl,
+-                                   desc->sgl_cnt, desc->dir);
+-              else
+-                      dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
+-                                   desc->dir);
+-
+-              kfree(desc);
+-      }
+-
+-      return ret;
+-}
+-
+-/* reset the register read buffer for next NAND operation */
+-static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
+-{
+-      nandc->reg_read_pos = 0;
+-      qcom_nandc_dev_to_mem(nandc, false);
+-}
+-
+ /*
+  * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
+  * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
+@@ -2967,141 +2022,14 @@ static const struct nand_controller_ops
+       .exec_op = qcom_nand_exec_op,
+ };
+-static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+-{
+-      if (nandc->props->supports_bam) {
+-              if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+-                      dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+-                                       MAX_REG_RD *
+-                                       sizeof(*nandc->reg_read_buf),
+-                                       DMA_FROM_DEVICE);
+-
+-              if (nandc->tx_chan)
+-                      dma_release_channel(nandc->tx_chan);
+-
+-              if (nandc->rx_chan)
+-                      dma_release_channel(nandc->rx_chan);
+-
+-              if (nandc->cmd_chan)
+-                      dma_release_channel(nandc->cmd_chan);
+-      } else {
+-              if (nandc->chan)
+-                      dma_release_channel(nandc->chan);
+-      }
+-}
+-
+-static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+-{
+-      int ret;
+-
+-      ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
+-      if (ret) {
+-              dev_err(nandc->dev, "failed to set DMA mask\n");
+-              return ret;
+-      }
+-
+-      /*
+-       * we use the internal buffer for reading ONFI params, reading small
+-       * data like ID and status, and preforming read-copy-write operations
+-       * when writing to a codeword partially. 532 is the maximum possible
+-       * size of a codeword for our nand controller
+-       */
+-      nandc->buf_size = 532;
+-
+-      nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
+-      if (!nandc->data_buffer)
+-              return -ENOMEM;
+-
+-      nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
+-      if (!nandc->regs)
+-              return -ENOMEM;
+-
+-      nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
+-                                         sizeof(*nandc->reg_read_buf),
+-                                         GFP_KERNEL);
+-      if (!nandc->reg_read_buf)
+-              return -ENOMEM;
+-
+-      if (nandc->props->supports_bam) {
+-              nandc->reg_read_dma =
+-                      dma_map_single(nandc->dev, nandc->reg_read_buf,
+-                                     MAX_REG_RD *
+-                                     sizeof(*nandc->reg_read_buf),
+-                                     DMA_FROM_DEVICE);
+-              if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
+-                      dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
+-                      return -EIO;
+-              }
+-
+-              nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
+-              if (IS_ERR(nandc->tx_chan)) {
+-                      ret = PTR_ERR(nandc->tx_chan);
+-                      nandc->tx_chan = NULL;
+-                      dev_err_probe(nandc->dev, ret,
+-                                    "tx DMA channel request failed\n");
+-                      goto unalloc;
+-              }
+-
+-              nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
+-              if (IS_ERR(nandc->rx_chan)) {
+-                      ret = PTR_ERR(nandc->rx_chan);
+-                      nandc->rx_chan = NULL;
+-                      dev_err_probe(nandc->dev, ret,
+-                                    "rx DMA channel request failed\n");
+-                      goto unalloc;
+-              }
+-
+-              nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
+-              if (IS_ERR(nandc->cmd_chan)) {
+-                      ret = PTR_ERR(nandc->cmd_chan);
+-                      nandc->cmd_chan = NULL;
+-                      dev_err_probe(nandc->dev, ret,
+-                                    "cmd DMA channel request failed\n");
+-                      goto unalloc;
+-              }
+-
+-              /*
+-               * Initially allocate BAM transaction to read ONFI param page.
+-               * After detecting all the devices, this BAM transaction will
+-               * be freed and the next BAM transaction will be allocated with
+-               * maximum codeword size
+-               */
+-              nandc->max_cwperpage = 1;
+-              nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
+-              if (!nandc->bam_txn) {
+-                      dev_err(nandc->dev,
+-                              "failed to allocate bam transaction\n");
+-                      ret = -ENOMEM;
+-                      goto unalloc;
+-              }
+-      } else {
+-              nandc->chan = dma_request_chan(nandc->dev, "rxtx");
+-              if (IS_ERR(nandc->chan)) {
+-                      ret = PTR_ERR(nandc->chan);
+-                      nandc->chan = NULL;
+-                      dev_err_probe(nandc->dev, ret,
+-                                    "rxtx DMA channel request failed\n");
+-                      return ret;
+-              }
+-      }
+-
+-      INIT_LIST_HEAD(&nandc->desc_list);
+-      INIT_LIST_HEAD(&nandc->host_list);
+-
+-      nand_controller_init(&nandc->controller);
+-      nandc->controller.ops = &qcom_nandc_ops;
+-
+-      return 0;
+-unalloc:
+-      qcom_nandc_unalloc(nandc);
+-      return ret;
+-}
+-
+ /* one time setup of a few nand controller registers */
+ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
+ {
+       u32 nand_ctrl;
++      nand_controller_init(nandc->controller);
++      nandc->controller->ops = &qcom_nandc_ops;
++
+       /* kill onenand */
+       if (!nandc->props->nandc_part_of_qpic)
+               nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+@@ -3240,7 +2168,7 @@ static int qcom_nand_host_init_and_regis
+       chip->legacy.block_bad          = qcom_nandc_block_bad;
+       chip->legacy.block_markbad      = qcom_nandc_block_markbad;
+-      chip->controller = &nandc->controller;
++      chip->controller = nandc->controller;
+       chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
+                        NAND_SKIP_BBTSCAN;
+@@ -3323,17 +2251,21 @@ static int qcom_nandc_parse_dt(struct pl
+ static int qcom_nandc_probe(struct platform_device *pdev)
+ {
+       struct qcom_nand_controller *nandc;
++      struct nand_controller *controller;
+       const void *dev_data;
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       int ret;
+-      nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
++      nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc) + sizeof(*controller),
++                           GFP_KERNEL);
+       if (!nandc)
+               return -ENOMEM;
++      controller = (struct nand_controller *)&nandc[1];
+       platform_set_drvdata(pdev, nandc);
+       nandc->dev = dev;
++      nandc->controller = controller;
+       dev_data = of_device_get_match_data(dev);
+       if (!dev_data) {
+--- /dev/null
++++ b/include/linux/mtd/nand-qpic-common.h
+@@ -0,0 +1,468 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * QCOM QPIC common APIs header file
++ *
++ * Copyright (c) 2023 Qualcomm Inc.
++ * Authors:   Md sadre Alam   <quic_mdalam@quicinc.com>
++ *
++ */
++#ifndef __MTD_NAND_QPIC_COMMON_H__
++#define __MTD_NAND_QPIC_COMMON_H__
++
++/* NANDc reg offsets */
++#define       NAND_FLASH_CMD                  0x00
++#define       NAND_ADDR0                      0x04
++#define       NAND_ADDR1                      0x08
++#define       NAND_FLASH_CHIP_SELECT          0x0c
++#define       NAND_EXEC_CMD                   0x10
++#define       NAND_FLASH_STATUS               0x14
++#define       NAND_BUFFER_STATUS              0x18
++#define       NAND_DEV0_CFG0                  0x20
++#define       NAND_DEV0_CFG1                  0x24
++#define       NAND_DEV0_ECC_CFG               0x28
++#define       NAND_AUTO_STATUS_EN             0x2c
++#define       NAND_DEV1_CFG0                  0x30
++#define       NAND_DEV1_CFG1                  0x34
++#define       NAND_READ_ID                    0x40
++#define       NAND_READ_STATUS                0x44
++#define       NAND_DEV_CMD0                   0xa0
++#define       NAND_DEV_CMD1                   0xa4
++#define       NAND_DEV_CMD2                   0xa8
++#define       NAND_DEV_CMD_VLD                0xac
++#define       SFLASHC_BURST_CFG               0xe0
++#define       NAND_ERASED_CW_DETECT_CFG       0xe8
++#define       NAND_ERASED_CW_DETECT_STATUS    0xec
++#define       NAND_EBI2_ECC_BUF_CFG           0xf0
++#define       FLASH_BUF_ACC                   0x100
++
++#define       NAND_CTRL                       0xf00
++#define       NAND_VERSION                    0xf08
++#define       NAND_READ_LOCATION_0            0xf20
++#define       NAND_READ_LOCATION_1            0xf24
++#define       NAND_READ_LOCATION_2            0xf28
++#define       NAND_READ_LOCATION_3            0xf2c
++#define       NAND_READ_LOCATION_LAST_CW_0    0xf40
++#define       NAND_READ_LOCATION_LAST_CW_1    0xf44
++#define       NAND_READ_LOCATION_LAST_CW_2    0xf48
++#define       NAND_READ_LOCATION_LAST_CW_3    0xf4c
++
++/* dummy register offsets, used by qcom_write_reg_dma */
++#define       NAND_DEV_CMD1_RESTORE           0xdead
++#define       NAND_DEV_CMD_VLD_RESTORE        0xbeef
++
++/* NAND_FLASH_CMD bits */
++#define       PAGE_ACC                        BIT(4)
++#define       LAST_PAGE                       BIT(5)
++
++/* NAND_FLASH_CHIP_SELECT bits */
++#define       NAND_DEV_SEL                    0
++#define       DM_EN                           BIT(2)
++
++/* NAND_FLASH_STATUS bits */
++#define       FS_OP_ERR                       BIT(4)
++#define       FS_READY_BSY_N                  BIT(5)
++#define       FS_MPU_ERR                      BIT(8)
++#define       FS_DEVICE_STS_ERR               BIT(16)
++#define       FS_DEVICE_WP                    BIT(23)
++
++/* NAND_BUFFER_STATUS bits */
++#define       BS_UNCORRECTABLE_BIT            BIT(8)
++#define       BS_CORRECTABLE_ERR_MSK          0x1f
++
++/* NAND_DEVn_CFG0 bits */
++#define       DISABLE_STATUS_AFTER_WRITE      4
++#define       CW_PER_PAGE                     6
++#define       UD_SIZE_BYTES                   9
++#define       UD_SIZE_BYTES_MASK              GENMASK(18, 9)
++#define       ECC_PARITY_SIZE_BYTES_RS        19
++#define       SPARE_SIZE_BYTES                23
++#define       SPARE_SIZE_BYTES_MASK           GENMASK(26, 23)
++#define       NUM_ADDR_CYCLES                 27
++#define       STATUS_BFR_READ                 30
++#define       SET_RD_MODE_AFTER_STATUS        31
++
++/* NAND_DEVn_CFG0 bits */
++#define       DEV0_CFG1_ECC_DISABLE           0
++#define       WIDE_FLASH                      1
++#define       NAND_RECOVERY_CYCLES            2
++#define       CS_ACTIVE_BSY                   5
++#define       BAD_BLOCK_BYTE_NUM              6
++#define       BAD_BLOCK_IN_SPARE_AREA         16
++#define       WR_RD_BSY_GAP                   17
++#define       ENABLE_BCH_ECC                  27
++
++/* NAND_DEV0_ECC_CFG bits */
++#define       ECC_CFG_ECC_DISABLE             0
++#define       ECC_SW_RESET                    1
++#define       ECC_MODE                        4
++#define       ECC_PARITY_SIZE_BYTES_BCH       8
++#define       ECC_NUM_DATA_BYTES              16
++#define       ECC_NUM_DATA_BYTES_MASK         GENMASK(25, 16)
++#define       ECC_FORCE_CLK_OPEN              30
++
++/* NAND_DEV_CMD1 bits */
++#define       READ_ADDR                       0
++
++/* NAND_DEV_CMD_VLD bits */
++#define       READ_START_VLD                  BIT(0)
++#define       READ_STOP_VLD                   BIT(1)
++#define       WRITE_START_VLD                 BIT(2)
++#define       ERASE_START_VLD                 BIT(3)
++#define       SEQ_READ_START_VLD              BIT(4)
++
++/* NAND_EBI2_ECC_BUF_CFG bits */
++#define       NUM_STEPS                       0
++
++/* NAND_ERASED_CW_DETECT_CFG bits */
++#define       ERASED_CW_ECC_MASK              1
++#define       AUTO_DETECT_RES                 0
++#define       MASK_ECC                        BIT(ERASED_CW_ECC_MASK)
++#define       RESET_ERASED_DET                BIT(AUTO_DETECT_RES)
++#define       ACTIVE_ERASED_DET               (0 << AUTO_DETECT_RES)
++#define       CLR_ERASED_PAGE_DET             (RESET_ERASED_DET | MASK_ECC)
++#define       SET_ERASED_PAGE_DET             (ACTIVE_ERASED_DET | MASK_ECC)
++
++/* NAND_ERASED_CW_DETECT_STATUS bits */
++#define       PAGE_ALL_ERASED                 BIT(7)
++#define       CODEWORD_ALL_ERASED             BIT(6)
++#define       PAGE_ERASED                     BIT(5)
++#define       CODEWORD_ERASED                 BIT(4)
++#define       ERASED_PAGE                     (PAGE_ALL_ERASED | PAGE_ERASED)
++#define       ERASED_CW                       (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
++
++/* NAND_READ_LOCATION_n bits */
++#define READ_LOCATION_OFFSET          0
++#define READ_LOCATION_SIZE            16
++#define READ_LOCATION_LAST            31
++
++/* Version Mask */
++#define       NAND_VERSION_MAJOR_MASK         0xf0000000
++#define       NAND_VERSION_MAJOR_SHIFT        28
++#define       NAND_VERSION_MINOR_MASK         0x0fff0000
++#define       NAND_VERSION_MINOR_SHIFT        16
++
++/* NAND OP_CMDs */
++#define       OP_PAGE_READ                    0x2
++#define       OP_PAGE_READ_WITH_ECC           0x3
++#define       OP_PAGE_READ_WITH_ECC_SPARE     0x4
++#define       OP_PAGE_READ_ONFI_READ          0x5
++#define       OP_PROGRAM_PAGE                 0x6
++#define       OP_PAGE_PROGRAM_WITH_ECC        0x7
++#define       OP_PROGRAM_PAGE_SPARE           0x9
++#define       OP_BLOCK_ERASE                  0xa
++#define       OP_CHECK_STATUS                 0xc
++#define       OP_FETCH_ID                     0xb
++#define       OP_RESET_DEVICE                 0xd
++
++/* Default Value for NAND_DEV_CMD_VLD */
++#define NAND_DEV_CMD_VLD_VAL          (READ_START_VLD | WRITE_START_VLD | \
++                                       ERASE_START_VLD | SEQ_READ_START_VLD)
++
++/* NAND_CTRL bits */
++#define       BAM_MODE_EN                     BIT(0)
++
++/*
++ * the NAND controller performs reads/writes with ECC in 516 byte chunks.
++ * the driver calls the chunks 'step' or 'codeword' interchangeably
++ */
++#define       NANDC_STEP_SIZE                 512
++
++/*
++ * the largest page size we support is 8K, this will have 16 steps/codewords
++ * of 512 bytes each
++ */
++#define       MAX_NUM_STEPS                   (SZ_8K / NANDC_STEP_SIZE)
++
++/* we read at most 3 registers per codeword scan */
++#define       MAX_REG_RD                      (3 * MAX_NUM_STEPS)
++
++/* ECC modes supported by the controller */
++#define       ECC_NONE        BIT(0)
++#define       ECC_RS_4BIT     BIT(1)
++#define       ECC_BCH_4BIT    BIT(2)
++#define       ECC_BCH_8BIT    BIT(3)
++
++/*
++ * Returns the actual register address for all NAND_DEV_ registers
++ * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
++ */
++#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
++
++/* Returns the NAND register physical address */
++#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
++
++/* Returns the dma address for reg read buffer */
++#define reg_buf_dma_addr(chip, vaddr) \
++      ((chip)->reg_read_dma + \
++      ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
++
++#define QPIC_PER_CW_CMD_ELEMENTS      32
++#define QPIC_PER_CW_CMD_SGL           32
++#define QPIC_PER_CW_DATA_SGL          8
++
++#define QPIC_NAND_COMPLETION_TIMEOUT  msecs_to_jiffies(2000)
++
++/*
++ * Flags used in DMA descriptor preparation helper functions
++ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
++ */
++/* Don't set the EOT in current tx BAM sgl */
++#define NAND_BAM_NO_EOT                       BIT(0)
++/* Set the NWD flag in current BAM sgl */
++#define NAND_BAM_NWD                  BIT(1)
++/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
++#define NAND_BAM_NEXT_SGL             BIT(2)
++/*
++ * Erased codeword status is being used two times in single transfer so this
++ * flag will determine the current value of erased codeword status register
++ */
++#define NAND_ERASED_CW_SET            BIT(4)
++
++#define MAX_ADDRESS_CYCLE             5
++
++/*
++ * This data type corresponds to the BAM transaction which will be used for all
++ * NAND transfers.
++ * @bam_ce - the array of BAM command elements
++ * @cmd_sgl - sgl for NAND BAM command pipe
++ * @data_sgl - sgl for NAND BAM consumer/producer pipe
++ * @last_data_desc - last DMA desc in data channel (tx/rx).
++ * @last_cmd_desc - last DMA desc in command channel.
++ * @txn_done - completion for NAND transfer.
++ * @bam_ce_pos - the index in bam_ce which is available for next sgl
++ * @bam_ce_start - the index in bam_ce which marks the start position ce
++ *               for current sgl. It will be used for size calculation
++ *               for current sgl
++ * @cmd_sgl_pos - current index in command sgl.
++ * @cmd_sgl_start - start index in command sgl.
++ * @tx_sgl_pos - current index in data sgl for tx.
++ * @tx_sgl_start - start index in data sgl for tx.
++ * @rx_sgl_pos - current index in data sgl for rx.
++ * @rx_sgl_start - start index in data sgl for rx.
++ */
++struct bam_transaction {
++      struct bam_cmd_element *bam_ce;
++      struct scatterlist *cmd_sgl;
++      struct scatterlist *data_sgl;
++      struct dma_async_tx_descriptor *last_data_desc;
++      struct dma_async_tx_descriptor *last_cmd_desc;
++      struct completion txn_done;
++      u32 bam_ce_pos;
++      u32 bam_ce_start;
++      u32 cmd_sgl_pos;
++      u32 cmd_sgl_start;
++      u32 tx_sgl_pos;
++      u32 tx_sgl_start;
++      u32 rx_sgl_pos;
++      u32 rx_sgl_start;
++};
++
++/*
++ * This data type corresponds to the nand dma descriptor
++ * @dma_desc - low level DMA engine descriptor
++ * @list - list for desc_info
++ *
++ * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
++ *          ADM
++ * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
++ * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
++ * @dir - DMA transfer direction
++ */
++struct desc_info {
++      struct dma_async_tx_descriptor *dma_desc;
++      struct list_head node;
++
++      union {
++              struct scatterlist adm_sgl;
++              struct {
++                      struct scatterlist *bam_sgl;
++                      int sgl_cnt;
++              };
++      };
++      enum dma_data_direction dir;
++};
++
++/*
++ * holds the current register values that we want to write. acts as a contiguous
++ * chunk of memory which we use to write the controller registers through DMA.
++ */
++struct nandc_regs {
++      __le32 cmd;
++      __le32 addr0;
++      __le32 addr1;
++      __le32 chip_sel;
++      __le32 exec;
++
++      __le32 cfg0;
++      __le32 cfg1;
++      __le32 ecc_bch_cfg;
++
++      __le32 clrflashstatus;
++      __le32 clrreadstatus;
++
++      __le32 cmd1;
++      __le32 vld;
++
++      __le32 orig_cmd1;
++      __le32 orig_vld;
++
++      __le32 ecc_buf_cfg;
++      __le32 read_location0;
++      __le32 read_location1;
++      __le32 read_location2;
++      __le32 read_location3;
++      __le32 read_location_last0;
++      __le32 read_location_last1;
++      __le32 read_location_last2;
++      __le32 read_location_last3;
++
++      __le32 erased_cw_detect_cfg_clr;
++      __le32 erased_cw_detect_cfg_set;
++};
++
++/*
++ * NAND controller data struct
++ *
++ * @dev:                      parent device
++ *
++ * @base:                     MMIO base
++ *
++ * @core_clk:                 controller clock
++ * @aon_clk:                  another controller clock
++ *
++ * @regs:                     a contiguous chunk of memory for DMA register
++ *                            writes. contains the register values to be
++ *                            written to controller
++ *
++ * @props:                    properties of current NAND controller,
++ *                            initialized via DT match data
++ *
++ * @controller:                       base controller structure
++ * @host_list:                        list containing all the chips attached to the
++ *                            controller
++ *
++ * @chan:                     dma channel
++ * @cmd_crci:                 ADM DMA CRCI for command flow control
++ * @data_crci:                        ADM DMA CRCI for data flow control
++ *
++ * @desc_list:                        DMA descriptor list (list of desc_infos)
++ *
++ * @data_buffer:              our local DMA buffer for page read/writes,
++ *                            used when we can't use the buffer provided
++ *                            by upper layers directly
++ * @reg_read_buf:             local buffer for reading back registers via DMA
++ *
++ * @base_phys:                        physical base address of controller registers
++ * @base_dma:                 dma base address of controller registers
++ * @reg_read_dma:             contains dma address for register read buffer
++ *
++ * @buf_size/count/start:     markers for chip->legacy.read_buf/write_buf
++ *                            functions
++ * @max_cwperpage:            maximum QPIC codewords required. calculated
++ *                            from all connected NAND devices pagesize
++ *
++ * @reg_read_pos:             marker for data read in reg_read_buf
++ *
++ * @cmd1/vld:                 some fixed controller register values
++ *
++ * @exec_opwrite:             flag to select correct number of code word
++ *                            while reading status
++ */
++struct qcom_nand_controller {
++      struct device *dev;
++
++      void __iomem *base;
++
++      struct clk *core_clk;
++      struct clk *aon_clk;
++
++      struct nandc_regs *regs;
++      struct bam_transaction *bam_txn;
++
++      const struct qcom_nandc_props *props;
++
++      struct nand_controller *controller;
++      struct list_head host_list;
++
++      union {
++              /* will be used only by QPIC for BAM DMA */
++              struct {
++                      struct dma_chan *tx_chan;
++                      struct dma_chan *rx_chan;
++                      struct dma_chan *cmd_chan;
++              };
++
++              /* will be used only by EBI2 for ADM DMA */
++              struct {
++                      struct dma_chan *chan;
++                      unsigned int cmd_crci;
++                      unsigned int data_crci;
++              };
++      };
++
++      struct list_head desc_list;
++
++      u8              *data_buffer;
++      __le32          *reg_read_buf;
++
++      phys_addr_t base_phys;
++      dma_addr_t base_dma;
++      dma_addr_t reg_read_dma;
++
++      int             buf_size;
++      int             buf_count;
++      int             buf_start;
++      unsigned int    max_cwperpage;
++
++      int reg_read_pos;
++
++      u32 cmd1, vld;
++      bool exec_opwrite;
++};
++
++/*
++ * This data type corresponds to the NAND controller properties which varies
++ * among different NAND controllers.
++ * @ecc_modes - ecc mode for NAND
++ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
++ * @supports_bam - whether NAND controller is using BAM
++ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
++ * @qpic_version2 - flag to indicate QPIC IP version 2
++ * @use_codeword_fixup - whether NAND has different layout for boot partitions
++ */
++struct qcom_nandc_props {
++      u32 ecc_modes;
++      u32 dev_cmd_reg_start;
++      bool supports_bam;
++      bool nandc_part_of_qpic;
++      bool qpic_version2;
++      bool use_codeword_fixup;
++};
++
++void qcom_free_bam_transaction(struct qcom_nand_controller *nandc);
++struct bam_transaction *qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc);
++void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc);
++void qcom_qpic_bam_dma_done(void *data);
++void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu);
++int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
++                              struct dma_chan *chan, unsigned long flags);
++int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
++                             int reg_off, const void *vaddr, int size, unsigned int flags);
++int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
++                              const void *vaddr, int size, unsigned int flags);
++int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, int reg_off,
++                         const void *vaddr, int size, bool flow_control);
++int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first, int num_regs,
++                    unsigned int flags);
++int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, int first,
++                     int num_regs, unsigned int flags);
++int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
++                     int size, unsigned int flags);
++int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
++                      int size, unsigned int flags);
++int qcom_submit_descs(struct qcom_nand_controller *nandc);
++void qcom_clear_read_regs(struct qcom_nand_controller *nandc);
++void qcom_nandc_unalloc(struct qcom_nand_controller *nandc);
++int qcom_nandc_alloc(struct qcom_nand_controller *nandc);
++#endif
++
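For orientation before the remaining patches, the kernel-context sketch below shows one plausible way a client driver could sequence the shared helpers declared above: reset the per-operation state, queue a one-register DMA read, submit the descriptors, then sync the read buffer back to the CPU. It mirrors the read-status flow visible in the qcom_nandc hunks later in this commit; the function name is invented, the controller is assumed to have been set up with qcom_nandc_alloc() and qcom_alloc_bam_transaction(), and the NAND_FLASH_STATUS / NAND_BAM_NEXT_SGL constants are assumed to come from the same header. It is not code from any of the patches and only builds inside the kernel tree.

/*
 * Illustration only (kernel context): a hypothetical caller of the shared
 * QPIC helpers declared above. Error handling is kept to the minimum.
 */
#include <linux/mtd/nand-qpic-common.h>

static int example_read_flash_status(struct qcom_nand_controller *nandc,
				     u32 *status)
{
	int ret;

	/* reset the per-operation BAM bookkeeping and register read position */
	qcom_clear_bam_transaction(nandc);
	qcom_clear_read_regs(nandc);

	/* queue a DMA read of one controller register into reg_read_buf */
	ret = qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
	if (ret)
		return ret;

	/* issue every queued descriptor and wait for completion */
	ret = qcom_submit_descs(nandc);
	if (ret)
		return ret;

	/* hand the register read buffer back to the CPU before parsing it */
	qcom_nandc_dev_to_mem(nandc, true);
	*status = le32_to_cpu(nandc->reg_read_buf[0]);

	return 0;
}
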
diff --git a/target/linux/generic/backport-6.6/413-04-v6.14-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch b/target/linux/generic/backport-6.6/413-04-v6.14-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch
new file mode 100644 (file)
index 0000000..b375074
--- /dev/null
@@ -0,0 +1,198 @@
+From 0c08080fd71cd5dd59643104b39d3c89d793ab3c Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Wed, 20 Nov 2024 14:45:03 +0530
+Subject: [PATCH 4/4] mtd: rawnand: qcom: use FIELD_PREP and GENMASK
+
+Use the bitfield macros FIELD_PREP and GENMASK to
+do the shift and mask in one go. This makes the code
+more readable.
+
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+---
+ drivers/mtd/nand/raw/qcom_nandc.c    | 97 ++++++++++++++--------------
+ include/linux/mtd/nand-qpic-common.h | 31 +++++----
+ 2 files changed, 67 insertions(+), 61 deletions(-)
+
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -281,7 +281,7 @@ static void update_rw_regs(struct qcom_n
+                               (num_cw - 1) << CW_PER_PAGE);
+               cfg1 = cpu_to_le32(host->cfg1_raw);
+-              ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
++              ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
+       }
+       nandc->regs->cmd = cmd;
+@@ -1494,42 +1494,41 @@ static int qcom_nand_attach_chip(struct
+       host->cw_size = host->cw_data + ecc->bytes;
+       bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
+-      host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
+-                              | host->cw_data << UD_SIZE_BYTES
+-                              | 0 << DISABLE_STATUS_AFTER_WRITE
+-                              | 5 << NUM_ADDR_CYCLES
+-                              | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
+-                              | 0 << STATUS_BFR_READ
+-                              | 1 << SET_RD_MODE_AFTER_STATUS
+-                              | host->spare_bytes << SPARE_SIZE_BYTES;
+-
+-      host->cfg1 = 7 << NAND_RECOVERY_CYCLES
+-                              | 0 <<  CS_ACTIVE_BSY
+-                              | bad_block_byte << BAD_BLOCK_BYTE_NUM
+-                              | 0 << BAD_BLOCK_IN_SPARE_AREA
+-                              | 2 << WR_RD_BSY_GAP
+-                              | wide_bus << WIDE_FLASH
+-                              | host->bch_enabled << ENABLE_BCH_ECC;
+-
+-      host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
+-                              | host->cw_size << UD_SIZE_BYTES
+-                              | 5 << NUM_ADDR_CYCLES
+-                              | 0 << SPARE_SIZE_BYTES;
+-
+-      host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
+-                              | 0 << CS_ACTIVE_BSY
+-                              | 17 << BAD_BLOCK_BYTE_NUM
+-                              | 1 << BAD_BLOCK_IN_SPARE_AREA
+-                              | 2 << WR_RD_BSY_GAP
+-                              | wide_bus << WIDE_FLASH
+-                              | 1 << DEV0_CFG1_ECC_DISABLE;
+-
+-      host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
+-                              | 0 << ECC_SW_RESET
+-                              | host->cw_data << ECC_NUM_DATA_BYTES
+-                              | 1 << ECC_FORCE_CLK_OPEN
+-                              | ecc_mode << ECC_MODE
+-                              | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
++      host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
++                   FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data) |
++                   FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 0) |
++                   FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
++                   FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, host->ecc_bytes_hw) |
++                   FIELD_PREP(STATUS_BFR_READ, 0) |
++                   FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
++                   FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes);
++
++      host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
++                   FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
++                   FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
++                   FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
++                   FIELD_PREP(WIDE_FLASH, wide_bus) |
++                   FIELD_PREP(ENABLE_BCH_ECC, host->bch_enabled);
++
++      host->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
++                       FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_size) |
++                       FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
++                       FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
++
++      host->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
++                       FIELD_PREP(CS_ACTIVE_BSY, 0) |
++                       FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
++                       FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
++                       FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
++                       FIELD_PREP(WIDE_FLASH, wide_bus) |
++                       FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
++
++      host->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !host->bch_enabled) |
++                          FIELD_PREP(ECC_SW_RESET, 0) |
++                          FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data) |
++                          FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
++                          FIELD_PREP(ECC_MODE_MASK, ecc_mode) |
++                          FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw);
+       if (!nandc->props->qpic_version2)
+               host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+@@ -1882,21 +1881,21 @@ static int qcom_param_page_type_exec(str
+       nandc->regs->addr0 = 0;
+       nandc->regs->addr1 = 0;
+-      nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE |
+-                                      512 << UD_SIZE_BYTES |
+-                                      5 << NUM_ADDR_CYCLES |
+-                                      0 << SPARE_SIZE_BYTES);
+-
+-      nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES |
+-                                      0 << CS_ACTIVE_BSY |
+-                                      17 << BAD_BLOCK_BYTE_NUM |
+-                                      1 << BAD_BLOCK_IN_SPARE_AREA |
+-                                      2 << WR_RD_BSY_GAP |
+-                                      0 << WIDE_FLASH |
+-                                      1 << DEV0_CFG1_ECC_DISABLE);
++      host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
++                   FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
++                   FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
++                   FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
++
++      host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
++                   FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
++                   FIELD_PREP(CS_ACTIVE_BSY, 0) |
++                   FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
++                   FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
++                   FIELD_PREP(WIDE_FLASH, 0) |
++                   FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+       if (!nandc->props->qpic_version2)
+-              nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
++              nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
+       /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
+       if (!nandc->props->qpic_version2) {
+--- a/include/linux/mtd/nand-qpic-common.h
++++ b/include/linux/mtd/nand-qpic-common.h
+@@ -70,35 +70,42 @@
+ #define       BS_CORRECTABLE_ERR_MSK          0x1f
+ /* NAND_DEVn_CFG0 bits */
+-#define       DISABLE_STATUS_AFTER_WRITE      4
++#define       DISABLE_STATUS_AFTER_WRITE      BIT(4)
+ #define       CW_PER_PAGE                     6
++#define       CW_PER_PAGE_MASK                GENMASK(8, 6)
+ #define       UD_SIZE_BYTES                   9
+ #define       UD_SIZE_BYTES_MASK              GENMASK(18, 9)
+-#define       ECC_PARITY_SIZE_BYTES_RS        19
++#define       ECC_PARITY_SIZE_BYTES_RS        GENMASK(22, 19)
+ #define       SPARE_SIZE_BYTES                23
+ #define       SPARE_SIZE_BYTES_MASK           GENMASK(26, 23)
+ #define       NUM_ADDR_CYCLES                 27
+-#define       STATUS_BFR_READ                 30
+-#define       SET_RD_MODE_AFTER_STATUS        31
++#define       NUM_ADDR_CYCLES_MASK            GENMASK(29, 27)
++#define       STATUS_BFR_READ                 BIT(30)
++#define       SET_RD_MODE_AFTER_STATUS        BIT(31)
+ /* NAND_DEVn_CFG0 bits */
+-#define       DEV0_CFG1_ECC_DISABLE           0
+-#define       WIDE_FLASH                      1
++#define       DEV0_CFG1_ECC_DISABLE           BIT(0)
++#define       WIDE_FLASH                      BIT(1)
+ #define       NAND_RECOVERY_CYCLES            2
+-#define       CS_ACTIVE_BSY                   5
++#define       NAND_RECOVERY_CYCLES_MASK       GENMASK(4, 2)
++#define       CS_ACTIVE_BSY                   BIT(5)
+ #define       BAD_BLOCK_BYTE_NUM              6
+-#define       BAD_BLOCK_IN_SPARE_AREA         16
++#define       BAD_BLOCK_BYTE_NUM_MASK         GENMASK(15, 6)
++#define       BAD_BLOCK_IN_SPARE_AREA         BIT(16)
+ #define       WR_RD_BSY_GAP                   17
+-#define       ENABLE_BCH_ECC                  27
++#define       WR_RD_BSY_GAP_MASK              GENMASK(22, 17)
++#define       ENABLE_BCH_ECC                  BIT(27)
+ /* NAND_DEV0_ECC_CFG bits */
+-#define       ECC_CFG_ECC_DISABLE             0
+-#define       ECC_SW_RESET                    1
++#define       ECC_CFG_ECC_DISABLE             BIT(0)
++#define       ECC_SW_RESET                    BIT(1)
+ #define       ECC_MODE                        4
++#define       ECC_MODE_MASK                   GENMASK(5, 4)
+ #define       ECC_PARITY_SIZE_BYTES_BCH       8
++#define       ECC_PARITY_SIZE_BYTES_BCH_MASK  GENMASK(12, 8)
+ #define       ECC_NUM_DATA_BYTES              16
+ #define       ECC_NUM_DATA_BYTES_MASK         GENMASK(25, 16)
+-#define       ECC_FORCE_CLK_OPEN              30
++#define       ECC_FORCE_CLK_OPEN              BIT(30)
+ /* NAND_DEV_CMD1 bits */
+ #define       READ_ADDR                       0
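The conversion above folds open-coded shift-and-mask expressions into FIELD_PREP()/GENMASK() calls, where the mask alone encodes both the field position and its width. The standalone sketch below illustrates the idiom with simplified userspace stand-ins for the kernel macros (the real GENMASK() and FIELD_PREP() live in <linux/bits.h> and <linux/bitfield.h> and add stricter type and constant checking); it only demonstrates that the old and new forms produce the same CFG0 field value.

/*
 * Illustration only: simplified stand-ins for the kernel's GENMASK() and
 * FIELD_PREP(). CW_PER_PAGE_MASK matches the definition added above
 * (bits 8:6 of NAND_DEVn_CFG0).
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)         ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val) (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define CW_PER_PAGE_MASK      GENMASK(8, 6)

int main(void)
{
	unsigned int cwperpage = 4;

	/* old open-coded form: the caller must know the field position (6) */
	uint32_t old_style = (cwperpage - 1) << 6;

	/* new form: the mask carries position and width, FIELD_PREP shifts */
	uint32_t new_style = FIELD_PREP(CW_PER_PAGE_MASK, cwperpage - 1);

	printf("old=0x%08x new=0x%08x\n", old_style, new_style);
	return 0;
}
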
diff --git a/target/linux/generic/backport-6.6/414-v6.14-mtd-rawnand-qcom-fix-broken-config-in-qcom_param_pag.patch b/target/linux/generic/backport-6.6/414-v6.14-mtd-rawnand-qcom-fix-broken-config-in-qcom_param_pag.patch
new file mode 100644 (file)
index 0000000..a6a4db2
--- /dev/null
@@ -0,0 +1,64 @@
+From 9d4ffbcfde283f2a87ea45128ddf7e6651facdd9 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <ansuelsmth@gmail.com>
+Date: Fri, 7 Feb 2025 20:42:38 +0100
+Subject: [PATCH] mtd: rawnand: qcom: fix broken config in
+ qcom_param_page_type_exec
+
+Fix a broken config in qcom_param_page_type_exec caused by a copy-paste
+error in commit 0c08080fd71c ("mtd: rawnand: qcom: use FIELD_PREP and
+GENMASK").
+
+In qcom_param_page_type_exec the values need to be written to
+nandc->regs->cfg0 and nandc->regs->cfg1 instead of host->cfg0 and
+host->cfg1. This wrong configuration caused the Qcom NANDC driver to
+malfunction on any device that uses it (IPQ806x, IPQ40xx, IPQ807x,
+IPQ60xx) with the following error:
+
+[    0.885369] nand: device found, Manufacturer ID: 0x2c, Chip ID: 0xaa
+[    0.885909] nand: Micron NAND 256MiB 1,8V 8-bit
+[    0.892499] nand: 256 MiB, SLC, erase size: 128 KiB, page size: 2048, OOB size: 64
+[    0.896823] nand: ECC (step, strength) = (512, 8) does not fit in OOB
+[    0.896836] qcom-nandc 79b0000.nand-controller: No valid ECC settings possible
+[    0.910996] bam-dma-engine 7984000.dma-controller: Cannot free busy channel
+[    0.918070] qcom-nandc: probe of 79b0000.nand-controller failed with error -28
+
+Restoring the original configuration fixes the problem and makes the
+driver work again.
+
+Cc: stable@vger.kernel.org
+Fixes: 0c08080fd71c ("mtd: rawnand: qcom: use FIELD_PREP and GENMASK")
+Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
+---
+ drivers/mtd/nand/raw/qcom_nandc.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -1881,18 +1881,18 @@ static int qcom_param_page_type_exec(str
+       nandc->regs->addr0 = 0;
+       nandc->regs->addr1 = 0;
+-      host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
+-                   FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
+-                   FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+-                   FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
++      nandc->regs->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
++                          FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
++                          FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
++                          FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
+-      host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+-                   FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+-                   FIELD_PREP(CS_ACTIVE_BSY, 0) |
+-                   FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+-                   FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+-                   FIELD_PREP(WIDE_FLASH, 0) |
+-                   FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
++      nandc->regs->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
++                          FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
++                          FIELD_PREP(CS_ACTIVE_BSY, 0) |
++                          FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
++                          FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
++                          FIELD_PREP(WIDE_FLASH, 0) |
++                          FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+       if (!nandc->props->qpic_version2)
+               nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
diff --git a/target/linux/generic/backport-6.6/415-v6.14-mtd-rawnand-qcom-Fix-build-issue-on-x86-architecture.patch b/target/linux/generic/backport-6.6/415-v6.14-mtd-rawnand-qcom-Fix-build-issue-on-x86-architecture.patch
new file mode 100644 (file)
index 0000000..67beed3
--- /dev/null
@@ -0,0 +1,77 @@
+From b9371866799d67a80be0ea9e01bd41987db22f26 Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Mon, 6 Jan 2025 18:45:58 +0530
+Subject: [PATCH] mtd: rawnand: qcom: Fix build issue on x86 architecture
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fix a buffer overflow issue in qcom_clear_bam_transaction by using
+struct_group to group related fields and avoid FORTIFY_SOURCE warnings.
+
+On x86 architecture, the following error occurs due to warnings being
+treated as errors:
+
+In function ‘fortify_memset_chk’,
+    inlined from ‘qcom_clear_bam_transaction’ at
+drivers/mtd/nand/qpic_common.c:88:2:
+./include/linux/fortify-string.h:480:25: error: call to ‘__write_overflow_field’
+declared with attribute warning: detected write beyond size of field
+(1st parameter); maybe use struct_group()? [-Werror=attribute-warning]
+  480 |                         __write_overflow_field(p_size_field, size);
+      |                         ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  LD [M]  drivers/mtd/nand/nandcore.o
+  CC [M]  drivers/w1/masters/mxc_w1.o
+cc1: all warnings being treated as errors
+
+This patch addresses the issue by grouping the related fields in
+struct bam_transaction using struct_group and updating the memset call
+accordingly.
+
+Fixes: 8c52932da5e6 ("mtd: rawnand: qcom: cleanup qcom_nandc driver")
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+---
+ drivers/mtd/nand/qpic_common.c       |  2 +-
+ include/linux/mtd/nand-qpic-common.h | 19 +++++++++++--------
+ 2 files changed, 12 insertions(+), 9 deletions(-)
+
+--- a/drivers/mtd/nand/qpic_common.c
++++ b/drivers/mtd/nand/qpic_common.c
+@@ -85,7 +85,7 @@ void qcom_clear_bam_transaction(struct q
+       if (!nandc->props->supports_bam)
+               return;
+-      memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
++      memset(&bam_txn->bam_positions, 0, sizeof(bam_txn->bam_positions));
+       bam_txn->last_data_desc = NULL;
+       sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
+--- a/include/linux/mtd/nand-qpic-common.h
++++ b/include/linux/mtd/nand-qpic-common.h
+@@ -254,14 +254,17 @@ struct bam_transaction {
+       struct dma_async_tx_descriptor *last_data_desc;
+       struct dma_async_tx_descriptor *last_cmd_desc;
+       struct completion txn_done;
+-      u32 bam_ce_pos;
+-      u32 bam_ce_start;
+-      u32 cmd_sgl_pos;
+-      u32 cmd_sgl_start;
+-      u32 tx_sgl_pos;
+-      u32 tx_sgl_start;
+-      u32 rx_sgl_pos;
+-      u32 rx_sgl_start;
++      struct_group(bam_positions,
++              u32 bam_ce_pos;
++              u32 bam_ce_start;
++              u32 cmd_sgl_pos;
++              u32 cmd_sgl_start;
++              u32 tx_sgl_pos;
++              u32 tx_sgl_start;
++              u32 rx_sgl_pos;
++              u32 rx_sgl_start;
++
++      );
+ };
+ /*
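The struct_group() fix above can be illustrated with a small standalone sketch. The macro defined here is a simplified userspace stand-in for the kernel's struct_group() from <linux/stddef.h>, and the structure and field names are invented for the example: the members stay addressable individually, but they can also be cleared as one named group whose size the compiler (and FORTIFY_SOURCE) can see, instead of a bare "sizeof(u32) * 8" write beyond the first field.

/*
 * Illustration only: a minimal stand-in for the kernel's struct_group().
 * It wraps the members in an anonymous union so they exist both as plain
 * fields and as a single named, memset-able group.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define struct_group(NAME, ...)              \
	union {                              \
		struct { __VA_ARGS__ };      \
		struct { __VA_ARGS__ } NAME; \
	}

struct example_txn {
	void *last_desc;            /* not part of the cleared group */
	struct_group(positions,     /* reset together between operations */
		uint32_t cmd_pos;
		uint32_t cmd_start;
		uint32_t data_pos;
		uint32_t data_start;
	);
};

int main(void)
{
	struct example_txn txn;

	memset(&txn, 0, sizeof(txn));
	txn.cmd_pos = 7;
	txn.data_start = 9;

	/* one bounded memset over the whole group, as in the patched
	 * qcom_clear_bam_transaction() */
	memset(&txn.positions, 0, sizeof(txn.positions));

	printf("cmd_pos=%u data_start=%u\n",
	       (unsigned int)txn.cmd_pos, (unsigned int)txn.data_start);
	return 0;
}
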
index 1e69ee644600ffd6da613551d1786ff78da5a498..ecee160faaa188f0934673ed6a7506b3c983f775 100644 (file)
@@ -28,6 +28,6 @@ Subject: [PATCH] mtd/nand: add MediaTek NAND bad block managment table
  obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
  obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
 +obj-$(CONFIG_MTD_NAND_MTK_BMT)        += mtk_bmt.o mtk_bmt_v2.o mtk_bmt_bbt.o mtk_bmt_nmbm.o
+ obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
  obj-y += onenand/
  obj-y += raw/
diff --git a/target/linux/qualcommax/patches-6.6/0402-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch b/target/linux/qualcommax/patches-6.6/0402-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch
deleted file mode 100644 (file)
index 91dceaa..0000000
+++ /dev/null
@@ -1,983 +0,0 @@
-From: Md Sadre Alam <quic_mdalam@quicinc.com>
-Date: Sun, 22 Sep 2024 17:03:45 +0530
-Subject: [PATCH] mtd: rawnand: qcom: cleanup qcom_nandc driver
-
-cleanup qcom_nandc driver as below
-
-- Remove register value indirection api
-
-- Remove set_reg() api
-
-- Convert read_loc_first & read_loc_last macro to function
-
-- Renamed multiple variables
-
-Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
----
---- a/drivers/mtd/nand/raw/qcom_nandc.c
-+++ b/drivers/mtd/nand/raw/qcom_nandc.c
-@@ -189,17 +189,6 @@
- #define       ECC_BCH_4BIT    BIT(2)
- #define       ECC_BCH_8BIT    BIT(3)
--#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc)   \
--nandc_set_reg(chip, reg,                      \
--            ((cw_offset) << READ_LOCATION_OFFSET) |           \
--            ((read_size) << READ_LOCATION_SIZE) |                     \
--            ((is_last_read_loc) << READ_LOCATION_LAST))
--
--#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc)    \
--nandc_set_reg(chip, reg,                      \
--            ((cw_offset) << READ_LOCATION_OFFSET) |           \
--            ((read_size) << READ_LOCATION_SIZE) |                     \
--            ((is_last_read_loc) << READ_LOCATION_LAST))
- /*
-  * Returns the actual register address for all NAND_DEV_ registers
-  * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
-@@ -257,8 +246,6 @@ nandc_set_reg(chip, reg,                   \
-  * @tx_sgl_start - start index in data sgl for tx.
-  * @rx_sgl_pos - current index in data sgl for rx.
-  * @rx_sgl_start - start index in data sgl for rx.
-- * @wait_second_completion - wait for second DMA desc completion before making
-- *                         the NAND transfer completion.
-  */
- struct bam_transaction {
-       struct bam_cmd_element *bam_ce;
-@@ -275,7 +262,6 @@ struct bam_transaction {
-       u32 tx_sgl_start;
-       u32 rx_sgl_pos;
-       u32 rx_sgl_start;
--      bool wait_second_completion;
- };
- /*
-@@ -471,9 +457,9 @@ struct qcom_op {
-       unsigned int data_instr_idx;
-       unsigned int rdy_timeout_ms;
-       unsigned int rdy_delay_ns;
--      u32 addr1_reg;
--      u32 addr2_reg;
--      u32 cmd_reg;
-+      __le32 addr1_reg;
-+      __le32 addr2_reg;
-+      __le32 cmd_reg;
-       u8 flag;
- };
-@@ -549,17 +535,17 @@ struct qcom_nand_host {
-  * among different NAND controllers.
-  * @ecc_modes - ecc mode for NAND
-  * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
-- * @is_bam - whether NAND controller is using BAM
-- * @is_qpic - whether NAND CTRL is part of qpic IP
-- * @qpic_v2 - flag to indicate QPIC IP version 2
-+ * @supports_bam - whether NAND controller is using BAM
-+ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
-+ * @qpic_version2 - flag to indicate QPIC IP version 2
-  * @use_codeword_fixup - whether NAND has different layout for boot partitions
-  */
- struct qcom_nandc_props {
-       u32 ecc_modes;
-       u32 dev_cmd_reg_start;
--      bool is_bam;
--      bool is_qpic;
--      bool qpic_v2;
-+      bool supports_bam;
-+      bool nandc_part_of_qpic;
-+      bool qpic_version2;
-       bool use_codeword_fixup;
- };
-@@ -613,19 +599,11 @@ static void clear_bam_transaction(struct
- {
-       struct bam_transaction *bam_txn = nandc->bam_txn;
--      if (!nandc->props->is_bam)
-+      if (!nandc->props->supports_bam)
-               return;
--      bam_txn->bam_ce_pos = 0;
--      bam_txn->bam_ce_start = 0;
--      bam_txn->cmd_sgl_pos = 0;
--      bam_txn->cmd_sgl_start = 0;
--      bam_txn->tx_sgl_pos = 0;
--      bam_txn->tx_sgl_start = 0;
--      bam_txn->rx_sgl_pos = 0;
--      bam_txn->rx_sgl_start = 0;
-+      memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
-       bam_txn->last_data_desc = NULL;
--      bam_txn->wait_second_completion = false;
-       sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
-                     QPIC_PER_CW_CMD_SGL);
-@@ -640,17 +618,7 @@ static void qpic_bam_dma_done(void *data
- {
-       struct bam_transaction *bam_txn = data;
--      /*
--       * In case of data transfer with NAND, 2 callbacks will be generated.
--       * One for command channel and another one for data channel.
--       * If current transaction has data descriptors
--       * (i.e. wait_second_completion is true), then set this to false
--       * and wait for second DMA descriptor completion.
--       */
--      if (bam_txn->wait_second_completion)
--              bam_txn->wait_second_completion = false;
--      else
--              complete(&bam_txn->txn_done);
-+      complete(&bam_txn->txn_done);
- }
- static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
-@@ -676,10 +644,9 @@ static inline void nandc_write(struct qc
-       iowrite32(val, nandc->base + offset);
- }
--static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
--                                        bool is_cpu)
-+static inline void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
- {
--      if (!nandc->props->is_bam)
-+      if (!nandc->props->supports_bam)
-               return;
-       if (is_cpu)
-@@ -694,93 +661,90 @@ static inline void nandc_read_buffer_syn
-                                          DMA_FROM_DEVICE);
- }
--static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
--{
--      switch (offset) {
--      case NAND_FLASH_CMD:
--              return &regs->cmd;
--      case NAND_ADDR0:
--              return &regs->addr0;
--      case NAND_ADDR1:
--              return &regs->addr1;
--      case NAND_FLASH_CHIP_SELECT:
--              return &regs->chip_sel;
--      case NAND_EXEC_CMD:
--              return &regs->exec;
--      case NAND_FLASH_STATUS:
--              return &regs->clrflashstatus;
--      case NAND_DEV0_CFG0:
--              return &regs->cfg0;
--      case NAND_DEV0_CFG1:
--              return &regs->cfg1;
--      case NAND_DEV0_ECC_CFG:
--              return &regs->ecc_bch_cfg;
--      case NAND_READ_STATUS:
--              return &regs->clrreadstatus;
--      case NAND_DEV_CMD1:
--              return &regs->cmd1;
--      case NAND_DEV_CMD1_RESTORE:
--              return &regs->orig_cmd1;
--      case NAND_DEV_CMD_VLD:
--              return &regs->vld;
--      case NAND_DEV_CMD_VLD_RESTORE:
--              return &regs->orig_vld;
--      case NAND_EBI2_ECC_BUF_CFG:
--              return &regs->ecc_buf_cfg;
--      case NAND_READ_LOCATION_0:
--              return &regs->read_location0;
--      case NAND_READ_LOCATION_1:
--              return &regs->read_location1;
--      case NAND_READ_LOCATION_2:
--              return &regs->read_location2;
--      case NAND_READ_LOCATION_3:
--              return &regs->read_location3;
--      case NAND_READ_LOCATION_LAST_CW_0:
--              return &regs->read_location_last0;
--      case NAND_READ_LOCATION_LAST_CW_1:
--              return &regs->read_location_last1;
--      case NAND_READ_LOCATION_LAST_CW_2:
--              return &regs->read_location_last2;
--      case NAND_READ_LOCATION_LAST_CW_3:
--              return &regs->read_location_last3;
--      default:
--              return NULL;
--      }
--}
--
--static void nandc_set_reg(struct nand_chip *chip, int offset,
--                        u32 val)
--{
--      struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
--      struct nandc_regs *regs = nandc->regs;
--      __le32 *reg;
--
--      reg = offset_to_nandc_reg(regs, offset);
--
--      if (reg)
--              *reg = cpu_to_le32(val);
--}
--
- /* Helper to check the code word, whether it is last cw or not */
- static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
- {
-       return cw == (ecc->steps - 1);
- }
-+/**
-+ * nandc_set_read_loc_first() - to set read location first register
-+ * @chip:             NAND Private Flash Chip Data
-+ * @reg_base:         location register base
-+ * @cw_offset:                code word offset
-+ * @read_size:                code word read length
-+ * @is_last_read_loc: is this the last read location
-+ *
-+ * This function will set location register value
-+ */
-+static void nandc_set_read_loc_first(struct nand_chip *chip,
-+                                   int reg_base, u32 cw_offset,
-+                                   u32 read_size, u32 is_last_read_loc)
-+{
-+      struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-+      __le32 locreg_val;
-+      u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
-+                ((read_size) << READ_LOCATION_SIZE) |
-+                ((is_last_read_loc) << READ_LOCATION_LAST));
-+
-+      locreg_val = cpu_to_le32(val);
-+
-+      if (reg_base == NAND_READ_LOCATION_0)
-+              nandc->regs->read_location0 = locreg_val;
-+      else if (reg_base == NAND_READ_LOCATION_1)
-+              nandc->regs->read_location1 = locreg_val;
-+      else if (reg_base == NAND_READ_LOCATION_2)
-+              nandc->regs->read_location2 = locreg_val;
-+      else if (reg_base == NAND_READ_LOCATION_3)
-+              nandc->regs->read_location3 = locreg_val;
-+}
-+
-+/**
-+ * nandc_set_read_loc_last - to set read location last register
-+ * @chip:             NAND Private Flash Chip Data
-+ * @reg_base:         location register base
-+ * @cw_offset:                code word offset
-+ * @read_size:                code word read length
-+ * @is_last_read_loc: is this the last read location
-+ *
-+ * This function will set location last register value
-+ */
-+static void nandc_set_read_loc_last(struct nand_chip *chip,
-+                                  int reg_base, u32 cw_offset,
-+                                  u32 read_size, u32 is_last_read_loc)
-+{
-+      struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-+      __le32 locreg_val;
-+      u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
-+                ((read_size) << READ_LOCATION_SIZE) |
-+                ((is_last_read_loc) << READ_LOCATION_LAST));
-+
-+      locreg_val = cpu_to_le32(val);
-+
-+      if (reg_base == NAND_READ_LOCATION_LAST_CW_0)
-+              nandc->regs->read_location_last0 = locreg_val;
-+      else if (reg_base == NAND_READ_LOCATION_LAST_CW_1)
-+              nandc->regs->read_location_last1 = locreg_val;
-+      else if (reg_base == NAND_READ_LOCATION_LAST_CW_2)
-+              nandc->regs->read_location_last2 = locreg_val;
-+      else if (reg_base == NAND_READ_LOCATION_LAST_CW_3)
-+              nandc->regs->read_location_last3 = locreg_val;
-+}
-+
- /* helper to configure location register values */
- static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
--                             int cw_offset, int read_size, int is_last_read_loc)
-+                             u32 cw_offset, u32 read_size, u32 is_last_read_loc)
- {
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-       struct nand_ecc_ctrl *ecc = &chip->ecc;
-       int reg_base = NAND_READ_LOCATION_0;
--      if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
-+      if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
-               reg_base = NAND_READ_LOCATION_LAST_CW_0;
-       reg_base += reg * 4;
--      if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
-+      if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
-               return nandc_set_read_loc_last(chip, reg_base, cw_offset,
-                               read_size, is_last_read_loc);
-       else
-@@ -792,12 +756,13 @@ static void nandc_set_read_loc(struct na
- static void set_address(struct qcom_nand_host *host, u16 column, int page)
- {
-       struct nand_chip *chip = &host->chip;
-+      struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-       if (chip->options & NAND_BUSWIDTH_16)
-               column >>= 1;
--      nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
--      nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
-+      nandc->regs->addr0 = cpu_to_le32(page << 16 | column);
-+      nandc->regs->addr1 = cpu_to_le32(page >> 16 & 0xff);
- }
- /*
-@@ -811,41 +776,43 @@ static void set_address(struct qcom_nand
- static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
- {
-       struct nand_chip *chip = &host->chip;
--      u32 cmd, cfg0, cfg1, ecc_bch_cfg;
-+      __le32 cmd, cfg0, cfg1, ecc_bch_cfg;
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-       if (read) {
-               if (host->use_ecc)
--                      cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
-+                      cmd = cpu_to_le32(OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE);
-               else
--                      cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
-+                      cmd = cpu_to_le32(OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
-       } else {
--              cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
-+              cmd = cpu_to_le32(OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE);
-       }
-       if (host->use_ecc) {
--              cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
--                              (num_cw - 1) << CW_PER_PAGE;
-+              cfg0 = cpu_to_le32((host->cfg0 & ~(7U << CW_PER_PAGE)) |
-+                              (num_cw - 1) << CW_PER_PAGE);
--              cfg1 = host->cfg1;
--              ecc_bch_cfg = host->ecc_bch_cfg;
-+              cfg1 = cpu_to_le32(host->cfg1);
-+              ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg);
-       } else {
--              cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
--                              (num_cw - 1) << CW_PER_PAGE;
-+              cfg0 = cpu_to_le32((host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
-+                              (num_cw - 1) << CW_PER_PAGE);
--              cfg1 = host->cfg1_raw;
--              ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
-+              cfg1 = cpu_to_le32(host->cfg1_raw);
-+              ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
-       }
--      nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
--      nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
--      nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
--      nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
--      if (!nandc->props->qpic_v2)
--              nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
--      nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
--      nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
--      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-+      nandc->regs->cmd = cmd;
-+      nandc->regs->cfg0 = cfg0;
-+      nandc->regs->cfg1 = cfg1;
-+      nandc->regs->ecc_bch_cfg = ecc_bch_cfg;
-+
-+      if (!nandc->props->qpic_version2)
-+              nandc->regs->ecc_buf_cfg = cpu_to_le32(host->ecc_buf_cfg);
-+
-+      nandc->regs->clrflashstatus = cpu_to_le32(host->clrflashstatus);
-+      nandc->regs->clrreadstatus = cpu_to_le32(host->clrreadstatus);
-+      nandc->regs->exec = cpu_to_le32(1);
-       if (read)
-               nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
-@@ -1121,7 +1088,7 @@ static int read_reg_dma(struct qcom_nand
-       if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
-               first = dev_cmd_reg_addr(nandc, first);
--      if (nandc->props->is_bam)
-+      if (nandc->props->supports_bam)
-               return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
-                                            num_regs, flags);
-@@ -1136,25 +1103,16 @@ static int read_reg_dma(struct qcom_nand
-  * write_reg_dma:     prepares a descriptor to write a given number of
-  *                    contiguous registers
-  *
-+ * @vaddr:            contnigeous memory from where register value will
-+ *                    be written
-  * @first:            offset of the first register in the contiguous block
-  * @num_regs:         number of registers to write
-  * @flags:            flags to control DMA descriptor preparation
-  */
--static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
--                       int num_regs, unsigned int flags)
-+static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
-+                       int first, int num_regs, unsigned int flags)
- {
-       bool flow_control = false;
--      struct nandc_regs *regs = nandc->regs;
--      void *vaddr;
--
--      vaddr = offset_to_nandc_reg(regs, first);
--
--      if (first == NAND_ERASED_CW_DETECT_CFG) {
--              if (flags & NAND_ERASED_CW_SET)
--                      vaddr = &regs->erased_cw_detect_cfg_set;
--              else
--                      vaddr = &regs->erased_cw_detect_cfg_clr;
--      }
-       if (first == NAND_EXEC_CMD)
-               flags |= NAND_BAM_NWD;
-@@ -1165,7 +1123,7 @@ static int write_reg_dma(struct qcom_nan
-       if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
-               first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
--      if (nandc->props->is_bam)
-+      if (nandc->props->supports_bam)
-               return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
-                                            num_regs, flags);
-@@ -1188,7 +1146,7 @@ static int write_reg_dma(struct qcom_nan
- static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-                        const u8 *vaddr, int size, unsigned int flags)
- {
--      if (nandc->props->is_bam)
-+      if (nandc->props->supports_bam)
-               return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
-       return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
-@@ -1206,7 +1164,7 @@ static int read_data_dma(struct qcom_nan
- static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-                         const u8 *vaddr, int size, unsigned int flags)
- {
--      if (nandc->props->is_bam)
-+      if (nandc->props->supports_bam)
-               return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
-       return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
-@@ -1220,13 +1178,14 @@ static void config_nand_page_read(struct
- {
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
--      write_reg_dma(nandc, NAND_ADDR0, 2, 0);
--      write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
--      if (!nandc->props->qpic_v2)
--              write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
--      write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
--      write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
--                    NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-+      write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+      if (!nandc->props->qpic_version2)
-+              write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
-+      write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
-+                    NAND_ERASED_CW_DETECT_CFG, 1, 0);
-+      write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
-+                    NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
- }
- /*
-@@ -1239,16 +1198,16 @@ config_nand_cw_read(struct nand_chip *ch
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-       struct nand_ecc_ctrl *ecc = &chip->ecc;
--      int reg = NAND_READ_LOCATION_0;
-+      __le32 *reg = &nandc->regs->read_location0;
--      if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
--              reg = NAND_READ_LOCATION_LAST_CW_0;
-+      if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
-+              reg = &nandc->regs->read_location_last0;
--      if (nandc->props->is_bam)
--              write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);
-+      if (nandc->props->supports_bam)
-+              write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-       if (use_ecc) {
-               read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
-@@ -1279,10 +1238,10 @@ static void config_nand_page_write(struc
- {
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
--      write_reg_dma(nandc, NAND_ADDR0, 2, 0);
--      write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
--      if (!nandc->props->qpic_v2)
--              write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
-+      write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-+      write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+      if (!nandc->props->qpic_version2)
-+              write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
-                             NAND_BAM_NEXT_SGL);
- }
-@@ -1294,13 +1253,13 @@ static void config_nand_cw_write(struct
- {
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
--      write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-       read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
--      write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
-+      write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
- }
- /* helpers to submit/free our list of dma descriptors */
-@@ -1311,7 +1270,7 @@ static int submit_descs(struct qcom_nand
-       struct bam_transaction *bam_txn = nandc->bam_txn;
-       int ret = 0;
--      if (nandc->props->is_bam) {
-+      if (nandc->props->supports_bam) {
-               if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
-                       ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
-                       if (ret)
-@@ -1336,14 +1295,9 @@ static int submit_descs(struct qcom_nand
-       list_for_each_entry(desc, &nandc->desc_list, node)
-               cookie = dmaengine_submit(desc->dma_desc);
--      if (nandc->props->is_bam) {
-+      if (nandc->props->supports_bam) {
-               bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
-               bam_txn->last_cmd_desc->callback_param = bam_txn;
--              if (bam_txn->last_data_desc) {
--                      bam_txn->last_data_desc->callback = qpic_bam_dma_done;
--                      bam_txn->last_data_desc->callback_param = bam_txn;
--                      bam_txn->wait_second_completion = true;
--              }
-               dma_async_issue_pending(nandc->tx_chan);
-               dma_async_issue_pending(nandc->rx_chan);
-@@ -1365,7 +1319,7 @@ err_unmap_free_desc:
-       list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
-               list_del(&desc->node);
--              if (nandc->props->is_bam)
-+              if (nandc->props->supports_bam)
-                       dma_unmap_sg(nandc->dev, desc->bam_sgl,
-                                    desc->sgl_cnt, desc->dir);
-               else
-@@ -1382,7 +1336,7 @@ err_unmap_free_desc:
- static void clear_read_regs(struct qcom_nand_controller *nandc)
- {
-       nandc->reg_read_pos = 0;
--      nandc_read_buffer_sync(nandc, false);
-+      nandc_dev_to_mem(nandc, false);
- }
- /*
-@@ -1446,7 +1400,7 @@ static int check_flash_errors(struct qco
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-       int i;
--      nandc_read_buffer_sync(nandc, true);
-+      nandc_dev_to_mem(nandc, true);
-       for (i = 0; i < cw_cnt; i++) {
-               u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
-@@ -1476,7 +1430,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *
-       clear_read_regs(nandc);
-       host->use_ecc = false;
--      if (nandc->props->qpic_v2)
-+      if (nandc->props->qpic_version2)
-               raw_cw = ecc->steps - 1;
-       clear_bam_transaction(nandc);
-@@ -1497,7 +1451,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *
-               oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
-       }
--      if (nandc->props->is_bam) {
-+      if (nandc->props->supports_bam) {
-               nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
-               read_loc += data_size1;
-@@ -1621,7 +1575,7 @@ static int parse_read_errors(struct qcom
-       u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
-       buf = (struct read_stats *)nandc->reg_read_buf;
--      nandc_read_buffer_sync(nandc, true);
-+      nandc_dev_to_mem(nandc, true);
-       for (i = 0; i < ecc->steps; i++, buf++) {
-               u32 flash, buffer, erased_cw;
-@@ -1734,7 +1688,7 @@ static int read_page_ecc(struct qcom_nan
-                       oob_size = host->ecc_bytes_hw + host->spare_bytes;
-               }
--              if (nandc->props->is_bam) {
-+              if (nandc->props->supports_bam) {
-                       if (data_buf && oob_buf) {
-                               nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
-                               nandc_set_read_loc(chip, i, 1, data_size,
-@@ -2455,14 +2409,14 @@ static int qcom_nand_attach_chip(struct
-       mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
-       /* Free the initially allocated BAM transaction for reading the ONFI params */
--      if (nandc->props->is_bam)
-+      if (nandc->props->supports_bam)
-               free_bam_transaction(nandc);
-       nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
-                                    cwperpage);
-       /* Now allocate the BAM transaction based on updated max_cwperpage */
--      if (nandc->props->is_bam) {
-+      if (nandc->props->supports_bam) {
-               nandc->bam_txn = alloc_bam_transaction(nandc);
-               if (!nandc->bam_txn) {
-                       dev_err(nandc->dev,
-@@ -2522,7 +2476,7 @@ static int qcom_nand_attach_chip(struct
-                               | ecc_mode << ECC_MODE
-                               | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
--      if (!nandc->props->qpic_v2)
-+      if (!nandc->props->qpic_version2)
-               host->ecc_buf_cfg = 0x203 << NUM_STEPS;
-       host->clrflashstatus = FS_READY_BSY_N;
-@@ -2556,7 +2510,7 @@ static int qcom_op_cmd_mapping(struct na
-               cmd = OP_FETCH_ID;
-               break;
-       case NAND_CMD_PARAM:
--              if (nandc->props->qpic_v2)
-+              if (nandc->props->qpic_version2)
-                       cmd = OP_PAGE_READ_ONFI_READ;
-               else
-                       cmd = OP_PAGE_READ;
-@@ -2609,7 +2563,7 @@ static int qcom_parse_instructions(struc
-                       if (ret < 0)
-                               return ret;
--                      q_op->cmd_reg = ret;
-+                      q_op->cmd_reg = cpu_to_le32(ret);
-                       q_op->rdy_delay_ns = instr->delay_ns;
-                       break;
-@@ -2619,10 +2573,10 @@ static int qcom_parse_instructions(struc
-                       addrs = &instr->ctx.addr.addrs[offset];
-                       for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
--                              q_op->addr1_reg |= addrs[i] << (i * 8);
-+                              q_op->addr1_reg |= cpu_to_le32(addrs[i] << (i * 8));
-                       if (naddrs > 4)
--                              q_op->addr2_reg |= addrs[4];
-+                              q_op->addr2_reg |= cpu_to_le32(addrs[4]);
-                       q_op->rdy_delay_ns = instr->delay_ns;
-                       break;
-@@ -2663,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nan
-       unsigned long start = jiffies + msecs_to_jiffies(time_ms);
-       u32 flash;
--      nandc_read_buffer_sync(nandc, true);
-+      nandc_dev_to_mem(nandc, true);
-       do {
-               flash = le32_to_cpu(nandc->reg_read_buf[0]);
-@@ -2706,11 +2660,11 @@ static int qcom_read_status_exec(struct
-       clear_read_regs(nandc);
-       clear_bam_transaction(nandc);
--      nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
--      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-+      nandc->regs->cmd = q_op.cmd_reg;
-+      nandc->regs->exec = cpu_to_le32(1);
--      write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-       read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-       ret = submit_descs(nandc);
-@@ -2719,7 +2673,7 @@ static int qcom_read_status_exec(struct
-               goto err_out;
-       }
--      nandc_read_buffer_sync(nandc, true);
-+      nandc_dev_to_mem(nandc, true);
-       for (i = 0; i < num_cw; i++) {
-               flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
-@@ -2763,16 +2717,14 @@ static int qcom_read_id_type_exec(struct
-       clear_read_regs(nandc);
-       clear_bam_transaction(nandc);
--      nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
--      nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
--      nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
--      nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
--                    nandc->props->is_bam ? 0 : DM_EN);
-+      nandc->regs->cmd = q_op.cmd_reg;
-+      nandc->regs->addr0 = q_op.addr1_reg;
-+      nandc->regs->addr1 = q_op.addr2_reg;
-+      nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
-+      nandc->regs->exec = cpu_to_le32(1);
--      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
--
--      write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-       read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
-@@ -2786,7 +2738,7 @@ static int qcom_read_id_type_exec(struct
-       op_id = q_op.data_instr_idx;
-       len = nand_subop_get_data_len(subop, op_id);
--      nandc_read_buffer_sync(nandc, true);
-+      nandc_dev_to_mem(nandc, true);
-       memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
- err_out:
-@@ -2807,15 +2759,14 @@ static int qcom_misc_cmd_type_exec(struc
-       if (q_op.flag == OP_PROGRAM_PAGE) {
-               goto wait_rdy;
--      } else if (q_op.cmd_reg == OP_BLOCK_ERASE) {
--              q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
--              nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
--              nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
--              nandc_set_reg(chip, NAND_DEV0_CFG0,
--                            host->cfg0_raw & ~(7 << CW_PER_PAGE));
--              nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
-+      } else if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE)) {
-+              q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
-+              nandc->regs->addr0 = q_op.addr1_reg;
-+              nandc->regs->addr1 = q_op.addr2_reg;
-+              nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~(7 << CW_PER_PAGE));
-+              nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw);
-               instrs = 3;
--      } else if (q_op.cmd_reg != OP_RESET_DEVICE) {
-+      } else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) {
-               return 0;
-       }
-@@ -2826,14 +2777,14 @@ static int qcom_misc_cmd_type_exec(struc
-       clear_read_regs(nandc);
-       clear_bam_transaction(nandc);
--      nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
--      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-+      nandc->regs->cmd = q_op.cmd_reg;
-+      nandc->regs->exec = cpu_to_le32(1);
--      write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
--      if (q_op.cmd_reg == OP_BLOCK_ERASE)
--              write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
-+      if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
-+              write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-       read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-       ret = submit_descs(nandc);
-@@ -2864,7 +2815,7 @@ static int qcom_param_page_type_exec(str
-       if (ret)
-               return ret;
--      q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
-+      q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
-       nandc->buf_count = 0;
-       nandc->buf_start = 0;
-@@ -2872,38 +2823,38 @@ static int qcom_param_page_type_exec(str
-       clear_read_regs(nandc);
-       clear_bam_transaction(nandc);
--      nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
-+      nandc->regs->cmd = q_op.cmd_reg;
-+      nandc->regs->addr0 = 0;
-+      nandc->regs->addr1 = 0;
-+
-+      nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE
-+                          | 512 << UD_SIZE_BYTES
-+                          | 5 << NUM_ADDR_CYCLES
-+                          | 0 << SPARE_SIZE_BYTES);
-+
-+      nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES
-+                          | 0 << CS_ACTIVE_BSY
-+                          | 17 << BAD_BLOCK_BYTE_NUM
-+                          | 1 << BAD_BLOCK_IN_SPARE_AREA
-+                          | 2 << WR_RD_BSY_GAP
-+                          | 0 << WIDE_FLASH
-+                          | 1 << DEV0_CFG1_ECC_DISABLE);
--      nandc_set_reg(chip, NAND_ADDR0, 0);
--      nandc_set_reg(chip, NAND_ADDR1, 0);
--      nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
--                                      | 512 << UD_SIZE_BYTES
--                                      | 5 << NUM_ADDR_CYCLES
--                                      | 0 << SPARE_SIZE_BYTES);
--      nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
--                                      | 0 << CS_ACTIVE_BSY
--                                      | 17 << BAD_BLOCK_BYTE_NUM
--                                      | 1 << BAD_BLOCK_IN_SPARE_AREA
--                                      | 2 << WR_RD_BSY_GAP
--                                      | 0 << WIDE_FLASH
--                                      | 1 << DEV0_CFG1_ECC_DISABLE);
--      if (!nandc->props->qpic_v2)
--              nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
-+      if (!nandc->props->qpic_version2)
-+              nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
-       /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
--      if (!nandc->props->qpic_v2) {
--              nandc_set_reg(chip, NAND_DEV_CMD_VLD,
--                            (nandc->vld & ~READ_START_VLD));
--              nandc_set_reg(chip, NAND_DEV_CMD1,
--                            (nandc->cmd1 & ~(0xFF << READ_ADDR))
--                            | NAND_CMD_PARAM << READ_ADDR);
-+      if (!nandc->props->qpic_version2) {
-+              nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD));
-+              nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~(0xFF << READ_ADDR))
-+                                  | NAND_CMD_PARAM << READ_ADDR);
-       }
--      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
--
--      if (!nandc->props->qpic_v2) {
--              nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
--              nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
-+      nandc->regs->exec = cpu_to_le32(1);
-+
-+      if (!nandc->props->qpic_version2) {
-+              nandc->regs->orig_cmd1 = cpu_to_le32(nandc->cmd1);
-+              nandc->regs->orig_vld = cpu_to_le32(nandc->vld);
-       }
-       instr = q_op.data_instr;
-@@ -2912,9 +2863,9 @@ static int qcom_param_page_type_exec(str
-       nandc_set_read_loc(chip, 0, 0, 0, len, 1);
--      if (!nandc->props->qpic_v2) {
--              write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
--              write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
-+      if (!nandc->props->qpic_version2) {
-+              write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
-+              write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
-       }
-       nandc->buf_count = len;
-@@ -2926,9 +2877,10 @@ static int qcom_param_page_type_exec(str
-                     nandc->buf_count, 0);
-       /* restore CMD1 and VLD regs */
--      if (!nandc->props->qpic_v2) {
--              write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
--              write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
-+      if (!nandc->props->qpic_version2) {
-+              write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
-+              write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
-+                            NAND_BAM_NEXT_SGL);
-       }
-       ret = submit_descs(nandc);
-@@ -3017,7 +2969,7 @@ static const struct nand_controller_ops
- static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
- {
--      if (nandc->props->is_bam) {
-+      if (nandc->props->supports_bam) {
-               if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
-                       dma_unmap_single(nandc->dev, nandc->reg_read_dma,
-                                        MAX_REG_RD *
-@@ -3070,7 +3022,7 @@ static int qcom_nandc_alloc(struct qcom_
-       if (!nandc->reg_read_buf)
-               return -ENOMEM;
--      if (nandc->props->is_bam) {
-+      if (nandc->props->supports_bam) {
-               nandc->reg_read_dma =
-                       dma_map_single(nandc->dev, nandc->reg_read_buf,
-                                      MAX_REG_RD *
-@@ -3151,15 +3103,15 @@ static int qcom_nandc_setup(struct qcom_
-       u32 nand_ctrl;
-       /* kill onenand */
--      if (!nandc->props->is_qpic)
-+      if (!nandc->props->nandc_part_of_qpic)
-               nandc_write(nandc, SFLASHC_BURST_CFG, 0);
--      if (!nandc->props->qpic_v2)
-+      if (!nandc->props->qpic_version2)
-               nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
-                           NAND_DEV_CMD_VLD_VAL);
-       /* enable ADM or BAM DMA */
--      if (nandc->props->is_bam) {
-+      if (nandc->props->supports_bam) {
-               nand_ctrl = nandc_read(nandc, NAND_CTRL);
-               /*
-@@ -3176,7 +3128,7 @@ static int qcom_nandc_setup(struct qcom_
-       }
-       /* save the original values of these registers */
--      if (!nandc->props->qpic_v2) {
-+      if (!nandc->props->qpic_version2) {
-               nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
-               nandc->vld = NAND_DEV_CMD_VLD_VAL;
-       }
-@@ -3349,7 +3301,7 @@ static int qcom_nandc_parse_dt(struct pl
-       struct device_node *np = nandc->dev->of_node;
-       int ret;
--      if (!nandc->props->is_bam) {
-+      if (!nandc->props->supports_bam) {
-               ret = of_property_read_u32(np, "qcom,cmd-crci",
-                                          &nandc->cmd_crci);
-               if (ret) {
-@@ -3474,30 +3426,30 @@ static void qcom_nandc_remove(struct pla
- static const struct qcom_nandc_props ipq806x_nandc_props = {
-       .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
--      .is_bam = false,
-+      .supports_bam = false,
-       .use_codeword_fixup = true,
-       .dev_cmd_reg_start = 0x0,
- };
- static const struct qcom_nandc_props ipq4019_nandc_props = {
-       .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
--      .is_bam = true,
--      .is_qpic = true,
-+      .supports_bam = true,
-+      .nandc_part_of_qpic = true,
-       .dev_cmd_reg_start = 0x0,
- };
- static const struct qcom_nandc_props ipq8074_nandc_props = {
-       .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
--      .is_bam = true,
--      .is_qpic = true,
-+      .supports_bam = true,
-+      .nandc_part_of_qpic = true,
-       .dev_cmd_reg_start = 0x7000,
- };
- static const struct qcom_nandc_props sdx55_nandc_props = {
-       .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
--      .is_bam = true,
--      .is_qpic = true,
--      .qpic_v2 = true,
-+      .supports_bam = true,
-+      .nandc_part_of_qpic = true,
-+      .qpic_version2 = true,
-       .dev_cmd_reg_start = 0x7000,
- };
diff --git a/target/linux/qualcommax/patches-6.6/0403-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch b/target/linux/qualcommax/patches-6.6/0403-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch
deleted file mode 100644 (file)
index 0bce70c..0000000
+++ /dev/null
@@ -1,874 +0,0 @@
-From: Md Sadre Alam <quic_mdalam@quicinc.com>
-Date: Sun, 22 Sep 2024 17:03:46 +0530
-Subject: [PATCH] mtd: rawnand: qcom: Add qcom prefix to common api
-
-Add qcom prefix to all the api which will be commonly
-used by spi nand driver and raw nand driver.
-
-Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
----
---- a/drivers/mtd/nand/raw/qcom_nandc.c
-+++ b/drivers/mtd/nand/raw/qcom_nandc.c
-@@ -53,7 +53,7 @@
- #define       NAND_READ_LOCATION_LAST_CW_2    0xf48
- #define       NAND_READ_LOCATION_LAST_CW_3    0xf4c
--/* dummy register offsets, used by write_reg_dma */
-+/* dummy register offsets, used by qcom_write_reg_dma */
- #define       NAND_DEV_CMD1_RESTORE           0xdead
- #define       NAND_DEV_CMD_VLD_RESTORE        0xbeef
-@@ -211,7 +211,7 @@
- /*
-  * Flags used in DMA descriptor preparation helper functions
-- * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
-+ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
-  */
- /* Don't set the EOT in current tx BAM sgl */
- #define NAND_BAM_NO_EOT                       BIT(0)
-@@ -550,7 +550,7 @@ struct qcom_nandc_props {
- };
- /* Frees the BAM transaction memory */
--static void free_bam_transaction(struct qcom_nand_controller *nandc)
-+static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
- {
-       struct bam_transaction *bam_txn = nandc->bam_txn;
-@@ -559,7 +559,7 @@ static void free_bam_transaction(struct
- /* Allocates and Initializes the BAM transaction */
- static struct bam_transaction *
--alloc_bam_transaction(struct qcom_nand_controller *nandc)
-+qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
- {
-       struct bam_transaction *bam_txn;
-       size_t bam_txn_size;
-@@ -595,7 +595,7 @@ alloc_bam_transaction(struct qcom_nand_c
- }
- /* Clears the BAM transaction indexes */
--static void clear_bam_transaction(struct qcom_nand_controller *nandc)
-+static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
- {
-       struct bam_transaction *bam_txn = nandc->bam_txn;
-@@ -614,7 +614,7 @@ static void clear_bam_transaction(struct
- }
- /* Callback for DMA descriptor completion */
--static void qpic_bam_dma_done(void *data)
-+static void qcom_qpic_bam_dma_done(void *data)
- {
-       struct bam_transaction *bam_txn = data;
-@@ -644,7 +644,7 @@ static inline void nandc_write(struct qc
-       iowrite32(val, nandc->base + offset);
- }
--static inline void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
-+static inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
- {
-       if (!nandc->props->supports_bam)
-               return;
-@@ -824,9 +824,9 @@ static void update_rw_regs(struct qcom_n
-  * for BAM. This descriptor will be added in the NAND DMA descriptor queue
-  * which will be submitted to DMA engine.
-  */
--static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
--                                struct dma_chan *chan,
--                                unsigned long flags)
-+static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
-+                                     struct dma_chan *chan,
-+                                     unsigned long flags)
- {
-       struct desc_info *desc;
-       struct scatterlist *sgl;
-@@ -903,9 +903,9 @@ static int prepare_bam_async_desc(struct
-  * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
-  * after the current command element.
-  */
--static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
--                               int reg_off, const void *vaddr,
--                               int size, unsigned int flags)
-+static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
-+                                    int reg_off, const void *vaddr,
-+                                    int size, unsigned int flags)
- {
-       int bam_ce_size;
-       int i, ret;
-@@ -943,9 +943,9 @@ static int prep_bam_dma_desc_cmd(struct
-               bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
-               if (flags & NAND_BAM_NWD) {
--                      ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
--                                                   DMA_PREP_FENCE |
--                                                   DMA_PREP_CMD);
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-+                                                        DMA_PREP_FENCE |
-+                                                        DMA_PREP_CMD);
-                       if (ret)
-                               return ret;
-               }
-@@ -958,9 +958,8 @@ static int prep_bam_dma_desc_cmd(struct
-  * Prepares the data descriptor for BAM DMA which will be used for NAND
-  * data reads and writes.
-  */
--static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
--                                const void *vaddr,
--                                int size, unsigned int flags)
-+static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
-+                                     const void *vaddr, int size, unsigned int flags)
- {
-       int ret;
-       struct bam_transaction *bam_txn = nandc->bam_txn;
-@@ -979,8 +978,8 @@ static int prep_bam_dma_desc_data(struct
-                * is not set, form the DMA descriptor
-                */
-               if (!(flags & NAND_BAM_NO_EOT)) {
--                      ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
--                                                   DMA_PREP_INTERRUPT);
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-+                                                        DMA_PREP_INTERRUPT);
-                       if (ret)
-                               return ret;
-               }
-@@ -989,9 +988,9 @@ static int prep_bam_dma_desc_data(struct
-       return 0;
- }
--static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
--                           int reg_off, const void *vaddr, int size,
--                           bool flow_control)
-+static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
-+                                int reg_off, const void *vaddr, int size,
-+                                bool flow_control)
- {
-       struct desc_info *desc;
-       struct dma_async_tx_descriptor *dma_desc;
-@@ -1069,15 +1068,15 @@ err:
- }
- /*
-- * read_reg_dma:      prepares a descriptor to read a given number of
-+ * qcom_read_reg_dma: prepares a descriptor to read a given number of
-  *                    contiguous registers to the reg_read_buf pointer
-  *
-  * @first:            offset of the first register in the contiguous block
-  * @num_regs:         number of registers to read
-  * @flags:            flags to control DMA descriptor preparation
-  */
--static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
--                      int num_regs, unsigned int flags)
-+static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
-+                           int num_regs, unsigned int flags)
- {
-       bool flow_control = false;
-       void *vaddr;
-@@ -1089,18 +1088,18 @@ static int read_reg_dma(struct qcom_nand
-               first = dev_cmd_reg_addr(nandc, first);
-       if (nandc->props->supports_bam)
--              return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
-+              return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
-                                            num_regs, flags);
-       if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
-               flow_control = true;
--      return prep_adm_dma_desc(nandc, true, first, vaddr,
-+      return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
-                                num_regs * sizeof(u32), flow_control);
- }
- /*
-- * write_reg_dma:     prepares a descriptor to write a given number of
-+ * qcom_write_reg_dma:        prepares a descriptor to write a given number of
-  *                    contiguous registers
-  *
-  * @vaddr:            contnigeous memory from where register value will
-@@ -1109,8 +1108,8 @@ static int read_reg_dma(struct qcom_nand
-  * @num_regs:         number of registers to write
-  * @flags:            flags to control DMA descriptor preparation
-  */
--static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
--                       int first, int num_regs, unsigned int flags)
-+static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
-+                            int first, int num_regs, unsigned int flags)
- {
-       bool flow_control = false;
-@@ -1124,18 +1123,18 @@ static int write_reg_dma(struct qcom_nan
-               first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
-       if (nandc->props->supports_bam)
--              return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
-+              return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
-                                            num_regs, flags);
-       if (first == NAND_FLASH_CMD)
-               flow_control = true;
--      return prep_adm_dma_desc(nandc, false, first, vaddr,
-+      return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
-                                num_regs * sizeof(u32), flow_control);
- }
- /*
-- * read_data_dma:     prepares a DMA descriptor to transfer data from the
-+ * qcom_read_data_dma:        prepares a DMA descriptor to transfer data from the
-  *                    controller's internal buffer to the buffer 'vaddr'
-  *
-  * @reg_off:          offset within the controller's data buffer
-@@ -1143,17 +1142,17 @@ static int write_reg_dma(struct qcom_nan
-  * @size:             DMA transaction size in bytes
-  * @flags:            flags to control DMA descriptor preparation
-  */
--static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
--                       const u8 *vaddr, int size, unsigned int flags)
-+static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-+                            const u8 *vaddr, int size, unsigned int flags)
- {
-       if (nandc->props->supports_bam)
--              return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
-+              return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
--      return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
-+      return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
- }
- /*
-- * write_data_dma:    prepares a DMA descriptor to transfer data from
-+ * qcom_write_data_dma:       prepares a DMA descriptor to transfer data from
-  *                    'vaddr' to the controller's internal buffer
-  *
-  * @reg_off:          offset within the controller's data buffer
-@@ -1161,13 +1160,13 @@ static int read_data_dma(struct qcom_nan
-  * @size:             DMA transaction size in bytes
-  * @flags:            flags to control DMA descriptor preparation
-  */
--static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
--                        const u8 *vaddr, int size, unsigned int flags)
-+static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-+                             const u8 *vaddr, int size, unsigned int flags)
- {
-       if (nandc->props->supports_bam)
--              return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
-+              return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
--      return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
-+      return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
- }
- /*
-@@ -1178,14 +1177,14 @@ static void config_nand_page_read(struct
- {
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
--      write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
--      write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+      qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-+      qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-       if (!nandc->props->qpic_version2)
--              write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
--      write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
--                    NAND_ERASED_CW_DETECT_CFG, 1, 0);
--      write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
--                    NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
-+              qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
-+      qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
-+                         NAND_ERASED_CW_DETECT_CFG, 1, 0);
-+      qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
-+                         NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
- }
- /*
-@@ -1204,17 +1203,17 @@ config_nand_cw_read(struct nand_chip *ch
-               reg = &nandc->regs->read_location_last0;
-       if (nandc->props->supports_bam)
--              write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
-+              qcom_write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-       if (use_ecc) {
--              read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
--              read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
--                           NAND_BAM_NEXT_SGL);
-+              qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
-+              qcom_read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
-+                                NAND_BAM_NEXT_SGL);
-       } else {
--              read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-+              qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-       }
- }
-@@ -1238,11 +1237,11 @@ static void config_nand_page_write(struc
- {
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
--      write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
--      write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+      qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-+      qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-       if (!nandc->props->qpic_version2)
--              write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
--                            NAND_BAM_NEXT_SGL);
-+              qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
-+                                 NAND_BAM_NEXT_SGL);
- }
- /*
-@@ -1253,17 +1252,18 @@ static void config_nand_cw_write(struct
- {
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
--      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
--      read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-+      qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
--      write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
-+      qcom_write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
-+                         NAND_BAM_NEXT_SGL);
- }
- /* helpers to submit/free our list of dma descriptors */
--static int submit_descs(struct qcom_nand_controller *nandc)
-+static int qcom_submit_descs(struct qcom_nand_controller *nandc)
- {
-       struct desc_info *desc, *n;
-       dma_cookie_t cookie = 0;
-@@ -1272,21 +1272,21 @@ static int submit_descs(struct qcom_nand
-       if (nandc->props->supports_bam) {
-               if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
--                      ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
-                       if (ret)
-                               goto err_unmap_free_desc;
-               }
-               if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
--                      ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
--                                                 DMA_PREP_INTERRUPT);
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-+                                                        DMA_PREP_INTERRUPT);
-                       if (ret)
-                               goto err_unmap_free_desc;
-               }
-               if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
--                      ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
--                                                 DMA_PREP_CMD);
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-+                                                        DMA_PREP_CMD);
-                       if (ret)
-                               goto err_unmap_free_desc;
-               }
-@@ -1296,7 +1296,7 @@ static int submit_descs(struct qcom_nand
-               cookie = dmaengine_submit(desc->dma_desc);
-       if (nandc->props->supports_bam) {
--              bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
-+              bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
-               bam_txn->last_cmd_desc->callback_param = bam_txn;
-               dma_async_issue_pending(nandc->tx_chan);
-@@ -1314,7 +1314,7 @@ static int submit_descs(struct qcom_nand
- err_unmap_free_desc:
-       /*
-        * Unmap the dma sg_list and free the desc allocated by both
--       * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
-+       * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
-        */
-       list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
-               list_del(&desc->node);
-@@ -1333,10 +1333,10 @@ err_unmap_free_desc:
- }
- /* reset the register read buffer for next NAND operation */
--static void clear_read_regs(struct qcom_nand_controller *nandc)
-+static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
- {
-       nandc->reg_read_pos = 0;
--      nandc_dev_to_mem(nandc, false);
-+      qcom_nandc_dev_to_mem(nandc, false);
- }
- /*
-@@ -1400,7 +1400,7 @@ static int check_flash_errors(struct qco
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-       int i;
--      nandc_dev_to_mem(nandc, true);
-+      qcom_nandc_dev_to_mem(nandc, true);
-       for (i = 0; i < cw_cnt; i++) {
-               u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
-@@ -1427,13 +1427,13 @@ qcom_nandc_read_cw_raw(struct mtd_info *
-       nand_read_page_op(chip, page, 0, NULL, 0);
-       nandc->buf_count = 0;
-       nandc->buf_start = 0;
--      clear_read_regs(nandc);
-+      qcom_clear_read_regs(nandc);
-       host->use_ecc = false;
-       if (nandc->props->qpic_version2)
-               raw_cw = ecc->steps - 1;
--      clear_bam_transaction(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       set_address(host, host->cw_size * cw, page);
-       update_rw_regs(host, 1, true, raw_cw);
-       config_nand_page_read(chip);
-@@ -1466,18 +1466,18 @@ qcom_nandc_read_cw_raw(struct mtd_info *
-       config_nand_cw_read(chip, false, raw_cw);
--      read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
-+      qcom_read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
-       reg_off += data_size1;
--      read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
-+      qcom_read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
-       reg_off += oob_size1;
--      read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
-+      qcom_read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
-       reg_off += data_size2;
--      read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
-+      qcom_read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
-               return ret;
-@@ -1575,7 +1575,7 @@ static int parse_read_errors(struct qcom
-       u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
-       buf = (struct read_stats *)nandc->reg_read_buf;
--      nandc_dev_to_mem(nandc, true);
-+      qcom_nandc_dev_to_mem(nandc, true);
-       for (i = 0; i < ecc->steps; i++, buf++) {
-               u32 flash, buffer, erased_cw;
-@@ -1704,8 +1704,8 @@ static int read_page_ecc(struct qcom_nan
-               config_nand_cw_read(chip, true, i);
-               if (data_buf)
--                      read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
--                                    data_size, 0);
-+                      qcom_read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
-+                                         data_size, 0);
-               /*
-                * when ecc is enabled, the controller doesn't read the real
-@@ -1720,8 +1720,8 @@ static int read_page_ecc(struct qcom_nan
-                       for (j = 0; j < host->bbm_size; j++)
-                               *oob_buf++ = 0xff;
--                      read_data_dma(nandc, FLASH_BUF_ACC + data_size,
--                                    oob_buf, oob_size, 0);
-+                      qcom_read_data_dma(nandc, FLASH_BUF_ACC + data_size,
-+                                         oob_buf, oob_size, 0);
-               }
-               if (data_buf)
-@@ -1730,7 +1730,7 @@ static int read_page_ecc(struct qcom_nan
-                       oob_buf += oob_size;
-       }
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure to read page/oob\n");
-               return ret;
-@@ -1751,7 +1751,7 @@ static int copy_last_cw(struct qcom_nand
-       int size;
-       int ret;
--      clear_read_regs(nandc);
-+      qcom_clear_read_regs(nandc);
-       size = host->use_ecc ? host->cw_data : host->cw_size;
-@@ -1763,9 +1763,9 @@ static int copy_last_cw(struct qcom_nand
-       config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
--      read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
-+      qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret)
-               dev_err(nandc->dev, "failed to copy last codeword\n");
-@@ -1851,14 +1851,14 @@ static int qcom_nandc_read_page(struct n
-       nandc->buf_count = 0;
-       nandc->buf_start = 0;
-       host->use_ecc = true;
--      clear_read_regs(nandc);
-+      qcom_clear_read_regs(nandc);
-       set_address(host, 0, page);
-       update_rw_regs(host, ecc->steps, true, 0);
-       data_buf = buf;
-       oob_buf = oob_required ? chip->oob_poi : NULL;
--      clear_bam_transaction(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       return read_page_ecc(host, data_buf, oob_buf, page);
- }
-@@ -1899,8 +1899,8 @@ static int qcom_nandc_read_oob(struct na
-       if (host->nr_boot_partitions)
-               qcom_nandc_codeword_fixup(host, page);
--      clear_read_regs(nandc);
--      clear_bam_transaction(nandc);
-+      qcom_clear_read_regs(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       host->use_ecc = true;
-       set_address(host, 0, page);
-@@ -1927,8 +1927,8 @@ static int qcom_nandc_write_page(struct
-       set_address(host, 0, page);
-       nandc->buf_count = 0;
-       nandc->buf_start = 0;
--      clear_read_regs(nandc);
--      clear_bam_transaction(nandc);
-+      qcom_clear_read_regs(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       data_buf = (u8 *)buf;
-       oob_buf = chip->oob_poi;
-@@ -1949,8 +1949,8 @@ static int qcom_nandc_write_page(struct
-                       oob_size = ecc->bytes;
-               }
--              write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
--                             i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
-+              qcom_write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
-+                                  i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
-               /*
-                * when ECC is enabled, we don't really need to write anything
-@@ -1962,8 +1962,8 @@ static int qcom_nandc_write_page(struct
-               if (qcom_nandc_is_last_cw(ecc, i)) {
-                       oob_buf += host->bbm_size;
--                      write_data_dma(nandc, FLASH_BUF_ACC + data_size,
--                                     oob_buf, oob_size, 0);
-+                      qcom_write_data_dma(nandc, FLASH_BUF_ACC + data_size,
-+                                          oob_buf, oob_size, 0);
-               }
-               config_nand_cw_write(chip);
-@@ -1972,7 +1972,7 @@ static int qcom_nandc_write_page(struct
-               oob_buf += oob_size;
-       }
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure to write page\n");
-               return ret;
-@@ -1997,8 +1997,8 @@ static int qcom_nandc_write_page_raw(str
-               qcom_nandc_codeword_fixup(host, page);
-       nand_prog_page_begin_op(chip, page, 0, NULL, 0);
--      clear_read_regs(nandc);
--      clear_bam_transaction(nandc);
-+      qcom_clear_read_regs(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       data_buf = (u8 *)buf;
-       oob_buf = chip->oob_poi;
-@@ -2024,28 +2024,28 @@ static int qcom_nandc_write_page_raw(str
-                       oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
-               }
--              write_data_dma(nandc, reg_off, data_buf, data_size1,
--                             NAND_BAM_NO_EOT);
-+              qcom_write_data_dma(nandc, reg_off, data_buf, data_size1,
-+                                  NAND_BAM_NO_EOT);
-               reg_off += data_size1;
-               data_buf += data_size1;
--              write_data_dma(nandc, reg_off, oob_buf, oob_size1,
--                             NAND_BAM_NO_EOT);
-+              qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size1,
-+                                  NAND_BAM_NO_EOT);
-               reg_off += oob_size1;
-               oob_buf += oob_size1;
--              write_data_dma(nandc, reg_off, data_buf, data_size2,
--                             NAND_BAM_NO_EOT);
-+              qcom_write_data_dma(nandc, reg_off, data_buf, data_size2,
-+                                  NAND_BAM_NO_EOT);
-               reg_off += data_size2;
-               data_buf += data_size2;
--              write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
-+              qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
-               oob_buf += oob_size2;
-               config_nand_cw_write(chip);
-       }
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure to write raw page\n");
-               return ret;
-@@ -2075,7 +2075,7 @@ static int qcom_nandc_write_oob(struct n
-               qcom_nandc_codeword_fixup(host, page);
-       host->use_ecc = true;
--      clear_bam_transaction(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       /* calculate the data and oob size for the last codeword/step */
-       data_size = ecc->size - ((ecc->steps - 1) << 2);
-@@ -2090,11 +2090,11 @@ static int qcom_nandc_write_oob(struct n
-       update_rw_regs(host, 1, false, 0);
-       config_nand_page_write(chip);
--      write_data_dma(nandc, FLASH_BUF_ACC,
--                     nandc->data_buffer, data_size + oob_size, 0);
-+      qcom_write_data_dma(nandc, FLASH_BUF_ACC,
-+                          nandc->data_buffer, data_size + oob_size, 0);
-       config_nand_cw_write(chip);
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure to write oob\n");
-               return ret;
-@@ -2121,7 +2121,7 @@ static int qcom_nandc_block_bad(struct n
-        */
-       host->use_ecc = false;
--      clear_bam_transaction(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       ret = copy_last_cw(host, page);
-       if (ret)
-               goto err;
-@@ -2148,8 +2148,8 @@ static int qcom_nandc_block_markbad(stru
-       struct nand_ecc_ctrl *ecc = &chip->ecc;
-       int page, ret;
--      clear_read_regs(nandc);
--      clear_bam_transaction(nandc);
-+      qcom_clear_read_regs(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       /*
-        * to mark the BBM as bad, we flash the entire last codeword with 0s.
-@@ -2166,11 +2166,11 @@ static int qcom_nandc_block_markbad(stru
-       update_rw_regs(host, 1, false, ecc->steps - 1);
-       config_nand_page_write(chip);
--      write_data_dma(nandc, FLASH_BUF_ACC,
--                     nandc->data_buffer, host->cw_size, 0);
-+      qcom_write_data_dma(nandc, FLASH_BUF_ACC,
-+                          nandc->data_buffer, host->cw_size, 0);
-       config_nand_cw_write(chip);
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure to update BBM\n");
-               return ret;
-@@ -2410,14 +2410,14 @@ static int qcom_nand_attach_chip(struct
-       mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
-       /* Free the initially allocated BAM transaction for reading the ONFI params */
-       if (nandc->props->supports_bam)
--              free_bam_transaction(nandc);
-+              qcom_free_bam_transaction(nandc);
-       nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
-                                    cwperpage);
-       /* Now allocate the BAM transaction based on updated max_cwperpage */
-       if (nandc->props->supports_bam) {
--              nandc->bam_txn = alloc_bam_transaction(nandc);
-+              nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
-               if (!nandc->bam_txn) {
-                       dev_err(nandc->dev,
-                               "failed to allocate bam transaction\n");
-@@ -2617,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nan
-       unsigned long start = jiffies + msecs_to_jiffies(time_ms);
-       u32 flash;
--      nandc_dev_to_mem(nandc, true);
-+      qcom_nandc_dev_to_mem(nandc, true);
-       do {
-               flash = le32_to_cpu(nandc->reg_read_buf[0]);
-@@ -2657,23 +2657,23 @@ static int qcom_read_status_exec(struct
-       nandc->buf_start = 0;
-       host->use_ecc = false;
--      clear_read_regs(nandc);
--      clear_bam_transaction(nandc);
-+      qcom_clear_read_regs(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       nandc->regs->cmd = q_op.cmd_reg;
-       nandc->regs->exec = cpu_to_le32(1);
--      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
--      read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure in submitting status descriptor\n");
-               goto err_out;
-       }
--      nandc_dev_to_mem(nandc, true);
-+      qcom_nandc_dev_to_mem(nandc, true);
-       for (i = 0; i < num_cw; i++) {
-               flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
-@@ -2714,8 +2714,8 @@ static int qcom_read_id_type_exec(struct
-       nandc->buf_start = 0;
-       host->use_ecc = false;
--      clear_read_regs(nandc);
--      clear_bam_transaction(nandc);
-+      qcom_clear_read_regs(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       nandc->regs->cmd = q_op.cmd_reg;
-       nandc->regs->addr0 = q_op.addr1_reg;
-@@ -2723,12 +2723,12 @@ static int qcom_read_id_type_exec(struct
-       nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
-       nandc->regs->exec = cpu_to_le32(1);
--      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
--      read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
-+      qcom_read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure in submitting read id descriptor\n");
-               goto err_out;
-@@ -2738,7 +2738,7 @@ static int qcom_read_id_type_exec(struct
-       op_id = q_op.data_instr_idx;
-       len = nand_subop_get_data_len(subop, op_id);
--      nandc_dev_to_mem(nandc, true);
-+      qcom_nandc_dev_to_mem(nandc, true);
-       memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
- err_out:
-@@ -2774,20 +2774,20 @@ static int qcom_misc_cmd_type_exec(struc
-       nandc->buf_start = 0;
-       host->use_ecc = false;
--      clear_read_regs(nandc);
--      clear_bam_transaction(nandc);
-+      qcom_clear_read_regs(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       nandc->regs->cmd = q_op.cmd_reg;
-       nandc->regs->exec = cpu_to_le32(1);
--      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
-       if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
--              write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
-+              qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
--      read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure in submitting misc descriptor\n");
-               goto err_out;
-@@ -2820,8 +2820,8 @@ static int qcom_param_page_type_exec(str
-       nandc->buf_count = 0;
-       nandc->buf_start = 0;
-       host->use_ecc = false;
--      clear_read_regs(nandc);
--      clear_bam_transaction(nandc);
-+      qcom_clear_read_regs(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       nandc->regs->cmd = q_op.cmd_reg;
-       nandc->regs->addr0 = 0;
-@@ -2864,8 +2864,8 @@ static int qcom_param_page_type_exec(str
-       nandc_set_read_loc(chip, 0, 0, 0, len, 1);
-       if (!nandc->props->qpic_version2) {
--              write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
--              write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
-+              qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
-+              qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
-       }
-       nandc->buf_count = len;
-@@ -2873,17 +2873,17 @@ static int qcom_param_page_type_exec(str
-       config_nand_single_cw_page_read(chip, false, 0);
--      read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
--                    nandc->buf_count, 0);
-+      qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
-+                         nandc->buf_count, 0);
-       /* restore CMD1 and VLD regs */
-       if (!nandc->props->qpic_version2) {
--              write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
--              write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
--                            NAND_BAM_NEXT_SGL);
-+              qcom_write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
-+              qcom_write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
-+                                 NAND_BAM_NEXT_SGL);
-       }
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure in submitting param page descriptor\n");
-               goto err_out;
-@@ -3067,7 +3067,7 @@ static int qcom_nandc_alloc(struct qcom_
-                * maximum codeword size
-                */
-               nandc->max_cwperpage = 1;
--              nandc->bam_txn = alloc_bam_transaction(nandc);
-+              nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
-               if (!nandc->bam_txn) {
-                       dev_err(nandc->dev,
-                               "failed to allocate bam transaction\n");
diff --git a/target/linux/qualcommax/patches-6.6/0404-mtd-nand-Add-qpic_common-API-file.patch b/target/linux/qualcommax/patches-6.6/0404-mtd-nand-Add-qpic_common-API-file.patch
deleted file mode 100644 (file)
index e992e26..0000000
+++ /dev/null
@@ -1,2418 +0,0 @@
-From: Md Sadre Alam <quic_mdalam@quicinc.com>
-Date: Sun, 22 Sep 2024 17:03:47 +0530
-Subject: [PATCH] mtd: nand: Add qpic_common API file
-
-Add qpic_common.c file which hold all the common
-qpic APIs which will be used by both qpic raw nand
-driver and qpic spi nand driver.
-
-Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
----
---- a/drivers/mtd/nand/Makefile
-+++ b/drivers/mtd/nand/Makefile
-@@ -5,6 +5,10 @@ obj-$(CONFIG_MTD_NAND_CORE) += nandcore.
- obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
- obj-$(CONFIG_MTD_NAND_MTK_BMT)        += mtk_bmt.o mtk_bmt_v2.o mtk_bmt_bbt.o mtk_bmt_nmbm.o
-+ifeq ($(CONFIG_MTD_NAND_QCOM),y)
-+obj-y += qpic_common.o
-+endif
-+
- obj-y += onenand/
- obj-y += raw/
- obj-y += spi/
---- /dev/null
-+++ b/drivers/mtd/nand/qpic_common.c
-@@ -0,0 +1,738 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
-+ */
-+#include <linux/clk.h>
-+#include <linux/delay.h>
-+#include <linux/dmaengine.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/dma/qcom_adm.h>
-+#include <linux/dma/qcom_bam_dma.h>
-+#include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/platform_device.h>
-+#include <linux/slab.h>
-+#include <linux/mtd/nand-qpic-common.h>
-+
-+/**
-+ * qcom_free_bam_transaction() - Frees the BAM transaction memory
-+ * @nandc: qpic nand controller
-+ *
-+ * This function frees the bam transaction memory
-+ */
-+void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
-+{
-+      struct bam_transaction *bam_txn = nandc->bam_txn;
-+
-+      kfree(bam_txn);
-+}
-+
-+/**
-+ * qcom_alloc_bam_transaction() - allocate BAM transaction
-+ * @nandc: qpic nand controller
-+ *
-+ * This function will allocate and initialize the BAM transaction structure
-+ */
-+struct bam_transaction *
-+qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
-+{
-+      struct bam_transaction *bam_txn;
-+      size_t bam_txn_size;
-+      unsigned int num_cw = nandc->max_cwperpage;
-+      void *bam_txn_buf;
-+
-+      bam_txn_size =
-+              sizeof(*bam_txn) + num_cw *
-+              ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
-+              (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
-+              (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
-+
-+      bam_txn_buf = kzalloc(bam_txn_size, GFP_KERNEL);
-+      if (!bam_txn_buf)
-+              return NULL;
-+
-+      bam_txn = bam_txn_buf;
-+      bam_txn_buf += sizeof(*bam_txn);
-+
-+      bam_txn->bam_ce = bam_txn_buf;
-+      bam_txn_buf +=
-+              sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
-+
-+      bam_txn->cmd_sgl = bam_txn_buf;
-+      bam_txn_buf +=
-+              sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
-+
-+      bam_txn->data_sgl = bam_txn_buf;
-+
-+      init_completion(&bam_txn->txn_done);
-+
-+      return bam_txn;
-+}
-+
-+/**
-+ * qcom_clear_bam_transaction() - Clears the BAM transaction
-+ * @nandc: qpic nand controller
-+ *
-+ * This function will clear the BAM transaction indexes.
-+ */
-+void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
-+{
-+      struct bam_transaction *bam_txn = nandc->bam_txn;
-+
-+      if (!nandc->props->supports_bam)
-+              return;
-+
-+      memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
-+      bam_txn->last_data_desc = NULL;
-+
-+      sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
-+                    QPIC_PER_CW_CMD_SGL);
-+      sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
-+                    QPIC_PER_CW_DATA_SGL);
-+
-+      reinit_completion(&bam_txn->txn_done);
-+}
-+
-+/**
-+ * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion
-+ * @data: data pointer
-+ *
-+ * This function is a callback for DMA descriptor completion
-+ */
-+void qcom_qpic_bam_dma_done(void *data)
-+{
-+      struct bam_transaction *bam_txn = data;
-+
-+      complete(&bam_txn->txn_done);
-+}
-+
-+/**
-+ * qcom_nandc_dev_to_mem() - Check for dma sync for cpu or device
-+ * @nandc: qpic nand controller
-+ * @is_cpu: cpu or Device
-+ *
-+ * This function will check for dma sync for cpu or device
-+ */
-+inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
-+{
-+      if (!nandc->props->supports_bam)
-+              return;
-+
-+      if (is_cpu)
-+              dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
-+                                      MAX_REG_RD *
-+                                      sizeof(*nandc->reg_read_buf),
-+                                      DMA_FROM_DEVICE);
-+      else
-+              dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
-+                                         MAX_REG_RD *
-+                                         sizeof(*nandc->reg_read_buf),
-+                                         DMA_FROM_DEVICE);
-+}
-+
-+/**
-+ * qcom_prepare_bam_async_desc() - Prepare DMA descriptor
-+ * @nandc: qpic nand controller
-+ * @chan: dma channel
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function maps the scatter gather list for DMA transfer and forms the
-+ * DMA descriptor for BAM.This descriptor will be added in the NAND DMA
-+ * descriptor queue which will be submitted to DMA engine.
-+ */
-+int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
-+                              struct dma_chan *chan, unsigned long flags)
-+{
-+      struct desc_info *desc;
-+      struct scatterlist *sgl;
-+      unsigned int sgl_cnt;
-+      int ret;
-+      struct bam_transaction *bam_txn = nandc->bam_txn;
-+      enum dma_transfer_direction dir_eng;
-+      struct dma_async_tx_descriptor *dma_desc;
-+
-+      desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-+      if (!desc)
-+              return -ENOMEM;
-+
-+      if (chan == nandc->cmd_chan) {
-+              sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
-+              sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
-+              bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
-+              dir_eng = DMA_MEM_TO_DEV;
-+              desc->dir = DMA_TO_DEVICE;
-+      } else if (chan == nandc->tx_chan) {
-+              sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
-+              sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
-+              bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
-+              dir_eng = DMA_MEM_TO_DEV;
-+              desc->dir = DMA_TO_DEVICE;
-+      } else {
-+              sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
-+              sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
-+              bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
-+              dir_eng = DMA_DEV_TO_MEM;
-+              desc->dir = DMA_FROM_DEVICE;
-+      }
-+
-+      sg_mark_end(sgl + sgl_cnt - 1);
-+      ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
-+      if (ret == 0) {
-+              dev_err(nandc->dev, "failure in mapping desc\n");
-+              kfree(desc);
-+              return -ENOMEM;
-+      }
-+
-+      desc->sgl_cnt = sgl_cnt;
-+      desc->bam_sgl = sgl;
-+
-+      dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
-+                                         flags);
-+
-+      if (!dma_desc) {
-+              dev_err(nandc->dev, "failure in prep desc\n");
-+              dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
-+              kfree(desc);
-+              return -EINVAL;
-+      }
-+
-+      desc->dma_desc = dma_desc;
-+
-+      /* update last data/command descriptor */
-+      if (chan == nandc->cmd_chan)
-+              bam_txn->last_cmd_desc = dma_desc;
-+      else
-+              bam_txn->last_data_desc = dma_desc;
-+
-+      list_add_tail(&desc->node, &nandc->desc_list);
-+
-+      return 0;
-+}
-+
-+/**
-+ * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA
-+ * @nandc: qpic nand controller
-+ * @read: read or write type
-+ * @reg_off: offset within the controller's data buffer
-+ * @vaddr: virtual address of the buffer we want to write to
-+ * @size: DMA transaction size in bytes
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares the command descriptor for BAM DMA
-+ * which will be used for NAND register reads and writes.
-+ */
-+int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
-+                             int reg_off, const void *vaddr,
-+                             int size, unsigned int flags)
-+{
-+      int bam_ce_size;
-+      int i, ret;
-+      struct bam_cmd_element *bam_ce_buffer;
-+      struct bam_transaction *bam_txn = nandc->bam_txn;
-+
-+      bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
-+
-+      /* fill the command desc */
-+      for (i = 0; i < size; i++) {
-+              if (read)
-+                      bam_prep_ce(&bam_ce_buffer[i],
-+                                  nandc_reg_phys(nandc, reg_off + 4 * i),
-+                                  BAM_READ_COMMAND,
-+                                  reg_buf_dma_addr(nandc,
-+                                                   (__le32 *)vaddr + i));
-+              else
-+                      bam_prep_ce_le32(&bam_ce_buffer[i],
-+                                       nandc_reg_phys(nandc, reg_off + 4 * i),
-+                                       BAM_WRITE_COMMAND,
-+                                       *((__le32 *)vaddr + i));
-+      }
-+
-+      bam_txn->bam_ce_pos += size;
-+
-+      /* use the separate sgl after this command */
-+      if (flags & NAND_BAM_NEXT_SGL) {
-+              bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
-+              bam_ce_size = (bam_txn->bam_ce_pos -
-+                              bam_txn->bam_ce_start) *
-+                              sizeof(struct bam_cmd_element);
-+              sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
-+                         bam_ce_buffer, bam_ce_size);
-+              bam_txn->cmd_sgl_pos++;
-+              bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
-+
-+              if (flags & NAND_BAM_NWD) {
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-+                                                        DMA_PREP_FENCE | DMA_PREP_CMD);
-+                      if (ret)
-+                              return ret;
-+              }
-+      }
-+
-+      return 0;
-+}
-+
-+/**
-+ * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA
-+ * @nandc: qpic nand controller
-+ * @read: read or write type
-+ * @vaddr: virtual address of the buffer we want to write to
-+ * @size: DMA transaction size in bytes
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function prepares the data descriptor for BAM DMA which
-+ * will be used for NAND data reads and writes.
-+ */
-+int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
-+                              const void *vaddr, int size, unsigned int flags)
-+{
-+      int ret;
-+      struct bam_transaction *bam_txn = nandc->bam_txn;
-+
-+      if (read) {
-+              sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
-+                         vaddr, size);
-+              bam_txn->rx_sgl_pos++;
-+      } else {
-+              sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
-+                         vaddr, size);
-+              bam_txn->tx_sgl_pos++;
-+
-+              /*
-+               * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
-+               * is not set, form the DMA descriptor
-+               */
-+              if (!(flags & NAND_BAM_NO_EOT)) {
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-+                                                        DMA_PREP_INTERRUPT);
-+                      if (ret)
-+                              return ret;
-+              }
-+      }
-+
-+      return 0;
-+}
-+
-+/**
-+ * qcom_prep_adm_dma_desc() - Prepare a descriptor for ADM DMA
-+ * @nandc: qpic nand controller
-+ * @read: read or write type
-+ * @reg_off: offset within the controller's data buffer
-+ * @vaddr: virtual address of the buffer we want to write to
-+ * @size: adm dma transaction size in bytes
-+ * @flow_control: whether to use peripheral flow control
-+ *
-+ * This function prepares a descriptor for ADM DMA
-+ */
-+int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
-+                         int reg_off, const void *vaddr, int size,
-+                         bool flow_control)
-+{
-+      struct qcom_adm_peripheral_config periph_conf = {};
-+      struct dma_async_tx_descriptor *dma_desc;
-+      struct dma_slave_config slave_conf = {0};
-+      enum dma_transfer_direction dir_eng;
-+      struct desc_info *desc;
-+      struct scatterlist *sgl;
-+      int ret;
-+
-+      desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-+      if (!desc)
-+              return -ENOMEM;
-+
-+      sgl = &desc->adm_sgl;
-+
-+      sg_init_one(sgl, vaddr, size);
-+
-+      if (read) {
-+              dir_eng = DMA_DEV_TO_MEM;
-+              desc->dir = DMA_FROM_DEVICE;
-+      } else {
-+              dir_eng = DMA_MEM_TO_DEV;
-+              desc->dir = DMA_TO_DEVICE;
-+      }
-+
-+      ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
-+      if (!ret) {
-+              ret = -ENOMEM;
-+              goto err;
-+      }
-+
-+      slave_conf.device_fc = flow_control;
-+      if (read) {
-+              slave_conf.src_maxburst = 16;
-+              slave_conf.src_addr = nandc->base_dma + reg_off;
-+              if (nandc->data_crci) {
-+                      periph_conf.crci = nandc->data_crci;
-+                      slave_conf.peripheral_config = &periph_conf;
-+                      slave_conf.peripheral_size = sizeof(periph_conf);
-+              }
-+      } else {
-+              slave_conf.dst_maxburst = 16;
-+              slave_conf.dst_addr = nandc->base_dma + reg_off;
-+              if (nandc->cmd_crci) {
-+                      periph_conf.crci = nandc->cmd_crci;
-+                      slave_conf.peripheral_config = &periph_conf;
-+                      slave_conf.peripheral_size = sizeof(periph_conf);
-+              }
-+      }
-+
-+      ret = dmaengine_slave_config(nandc->chan, &slave_conf);
-+      if (ret) {
-+              dev_err(nandc->dev, "failed to configure dma channel\n");
-+              goto err;
-+      }
-+
-+      dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
-+      if (!dma_desc) {
-+              dev_err(nandc->dev, "failed to prepare desc\n");
-+              ret = -EINVAL;
-+              goto err;
-+      }
-+
-+      desc->dma_desc = dma_desc;
-+
-+      list_add_tail(&desc->node, &nandc->desc_list);
-+
-+      return 0;
-+err:
-+      kfree(desc);
-+
-+      return ret;
-+}
-+
-+/**
-+ * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer
-+ * @nandc: qpic nand controller
-+ * @first: offset of the first register in the contiguous block
-+ * @num_regs: number of registers to read
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function prepares a descriptor to read a given number of
-+ * contiguous registers to the reg_read_buf pointer.
-+ */
-+int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
-+                    int num_regs, unsigned int flags)
-+{
-+      bool flow_control = false;
-+      void *vaddr;
-+
-+      vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
-+      nandc->reg_read_pos += num_regs;
-+
-+      if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
-+              first = dev_cmd_reg_addr(nandc, first);
-+
-+      if (nandc->props->supports_bam)
-+              return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
-+                                           num_regs, flags);
-+
-+      if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
-+              flow_control = true;
-+
-+      return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
-+                                    num_regs * sizeof(u32), flow_control);
-+}
-+
-+/**
-+ * qcom_write_reg_dma() - write a given number of registers
-+ * @nandc: qpic nand controller
-+ * @vaddr: contiguous memory from which the register values will
-+ *       be written
-+ * @first: offset of the first register in the contiguous block
-+ * @num_regs: number of registers to write
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function prepares a descriptor to write a given number of
-+ * contiguous registers
-+ */
-+int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
-+                     int first, int num_regs, unsigned int flags)
-+{
-+      bool flow_control = false;
-+
-+      if (first == NAND_EXEC_CMD)
-+              flags |= NAND_BAM_NWD;
-+
-+      if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
-+              first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
-+
-+      if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
-+              first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
-+
-+      if (nandc->props->supports_bam)
-+              return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
-+                                                num_regs, flags);
-+
-+      if (first == NAND_FLASH_CMD)
-+              flow_control = true;
-+
-+      return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
-+                                    num_regs * sizeof(u32), flow_control);
-+}
-+
-+/**
-+ * qcom_read_data_dma() - transfer data
-+ * @nandc: qpic nand controller
-+ * @reg_off: offset within the controller's data buffer
-+ * @vaddr: virtual address of the buffer we want to write to
-+ * @size: DMA transaction size in bytes
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function prepares a DMA descriptor to transfer data from the
-+ * controller's internal buffer to the buffer 'vaddr'
-+ */
-+int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-+                     const u8 *vaddr, int size, unsigned int flags)
-+{
-+      if (nandc->props->supports_bam)
-+              return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
-+
-+      return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
-+}
-+
-+/**
-+ * qcom_write_data_dma() - transfer data
-+ * @nandc: qpic nand controller
-+ * @reg_off: offset within the controller's data buffer
-+ * @vaddr: virtual address of the buffer we want to read from
-+ * @size: DMA transaction size in bytes
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function prepares a DMA descriptor to transfer data from
-+ * 'vaddr' to the controller's internal buffer
-+ */
-+int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-+                      const u8 *vaddr, int size, unsigned int flags)
-+{
-+      if (nandc->props->supports_bam)
-+              return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
-+
-+      return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
-+}
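/*
 * Illustrative sketch (not part of this patch): how a caller might queue a
 * single codeword's worth of data from the controller's internal buffer
 * (FLASH_BUF_ACC) into the driver's local data_buffer using the helper
 * above. The wrapper name example_queue_cw_read() is hypothetical.
 */
static int example_queue_cw_read(struct qcom_nand_controller *nandc)
{
        /* flags = 0: no special BAM flags for this transfer */
        return qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
                                  NANDC_STEP_SIZE, 0);
}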
-+
-+/**
-+ * qcom_submit_descs() - submit dma descriptor
-+ * @nandc: qpic nand controller
-+ *
-+ * This function submits all the prepared DMA descriptors
-+ * (command or data).
-+ */
-+int qcom_submit_descs(struct qcom_nand_controller *nandc)
-+{
-+      struct desc_info *desc, *n;
-+      dma_cookie_t cookie = 0;
-+      struct bam_transaction *bam_txn = nandc->bam_txn;
-+      int ret = 0;
-+
-+      if (nandc->props->supports_bam) {
-+              if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
-+                      if (ret)
-+                              goto err_unmap_free_desc;
-+              }
-+
-+              if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-+                                                        DMA_PREP_INTERRUPT);
-+                      if (ret)
-+                              goto err_unmap_free_desc;
-+              }
-+
-+              if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-+                                                        DMA_PREP_CMD);
-+                      if (ret)
-+                              goto err_unmap_free_desc;
-+              }
-+      }
-+
-+      list_for_each_entry(desc, &nandc->desc_list, node)
-+              cookie = dmaengine_submit(desc->dma_desc);
-+
-+      if (nandc->props->supports_bam) {
-+              bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
-+              bam_txn->last_cmd_desc->callback_param = bam_txn;
-+
-+              dma_async_issue_pending(nandc->tx_chan);
-+              dma_async_issue_pending(nandc->rx_chan);
-+              dma_async_issue_pending(nandc->cmd_chan);
-+
-+              if (!wait_for_completion_timeout(&bam_txn->txn_done,
-+                                               QPIC_NAND_COMPLETION_TIMEOUT))
-+                      ret = -ETIMEDOUT;
-+      } else {
-+              if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
-+                      ret = -ETIMEDOUT;
-+      }
-+
-+err_unmap_free_desc:
-+      /*
-+       * Unmap the dma sg_list and free the desc allocated by both
-+       * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
-+       */
-+      list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
-+              list_del(&desc->node);
-+
-+              if (nandc->props->supports_bam)
-+                      dma_unmap_sg(nandc->dev, desc->bam_sgl,
-+                                   desc->sgl_cnt, desc->dir);
-+              else
-+                      dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
-+                                   desc->dir);
-+
-+              kfree(desc);
-+      }
-+
-+      return ret;
-+}
-+
-+/**
-+ * qcom_clear_read_regs() - reset the read register buffer
-+ * @nandc: qpic nand controller
-+ *
-+ * This function resets the register read buffer for the next NAND operation
-+ */
-+void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
-+{
-+      nandc->reg_read_pos = 0;
-+      qcom_nandc_dev_to_mem(nandc, false);
-+}
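/*
 * Illustrative sketch (not part of this patch): the typical pattern built
 * from the helpers above for reading back a controller register: clear the
 * read buffer, queue a register read, submit the descriptors, then sync the
 * DMA buffer back to the CPU. The helper name example_read_flash_status()
 * is hypothetical.
 */
static int example_read_flash_status(struct qcom_nand_controller *nandc,
                                     __le32 *status)
{
        int ret;

        qcom_clear_read_regs(nandc);

        ret = qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
        if (ret)
                return ret;

        ret = qcom_submit_descs(nandc);
        if (ret)
                return ret;

        /* make the DMA-written reg_read_buf visible to the CPU */
        qcom_nandc_dev_to_mem(nandc, true);
        *status = nandc->reg_read_buf[0];

        return 0;
}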
-+
-+/**
-+ * qcom_nandc_unalloc() - unallocate qpic nand controller
-+ * @nandc: qpic nand controller
-+ *
-+ * This function frees the memory allocated for the qpic nand controller
-+ */
-+void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
-+{
-+      if (nandc->props->supports_bam) {
-+              if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
-+                      dma_unmap_single(nandc->dev, nandc->reg_read_dma,
-+                                       MAX_REG_RD *
-+                                       sizeof(*nandc->reg_read_buf),
-+                                       DMA_FROM_DEVICE);
-+
-+              if (nandc->tx_chan)
-+                      dma_release_channel(nandc->tx_chan);
-+
-+              if (nandc->rx_chan)
-+                      dma_release_channel(nandc->rx_chan);
-+
-+              if (nandc->cmd_chan)
-+                      dma_release_channel(nandc->cmd_chan);
-+      } else {
-+              if (nandc->chan)
-+                      dma_release_channel(nandc->chan);
-+      }
-+}
-+
-+/**
-+ * qcom_nandc_alloc() - Allocate qpic nand controller
-+ * @nandc: qpic nand controller
-+ *
-+ * This function allocates memory for the qpic nand controller
-+ */
-+int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
-+{
-+      int ret;
-+
-+      ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
-+      if (ret) {
-+              dev_err(nandc->dev, "failed to set DMA mask\n");
-+              return ret;
-+      }
-+
-+      /*
-+       * we use the internal buffer for reading ONFI params, reading small
-+       * data like ID and status, and performing read-copy-write operations
-+       * when writing to a codeword partially. 532 is the maximum possible
-+       * size of a codeword for our nand controller
-+       */
-+      nandc->buf_size = 532;
-+
-+      nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
-+      if (!nandc->data_buffer)
-+              return -ENOMEM;
-+
-+      nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
-+      if (!nandc->regs)
-+              return -ENOMEM;
-+
-+      nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
-+                                         sizeof(*nandc->reg_read_buf),
-+                                         GFP_KERNEL);
-+      if (!nandc->reg_read_buf)
-+              return -ENOMEM;
-+
-+      if (nandc->props->supports_bam) {
-+              nandc->reg_read_dma =
-+                      dma_map_single(nandc->dev, nandc->reg_read_buf,
-+                                     MAX_REG_RD *
-+                                     sizeof(*nandc->reg_read_buf),
-+                                     DMA_FROM_DEVICE);
-+              if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
-+                      dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
-+                      return -EIO;
-+              }
-+
-+              nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
-+              if (IS_ERR(nandc->tx_chan)) {
-+                      ret = PTR_ERR(nandc->tx_chan);
-+                      nandc->tx_chan = NULL;
-+                      dev_err_probe(nandc->dev, ret,
-+                                    "tx DMA channel request failed\n");
-+                      goto unalloc;
-+              }
-+
-+              nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
-+              if (IS_ERR(nandc->rx_chan)) {
-+                      ret = PTR_ERR(nandc->rx_chan);
-+                      nandc->rx_chan = NULL;
-+                      dev_err_probe(nandc->dev, ret,
-+                                    "rx DMA channel request failed\n");
-+                      goto unalloc;
-+              }
-+
-+              nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
-+              if (IS_ERR(nandc->cmd_chan)) {
-+                      ret = PTR_ERR(nandc->cmd_chan);
-+                      nandc->cmd_chan = NULL;
-+                      dev_err_probe(nandc->dev, ret,
-+                                    "cmd DMA channel request failed\n");
-+                      goto unalloc;
-+              }
-+
-+              /*
-+               * Initially allocate BAM transaction to read ONFI param page.
-+               * After detecting all the devices, this BAM transaction will
-+               * be freed and the next BAM transaction will be allocated with
-+               * maximum codeword size
-+               */
-+              nandc->max_cwperpage = 1;
-+              nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
-+              if (!nandc->bam_txn) {
-+                      dev_err(nandc->dev,
-+                              "failed to allocate bam transaction\n");
-+                      ret = -ENOMEM;
-+                      goto unalloc;
-+              }
-+      } else {
-+              nandc->chan = dma_request_chan(nandc->dev, "rxtx");
-+              if (IS_ERR(nandc->chan)) {
-+                      ret = PTR_ERR(nandc->chan);
-+                      nandc->chan = NULL;
-+                      dev_err_probe(nandc->dev, ret,
-+                                    "rxtx DMA channel request failed\n");
-+                      return ret;
-+              }
-+      }
-+
-+      INIT_LIST_HEAD(&nandc->desc_list);
-+      INIT_LIST_HEAD(&nandc->host_list);
-+
-+      return 0;
-+unalloc:
-+      qcom_nandc_unalloc(nandc);
-+      return ret;
-+}
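/*
 * Illustrative sketch (not part of this patch): the intended pairing of
 * qcom_nandc_alloc()/qcom_nandc_unalloc() in a host driver's probe and
 * remove paths. example_probe_dma()/example_remove_dma() are hypothetical
 * wrappers; the real probe code also sets up clocks, MMIO and the
 * nand_controller itself.
 */
static int example_probe_dma(struct qcom_nand_controller *nandc)
{
        int ret;

        ret = qcom_nandc_alloc(nandc);
        if (ret)
                return ret;

        /* ... scan NAND devices here; call qcom_nandc_unalloc() on failure ... */

        return 0;
}

static void example_remove_dma(struct qcom_nand_controller *nandc)
{
        qcom_nandc_unalloc(nandc);
}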
---- a/drivers/mtd/nand/raw/Kconfig
-+++ b/drivers/mtd/nand/raw/Kconfig
-@@ -330,7 +330,7 @@ config MTD_NAND_HISI504
-         Enables support for NAND controller on Hisilicon SoC Hip04.
- config MTD_NAND_QCOM
--      tristate "QCOM NAND controller"
-+      bool "QCOM NAND controller"
-       depends on ARCH_QCOM || COMPILE_TEST
-       depends on HAS_IOMEM
-       help
---- a/drivers/mtd/nand/raw/qcom_nandc.c
-+++ b/drivers/mtd/nand/raw/qcom_nandc.c
-@@ -15,417 +15,7 @@
- #include <linux/of.h>
- #include <linux/platform_device.h>
- #include <linux/slab.h>
--
--/* NANDc reg offsets */
--#define       NAND_FLASH_CMD                  0x00
--#define       NAND_ADDR0                      0x04
--#define       NAND_ADDR1                      0x08
--#define       NAND_FLASH_CHIP_SELECT          0x0c
--#define       NAND_EXEC_CMD                   0x10
--#define       NAND_FLASH_STATUS               0x14
--#define       NAND_BUFFER_STATUS              0x18
--#define       NAND_DEV0_CFG0                  0x20
--#define       NAND_DEV0_CFG1                  0x24
--#define       NAND_DEV0_ECC_CFG               0x28
--#define       NAND_AUTO_STATUS_EN             0x2c
--#define       NAND_DEV1_CFG0                  0x30
--#define       NAND_DEV1_CFG1                  0x34
--#define       NAND_READ_ID                    0x40
--#define       NAND_READ_STATUS                0x44
--#define       NAND_DEV_CMD0                   0xa0
--#define       NAND_DEV_CMD1                   0xa4
--#define       NAND_DEV_CMD2                   0xa8
--#define       NAND_DEV_CMD_VLD                0xac
--#define       SFLASHC_BURST_CFG               0xe0
--#define       NAND_ERASED_CW_DETECT_CFG       0xe8
--#define       NAND_ERASED_CW_DETECT_STATUS    0xec
--#define       NAND_EBI2_ECC_BUF_CFG           0xf0
--#define       FLASH_BUF_ACC                   0x100
--
--#define       NAND_CTRL                       0xf00
--#define       NAND_VERSION                    0xf08
--#define       NAND_READ_LOCATION_0            0xf20
--#define       NAND_READ_LOCATION_1            0xf24
--#define       NAND_READ_LOCATION_2            0xf28
--#define       NAND_READ_LOCATION_3            0xf2c
--#define       NAND_READ_LOCATION_LAST_CW_0    0xf40
--#define       NAND_READ_LOCATION_LAST_CW_1    0xf44
--#define       NAND_READ_LOCATION_LAST_CW_2    0xf48
--#define       NAND_READ_LOCATION_LAST_CW_3    0xf4c
--
--/* dummy register offsets, used by qcom_write_reg_dma */
--#define       NAND_DEV_CMD1_RESTORE           0xdead
--#define       NAND_DEV_CMD_VLD_RESTORE        0xbeef
--
--/* NAND_FLASH_CMD bits */
--#define       PAGE_ACC                        BIT(4)
--#define       LAST_PAGE                       BIT(5)
--
--/* NAND_FLASH_CHIP_SELECT bits */
--#define       NAND_DEV_SEL                    0
--#define       DM_EN                           BIT(2)
--
--/* NAND_FLASH_STATUS bits */
--#define       FS_OP_ERR                       BIT(4)
--#define       FS_READY_BSY_N                  BIT(5)
--#define       FS_MPU_ERR                      BIT(8)
--#define       FS_DEVICE_STS_ERR               BIT(16)
--#define       FS_DEVICE_WP                    BIT(23)
--
--/* NAND_BUFFER_STATUS bits */
--#define       BS_UNCORRECTABLE_BIT            BIT(8)
--#define       BS_CORRECTABLE_ERR_MSK          0x1f
--
--/* NAND_DEVn_CFG0 bits */
--#define       DISABLE_STATUS_AFTER_WRITE      4
--#define       CW_PER_PAGE                     6
--#define       UD_SIZE_BYTES                   9
--#define       UD_SIZE_BYTES_MASK              GENMASK(18, 9)
--#define       ECC_PARITY_SIZE_BYTES_RS        19
--#define       SPARE_SIZE_BYTES                23
--#define       SPARE_SIZE_BYTES_MASK           GENMASK(26, 23)
--#define       NUM_ADDR_CYCLES                 27
--#define       STATUS_BFR_READ                 30
--#define       SET_RD_MODE_AFTER_STATUS        31
--
--/* NAND_DEVn_CFG0 bits */
--#define       DEV0_CFG1_ECC_DISABLE           0
--#define       WIDE_FLASH                      1
--#define       NAND_RECOVERY_CYCLES            2
--#define       CS_ACTIVE_BSY                   5
--#define       BAD_BLOCK_BYTE_NUM              6
--#define       BAD_BLOCK_IN_SPARE_AREA         16
--#define       WR_RD_BSY_GAP                   17
--#define       ENABLE_BCH_ECC                  27
--
--/* NAND_DEV0_ECC_CFG bits */
--#define       ECC_CFG_ECC_DISABLE             0
--#define       ECC_SW_RESET                    1
--#define       ECC_MODE                        4
--#define       ECC_PARITY_SIZE_BYTES_BCH       8
--#define       ECC_NUM_DATA_BYTES              16
--#define       ECC_NUM_DATA_BYTES_MASK         GENMASK(25, 16)
--#define       ECC_FORCE_CLK_OPEN              30
--
--/* NAND_DEV_CMD1 bits */
--#define       READ_ADDR                       0
--
--/* NAND_DEV_CMD_VLD bits */
--#define       READ_START_VLD                  BIT(0)
--#define       READ_STOP_VLD                   BIT(1)
--#define       WRITE_START_VLD                 BIT(2)
--#define       ERASE_START_VLD                 BIT(3)
--#define       SEQ_READ_START_VLD              BIT(4)
--
--/* NAND_EBI2_ECC_BUF_CFG bits */
--#define       NUM_STEPS                       0
--
--/* NAND_ERASED_CW_DETECT_CFG bits */
--#define       ERASED_CW_ECC_MASK              1
--#define       AUTO_DETECT_RES                 0
--#define       MASK_ECC                        BIT(ERASED_CW_ECC_MASK)
--#define       RESET_ERASED_DET                BIT(AUTO_DETECT_RES)
--#define       ACTIVE_ERASED_DET               (0 << AUTO_DETECT_RES)
--#define       CLR_ERASED_PAGE_DET             (RESET_ERASED_DET | MASK_ECC)
--#define       SET_ERASED_PAGE_DET             (ACTIVE_ERASED_DET | MASK_ECC)
--
--/* NAND_ERASED_CW_DETECT_STATUS bits */
--#define       PAGE_ALL_ERASED                 BIT(7)
--#define       CODEWORD_ALL_ERASED             BIT(6)
--#define       PAGE_ERASED                     BIT(5)
--#define       CODEWORD_ERASED                 BIT(4)
--#define       ERASED_PAGE                     (PAGE_ALL_ERASED | PAGE_ERASED)
--#define       ERASED_CW                       (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
--
--/* NAND_READ_LOCATION_n bits */
--#define READ_LOCATION_OFFSET          0
--#define READ_LOCATION_SIZE            16
--#define READ_LOCATION_LAST            31
--
--/* Version Mask */
--#define       NAND_VERSION_MAJOR_MASK         0xf0000000
--#define       NAND_VERSION_MAJOR_SHIFT        28
--#define       NAND_VERSION_MINOR_MASK         0x0fff0000
--#define       NAND_VERSION_MINOR_SHIFT        16
--
--/* NAND OP_CMDs */
--#define       OP_PAGE_READ                    0x2
--#define       OP_PAGE_READ_WITH_ECC           0x3
--#define       OP_PAGE_READ_WITH_ECC_SPARE     0x4
--#define       OP_PAGE_READ_ONFI_READ          0x5
--#define       OP_PROGRAM_PAGE                 0x6
--#define       OP_PAGE_PROGRAM_WITH_ECC        0x7
--#define       OP_PROGRAM_PAGE_SPARE           0x9
--#define       OP_BLOCK_ERASE                  0xa
--#define       OP_CHECK_STATUS                 0xc
--#define       OP_FETCH_ID                     0xb
--#define       OP_RESET_DEVICE                 0xd
--
--/* Default Value for NAND_DEV_CMD_VLD */
--#define NAND_DEV_CMD_VLD_VAL          (READ_START_VLD | WRITE_START_VLD | \
--                                       ERASE_START_VLD | SEQ_READ_START_VLD)
--
--/* NAND_CTRL bits */
--#define       BAM_MODE_EN                     BIT(0)
--
--/*
-- * the NAND controller performs reads/writes with ECC in 516 byte chunks.
-- * the driver calls the chunks 'step' or 'codeword' interchangeably
-- */
--#define       NANDC_STEP_SIZE                 512
--
--/*
-- * the largest page size we support is 8K, this will have 16 steps/codewords
-- * of 512 bytes each
-- */
--#define       MAX_NUM_STEPS                   (SZ_8K / NANDC_STEP_SIZE)
--
--/* we read at most 3 registers per codeword scan */
--#define       MAX_REG_RD                      (3 * MAX_NUM_STEPS)
--
--/* ECC modes supported by the controller */
--#define       ECC_NONE        BIT(0)
--#define       ECC_RS_4BIT     BIT(1)
--#define       ECC_BCH_4BIT    BIT(2)
--#define       ECC_BCH_8BIT    BIT(3)
--
--/*
-- * Returns the actual register address for all NAND_DEV_ registers
-- * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
-- */
--#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
--
--/* Returns the NAND register physical address */
--#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
--
--/* Returns the dma address for reg read buffer */
--#define reg_buf_dma_addr(chip, vaddr) \
--      ((chip)->reg_read_dma + \
--      ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
--
--#define QPIC_PER_CW_CMD_ELEMENTS      32
--#define QPIC_PER_CW_CMD_SGL           32
--#define QPIC_PER_CW_DATA_SGL          8
--
--#define QPIC_NAND_COMPLETION_TIMEOUT  msecs_to_jiffies(2000)
--
--/*
-- * Flags used in DMA descriptor preparation helper functions
-- * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
-- */
--/* Don't set the EOT in current tx BAM sgl */
--#define NAND_BAM_NO_EOT                       BIT(0)
--/* Set the NWD flag in current BAM sgl */
--#define NAND_BAM_NWD                  BIT(1)
--/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
--#define NAND_BAM_NEXT_SGL             BIT(2)
--/*
-- * Erased codeword status is being used two times in single transfer so this
-- * flag will determine the current value of erased codeword status register
-- */
--#define NAND_ERASED_CW_SET            BIT(4)
--
--#define MAX_ADDRESS_CYCLE             5
--
--/*
-- * This data type corresponds to the BAM transaction which will be used for all
-- * NAND transfers.
-- * @bam_ce - the array of BAM command elements
-- * @cmd_sgl - sgl for NAND BAM command pipe
-- * @data_sgl - sgl for NAND BAM consumer/producer pipe
-- * @last_data_desc - last DMA desc in data channel (tx/rx).
-- * @last_cmd_desc - last DMA desc in command channel.
-- * @txn_done - completion for NAND transfer.
-- * @bam_ce_pos - the index in bam_ce which is available for next sgl
-- * @bam_ce_start - the index in bam_ce which marks the start position ce
-- *               for current sgl. It will be used for size calculation
-- *               for current sgl
-- * @cmd_sgl_pos - current index in command sgl.
-- * @cmd_sgl_start - start index in command sgl.
-- * @tx_sgl_pos - current index in data sgl for tx.
-- * @tx_sgl_start - start index in data sgl for tx.
-- * @rx_sgl_pos - current index in data sgl for rx.
-- * @rx_sgl_start - start index in data sgl for rx.
-- */
--struct bam_transaction {
--      struct bam_cmd_element *bam_ce;
--      struct scatterlist *cmd_sgl;
--      struct scatterlist *data_sgl;
--      struct dma_async_tx_descriptor *last_data_desc;
--      struct dma_async_tx_descriptor *last_cmd_desc;
--      struct completion txn_done;
--      u32 bam_ce_pos;
--      u32 bam_ce_start;
--      u32 cmd_sgl_pos;
--      u32 cmd_sgl_start;
--      u32 tx_sgl_pos;
--      u32 tx_sgl_start;
--      u32 rx_sgl_pos;
--      u32 rx_sgl_start;
--};
--
--/*
-- * This data type corresponds to the nand dma descriptor
-- * @dma_desc - low level DMA engine descriptor
-- * @list - list for desc_info
-- *
-- * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
-- *          ADM
-- * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
-- * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
-- * @dir - DMA transfer direction
-- */
--struct desc_info {
--      struct dma_async_tx_descriptor *dma_desc;
--      struct list_head node;
--
--      union {
--              struct scatterlist adm_sgl;
--              struct {
--                      struct scatterlist *bam_sgl;
--                      int sgl_cnt;
--              };
--      };
--      enum dma_data_direction dir;
--};
--
--/*
-- * holds the current register values that we want to write. acts as a contiguous
-- * chunk of memory which we use to write the controller registers through DMA.
-- */
--struct nandc_regs {
--      __le32 cmd;
--      __le32 addr0;
--      __le32 addr1;
--      __le32 chip_sel;
--      __le32 exec;
--
--      __le32 cfg0;
--      __le32 cfg1;
--      __le32 ecc_bch_cfg;
--
--      __le32 clrflashstatus;
--      __le32 clrreadstatus;
--
--      __le32 cmd1;
--      __le32 vld;
--
--      __le32 orig_cmd1;
--      __le32 orig_vld;
--
--      __le32 ecc_buf_cfg;
--      __le32 read_location0;
--      __le32 read_location1;
--      __le32 read_location2;
--      __le32 read_location3;
--      __le32 read_location_last0;
--      __le32 read_location_last1;
--      __le32 read_location_last2;
--      __le32 read_location_last3;
--
--      __le32 erased_cw_detect_cfg_clr;
--      __le32 erased_cw_detect_cfg_set;
--};
--
--/*
-- * NAND controller data struct
-- *
-- * @dev:                      parent device
-- *
-- * @base:                     MMIO base
-- *
-- * @core_clk:                 controller clock
-- * @aon_clk:                  another controller clock
-- *
-- * @regs:                     a contiguous chunk of memory for DMA register
-- *                            writes. contains the register values to be
-- *                            written to controller
-- *
-- * @props:                    properties of current NAND controller,
-- *                            initialized via DT match data
-- *
-- * @controller:                       base controller structure
-- * @host_list:                        list containing all the chips attached to the
-- *                            controller
-- *
-- * @chan:                     dma channel
-- * @cmd_crci:                 ADM DMA CRCI for command flow control
-- * @data_crci:                        ADM DMA CRCI for data flow control
-- *
-- * @desc_list:                        DMA descriptor list (list of desc_infos)
-- *
-- * @data_buffer:              our local DMA buffer for page read/writes,
-- *                            used when we can't use the buffer provided
-- *                            by upper layers directly
-- * @reg_read_buf:             local buffer for reading back registers via DMA
-- *
-- * @base_phys:                        physical base address of controller registers
-- * @base_dma:                 dma base address of controller registers
-- * @reg_read_dma:             contains dma address for register read buffer
-- *
-- * @buf_size/count/start:     markers for chip->legacy.read_buf/write_buf
-- *                            functions
-- * @max_cwperpage:            maximum QPIC codewords required. calculated
-- *                            from all connected NAND devices pagesize
-- *
-- * @reg_read_pos:             marker for data read in reg_read_buf
-- *
-- * @cmd1/vld:                 some fixed controller register values
-- *
-- * @exec_opwrite:             flag to select correct number of code word
-- *                            while reading status
-- */
--struct qcom_nand_controller {
--      struct device *dev;
--
--      void __iomem *base;
--
--      struct clk *core_clk;
--      struct clk *aon_clk;
--
--      struct nandc_regs *regs;
--      struct bam_transaction *bam_txn;
--
--      const struct qcom_nandc_props *props;
--
--      struct nand_controller controller;
--      struct list_head host_list;
--
--      union {
--              /* will be used only by QPIC for BAM DMA */
--              struct {
--                      struct dma_chan *tx_chan;
--                      struct dma_chan *rx_chan;
--                      struct dma_chan *cmd_chan;
--              };
--
--              /* will be used only by EBI2 for ADM DMA */
--              struct {
--                      struct dma_chan *chan;
--                      unsigned int cmd_crci;
--                      unsigned int data_crci;
--              };
--      };
--
--      struct list_head desc_list;
--
--      u8              *data_buffer;
--      __le32          *reg_read_buf;
--
--      phys_addr_t base_phys;
--      dma_addr_t base_dma;
--      dma_addr_t reg_read_dma;
--
--      int             buf_size;
--      int             buf_count;
--      int             buf_start;
--      unsigned int    max_cwperpage;
--
--      int reg_read_pos;
--
--      u32 cmd1, vld;
--      bool exec_opwrite;
--};
-+#include <linux/mtd/nand-qpic-common.h>
- /*
-  * NAND special boot partitions
-@@ -530,97 +120,6 @@ struct qcom_nand_host {
-       bool bch_enabled;
- };
--/*
-- * This data type corresponds to the NAND controller properties which varies
-- * among different NAND controllers.
-- * @ecc_modes - ecc mode for NAND
-- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
-- * @supports_bam - whether NAND controller is using BAM
-- * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
-- * @qpic_version2 - flag to indicate QPIC IP version 2
-- * @use_codeword_fixup - whether NAND has different layout for boot partitions
-- */
--struct qcom_nandc_props {
--      u32 ecc_modes;
--      u32 dev_cmd_reg_start;
--      bool supports_bam;
--      bool nandc_part_of_qpic;
--      bool qpic_version2;
--      bool use_codeword_fixup;
--};
--
--/* Frees the BAM transaction memory */
--static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
--{
--      struct bam_transaction *bam_txn = nandc->bam_txn;
--
--      devm_kfree(nandc->dev, bam_txn);
--}
--
--/* Allocates and Initializes the BAM transaction */
--static struct bam_transaction *
--qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
--{
--      struct bam_transaction *bam_txn;
--      size_t bam_txn_size;
--      unsigned int num_cw = nandc->max_cwperpage;
--      void *bam_txn_buf;
--
--      bam_txn_size =
--              sizeof(*bam_txn) + num_cw *
--              ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
--              (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
--              (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
--
--      bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
--      if (!bam_txn_buf)
--              return NULL;
--
--      bam_txn = bam_txn_buf;
--      bam_txn_buf += sizeof(*bam_txn);
--
--      bam_txn->bam_ce = bam_txn_buf;
--      bam_txn_buf +=
--              sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
--
--      bam_txn->cmd_sgl = bam_txn_buf;
--      bam_txn_buf +=
--              sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
--
--      bam_txn->data_sgl = bam_txn_buf;
--
--      init_completion(&bam_txn->txn_done);
--
--      return bam_txn;
--}
--
--/* Clears the BAM transaction indexes */
--static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
--{
--      struct bam_transaction *bam_txn = nandc->bam_txn;
--
--      if (!nandc->props->supports_bam)
--              return;
--
--      memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
--      bam_txn->last_data_desc = NULL;
--
--      sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
--                    QPIC_PER_CW_CMD_SGL);
--      sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
--                    QPIC_PER_CW_DATA_SGL);
--
--      reinit_completion(&bam_txn->txn_done);
--}
--
--/* Callback for DMA descriptor completion */
--static void qcom_qpic_bam_dma_done(void *data)
--{
--      struct bam_transaction *bam_txn = data;
--
--      complete(&bam_txn->txn_done);
--}
--
- static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
- {
-       return container_of(chip, struct qcom_nand_host, chip);
-@@ -629,8 +128,8 @@ static inline struct qcom_nand_host *to_
- static inline struct qcom_nand_controller *
- get_qcom_nand_controller(struct nand_chip *chip)
- {
--      return container_of(chip->controller, struct qcom_nand_controller,
--                          controller);
-+      return (struct qcom_nand_controller *)
-+              ((u8 *)chip->controller - sizeof(struct qcom_nand_controller));
- }
- static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
-@@ -644,23 +143,6 @@ static inline void nandc_write(struct qc
-       iowrite32(val, nandc->base + offset);
- }
--static inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
--{
--      if (!nandc->props->supports_bam)
--              return;
--
--      if (is_cpu)
--              dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
--                                      MAX_REG_RD *
--                                      sizeof(*nandc->reg_read_buf),
--                                      DMA_FROM_DEVICE);
--      else
--              dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
--                                         MAX_REG_RD *
--                                         sizeof(*nandc->reg_read_buf),
--                                         DMA_FROM_DEVICE);
--}
--
- /* Helper to check the code word, whether it is last cw or not */
- static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
- {
-@@ -820,356 +302,6 @@ static void update_rw_regs(struct qcom_n
- }
- /*
-- * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
-- * for BAM. This descriptor will be added in the NAND DMA descriptor queue
-- * which will be submitted to DMA engine.
-- */
--static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
--                                     struct dma_chan *chan,
--                                     unsigned long flags)
--{
--      struct desc_info *desc;
--      struct scatterlist *sgl;
--      unsigned int sgl_cnt;
--      int ret;
--      struct bam_transaction *bam_txn = nandc->bam_txn;
--      enum dma_transfer_direction dir_eng;
--      struct dma_async_tx_descriptor *dma_desc;
--
--      desc = kzalloc(sizeof(*desc), GFP_KERNEL);
--      if (!desc)
--              return -ENOMEM;
--
--      if (chan == nandc->cmd_chan) {
--              sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
--              sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
--              bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
--              dir_eng = DMA_MEM_TO_DEV;
--              desc->dir = DMA_TO_DEVICE;
--      } else if (chan == nandc->tx_chan) {
--              sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
--              sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
--              bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
--              dir_eng = DMA_MEM_TO_DEV;
--              desc->dir = DMA_TO_DEVICE;
--      } else {
--              sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
--              sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
--              bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
--              dir_eng = DMA_DEV_TO_MEM;
--              desc->dir = DMA_FROM_DEVICE;
--      }
--
--      sg_mark_end(sgl + sgl_cnt - 1);
--      ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
--      if (ret == 0) {
--              dev_err(nandc->dev, "failure in mapping desc\n");
--              kfree(desc);
--              return -ENOMEM;
--      }
--
--      desc->sgl_cnt = sgl_cnt;
--      desc->bam_sgl = sgl;
--
--      dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
--                                         flags);
--
--      if (!dma_desc) {
--              dev_err(nandc->dev, "failure in prep desc\n");
--              dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
--              kfree(desc);
--              return -EINVAL;
--      }
--
--      desc->dma_desc = dma_desc;
--
--      /* update last data/command descriptor */
--      if (chan == nandc->cmd_chan)
--              bam_txn->last_cmd_desc = dma_desc;
--      else
--              bam_txn->last_data_desc = dma_desc;
--
--      list_add_tail(&desc->node, &nandc->desc_list);
--
--      return 0;
--}
--
--/*
-- * Prepares the command descriptor for BAM DMA which will be used for NAND
-- * register reads and writes. The command descriptor requires the command
-- * to be formed in command element type so this function uses the command
-- * element from bam transaction ce array and fills the same with required
-- * data. A single SGL can contain multiple command elements so
-- * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
-- * after the current command element.
-- */
--static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
--                                    int reg_off, const void *vaddr,
--                                    int size, unsigned int flags)
--{
--      int bam_ce_size;
--      int i, ret;
--      struct bam_cmd_element *bam_ce_buffer;
--      struct bam_transaction *bam_txn = nandc->bam_txn;
--
--      bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
--
--      /* fill the command desc */
--      for (i = 0; i < size; i++) {
--              if (read)
--                      bam_prep_ce(&bam_ce_buffer[i],
--                                  nandc_reg_phys(nandc, reg_off + 4 * i),
--                                  BAM_READ_COMMAND,
--                                  reg_buf_dma_addr(nandc,
--                                                   (__le32 *)vaddr + i));
--              else
--                      bam_prep_ce_le32(&bam_ce_buffer[i],
--                                       nandc_reg_phys(nandc, reg_off + 4 * i),
--                                       BAM_WRITE_COMMAND,
--                                       *((__le32 *)vaddr + i));
--      }
--
--      bam_txn->bam_ce_pos += size;
--
--      /* use the separate sgl after this command */
--      if (flags & NAND_BAM_NEXT_SGL) {
--              bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
--              bam_ce_size = (bam_txn->bam_ce_pos -
--                              bam_txn->bam_ce_start) *
--                              sizeof(struct bam_cmd_element);
--              sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
--                         bam_ce_buffer, bam_ce_size);
--              bam_txn->cmd_sgl_pos++;
--              bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
--
--              if (flags & NAND_BAM_NWD) {
--                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
--                                                        DMA_PREP_FENCE |
--                                                        DMA_PREP_CMD);
--                      if (ret)
--                              return ret;
--              }
--      }
--
--      return 0;
--}
--
--/*
-- * Prepares the data descriptor for BAM DMA which will be used for NAND
-- * data reads and writes.
-- */
--static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
--                                     const void *vaddr, int size, unsigned int flags)
--{
--      int ret;
--      struct bam_transaction *bam_txn = nandc->bam_txn;
--
--      if (read) {
--              sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
--                         vaddr, size);
--              bam_txn->rx_sgl_pos++;
--      } else {
--              sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
--                         vaddr, size);
--              bam_txn->tx_sgl_pos++;
--
--              /*
--               * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
--               * is not set, form the DMA descriptor
--               */
--              if (!(flags & NAND_BAM_NO_EOT)) {
--                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
--                                                        DMA_PREP_INTERRUPT);
--                      if (ret)
--                              return ret;
--              }
--      }
--
--      return 0;
--}
--
--static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
--                                int reg_off, const void *vaddr, int size,
--                                bool flow_control)
--{
--      struct desc_info *desc;
--      struct dma_async_tx_descriptor *dma_desc;
--      struct scatterlist *sgl;
--      struct dma_slave_config slave_conf;
--      struct qcom_adm_peripheral_config periph_conf = {};
--      enum dma_transfer_direction dir_eng;
--      int ret;
--
--      desc = kzalloc(sizeof(*desc), GFP_KERNEL);
--      if (!desc)
--              return -ENOMEM;
--
--      sgl = &desc->adm_sgl;
--
--      sg_init_one(sgl, vaddr, size);
--
--      if (read) {
--              dir_eng = DMA_DEV_TO_MEM;
--              desc->dir = DMA_FROM_DEVICE;
--      } else {
--              dir_eng = DMA_MEM_TO_DEV;
--              desc->dir = DMA_TO_DEVICE;
--      }
--
--      ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
--      if (ret == 0) {
--              ret = -ENOMEM;
--              goto err;
--      }
--
--      memset(&slave_conf, 0x00, sizeof(slave_conf));
--
--      slave_conf.device_fc = flow_control;
--      if (read) {
--              slave_conf.src_maxburst = 16;
--              slave_conf.src_addr = nandc->base_dma + reg_off;
--              if (nandc->data_crci) {
--                      periph_conf.crci = nandc->data_crci;
--                      slave_conf.peripheral_config = &periph_conf;
--                      slave_conf.peripheral_size = sizeof(periph_conf);
--              }
--      } else {
--              slave_conf.dst_maxburst = 16;
--              slave_conf.dst_addr = nandc->base_dma + reg_off;
--              if (nandc->cmd_crci) {
--                      periph_conf.crci = nandc->cmd_crci;
--                      slave_conf.peripheral_config = &periph_conf;
--                      slave_conf.peripheral_size = sizeof(periph_conf);
--              }
--      }
--
--      ret = dmaengine_slave_config(nandc->chan, &slave_conf);
--      if (ret) {
--              dev_err(nandc->dev, "failed to configure dma channel\n");
--              goto err;
--      }
--
--      dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
--      if (!dma_desc) {
--              dev_err(nandc->dev, "failed to prepare desc\n");
--              ret = -EINVAL;
--              goto err;
--      }
--
--      desc->dma_desc = dma_desc;
--
--      list_add_tail(&desc->node, &nandc->desc_list);
--
--      return 0;
--err:
--      kfree(desc);
--
--      return ret;
--}
--
--/*
-- * qcom_read_reg_dma: prepares a descriptor to read a given number of
-- *                    contiguous registers to the reg_read_buf pointer
-- *
-- * @first:            offset of the first register in the contiguous block
-- * @num_regs:         number of registers to read
-- * @flags:            flags to control DMA descriptor preparation
-- */
--static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
--                           int num_regs, unsigned int flags)
--{
--      bool flow_control = false;
--      void *vaddr;
--
--      vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
--      nandc->reg_read_pos += num_regs;
--
--      if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
--              first = dev_cmd_reg_addr(nandc, first);
--
--      if (nandc->props->supports_bam)
--              return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
--                                           num_regs, flags);
--
--      if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
--              flow_control = true;
--
--      return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
--                               num_regs * sizeof(u32), flow_control);
--}
--
--/*
-- * qcom_write_reg_dma:        prepares a descriptor to write a given number of
-- *                    contiguous registers
-- *
-- * @vaddr:            contnigeous memory from where register value will
-- *                    be written
-- * @first:            offset of the first register in the contiguous block
-- * @num_regs:         number of registers to write
-- * @flags:            flags to control DMA descriptor preparation
-- */
--static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
--                            int first, int num_regs, unsigned int flags)
--{
--      bool flow_control = false;
--
--      if (first == NAND_EXEC_CMD)
--              flags |= NAND_BAM_NWD;
--
--      if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
--              first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
--
--      if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
--              first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
--
--      if (nandc->props->supports_bam)
--              return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
--                                           num_regs, flags);
--
--      if (first == NAND_FLASH_CMD)
--              flow_control = true;
--
--      return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
--                               num_regs * sizeof(u32), flow_control);
--}
--
--/*
-- * qcom_read_data_dma:        prepares a DMA descriptor to transfer data from the
-- *                    controller's internal buffer to the buffer 'vaddr'
-- *
-- * @reg_off:          offset within the controller's data buffer
-- * @vaddr:            virtual address of the buffer we want to write to
-- * @size:             DMA transaction size in bytes
-- * @flags:            flags to control DMA descriptor preparation
-- */
--static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
--                            const u8 *vaddr, int size, unsigned int flags)
--{
--      if (nandc->props->supports_bam)
--              return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
--
--      return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
--}
--
--/*
-- * qcom_write_data_dma:       prepares a DMA descriptor to transfer data from
-- *                    'vaddr' to the controller's internal buffer
-- *
-- * @reg_off:          offset within the controller's data buffer
-- * @vaddr:            virtual address of the buffer we want to read from
-- * @size:             DMA transaction size in bytes
-- * @flags:            flags to control DMA descriptor preparation
-- */
--static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
--                             const u8 *vaddr, int size, unsigned int flags)
--{
--      if (nandc->props->supports_bam)
--              return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
--
--      return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
--}
--
--/*
-  * Helper to prepare DMA descriptors for configuring registers
-  * before reading a NAND page.
-  */
-@@ -1262,83 +394,6 @@ static void config_nand_cw_write(struct
-                          NAND_BAM_NEXT_SGL);
- }
--/* helpers to submit/free our list of dma descriptors */
--static int qcom_submit_descs(struct qcom_nand_controller *nandc)
--{
--      struct desc_info *desc, *n;
--      dma_cookie_t cookie = 0;
--      struct bam_transaction *bam_txn = nandc->bam_txn;
--      int ret = 0;
--
--      if (nandc->props->supports_bam) {
--              if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
--                      ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
--                      if (ret)
--                              goto err_unmap_free_desc;
--              }
--
--              if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
--                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
--                                                        DMA_PREP_INTERRUPT);
--                      if (ret)
--                              goto err_unmap_free_desc;
--              }
--
--              if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
--                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
--                                                        DMA_PREP_CMD);
--                      if (ret)
--                              goto err_unmap_free_desc;
--              }
--      }
--
--      list_for_each_entry(desc, &nandc->desc_list, node)
--              cookie = dmaengine_submit(desc->dma_desc);
--
--      if (nandc->props->supports_bam) {
--              bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
--              bam_txn->last_cmd_desc->callback_param = bam_txn;
--
--              dma_async_issue_pending(nandc->tx_chan);
--              dma_async_issue_pending(nandc->rx_chan);
--              dma_async_issue_pending(nandc->cmd_chan);
--
--              if (!wait_for_completion_timeout(&bam_txn->txn_done,
--                                               QPIC_NAND_COMPLETION_TIMEOUT))
--                      ret = -ETIMEDOUT;
--      } else {
--              if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
--                      ret = -ETIMEDOUT;
--      }
--
--err_unmap_free_desc:
--      /*
--       * Unmap the dma sg_list and free the desc allocated by both
--       * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
--       */
--      list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
--              list_del(&desc->node);
--
--              if (nandc->props->supports_bam)
--                      dma_unmap_sg(nandc->dev, desc->bam_sgl,
--                                   desc->sgl_cnt, desc->dir);
--              else
--                      dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
--                                   desc->dir);
--
--              kfree(desc);
--      }
--
--      return ret;
--}
--
--/* reset the register read buffer for next NAND operation */
--static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
--{
--      nandc->reg_read_pos = 0;
--      qcom_nandc_dev_to_mem(nandc, false);
--}
--
- /*
-  * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
-  * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
-@@ -2967,141 +2022,14 @@ static const struct nand_controller_ops
-       .exec_op = qcom_nand_exec_op,
- };
--static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
--{
--      if (nandc->props->supports_bam) {
--              if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
--                      dma_unmap_single(nandc->dev, nandc->reg_read_dma,
--                                       MAX_REG_RD *
--                                       sizeof(*nandc->reg_read_buf),
--                                       DMA_FROM_DEVICE);
--
--              if (nandc->tx_chan)
--                      dma_release_channel(nandc->tx_chan);
--
--              if (nandc->rx_chan)
--                      dma_release_channel(nandc->rx_chan);
--
--              if (nandc->cmd_chan)
--                      dma_release_channel(nandc->cmd_chan);
--      } else {
--              if (nandc->chan)
--                      dma_release_channel(nandc->chan);
--      }
--}
--
--static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
--{
--      int ret;
--
--      ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
--      if (ret) {
--              dev_err(nandc->dev, "failed to set DMA mask\n");
--              return ret;
--      }
--
--      /*
--       * we use the internal buffer for reading ONFI params, reading small
--       * data like ID and status, and preforming read-copy-write operations
--       * when writing to a codeword partially. 532 is the maximum possible
--       * size of a codeword for our nand controller
--       */
--      nandc->buf_size = 532;
--
--      nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
--      if (!nandc->data_buffer)
--              return -ENOMEM;
--
--      nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
--      if (!nandc->regs)
--              return -ENOMEM;
--
--      nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
--                                         sizeof(*nandc->reg_read_buf),
--                                         GFP_KERNEL);
--      if (!nandc->reg_read_buf)
--              return -ENOMEM;
--
--      if (nandc->props->supports_bam) {
--              nandc->reg_read_dma =
--                      dma_map_single(nandc->dev, nandc->reg_read_buf,
--                                     MAX_REG_RD *
--                                     sizeof(*nandc->reg_read_buf),
--                                     DMA_FROM_DEVICE);
--              if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
--                      dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
--                      return -EIO;
--              }
--
--              nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
--              if (IS_ERR(nandc->tx_chan)) {
--                      ret = PTR_ERR(nandc->tx_chan);
--                      nandc->tx_chan = NULL;
--                      dev_err_probe(nandc->dev, ret,
--                                    "tx DMA channel request failed\n");
--                      goto unalloc;
--              }
--
--              nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
--              if (IS_ERR(nandc->rx_chan)) {
--                      ret = PTR_ERR(nandc->rx_chan);
--                      nandc->rx_chan = NULL;
--                      dev_err_probe(nandc->dev, ret,
--                                    "rx DMA channel request failed\n");
--                      goto unalloc;
--              }
--
--              nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
--              if (IS_ERR(nandc->cmd_chan)) {
--                      ret = PTR_ERR(nandc->cmd_chan);
--                      nandc->cmd_chan = NULL;
--                      dev_err_probe(nandc->dev, ret,
--                                    "cmd DMA channel request failed\n");
--                      goto unalloc;
--              }
--
--              /*
--               * Initially allocate BAM transaction to read ONFI param page.
--               * After detecting all the devices, this BAM transaction will
--               * be freed and the next BAM transaction will be allocated with
--               * maximum codeword size
--               */
--              nandc->max_cwperpage = 1;
--              nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
--              if (!nandc->bam_txn) {
--                      dev_err(nandc->dev,
--                              "failed to allocate bam transaction\n");
--                      ret = -ENOMEM;
--                      goto unalloc;
--              }
--      } else {
--              nandc->chan = dma_request_chan(nandc->dev, "rxtx");
--              if (IS_ERR(nandc->chan)) {
--                      ret = PTR_ERR(nandc->chan);
--                      nandc->chan = NULL;
--                      dev_err_probe(nandc->dev, ret,
--                                    "rxtx DMA channel request failed\n");
--                      return ret;
--              }
--      }
--
--      INIT_LIST_HEAD(&nandc->desc_list);
--      INIT_LIST_HEAD(&nandc->host_list);
--
--      nand_controller_init(&nandc->controller);
--      nandc->controller.ops = &qcom_nandc_ops;
--
--      return 0;
--unalloc:
--      qcom_nandc_unalloc(nandc);
--      return ret;
--}
--
- /* one time setup of a few nand controller registers */
- static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
- {
-       u32 nand_ctrl;
-+      nand_controller_init(nandc->controller);
-+      nandc->controller->ops = &qcom_nandc_ops;
-+
-       /* kill onenand */
-       if (!nandc->props->nandc_part_of_qpic)
-               nandc_write(nandc, SFLASHC_BURST_CFG, 0);
-@@ -3240,7 +2168,7 @@ static int qcom_nand_host_init_and_regis
-       chip->legacy.block_bad          = qcom_nandc_block_bad;
-       chip->legacy.block_markbad      = qcom_nandc_block_markbad;
--      chip->controller = &nandc->controller;
-+      chip->controller = nandc->controller;
-       chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
-                        NAND_SKIP_BBTSCAN;
-@@ -3323,17 +2251,21 @@ static int qcom_nandc_parse_dt(struct pl
- static int qcom_nandc_probe(struct platform_device *pdev)
- {
-       struct qcom_nand_controller *nandc;
-+      struct nand_controller *controller;
-       const void *dev_data;
-       struct device *dev = &pdev->dev;
-       struct resource *res;
-       int ret;
--      nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
-+      nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc) + sizeof(*controller),
-+                           GFP_KERNEL);
-       if (!nandc)
-               return -ENOMEM;
-+      controller = (struct nand_controller *)&nandc[1];
-       platform_set_drvdata(pdev, nandc);
-       nandc->dev = dev;
-+      nandc->controller = controller;
-       dev_data = of_device_get_match_data(dev);
-       if (!dev_data) {
---- /dev/null
-+++ b/include/linux/mtd/nand-qpic-common.h
-@@ -0,0 +1,468 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+/*
-+ * QCOM QPIC common APIs header file
-+ *
-+ * Copyright (c) 2023 Qualcomm Inc.
-+ * Authors:   Md sadre Alam   <quic_mdalam@quicinc.com>
-+ *
-+ */
-+#ifndef __MTD_NAND_QPIC_COMMON_H__
-+#define __MTD_NAND_QPIC_COMMON_H__
-+
-+/* NANDc reg offsets */
-+#define       NAND_FLASH_CMD                  0x00
-+#define       NAND_ADDR0                      0x04
-+#define       NAND_ADDR1                      0x08
-+#define       NAND_FLASH_CHIP_SELECT          0x0c
-+#define       NAND_EXEC_CMD                   0x10
-+#define       NAND_FLASH_STATUS               0x14
-+#define       NAND_BUFFER_STATUS              0x18
-+#define       NAND_DEV0_CFG0                  0x20
-+#define       NAND_DEV0_CFG1                  0x24
-+#define       NAND_DEV0_ECC_CFG               0x28
-+#define       NAND_AUTO_STATUS_EN             0x2c
-+#define       NAND_DEV1_CFG0                  0x30
-+#define       NAND_DEV1_CFG1                  0x34
-+#define       NAND_READ_ID                    0x40
-+#define       NAND_READ_STATUS                0x44
-+#define       NAND_DEV_CMD0                   0xa0
-+#define       NAND_DEV_CMD1                   0xa4
-+#define       NAND_DEV_CMD2                   0xa8
-+#define       NAND_DEV_CMD_VLD                0xac
-+#define       SFLASHC_BURST_CFG               0xe0
-+#define       NAND_ERASED_CW_DETECT_CFG       0xe8
-+#define       NAND_ERASED_CW_DETECT_STATUS    0xec
-+#define       NAND_EBI2_ECC_BUF_CFG           0xf0
-+#define       FLASH_BUF_ACC                   0x100
-+
-+#define       NAND_CTRL                       0xf00
-+#define       NAND_VERSION                    0xf08
-+#define       NAND_READ_LOCATION_0            0xf20
-+#define       NAND_READ_LOCATION_1            0xf24
-+#define       NAND_READ_LOCATION_2            0xf28
-+#define       NAND_READ_LOCATION_3            0xf2c
-+#define       NAND_READ_LOCATION_LAST_CW_0    0xf40
-+#define       NAND_READ_LOCATION_LAST_CW_1    0xf44
-+#define       NAND_READ_LOCATION_LAST_CW_2    0xf48
-+#define       NAND_READ_LOCATION_LAST_CW_3    0xf4c
-+
-+/* dummy register offsets, used by qcom_write_reg_dma */
-+#define       NAND_DEV_CMD1_RESTORE           0xdead
-+#define       NAND_DEV_CMD_VLD_RESTORE        0xbeef
-+
-+/* NAND_FLASH_CMD bits */
-+#define       PAGE_ACC                        BIT(4)
-+#define       LAST_PAGE                       BIT(5)
-+
-+/* NAND_FLASH_CHIP_SELECT bits */
-+#define       NAND_DEV_SEL                    0
-+#define       DM_EN                           BIT(2)
-+
-+/* NAND_FLASH_STATUS bits */
-+#define       FS_OP_ERR                       BIT(4)
-+#define       FS_READY_BSY_N                  BIT(5)
-+#define       FS_MPU_ERR                      BIT(8)
-+#define       FS_DEVICE_STS_ERR               BIT(16)
-+#define       FS_DEVICE_WP                    BIT(23)
-+
-+/* NAND_BUFFER_STATUS bits */
-+#define       BS_UNCORRECTABLE_BIT            BIT(8)
-+#define       BS_CORRECTABLE_ERR_MSK          0x1f
-+
-+/* NAND_DEVn_CFG0 bits */
-+#define       DISABLE_STATUS_AFTER_WRITE      4
-+#define       CW_PER_PAGE                     6
-+#define       UD_SIZE_BYTES                   9
-+#define       UD_SIZE_BYTES_MASK              GENMASK(18, 9)
-+#define       ECC_PARITY_SIZE_BYTES_RS        19
-+#define       SPARE_SIZE_BYTES                23
-+#define       SPARE_SIZE_BYTES_MASK           GENMASK(26, 23)
-+#define       NUM_ADDR_CYCLES                 27
-+#define       STATUS_BFR_READ                 30
-+#define       SET_RD_MODE_AFTER_STATUS        31
-+
-+/* NAND_DEVn_CFG0 bits */
-+#define       DEV0_CFG1_ECC_DISABLE           0
-+#define       WIDE_FLASH                      1
-+#define       NAND_RECOVERY_CYCLES            2
-+#define       CS_ACTIVE_BSY                   5
-+#define       BAD_BLOCK_BYTE_NUM              6
-+#define       BAD_BLOCK_IN_SPARE_AREA         16
-+#define       WR_RD_BSY_GAP                   17
-+#define       ENABLE_BCH_ECC                  27
-+
-+/* NAND_DEV0_ECC_CFG bits */
-+#define       ECC_CFG_ECC_DISABLE             0
-+#define       ECC_SW_RESET                    1
-+#define       ECC_MODE                        4
-+#define       ECC_PARITY_SIZE_BYTES_BCH       8
-+#define       ECC_NUM_DATA_BYTES              16
-+#define       ECC_NUM_DATA_BYTES_MASK         GENMASK(25, 16)
-+#define       ECC_FORCE_CLK_OPEN              30
-+
-+/* NAND_DEV_CMD1 bits */
-+#define       READ_ADDR                       0
-+
-+/* NAND_DEV_CMD_VLD bits */
-+#define       READ_START_VLD                  BIT(0)
-+#define       READ_STOP_VLD                   BIT(1)
-+#define       WRITE_START_VLD                 BIT(2)
-+#define       ERASE_START_VLD                 BIT(3)
-+#define       SEQ_READ_START_VLD              BIT(4)
-+
-+/* NAND_EBI2_ECC_BUF_CFG bits */
-+#define       NUM_STEPS                       0
-+
-+/* NAND_ERASED_CW_DETECT_CFG bits */
-+#define       ERASED_CW_ECC_MASK              1
-+#define       AUTO_DETECT_RES                 0
-+#define       MASK_ECC                        BIT(ERASED_CW_ECC_MASK)
-+#define       RESET_ERASED_DET                BIT(AUTO_DETECT_RES)
-+#define       ACTIVE_ERASED_DET               (0 << AUTO_DETECT_RES)
-+#define       CLR_ERASED_PAGE_DET             (RESET_ERASED_DET | MASK_ECC)
-+#define       SET_ERASED_PAGE_DET             (ACTIVE_ERASED_DET | MASK_ECC)
-+
-+/* NAND_ERASED_CW_DETECT_STATUS bits */
-+#define       PAGE_ALL_ERASED                 BIT(7)
-+#define       CODEWORD_ALL_ERASED             BIT(6)
-+#define       PAGE_ERASED                     BIT(5)
-+#define       CODEWORD_ERASED                 BIT(4)
-+#define       ERASED_PAGE                     (PAGE_ALL_ERASED | PAGE_ERASED)
-+#define       ERASED_CW                       (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
-+
-+/* NAND_READ_LOCATION_n bits */
-+#define READ_LOCATION_OFFSET          0
-+#define READ_LOCATION_SIZE            16
-+#define READ_LOCATION_LAST            31
-+
-+/* Version Mask */
-+#define       NAND_VERSION_MAJOR_MASK         0xf0000000
-+#define       NAND_VERSION_MAJOR_SHIFT        28
-+#define       NAND_VERSION_MINOR_MASK         0x0fff0000
-+#define       NAND_VERSION_MINOR_SHIFT        16
-+
-+/* NAND OP_CMDs */
-+#define       OP_PAGE_READ                    0x2
-+#define       OP_PAGE_READ_WITH_ECC           0x3
-+#define       OP_PAGE_READ_WITH_ECC_SPARE     0x4
-+#define       OP_PAGE_READ_ONFI_READ          0x5
-+#define       OP_PROGRAM_PAGE                 0x6
-+#define       OP_PAGE_PROGRAM_WITH_ECC        0x7
-+#define       OP_PROGRAM_PAGE_SPARE           0x9
-+#define       OP_BLOCK_ERASE                  0xa
-+#define       OP_CHECK_STATUS                 0xc
-+#define       OP_FETCH_ID                     0xb
-+#define       OP_RESET_DEVICE                 0xd
-+
-+/* Default Value for NAND_DEV_CMD_VLD */
-+#define NAND_DEV_CMD_VLD_VAL          (READ_START_VLD | WRITE_START_VLD | \
-+                                       ERASE_START_VLD | SEQ_READ_START_VLD)
-+
-+/* NAND_CTRL bits */
-+#define       BAM_MODE_EN                     BIT(0)
-+
-+/*
-+ * the NAND controller performs reads/writes with ECC in 516 byte chunks.
-+ * the driver calls the chunks 'step' or 'codeword' interchangeably
-+ */
-+#define       NANDC_STEP_SIZE                 512
-+
-+/*
-+ * the largest page size we support is 8K, this will have 16 steps/codewords
-+ * of 512 bytes each
-+ */
-+#define       MAX_NUM_STEPS                   (SZ_8K / NANDC_STEP_SIZE)
-+
-+/* we read at most 3 registers per codeword scan */
-+#define       MAX_REG_RD                      (3 * MAX_NUM_STEPS)
-+
-+/* ECC modes supported by the controller */
-+#define       ECC_NONE        BIT(0)
-+#define       ECC_RS_4BIT     BIT(1)
-+#define       ECC_BCH_4BIT    BIT(2)
-+#define       ECC_BCH_8BIT    BIT(3)
-+
-+/*
-+ * Returns the actual register address for all NAND_DEV_ registers
-+ * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
-+ */
-+#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
-+
-+/* Returns the NAND register physical address */
-+#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
-+
-+/* Returns the dma address for reg read buffer */
-+#define reg_buf_dma_addr(chip, vaddr) \
-+      ((chip)->reg_read_dma + \
-+      ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
-+
-+#define QPIC_PER_CW_CMD_ELEMENTS      32
-+#define QPIC_PER_CW_CMD_SGL           32
-+#define QPIC_PER_CW_DATA_SGL          8
-+
-+#define QPIC_NAND_COMPLETION_TIMEOUT  msecs_to_jiffies(2000)
-+
-+/*
-+ * Flags used in DMA descriptor preparation helper functions
-+ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
-+ */
-+/* Don't set the EOT in current tx BAM sgl */
-+#define NAND_BAM_NO_EOT                       BIT(0)
-+/* Set the NWD flag in current BAM sgl */
-+#define NAND_BAM_NWD                  BIT(1)
-+/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
-+#define NAND_BAM_NEXT_SGL             BIT(2)
-+/*
-+ * Erased codeword status is being used two times in single transfer so this
-+ * flag will determine the current value of erased codeword status register
-+ */
-+#define NAND_ERASED_CW_SET            BIT(4)
-+
-+#define MAX_ADDRESS_CYCLE             5
-+
-+/*
-+ * This data type corresponds to the BAM transaction which will be used for all
-+ * NAND transfers.
-+ * @bam_ce - the array of BAM command elements
-+ * @cmd_sgl - sgl for NAND BAM command pipe
-+ * @data_sgl - sgl for NAND BAM consumer/producer pipe
-+ * @last_data_desc - last DMA desc in data channel (tx/rx).
-+ * @last_cmd_desc - last DMA desc in command channel.
-+ * @txn_done - completion for NAND transfer.
-+ * @bam_ce_pos - the index in bam_ce which is available for next sgl
-+ * @bam_ce_start - the index in bam_ce which marks the start position ce
-+ *               for current sgl. It will be used for size calculation
-+ *               for current sgl
-+ * @cmd_sgl_pos - current index in command sgl.
-+ * @cmd_sgl_start - start index in command sgl.
-+ * @tx_sgl_pos - current index in data sgl for tx.
-+ * @tx_sgl_start - start index in data sgl for tx.
-+ * @rx_sgl_pos - current index in data sgl for rx.
-+ * @rx_sgl_start - start index in data sgl for rx.
-+ */
-+struct bam_transaction {
-+      struct bam_cmd_element *bam_ce;
-+      struct scatterlist *cmd_sgl;
-+      struct scatterlist *data_sgl;
-+      struct dma_async_tx_descriptor *last_data_desc;
-+      struct dma_async_tx_descriptor *last_cmd_desc;
-+      struct completion txn_done;
-+      u32 bam_ce_pos;
-+      u32 bam_ce_start;
-+      u32 cmd_sgl_pos;
-+      u32 cmd_sgl_start;
-+      u32 tx_sgl_pos;
-+      u32 tx_sgl_start;
-+      u32 rx_sgl_pos;
-+      u32 rx_sgl_start;
-+};
-+
-+/*
-+ * This data type corresponds to the nand dma descriptor
-+ * @dma_desc - low level DMA engine descriptor
-+ * @list - list for desc_info
-+ *
-+ * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
-+ *          ADM
-+ * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
-+ * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
-+ * @dir - DMA transfer direction
-+ */
-+struct desc_info {
-+      struct dma_async_tx_descriptor *dma_desc;
-+      struct list_head node;
-+
-+      union {
-+              struct scatterlist adm_sgl;
-+              struct {
-+                      struct scatterlist *bam_sgl;
-+                      int sgl_cnt;
-+              };
-+      };
-+      enum dma_data_direction dir;
-+};
-+
-+/*
-+ * holds the current register values that we want to write. acts as a contiguous
-+ * chunk of memory which we use to write the controller registers through DMA.
-+ */
-+struct nandc_regs {
-+      __le32 cmd;
-+      __le32 addr0;
-+      __le32 addr1;
-+      __le32 chip_sel;
-+      __le32 exec;
-+
-+      __le32 cfg0;
-+      __le32 cfg1;
-+      __le32 ecc_bch_cfg;
-+
-+      __le32 clrflashstatus;
-+      __le32 clrreadstatus;
-+
-+      __le32 cmd1;
-+      __le32 vld;
-+
-+      __le32 orig_cmd1;
-+      __le32 orig_vld;
-+
-+      __le32 ecc_buf_cfg;
-+      __le32 read_location0;
-+      __le32 read_location1;
-+      __le32 read_location2;
-+      __le32 read_location3;
-+      __le32 read_location_last0;
-+      __le32 read_location_last1;
-+      __le32 read_location_last2;
-+      __le32 read_location_last3;
-+
-+      __le32 erased_cw_detect_cfg_clr;
-+      __le32 erased_cw_detect_cfg_set;
-+};
-+
-+/*
-+ * NAND controller data struct
-+ *
-+ * @dev:                      parent device
-+ *
-+ * @base:                     MMIO base
-+ *
-+ * @core_clk:                 controller clock
-+ * @aon_clk:                  another controller clock
-+ *
-+ * @regs:                     a contiguous chunk of memory for DMA register
-+ *                            writes. contains the register values to be
-+ *                            written to controller
-+ *
-+ * @props:                    properties of current NAND controller,
-+ *                            initialized via DT match data
-+ *
-+ * @controller:                       base controller structure
-+ * @host_list:                        list containing all the chips attached to the
-+ *                            controller
-+ *
-+ * @chan:                     dma channel
-+ * @cmd_crci:                 ADM DMA CRCI for command flow control
-+ * @data_crci:                        ADM DMA CRCI for data flow control
-+ *
-+ * @desc_list:                        DMA descriptor list (list of desc_infos)
-+ *
-+ * @data_buffer:              our local DMA buffer for page read/writes,
-+ *                            used when we can't use the buffer provided
-+ *                            by upper layers directly
-+ * @reg_read_buf:             local buffer for reading back registers via DMA
-+ *
-+ * @base_phys:                        physical base address of controller registers
-+ * @base_dma:                 dma base address of controller registers
-+ * @reg_read_dma:             contains dma address for register read buffer
-+ *
-+ * @buf_size/count/start:     markers for chip->legacy.read_buf/write_buf
-+ *                            functions
-+ * @max_cwperpage:            maximum QPIC codewords required. calculated
-+ *                            from all connected NAND devices pagesize
-+ *
-+ * @reg_read_pos:             marker for data read in reg_read_buf
-+ *
-+ * @cmd1/vld:                 some fixed controller register values
-+ *
-+ * @exec_opwrite:             flag to select correct number of code word
-+ *                            while reading status
-+ */
-+struct qcom_nand_controller {
-+      struct device *dev;
-+
-+      void __iomem *base;
-+
-+      struct clk *core_clk;
-+      struct clk *aon_clk;
-+
-+      struct nandc_regs *regs;
-+      struct bam_transaction *bam_txn;
-+
-+      const struct qcom_nandc_props *props;
-+
-+      struct nand_controller *controller;
-+      struct list_head host_list;
-+
-+      union {
-+              /* will be used only by QPIC for BAM DMA */
-+              struct {
-+                      struct dma_chan *tx_chan;
-+                      struct dma_chan *rx_chan;
-+                      struct dma_chan *cmd_chan;
-+              };
-+
-+              /* will be used only by EBI2 for ADM DMA */
-+              struct {
-+                      struct dma_chan *chan;
-+                      unsigned int cmd_crci;
-+                      unsigned int data_crci;
-+              };
-+      };
-+
-+      struct list_head desc_list;
-+
-+      u8              *data_buffer;
-+      __le32          *reg_read_buf;
-+
-+      phys_addr_t base_phys;
-+      dma_addr_t base_dma;
-+      dma_addr_t reg_read_dma;
-+
-+      int             buf_size;
-+      int             buf_count;
-+      int             buf_start;
-+      unsigned int    max_cwperpage;
-+
-+      int reg_read_pos;
-+
-+      u32 cmd1, vld;
-+      bool exec_opwrite;
-+};
-+
-+/*
-+ * This data type corresponds to the NAND controller properties which varies
-+ * among different NAND controllers.
-+ * @ecc_modes - ecc mode for NAND
-+ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
-+ * @supports_bam - whether NAND controller is using BAM
-+ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
-+ * @qpic_version2 - flag to indicate QPIC IP version 2
-+ * @use_codeword_fixup - whether NAND has different layout for boot partitions
-+ */
-+struct qcom_nandc_props {
-+      u32 ecc_modes;
-+      u32 dev_cmd_reg_start;
-+      bool supports_bam;
-+      bool nandc_part_of_qpic;
-+      bool qpic_version2;
-+      bool use_codeword_fixup;
-+};
-+
-+void qcom_free_bam_transaction(struct qcom_nand_controller *nandc);
-+struct bam_transaction *qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc);
-+void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc);
-+void qcom_qpic_bam_dma_done(void *data);
-+void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu);
-+int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
-+                              struct dma_chan *chan, unsigned long flags);
-+int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
-+                             int reg_off, const void *vaddr, int size, unsigned int flags);
-+int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
-+                              const void *vaddr, int size, unsigned int flags);
-+int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, int reg_off,
-+                         const void *vaddr, int size, bool flow_control);
-+int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first, int num_regs,
-+                    unsigned int flags);
-+int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, int first,
-+                     int num_regs, unsigned int flags);
-+int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
-+                     int size, unsigned int flags);
-+int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
-+                      int size, unsigned int flags);
-+int qcom_submit_descs(struct qcom_nand_controller *nandc);
-+void qcom_clear_read_regs(struct qcom_nand_controller *nandc);
-+void qcom_nandc_unalloc(struct qcom_nand_controller *nandc);
-+int qcom_nandc_alloc(struct qcom_nand_controller *nandc);
-+#endif
-+
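For orientation, a minimal sketch of how a driver might consume the helpers declared in nand-qpic-common.h above; the call order mirrors the raw-NAND flow in the surrounding hunks, qpic_do_transfer() is a hypothetical name, and nothing here is an API contract beyond the prototypes in the header.

    #include <linux/device.h>
    #include <linux/mtd/nand-qpic-common.h>

    /* Hypothetical wrapper, not part of the patch: run one queued transfer. */
    static int qpic_do_transfer(struct qcom_nand_controller *nandc)
    {
            int ret;

            qcom_clear_read_regs(nandc);        /* reset the register read buffer */
            qcom_clear_bam_transaction(nandc);  /* reset BAM sgl/ce bookkeeping */

            /* ... queue register and data descriptors for the operation ... */

            ret = qcom_submit_descs(nandc);     /* issue the DMA and wait */
            if (ret)
                    dev_err(nandc->dev, "failed to submit descriptors\n");

            return ret;
    }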
diff --git a/target/linux/qualcommax/patches-6.6/0405-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch b/target/linux/qualcommax/patches-6.6/0405-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch
deleted file mode 100644 (file)
index 5f8ff3e..0000000
+++ /dev/null
@@ -1,191 +0,0 @@
-From: Md Sadre Alam <quic_mdalam@quicinc.com>
-Date: Sun, 22 Sep 2024 17:03:48 +0530
-Subject: [PATCH] mtd: rawnand: qcom: use FIELD_PREP and GENMASK
-
-Use the bitfield macros FIELD_PREP() and GENMASK() to
-do the shift and mask in one go. This makes the code
-more readable.
-
-Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
----
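As a quick illustration of the conversion described above (a minimal sketch, not driver code; CW_PER_PAGE_MASK matches the definition added by the hunk below, while pack_cw_per_page() is a hypothetical helper):

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    /* 3-bit "codewords per page" field at bits [8:6] of NAND_DEVn_CFG0. */
    #define CW_PER_PAGE_MASK        GENMASK(8, 6)

    static u32 pack_cw_per_page(u32 cwperpage)
    {
            /* Open-coded style: (cwperpage - 1) << CW_PER_PAGE, shift only.  */
            /* FIELD_PREP(): the mask both positions and bounds the value.    */
            return FIELD_PREP(CW_PER_PAGE_MASK, cwperpage - 1);
    }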
---- a/drivers/mtd/nand/raw/qcom_nandc.c
-+++ b/drivers/mtd/nand/raw/qcom_nandc.c
-@@ -281,7 +281,7 @@ static void update_rw_regs(struct qcom_n
-                               (num_cw - 1) << CW_PER_PAGE);
-               cfg1 = cpu_to_le32(host->cfg1_raw);
--              ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
-+              ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
-       }
-       nandc->regs->cmd = cmd;
-@@ -1494,42 +1494,41 @@ static int qcom_nand_attach_chip(struct
-       host->cw_size = host->cw_data + ecc->bytes;
-       bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
--      host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
--                              | host->cw_data << UD_SIZE_BYTES
--                              | 0 << DISABLE_STATUS_AFTER_WRITE
--                              | 5 << NUM_ADDR_CYCLES
--                              | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
--                              | 0 << STATUS_BFR_READ
--                              | 1 << SET_RD_MODE_AFTER_STATUS
--                              | host->spare_bytes << SPARE_SIZE_BYTES;
--
--      host->cfg1 = 7 << NAND_RECOVERY_CYCLES
--                              | 0 <<  CS_ACTIVE_BSY
--                              | bad_block_byte << BAD_BLOCK_BYTE_NUM
--                              | 0 << BAD_BLOCK_IN_SPARE_AREA
--                              | 2 << WR_RD_BSY_GAP
--                              | wide_bus << WIDE_FLASH
--                              | host->bch_enabled << ENABLE_BCH_ECC;
--
--      host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
--                              | host->cw_size << UD_SIZE_BYTES
--                              | 5 << NUM_ADDR_CYCLES
--                              | 0 << SPARE_SIZE_BYTES;
--
--      host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
--                              | 0 << CS_ACTIVE_BSY
--                              | 17 << BAD_BLOCK_BYTE_NUM
--                              | 1 << BAD_BLOCK_IN_SPARE_AREA
--                              | 2 << WR_RD_BSY_GAP
--                              | wide_bus << WIDE_FLASH
--                              | 1 << DEV0_CFG1_ECC_DISABLE;
--
--      host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
--                              | 0 << ECC_SW_RESET
--                              | host->cw_data << ECC_NUM_DATA_BYTES
--                              | 1 << ECC_FORCE_CLK_OPEN
--                              | ecc_mode << ECC_MODE
--                              | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
-+      host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
-+                   FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data) |
-+                   FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 0) |
-+                   FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
-+                   FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, host->ecc_bytes_hw) |
-+                   FIELD_PREP(STATUS_BFR_READ, 0) |
-+                   FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
-+                   FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes);
-+
-+      host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
-+                   FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
-+                   FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
-+                   FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
-+                   FIELD_PREP(WIDE_FLASH, wide_bus) |
-+                   FIELD_PREP(ENABLE_BCH_ECC, host->bch_enabled);
-+
-+      host->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
-+                       FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_size) |
-+                       FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
-+                       FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
-+
-+      host->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
-+                       FIELD_PREP(CS_ACTIVE_BSY, 0) |
-+                       FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
-+                       FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
-+                       FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
-+                       FIELD_PREP(WIDE_FLASH, wide_bus) |
-+                       FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
-+
-+      host->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !host->bch_enabled) |
-+                          FIELD_PREP(ECC_SW_RESET, 0) |
-+                          FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data) |
-+                          FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
-+                          FIELD_PREP(ECC_MODE_MASK, ecc_mode) |
-+                          FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw);
-       if (!nandc->props->qpic_version2)
-               host->ecc_buf_cfg = 0x203 << NUM_STEPS;
-@@ -1882,21 +1881,21 @@ static int qcom_param_page_type_exec(str
-       nandc->regs->addr0 = 0;
-       nandc->regs->addr1 = 0;
--      nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE
--                          | 512 << UD_SIZE_BYTES
--                          | 5 << NUM_ADDR_CYCLES
--                          | 0 << SPARE_SIZE_BYTES);
--
--      nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES
--                          | 0 << CS_ACTIVE_BSY
--                          | 17 << BAD_BLOCK_BYTE_NUM
--                          | 1 << BAD_BLOCK_IN_SPARE_AREA
--                          | 2 << WR_RD_BSY_GAP
--                          | 0 << WIDE_FLASH
--                          | 1 << DEV0_CFG1_ECC_DISABLE);
-+      host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
-+                   FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
-+                   FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
-+                   FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
-+
-+      host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
-+                   FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
-+                   FIELD_PREP(CS_ACTIVE_BSY, 0) |
-+                   FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
-+                   FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
-+                   FIELD_PREP(WIDE_FLASH, 0) |
-+                   FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
-       if (!nandc->props->qpic_version2)
--              nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
-+              nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
-       /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
-       if (!nandc->props->qpic_version2) {
---- a/include/linux/mtd/nand-qpic-common.h
-+++ b/include/linux/mtd/nand-qpic-common.h
-@@ -70,35 +70,42 @@
- #define       BS_CORRECTABLE_ERR_MSK          0x1f
- /* NAND_DEVn_CFG0 bits */
--#define       DISABLE_STATUS_AFTER_WRITE      4
-+#define       DISABLE_STATUS_AFTER_WRITE      BIT(4)
- #define       CW_PER_PAGE                     6
-+#define       CW_PER_PAGE_MASK                GENMASK(8, 6)
- #define       UD_SIZE_BYTES                   9
- #define       UD_SIZE_BYTES_MASK              GENMASK(18, 9)
--#define       ECC_PARITY_SIZE_BYTES_RS        19
-+#define       ECC_PARITY_SIZE_BYTES_RS        GENMASK(22, 19)
- #define       SPARE_SIZE_BYTES                23
- #define       SPARE_SIZE_BYTES_MASK           GENMASK(26, 23)
- #define       NUM_ADDR_CYCLES                 27
--#define       STATUS_BFR_READ                 30
--#define       SET_RD_MODE_AFTER_STATUS        31
-+#define       NUM_ADDR_CYCLES_MASK            GENMASK(29, 27)
-+#define       STATUS_BFR_READ                 BIT(30)
-+#define       SET_RD_MODE_AFTER_STATUS        BIT(31)
- /* NAND_DEVn_CFG0 bits */
--#define       DEV0_CFG1_ECC_DISABLE           0
--#define       WIDE_FLASH                      1
-+#define       DEV0_CFG1_ECC_DISABLE           BIT(0)
-+#define       WIDE_FLASH                      BIT(1)
- #define       NAND_RECOVERY_CYCLES            2
--#define       CS_ACTIVE_BSY                   5
-+#define       NAND_RECOVERY_CYCLES_MASK       GENMASK(4, 2)
-+#define       CS_ACTIVE_BSY                   BIT(5)
- #define       BAD_BLOCK_BYTE_NUM              6
--#define       BAD_BLOCK_IN_SPARE_AREA         16
-+#define       BAD_BLOCK_BYTE_NUM_MASK         GENMASK(15, 6)
-+#define       BAD_BLOCK_IN_SPARE_AREA         BIT(16)
- #define       WR_RD_BSY_GAP                   17
--#define       ENABLE_BCH_ECC                  27
-+#define       WR_RD_BSY_GAP_MASK              GENMASK(22, 17)
-+#define       ENABLE_BCH_ECC                  BIT(27)
- /* NAND_DEV0_ECC_CFG bits */
--#define       ECC_CFG_ECC_DISABLE             0
--#define       ECC_SW_RESET                    1
-+#define       ECC_CFG_ECC_DISABLE             BIT(0)
-+#define       ECC_SW_RESET                    BIT(1)
- #define       ECC_MODE                        4
-+#define       ECC_MODE_MASK                   GENMASK(5, 4)
- #define       ECC_PARITY_SIZE_BYTES_BCH       8
-+#define       ECC_PARITY_SIZE_BYTES_BCH_MASK  GENMASK(12, 8)
- #define       ECC_NUM_DATA_BYTES              16
- #define       ECC_NUM_DATA_BYTES_MASK         GENMASK(25, 16)
--#define       ECC_FORCE_CLK_OPEN              30
-+#define       ECC_FORCE_CLK_OPEN              BIT(30)
- /* NAND_DEV_CMD1 bits */
- #define       READ_ADDR                       0
diff --git a/target/linux/qualcommax/patches-6.6/0406-spi-spi-qpic-add-driver-for-QCOM-SPI-NAND-flash-Interface.patch b/target/linux/qualcommax/patches-6.6/0406-spi-spi-qpic-add-driver-for-QCOM-SPI-NAND-flash-Interface.patch
index 95188529c485fd2868d37603677b55b167db15cf..8ad17ff60c6c004108397e5d854e74dfac66702e 100644 (file)
@@ -13,16 +13,15 @@ Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
 ---
 --- a/drivers/mtd/nand/Makefile
 +++ b/drivers/mtd/nand/Makefile
-@@ -7,8 +7,11 @@ obj-$(CONFIG_MTD_NAND_MTK_BMT)        += mtk_bm
- ifeq ($(CONFIG_MTD_NAND_QCOM),y)
- obj-y += qpic_common.o
-+else
+@@ -4,7 +4,11 @@ nandcore-objs := core.o bbt.o
+ obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
+ obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
+ obj-$(CONFIG_MTD_NAND_MTK_BMT)        += mtk_bmt.o mtk_bmt_v2.o mtk_bmt_bbt.o mtk_bmt_nmbm.o
 +ifeq ($(CONFIG_SPI_QPIC_SNAND),y)
-+obj-y   += qpic_common.o
++obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o
++else
+ obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
 +endif
- endif
--
  obj-y += onenand/
  obj-y += raw/
  obj-y += spi/
@@ -1692,7 +1691,7 @@ Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
 +
 --- a/include/linux/mtd/nand-qpic-common.h
 +++ b/include/linux/mtd/nand-qpic-common.h
-@@ -322,6 +322,10 @@ struct nandc_regs {
+@@ -325,6 +325,10 @@ struct nandc_regs {
        __le32 read_location_last1;
        __le32 read_location_last2;
        __le32 read_location_last3;
@@ -1703,7 +1702,7 @@ Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
  
        __le32 erased_cw_detect_cfg_clr;
        __le32 erased_cw_detect_cfg_set;
-@@ -336,6 +340,7 @@ struct nandc_regs {
+@@ -339,6 +343,7 @@ struct nandc_regs {
   *
   * @core_clk:                 controller clock
   * @aon_clk:                  another controller clock
@@ -1711,7 +1710,7 @@ Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
   *
   * @regs:                     a contiguous chunk of memory for DMA register
   *                            writes. contains the register values to be
-@@ -345,6 +350,7 @@ struct nandc_regs {
+@@ -348,6 +353,7 @@ struct nandc_regs {
   *                            initialized via DT match data
   *
   * @controller:                       base controller structure
@@ -1719,7 +1718,7 @@ Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
   * @host_list:                        list containing all the chips attached to the
   *                            controller
   *
-@@ -389,6 +395,7 @@ struct qcom_nand_controller {
+@@ -392,6 +398,7 @@ struct qcom_nand_controller {
        const struct qcom_nandc_props *props;
  
        struct nand_controller *controller;
diff --git a/target/linux/qualcommax/patches-6.6/0408-spi-spi-qpic-fix-compilation-issues.patch b/target/linux/qualcommax/patches-6.6/0408-spi-spi-qpic-fix-compilation-issues.patch
deleted file mode 100644 (file)
index 03b18e0..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-From: George Moussalem <george.moussalem@outlook.com>
-Subject: [PATCH] spi: spi-qpic: fix compilation issues
-Date: Sun, 06 Oct 2024 16:34:11 +0400
-
-The compiler warns when memset() is told to clear more bytes than the size
-of the member whose address it is given. Rather than zeroing eight u32
-members with one memset() on the first of them, clear each of the eight
-contiguously laid out members of the structure explicitly.
-
-The driver's remove callback returns void, while the legacy .remove hook
-is expected to return an int (0 on success or a negative error code). So
-let's switch to the new .remove_new hook, which expects nothing to be
-returned.
-
-Signed-off-by: George Moussalem <george.moussalem@outlook.com>
----
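To make the first point concrete, a minimal sketch of the pattern being fixed (the struct and function names here are hypothetical, not the driver's):

    #include <linux/types.h>

    struct txn_counters {
            u32 pos_a, start_a, pos_b, start_b,
                pos_c, start_c, pos_d, start_d;   /* eight contiguous u32s */
    };

    static void clear_counters(struct txn_counters *t)
    {
            /*
             * memset(&t->pos_a, 0, sizeof(u32) * 8) writes 32 bytes through a
             * pointer to a 4-byte member, which the compiler's object-size
             * checking flags. Assigning each member avoids the warning and
             * keeps the intent explicit.
             */
            t->pos_a = 0;  t->start_a = 0;
            t->pos_b = 0;  t->start_b = 0;
            t->pos_c = 0;  t->start_c = 0;
            t->pos_d = 0;  t->start_d = 0;
    }

The second hunk below simply pairs the driver's void-returning remove callback with .remove_new, the platform-driver hook that expects void.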
---- a/drivers/mtd/nand/qpic_common.c
-+++ b/drivers/mtd/nand/qpic_common.c
-@@ -82,7 +82,14 @@ void qcom_clear_bam_transaction(struct q
-       if (!nandc->props->supports_bam)
-               return;
--      memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
-+      bam_txn->bam_ce_pos = 0;
-+      bam_txn->bam_ce_start = 0;
-+      bam_txn->cmd_sgl_pos = 0;
-+      bam_txn->cmd_sgl_start = 0;
-+      bam_txn->tx_sgl_pos = 0;
-+      bam_txn->tx_sgl_start = 0;
-+      bam_txn->rx_sgl_pos = 0;
-+      bam_txn->rx_sgl_start = 0;
-       bam_txn->last_data_desc = NULL;
-       sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
---- a/drivers/spi/spi-qpic-snand.c
-+++ b/drivers/spi/spi-qpic-snand.c
-@@ -1624,7 +1624,7 @@ static struct platform_driver qcom_spi_d
-               .of_match_table = qcom_snandc_of_match,
-       },
-       .probe = qcom_spi_probe,
--      .remove = qcom_spi_remove,
-+      .remove_new = qcom_spi_remove,
- };
- module_platform_driver(qcom_spi_driver);
diff --git a/target/linux/qualcommbe/patches-6.6/016-01-v6.14-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch b/target/linux/qualcommbe/patches-6.6/016-01-v6.14-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch
deleted file mode 100644 (file)
index 7db73ad..0000000
+++ /dev/null
@@ -1,1015 +0,0 @@
-From 8c52932da5e6756fa66f52f0720da283fba13aa6 Mon Sep 17 00:00:00 2001
-From: Md Sadre Alam <quic_mdalam@quicinc.com>
-Date: Wed, 20 Nov 2024 14:45:00 +0530
-Subject: [PATCH 1/4] mtd: rawnand: qcom: cleanup qcom_nandc driver
-
-Perform a global cleanup of the Qualcomm NAND
-controller driver with the following improvements:
-
-- Remove register value indirection API
-
-- Remove set_reg() API
-
-- Convert read_loc_first & read_loc_last macro to functions
-
-- Rename multiple variables
-
-Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
-Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
----
- drivers/mtd/nand/raw/qcom_nandc.c | 516 ++++++++++++++----------------
- 1 file changed, 234 insertions(+), 282 deletions(-)
-
-diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
-index 636bba2528bf..9ae8c9f2ab55 100644
---- a/drivers/mtd/nand/raw/qcom_nandc.c
-+++ b/drivers/mtd/nand/raw/qcom_nandc.c
-@@ -189,17 +189,6 @@
- #define       ECC_BCH_4BIT    BIT(2)
- #define       ECC_BCH_8BIT    BIT(3)
--#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc)   \
--nandc_set_reg(chip, reg,                      \
--            ((cw_offset) << READ_LOCATION_OFFSET) |           \
--            ((read_size) << READ_LOCATION_SIZE) |                     \
--            ((is_last_read_loc) << READ_LOCATION_LAST))
--
--#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc)    \
--nandc_set_reg(chip, reg,                      \
--            ((cw_offset) << READ_LOCATION_OFFSET) |           \
--            ((read_size) << READ_LOCATION_SIZE) |                     \
--            ((is_last_read_loc) << READ_LOCATION_LAST))
- /*
-  * Returns the actual register address for all NAND_DEV_ registers
-  * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
-@@ -257,8 +246,6 @@ nandc_set_reg(chip, reg,                   \
-  * @tx_sgl_start - start index in data sgl for tx.
-  * @rx_sgl_pos - current index in data sgl for rx.
-  * @rx_sgl_start - start index in data sgl for rx.
-- * @wait_second_completion - wait for second DMA desc completion before making
-- *                         the NAND transfer completion.
-  */
- struct bam_transaction {
-       struct bam_cmd_element *bam_ce;
-@@ -275,7 +262,6 @@ struct bam_transaction {
-       u32 tx_sgl_start;
-       u32 rx_sgl_pos;
-       u32 rx_sgl_start;
--      bool wait_second_completion;
- };
- /*
-@@ -471,9 +457,9 @@ struct qcom_op {
-       unsigned int data_instr_idx;
-       unsigned int rdy_timeout_ms;
-       unsigned int rdy_delay_ns;
--      u32 addr1_reg;
--      u32 addr2_reg;
--      u32 cmd_reg;
-+      __le32 addr1_reg;
-+      __le32 addr2_reg;
-+      __le32 cmd_reg;
-       u8 flag;
- };
-@@ -549,17 +535,17 @@ struct qcom_nand_host {
-  * among different NAND controllers.
-  * @ecc_modes - ecc mode for NAND
-  * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
-- * @is_bam - whether NAND controller is using BAM
-- * @is_qpic - whether NAND CTRL is part of qpic IP
-- * @qpic_v2 - flag to indicate QPIC IP version 2
-+ * @supports_bam - whether NAND controller is using Bus Access Manager (BAM)
-+ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
-+ * @qpic_version2 - flag to indicate QPIC IP version 2
-  * @use_codeword_fixup - whether NAND has different layout for boot partitions
-  */
- struct qcom_nandc_props {
-       u32 ecc_modes;
-       u32 dev_cmd_reg_start;
--      bool is_bam;
--      bool is_qpic;
--      bool qpic_v2;
-+      bool supports_bam;
-+      bool nandc_part_of_qpic;
-+      bool qpic_version2;
-       bool use_codeword_fixup;
- };
-@@ -613,19 +599,11 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
- {
-       struct bam_transaction *bam_txn = nandc->bam_txn;
--      if (!nandc->props->is_bam)
-+      if (!nandc->props->supports_bam)
-               return;
--      bam_txn->bam_ce_pos = 0;
--      bam_txn->bam_ce_start = 0;
--      bam_txn->cmd_sgl_pos = 0;
--      bam_txn->cmd_sgl_start = 0;
--      bam_txn->tx_sgl_pos = 0;
--      bam_txn->tx_sgl_start = 0;
--      bam_txn->rx_sgl_pos = 0;
--      bam_txn->rx_sgl_start = 0;
-+      memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
-       bam_txn->last_data_desc = NULL;
--      bam_txn->wait_second_completion = false;
-       sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
-                     QPIC_PER_CW_CMD_SGL);
-@@ -640,46 +618,35 @@ static void qpic_bam_dma_done(void *data)
- {
-       struct bam_transaction *bam_txn = data;
--      /*
--       * In case of data transfer with NAND, 2 callbacks will be generated.
--       * One for command channel and another one for data channel.
--       * If current transaction has data descriptors
--       * (i.e. wait_second_completion is true), then set this to false
--       * and wait for second DMA descriptor completion.
--       */
--      if (bam_txn->wait_second_completion)
--              bam_txn->wait_second_completion = false;
--      else
--              complete(&bam_txn->txn_done);
-+      complete(&bam_txn->txn_done);
- }
--static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
-+static struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
- {
-       return container_of(chip, struct qcom_nand_host, chip);
- }
--static inline struct qcom_nand_controller *
-+static struct qcom_nand_controller *
- get_qcom_nand_controller(struct nand_chip *chip)
- {
-       return container_of(chip->controller, struct qcom_nand_controller,
-                           controller);
- }
--static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
-+static u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
- {
-       return ioread32(nandc->base + offset);
- }
--static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
--                             u32 val)
-+static void nandc_write(struct qcom_nand_controller *nandc, int offset,
-+                      u32 val)
- {
-       iowrite32(val, nandc->base + offset);
- }
--static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
--                                        bool is_cpu)
-+static void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
- {
--      if (!nandc->props->is_bam)
-+      if (!nandc->props->supports_bam)
-               return;
-       if (is_cpu)
-@@ -694,93 +661,90 @@ static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
-                                          DMA_FROM_DEVICE);
- }
--static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
--{
--      switch (offset) {
--      case NAND_FLASH_CMD:
--              return &regs->cmd;
--      case NAND_ADDR0:
--              return &regs->addr0;
--      case NAND_ADDR1:
--              return &regs->addr1;
--      case NAND_FLASH_CHIP_SELECT:
--              return &regs->chip_sel;
--      case NAND_EXEC_CMD:
--              return &regs->exec;
--      case NAND_FLASH_STATUS:
--              return &regs->clrflashstatus;
--      case NAND_DEV0_CFG0:
--              return &regs->cfg0;
--      case NAND_DEV0_CFG1:
--              return &regs->cfg1;
--      case NAND_DEV0_ECC_CFG:
--              return &regs->ecc_bch_cfg;
--      case NAND_READ_STATUS:
--              return &regs->clrreadstatus;
--      case NAND_DEV_CMD1:
--              return &regs->cmd1;
--      case NAND_DEV_CMD1_RESTORE:
--              return &regs->orig_cmd1;
--      case NAND_DEV_CMD_VLD:
--              return &regs->vld;
--      case NAND_DEV_CMD_VLD_RESTORE:
--              return &regs->orig_vld;
--      case NAND_EBI2_ECC_BUF_CFG:
--              return &regs->ecc_buf_cfg;
--      case NAND_READ_LOCATION_0:
--              return &regs->read_location0;
--      case NAND_READ_LOCATION_1:
--              return &regs->read_location1;
--      case NAND_READ_LOCATION_2:
--              return &regs->read_location2;
--      case NAND_READ_LOCATION_3:
--              return &regs->read_location3;
--      case NAND_READ_LOCATION_LAST_CW_0:
--              return &regs->read_location_last0;
--      case NAND_READ_LOCATION_LAST_CW_1:
--              return &regs->read_location_last1;
--      case NAND_READ_LOCATION_LAST_CW_2:
--              return &regs->read_location_last2;
--      case NAND_READ_LOCATION_LAST_CW_3:
--              return &regs->read_location_last3;
--      default:
--              return NULL;
--      }
-+/* Helper to check whether this is the last CW or not */
-+static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
-+{
-+      return cw == (ecc->steps - 1);
- }
--static void nandc_set_reg(struct nand_chip *chip, int offset,
--                        u32 val)
-+/**
-+ * nandc_set_read_loc_first() - to set read location first register
-+ * @chip:             NAND Private Flash Chip Data
-+ * @reg_base:         location register base
-+ * @cw_offset:                code word offset
-+ * @read_size:                code word read length
-+ * @is_last_read_loc: is this the last read location
-+ *
-+ * This function will set location register value
-+ */
-+static void nandc_set_read_loc_first(struct nand_chip *chip,
-+                                   int reg_base, u32 cw_offset,
-+                                   u32 read_size, u32 is_last_read_loc)
- {
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
--      struct nandc_regs *regs = nandc->regs;
--      __le32 *reg;
--
--      reg = offset_to_nandc_reg(regs, offset);
-+      __le32 locreg_val;
-+      u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
-+                ((read_size) << READ_LOCATION_SIZE) |
-+                ((is_last_read_loc) << READ_LOCATION_LAST));
-+
-+      locreg_val = cpu_to_le32(val);
-+
-+      if (reg_base == NAND_READ_LOCATION_0)
-+              nandc->regs->read_location0 = locreg_val;
-+      else if (reg_base == NAND_READ_LOCATION_1)
-+              nandc->regs->read_location1 = locreg_val;
-+      else if (reg_base == NAND_READ_LOCATION_2)
-+              nandc->regs->read_location2 = locreg_val;
-+      else if (reg_base == NAND_READ_LOCATION_3)
-+              nandc->regs->read_location3 = locreg_val;
-+}
-+
-+/**
-+ * nandc_set_read_loc_last - to set read location last register
-+ * @chip:             NAND Private Flash Chip Data
-+ * @reg_base:         location register base
-+ * @cw_offset:                code word offset
-+ * @read_size:                code word read length
-+ * @is_last_read_loc: is this the last read location
-+ *
-+ * This function will set location last register value
-+ */
-+static void nandc_set_read_loc_last(struct nand_chip *chip,
-+                                  int reg_base, u32 cw_offset,
-+                                  u32 read_size, u32 is_last_read_loc)
-+{
-+      struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-+      __le32 locreg_val;
-+      u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
-+                ((read_size) << READ_LOCATION_SIZE) |
-+                ((is_last_read_loc) << READ_LOCATION_LAST));
--      if (reg)
--              *reg = cpu_to_le32(val);
--}
-+      locreg_val = cpu_to_le32(val);
--/* Helper to check the code word, whether it is last cw or not */
--static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
--{
--      return cw == (ecc->steps - 1);
-+      if (reg_base == NAND_READ_LOCATION_LAST_CW_0)
-+              nandc->regs->read_location_last0 = locreg_val;
-+      else if (reg_base == NAND_READ_LOCATION_LAST_CW_1)
-+              nandc->regs->read_location_last1 = locreg_val;
-+      else if (reg_base == NAND_READ_LOCATION_LAST_CW_2)
-+              nandc->regs->read_location_last2 = locreg_val;
-+      else if (reg_base == NAND_READ_LOCATION_LAST_CW_3)
-+              nandc->regs->read_location_last3 = locreg_val;
- }
- /* helper to configure location register values */
- static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
--                             int cw_offset, int read_size, int is_last_read_loc)
-+                             u32 cw_offset, u32 read_size, u32 is_last_read_loc)
- {
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-       struct nand_ecc_ctrl *ecc = &chip->ecc;
-       int reg_base = NAND_READ_LOCATION_0;
--      if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
-+      if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
-               reg_base = NAND_READ_LOCATION_LAST_CW_0;
-       reg_base += reg * 4;
--      if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
-+      if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
-               return nandc_set_read_loc_last(chip, reg_base, cw_offset,
-                               read_size, is_last_read_loc);
-       else
-@@ -792,12 +756,13 @@ static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
- static void set_address(struct qcom_nand_host *host, u16 column, int page)
- {
-       struct nand_chip *chip = &host->chip;
-+      struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-       if (chip->options & NAND_BUSWIDTH_16)
-               column >>= 1;
--      nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
--      nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
-+      nandc->regs->addr0 = cpu_to_le32(page << 16 | column);
-+      nandc->regs->addr1 = cpu_to_le32(page >> 16 & 0xff);
- }
- /*
-@@ -811,41 +776,43 @@ static void set_address(struct qcom_nand_host *host, u16 column, int page)
- static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
- {
-       struct nand_chip *chip = &host->chip;
--      u32 cmd, cfg0, cfg1, ecc_bch_cfg;
-+      __le32 cmd, cfg0, cfg1, ecc_bch_cfg;
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-       if (read) {
-               if (host->use_ecc)
--                      cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
-+                      cmd = cpu_to_le32(OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE);
-               else
--                      cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
-+                      cmd = cpu_to_le32(OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
-       } else {
--              cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
-+              cmd = cpu_to_le32(OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE);
-       }
-       if (host->use_ecc) {
--              cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
--                              (num_cw - 1) << CW_PER_PAGE;
-+              cfg0 = cpu_to_le32((host->cfg0 & ~(7U << CW_PER_PAGE)) |
-+                              (num_cw - 1) << CW_PER_PAGE);
--              cfg1 = host->cfg1;
--              ecc_bch_cfg = host->ecc_bch_cfg;
-+              cfg1 = cpu_to_le32(host->cfg1);
-+              ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg);
-       } else {
--              cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
--                              (num_cw - 1) << CW_PER_PAGE;
-+              cfg0 = cpu_to_le32((host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
-+                              (num_cw - 1) << CW_PER_PAGE);
--              cfg1 = host->cfg1_raw;
--              ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
-+              cfg1 = cpu_to_le32(host->cfg1_raw);
-+              ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
-       }
--      nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
--      nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
--      nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
--      nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
--      if (!nandc->props->qpic_v2)
--              nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
--      nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
--      nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
--      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-+      nandc->regs->cmd = cmd;
-+      nandc->regs->cfg0 = cfg0;
-+      nandc->regs->cfg1 = cfg1;
-+      nandc->regs->ecc_bch_cfg = ecc_bch_cfg;
-+
-+      if (!nandc->props->qpic_version2)
-+              nandc->regs->ecc_buf_cfg = cpu_to_le32(host->ecc_buf_cfg);
-+
-+      nandc->regs->clrflashstatus = cpu_to_le32(host->clrflashstatus);
-+      nandc->regs->clrreadstatus = cpu_to_le32(host->clrreadstatus);
-+      nandc->regs->exec = cpu_to_le32(1);
-       if (read)
-               nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
-@@ -1121,7 +1088,7 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
-       if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
-               first = dev_cmd_reg_addr(nandc, first);
--      if (nandc->props->is_bam)
-+      if (nandc->props->supports_bam)
-               return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
-                                            num_regs, flags);
-@@ -1136,25 +1103,16 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
-  * write_reg_dma:     prepares a descriptor to write a given number of
-  *                    contiguous registers
-  *
-+ * @vaddr:            contiguous memory from where register value will
-+ *                    be written
-  * @first:            offset of the first register in the contiguous block
-  * @num_regs:         number of registers to write
-  * @flags:            flags to control DMA descriptor preparation
-  */
--static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
--                       int num_regs, unsigned int flags)
-+static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
-+                       int first, int num_regs, unsigned int flags)
- {
-       bool flow_control = false;
--      struct nandc_regs *regs = nandc->regs;
--      void *vaddr;
--
--      vaddr = offset_to_nandc_reg(regs, first);
--
--      if (first == NAND_ERASED_CW_DETECT_CFG) {
--              if (flags & NAND_ERASED_CW_SET)
--                      vaddr = &regs->erased_cw_detect_cfg_set;
--              else
--                      vaddr = &regs->erased_cw_detect_cfg_clr;
--      }
-       if (first == NAND_EXEC_CMD)
-               flags |= NAND_BAM_NWD;
-@@ -1165,7 +1123,7 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
-       if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
-               first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
--      if (nandc->props->is_bam)
-+      if (nandc->props->supports_bam)
-               return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
-                                            num_regs, flags);
-@@ -1188,7 +1146,7 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
- static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-                        const u8 *vaddr, int size, unsigned int flags)
- {
--      if (nandc->props->is_bam)
-+      if (nandc->props->supports_bam)
-               return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
-       return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
-@@ -1206,7 +1164,7 @@ static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-                         const u8 *vaddr, int size, unsigned int flags)
- {
--      if (nandc->props->is_bam)
-+      if (nandc->props->supports_bam)
-               return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
-       return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
-@@ -1220,13 +1178,14 @@ static void config_nand_page_read(struct nand_chip *chip)
- {
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
--      write_reg_dma(nandc, NAND_ADDR0, 2, 0);
--      write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
--      if (!nandc->props->qpic_v2)
--              write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
--      write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
--      write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
--                    NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-+      write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+      if (!nandc->props->qpic_version2)
-+              write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
-+      write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
-+                    NAND_ERASED_CW_DETECT_CFG, 1, 0);
-+      write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
-+                    NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
- }
- /*
-@@ -1239,16 +1198,16 @@ config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw)
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-       struct nand_ecc_ctrl *ecc = &chip->ecc;
--      int reg = NAND_READ_LOCATION_0;
-+      __le32 *reg = &nandc->regs->read_location0;
--      if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
--              reg = NAND_READ_LOCATION_LAST_CW_0;
-+      if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
-+              reg = &nandc->regs->read_location_last0;
--      if (nandc->props->is_bam)
--              write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);
-+      if (nandc->props->supports_bam)
-+              write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-       if (use_ecc) {
-               read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
-@@ -1279,10 +1238,10 @@ static void config_nand_page_write(struct nand_chip *chip)
- {
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
--      write_reg_dma(nandc, NAND_ADDR0, 2, 0);
--      write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
--      if (!nandc->props->qpic_v2)
--              write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
-+      write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-+      write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+      if (!nandc->props->qpic_version2)
-+              write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
-                             NAND_BAM_NEXT_SGL);
- }
-@@ -1294,13 +1253,13 @@ static void config_nand_cw_write(struct nand_chip *chip)
- {
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
--      write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-       read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
--      write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
-+      write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
- }
- /* helpers to submit/free our list of dma descriptors */
-@@ -1311,7 +1270,7 @@ static int submit_descs(struct qcom_nand_controller *nandc)
-       struct bam_transaction *bam_txn = nandc->bam_txn;
-       int ret = 0;
--      if (nandc->props->is_bam) {
-+      if (nandc->props->supports_bam) {
-               if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
-                       ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
-                       if (ret)
-@@ -1336,14 +1295,9 @@ static int submit_descs(struct qcom_nand_controller *nandc)
-       list_for_each_entry(desc, &nandc->desc_list, node)
-               cookie = dmaengine_submit(desc->dma_desc);
--      if (nandc->props->is_bam) {
-+      if (nandc->props->supports_bam) {
-               bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
-               bam_txn->last_cmd_desc->callback_param = bam_txn;
--              if (bam_txn->last_data_desc) {
--                      bam_txn->last_data_desc->callback = qpic_bam_dma_done;
--                      bam_txn->last_data_desc->callback_param = bam_txn;
--                      bam_txn->wait_second_completion = true;
--              }
-               dma_async_issue_pending(nandc->tx_chan);
-               dma_async_issue_pending(nandc->rx_chan);
-@@ -1365,7 +1319,7 @@ static int submit_descs(struct qcom_nand_controller *nandc)
-       list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
-               list_del(&desc->node);
--              if (nandc->props->is_bam)
-+              if (nandc->props->supports_bam)
-                       dma_unmap_sg(nandc->dev, desc->bam_sgl,
-                                    desc->sgl_cnt, desc->dir);
-               else
-@@ -1382,7 +1336,7 @@ static int submit_descs(struct qcom_nand_controller *nandc)
- static void clear_read_regs(struct qcom_nand_controller *nandc)
- {
-       nandc->reg_read_pos = 0;
--      nandc_read_buffer_sync(nandc, false);
-+      nandc_dev_to_mem(nandc, false);
- }
- /*
-@@ -1446,7 +1400,7 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-       int i;
--      nandc_read_buffer_sync(nandc, true);
-+      nandc_dev_to_mem(nandc, true);
-       for (i = 0; i < cw_cnt; i++) {
-               u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
-@@ -1476,7 +1430,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
-       clear_read_regs(nandc);
-       host->use_ecc = false;
--      if (nandc->props->qpic_v2)
-+      if (nandc->props->qpic_version2)
-               raw_cw = ecc->steps - 1;
-       clear_bam_transaction(nandc);
-@@ -1497,7 +1451,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
-               oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
-       }
--      if (nandc->props->is_bam) {
-+      if (nandc->props->supports_bam) {
-               nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
-               read_loc += data_size1;
-@@ -1621,7 +1575,7 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
-       u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
-       buf = (struct read_stats *)nandc->reg_read_buf;
--      nandc_read_buffer_sync(nandc, true);
-+      nandc_dev_to_mem(nandc, true);
-       for (i = 0; i < ecc->steps; i++, buf++) {
-               u32 flash, buffer, erased_cw;
-@@ -1734,7 +1688,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
-                       oob_size = host->ecc_bytes_hw + host->spare_bytes;
-               }
--              if (nandc->props->is_bam) {
-+              if (nandc->props->supports_bam) {
-                       if (data_buf && oob_buf) {
-                               nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
-                               nandc_set_read_loc(chip, i, 1, data_size,
-@@ -2455,14 +2409,14 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
-       mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
-       /* Free the initially allocated BAM transaction for reading the ONFI params */
--      if (nandc->props->is_bam)
-+      if (nandc->props->supports_bam)
-               free_bam_transaction(nandc);
-       nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
-                                    cwperpage);
-       /* Now allocate the BAM transaction based on updated max_cwperpage */
--      if (nandc->props->is_bam) {
-+      if (nandc->props->supports_bam) {
-               nandc->bam_txn = alloc_bam_transaction(nandc);
-               if (!nandc->bam_txn) {
-                       dev_err(nandc->dev,
-@@ -2522,7 +2476,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
-                               | ecc_mode << ECC_MODE
-                               | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
--      if (!nandc->props->qpic_v2)
-+      if (!nandc->props->qpic_version2)
-               host->ecc_buf_cfg = 0x203 << NUM_STEPS;
-       host->clrflashstatus = FS_READY_BSY_N;
-@@ -2556,7 +2510,7 @@ static int qcom_op_cmd_mapping(struct nand_chip *chip, u8 opcode,
-               cmd = OP_FETCH_ID;
-               break;
-       case NAND_CMD_PARAM:
--              if (nandc->props->qpic_v2)
-+              if (nandc->props->qpic_version2)
-                       cmd = OP_PAGE_READ_ONFI_READ;
-               else
-                       cmd = OP_PAGE_READ;
-@@ -2609,7 +2563,7 @@ static int qcom_parse_instructions(struct nand_chip *chip,
-                       if (ret < 0)
-                               return ret;
--                      q_op->cmd_reg = ret;
-+                      q_op->cmd_reg = cpu_to_le32(ret);
-                       q_op->rdy_delay_ns = instr->delay_ns;
-                       break;
-@@ -2619,10 +2573,10 @@ static int qcom_parse_instructions(struct nand_chip *chip,
-                       addrs = &instr->ctx.addr.addrs[offset];
-                       for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
--                              q_op->addr1_reg |= addrs[i] << (i * 8);
-+                              q_op->addr1_reg |= cpu_to_le32(addrs[i] << (i * 8));
-                       if (naddrs > 4)
--                              q_op->addr2_reg |= addrs[4];
-+                              q_op->addr2_reg |= cpu_to_le32(addrs[4]);
-                       q_op->rdy_delay_ns = instr->delay_ns;
-                       break;
-@@ -2663,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms)
-       unsigned long start = jiffies + msecs_to_jiffies(time_ms);
-       u32 flash;
--      nandc_read_buffer_sync(nandc, true);
-+      nandc_dev_to_mem(nandc, true);
-       do {
-               flash = le32_to_cpu(nandc->reg_read_buf[0]);
-@@ -2706,11 +2660,11 @@ static int qcom_read_status_exec(struct nand_chip *chip,
-       clear_read_regs(nandc);
-       clear_bam_transaction(nandc);
--      nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
--      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-+      nandc->regs->cmd = q_op.cmd_reg;
-+      nandc->regs->exec = cpu_to_le32(1);
--      write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-       read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-       ret = submit_descs(nandc);
-@@ -2719,7 +2673,7 @@ static int qcom_read_status_exec(struct nand_chip *chip,
-               goto err_out;
-       }
--      nandc_read_buffer_sync(nandc, true);
-+      nandc_dev_to_mem(nandc, true);
-       for (i = 0; i < num_cw; i++) {
-               flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
-@@ -2763,16 +2717,14 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
-       clear_read_regs(nandc);
-       clear_bam_transaction(nandc);
--      nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
--      nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
--      nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
--      nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
--                    nandc->props->is_bam ? 0 : DM_EN);
--
--      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-+      nandc->regs->cmd = q_op.cmd_reg;
-+      nandc->regs->addr0 = q_op.addr1_reg;
-+      nandc->regs->addr1 = q_op.addr2_reg;
-+      nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
-+      nandc->regs->exec = cpu_to_le32(1);
--      write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-       read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
-@@ -2786,7 +2738,7 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
-       op_id = q_op.data_instr_idx;
-       len = nand_subop_get_data_len(subop, op_id);
--      nandc_read_buffer_sync(nandc, true);
-+      nandc_dev_to_mem(nandc, true);
-       memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
- err_out:
-@@ -2807,15 +2759,14 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
-       if (q_op.flag == OP_PROGRAM_PAGE) {
-               goto wait_rdy;
--      } else if (q_op.cmd_reg == OP_BLOCK_ERASE) {
--              q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
--              nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
--              nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
--              nandc_set_reg(chip, NAND_DEV0_CFG0,
--                            host->cfg0_raw & ~(7 << CW_PER_PAGE));
--              nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
-+      } else if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE)) {
-+              q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
-+              nandc->regs->addr0 = q_op.addr1_reg;
-+              nandc->regs->addr1 = q_op.addr2_reg;
-+              nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~(7 << CW_PER_PAGE));
-+              nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw);
-               instrs = 3;
--      } else if (q_op.cmd_reg != OP_RESET_DEVICE) {
-+      } else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) {
-               return 0;
-       }
-@@ -2826,14 +2777,14 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
-       clear_read_regs(nandc);
-       clear_bam_transaction(nandc);
--      nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
--      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-+      nandc->regs->cmd = q_op.cmd_reg;
-+      nandc->regs->exec = cpu_to_le32(1);
--      write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
--      if (q_op.cmd_reg == OP_BLOCK_ERASE)
--              write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
-+      if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
-+              write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-       read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-       ret = submit_descs(nandc);
-@@ -2864,7 +2815,7 @@ static int qcom_param_page_type_exec(struct nand_chip *chip,  const struct nand_
-       if (ret)
-               return ret;
--      q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
-+      q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
-       nandc->buf_count = 0;
-       nandc->buf_start = 0;
-@@ -2872,38 +2823,38 @@ static int qcom_param_page_type_exec(struct nand_chip *chip,  const struct nand_
-       clear_read_regs(nandc);
-       clear_bam_transaction(nandc);
--      nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
--
--      nandc_set_reg(chip, NAND_ADDR0, 0);
--      nandc_set_reg(chip, NAND_ADDR1, 0);
--      nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
--                                      | 512 << UD_SIZE_BYTES
--                                      | 5 << NUM_ADDR_CYCLES
--                                      | 0 << SPARE_SIZE_BYTES);
--      nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
--                                      | 0 << CS_ACTIVE_BSY
--                                      | 17 << BAD_BLOCK_BYTE_NUM
--                                      | 1 << BAD_BLOCK_IN_SPARE_AREA
--                                      | 2 << WR_RD_BSY_GAP
--                                      | 0 << WIDE_FLASH
--                                      | 1 << DEV0_CFG1_ECC_DISABLE);
--      if (!nandc->props->qpic_v2)
--              nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
-+      nandc->regs->cmd = q_op.cmd_reg;
-+      nandc->regs->addr0 = 0;
-+      nandc->regs->addr1 = 0;
-+
-+      nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE |
-+                                      512 << UD_SIZE_BYTES |
-+                                      5 << NUM_ADDR_CYCLES |
-+                                      0 << SPARE_SIZE_BYTES);
-+
-+      nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES |
-+                                      0 << CS_ACTIVE_BSY |
-+                                      17 << BAD_BLOCK_BYTE_NUM |
-+                                      1 << BAD_BLOCK_IN_SPARE_AREA |
-+                                      2 << WR_RD_BSY_GAP |
-+                                      0 << WIDE_FLASH |
-+                                      1 << DEV0_CFG1_ECC_DISABLE);
-+
-+      if (!nandc->props->qpic_version2)
-+              nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
-       /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
--      if (!nandc->props->qpic_v2) {
--              nandc_set_reg(chip, NAND_DEV_CMD_VLD,
--                            (nandc->vld & ~READ_START_VLD));
--              nandc_set_reg(chip, NAND_DEV_CMD1,
--                            (nandc->cmd1 & ~(0xFF << READ_ADDR))
--                            | NAND_CMD_PARAM << READ_ADDR);
-+      if (!nandc->props->qpic_version2) {
-+              nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD));
-+              nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~(0xFF << READ_ADDR))
-+                                  | NAND_CMD_PARAM << READ_ADDR);
-       }
--      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-+      nandc->regs->exec = cpu_to_le32(1);
--      if (!nandc->props->qpic_v2) {
--              nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
--              nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
-+      if (!nandc->props->qpic_version2) {
-+              nandc->regs->orig_cmd1 = cpu_to_le32(nandc->cmd1);
-+              nandc->regs->orig_vld = cpu_to_le32(nandc->vld);
-       }
-       instr = q_op.data_instr;
-@@ -2912,9 +2863,9 @@ static int qcom_param_page_type_exec(struct nand_chip *chip,  const struct nand_
-       nandc_set_read_loc(chip, 0, 0, 0, len, 1);
--      if (!nandc->props->qpic_v2) {
--              write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
--              write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
-+      if (!nandc->props->qpic_version2) {
-+              write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
-+              write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
-       }
-       nandc->buf_count = len;
-@@ -2926,9 +2877,10 @@ static int qcom_param_page_type_exec(struct nand_chip *chip,  const struct nand_
-                     nandc->buf_count, 0);
-       /* restore CMD1 and VLD regs */
--      if (!nandc->props->qpic_v2) {
--              write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
--              write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
-+      if (!nandc->props->qpic_version2) {
-+              write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
-+              write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
-+                            NAND_BAM_NEXT_SGL);
-       }
-       ret = submit_descs(nandc);
-@@ -3017,7 +2969,7 @@ static const struct nand_controller_ops qcom_nandc_ops = {
- static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
- {
--      if (nandc->props->is_bam) {
-+      if (nandc->props->supports_bam) {
-               if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
-                       dma_unmap_single(nandc->dev, nandc->reg_read_dma,
-                                        MAX_REG_RD *
-@@ -3070,7 +3022,7 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
-       if (!nandc->reg_read_buf)
-               return -ENOMEM;
--      if (nandc->props->is_bam) {
-+      if (nandc->props->supports_bam) {
-               nandc->reg_read_dma =
-                       dma_map_single(nandc->dev, nandc->reg_read_buf,
-                                      MAX_REG_RD *
-@@ -3151,15 +3103,15 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
-       u32 nand_ctrl;
-       /* kill onenand */
--      if (!nandc->props->is_qpic)
-+      if (!nandc->props->nandc_part_of_qpic)
-               nandc_write(nandc, SFLASHC_BURST_CFG, 0);
--      if (!nandc->props->qpic_v2)
-+      if (!nandc->props->qpic_version2)
-               nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
-                           NAND_DEV_CMD_VLD_VAL);
-       /* enable ADM or BAM DMA */
--      if (nandc->props->is_bam) {
-+      if (nandc->props->supports_bam) {
-               nand_ctrl = nandc_read(nandc, NAND_CTRL);
-               /*
-@@ -3176,7 +3128,7 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
-       }
-       /* save the original values of these registers */
--      if (!nandc->props->qpic_v2) {
-+      if (!nandc->props->qpic_version2) {
-               nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
-               nandc->vld = NAND_DEV_CMD_VLD_VAL;
-       }
-@@ -3349,7 +3301,7 @@ static int qcom_nandc_parse_dt(struct platform_device *pdev)
-       struct device_node *np = nandc->dev->of_node;
-       int ret;
--      if (!nandc->props->is_bam) {
-+      if (!nandc->props->supports_bam) {
-               ret = of_property_read_u32(np, "qcom,cmd-crci",
-                                          &nandc->cmd_crci);
-               if (ret) {
-@@ -3474,30 +3426,30 @@ static void qcom_nandc_remove(struct platform_device *pdev)
- static const struct qcom_nandc_props ipq806x_nandc_props = {
-       .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
--      .is_bam = false,
-+      .supports_bam = false,
-       .use_codeword_fixup = true,
-       .dev_cmd_reg_start = 0x0,
- };
- static const struct qcom_nandc_props ipq4019_nandc_props = {
-       .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
--      .is_bam = true,
--      .is_qpic = true,
-+      .supports_bam = true,
-+      .nandc_part_of_qpic = true,
-       .dev_cmd_reg_start = 0x0,
- };
- static const struct qcom_nandc_props ipq8074_nandc_props = {
-       .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
--      .is_bam = true,
--      .is_qpic = true,
-+      .supports_bam = true,
-+      .nandc_part_of_qpic = true,
-       .dev_cmd_reg_start = 0x7000,
- };
- static const struct qcom_nandc_props sdx55_nandc_props = {
-       .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
--      .is_bam = true,
--      .is_qpic = true,
--      .qpic_v2 = true,
-+      .supports_bam = true,
-+      .nandc_part_of_qpic = true,
-+      .qpic_version2 = true,
-       .dev_cmd_reg_start = 0x7000,
- };
--- 
-2.47.1
-
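The hunks above and the prefix-rename patch removed below both touch the same register-programming path, so a compact illustration may help when reviewing the backports. What follows is a minimal, self-contained sketch — stub types, a no-op DMA helper, an identity cpu_to_le32() that assumes a little-endian host, and placeholder register offsets — not the actual driver code. It only shows the pattern the cleanup introduces: register values are stored directly into the shadow nandc->regs block as __le32, and the caller passes that field's address to the DMA helper (write_reg_dma(), renamed qcom_write_reg_dma() by the follow-up patch below), replacing the removed nandc_set_reg()/offset lookup indirection.

#include <stdint.h>

typedef uint32_t __le32;                      /* stand-in for the kernel type */
#define cpu_to_le32(x) ((__le32)(x))          /* identity: little-endian host assumed */

/* Placeholder offsets/opcodes for the sketch, not taken from the driver header. */
#define NAND_FLASH_CMD  0x00
#define NAND_EXEC_CMD   0x10
#define OP_FETCH_ID     0x0b

struct nandc_regs {
        __le32 cmd;
        __le32 exec;
};

struct qcom_nand_controller {
        struct nandc_regs *regs;
};

static void qcom_write_reg_dma(struct qcom_nand_controller *nandc,
                               __le32 *vaddr, int first, int num_regs,
                               unsigned int flags)
{
        /* No-op stub: the real helper queues a BAM/ADM descriptor that copies
         * *vaddr into the controller register block starting at 'first'. */
        (void)nandc; (void)vaddr; (void)first; (void)num_regs; (void)flags;
}

static void issue_fetch_id(struct qcom_nand_controller *nandc)
{
        /* New pattern: fill the shadow registers directly, already in LE... */
        nandc->regs->cmd  = cpu_to_le32(OP_FETCH_ID);
        nandc->regs->exec = cpu_to_le32(1);

        /* ...then hand each field's address to the DMA helper explicitly. */
        qcom_write_reg_dma(nandc, &nandc->regs->cmd,  NAND_FLASH_CMD, 1, 0);
        qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD,  1, 0);
}

int main(void)
{
        struct nandc_regs regs = { 0 };
        struct qcom_nand_controller nandc = { .regs = &regs };

        issue_fetch_id(&nandc);
        return 0;
}

Keeping the shadow block already byte-swapped lets the helper pass the pointer straight into the command descriptor, which is why the old per-register value indirection could be dropped in the cleanup patch above.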
diff --git a/target/linux/qualcommbe/patches-6.6/016-02-v6.14-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch b/target/linux/qualcommbe/patches-6.6/016-02-v6.14-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch
deleted file mode 100644 (file)
index ca8727d..0000000
+++ /dev/null
@@ -1,885 +0,0 @@
-From 1d479f5b345e0c3650fec4dddeef9fc6fab30c8b Mon Sep 17 00:00:00 2001
-From: Md Sadre Alam <quic_mdalam@quicinc.com>
-Date: Wed, 20 Nov 2024 14:45:01 +0530
-Subject: [PATCH 2/4] mtd: rawnand: qcom: Add qcom prefix to common api
-
-Add qcom prefix to all the api which will be commonly
-used by spi nand driver and raw nand driver.
-
-Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
-Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
-Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
----
- drivers/mtd/nand/raw/qcom_nandc.c | 320 +++++++++++++++---------------
- 1 file changed, 160 insertions(+), 160 deletions(-)
-
-diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
-index 9ae8c9f2ab55..6da5d23d2c8b 100644
---- a/drivers/mtd/nand/raw/qcom_nandc.c
-+++ b/drivers/mtd/nand/raw/qcom_nandc.c
-@@ -53,7 +53,7 @@
- #define       NAND_READ_LOCATION_LAST_CW_2    0xf48
- #define       NAND_READ_LOCATION_LAST_CW_3    0xf4c
--/* dummy register offsets, used by write_reg_dma */
-+/* dummy register offsets, used by qcom_write_reg_dma */
- #define       NAND_DEV_CMD1_RESTORE           0xdead
- #define       NAND_DEV_CMD_VLD_RESTORE        0xbeef
-@@ -211,7 +211,7 @@
- /*
-  * Flags used in DMA descriptor preparation helper functions
-- * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
-+ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
-  */
- /* Don't set the EOT in current tx BAM sgl */
- #define NAND_BAM_NO_EOT                       BIT(0)
-@@ -550,7 +550,7 @@ struct qcom_nandc_props {
- };
- /* Frees the BAM transaction memory */
--static void free_bam_transaction(struct qcom_nand_controller *nandc)
-+static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
- {
-       struct bam_transaction *bam_txn = nandc->bam_txn;
-@@ -559,7 +559,7 @@ static void free_bam_transaction(struct qcom_nand_controller *nandc)
- /* Allocates and Initializes the BAM transaction */
- static struct bam_transaction *
--alloc_bam_transaction(struct qcom_nand_controller *nandc)
-+qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
- {
-       struct bam_transaction *bam_txn;
-       size_t bam_txn_size;
-@@ -595,7 +595,7 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
- }
- /* Clears the BAM transaction indexes */
--static void clear_bam_transaction(struct qcom_nand_controller *nandc)
-+static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
- {
-       struct bam_transaction *bam_txn = nandc->bam_txn;
-@@ -614,7 +614,7 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
- }
- /* Callback for DMA descriptor completion */
--static void qpic_bam_dma_done(void *data)
-+static void qcom_qpic_bam_dma_done(void *data)
- {
-       struct bam_transaction *bam_txn = data;
-@@ -644,7 +644,7 @@ static void nandc_write(struct qcom_nand_controller *nandc, int offset,
-       iowrite32(val, nandc->base + offset);
- }
--static void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
-+static void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
- {
-       if (!nandc->props->supports_bam)
-               return;
-@@ -824,9 +824,9 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i
-  * for BAM. This descriptor will be added in the NAND DMA descriptor queue
-  * which will be submitted to DMA engine.
-  */
--static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
--                                struct dma_chan *chan,
--                                unsigned long flags)
-+static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
-+                                     struct dma_chan *chan,
-+                                     unsigned long flags)
- {
-       struct desc_info *desc;
-       struct scatterlist *sgl;
-@@ -903,9 +903,9 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
-  * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
-  * after the current command element.
-  */
--static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
--                               int reg_off, const void *vaddr,
--                               int size, unsigned int flags)
-+static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
-+                                    int reg_off, const void *vaddr,
-+                                    int size, unsigned int flags)
- {
-       int bam_ce_size;
-       int i, ret;
-@@ -943,9 +943,9 @@ static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
-               bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
-               if (flags & NAND_BAM_NWD) {
--                      ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
--                                                   DMA_PREP_FENCE |
--                                                   DMA_PREP_CMD);
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-+                                                        DMA_PREP_FENCE |
-+                                                        DMA_PREP_CMD);
-                       if (ret)
-                               return ret;
-               }
-@@ -958,9 +958,8 @@ static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
-  * Prepares the data descriptor for BAM DMA which will be used for NAND
-  * data reads and writes.
-  */
--static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
--                                const void *vaddr,
--                                int size, unsigned int flags)
-+static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
-+                                     const void *vaddr, int size, unsigned int flags)
- {
-       int ret;
-       struct bam_transaction *bam_txn = nandc->bam_txn;
-@@ -979,8 +978,8 @@ static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
-                * is not set, form the DMA descriptor
-                */
-               if (!(flags & NAND_BAM_NO_EOT)) {
--                      ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
--                                                   DMA_PREP_INTERRUPT);
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-+                                                        DMA_PREP_INTERRUPT);
-                       if (ret)
-                               return ret;
-               }
-@@ -989,9 +988,9 @@ static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
-       return 0;
- }
--static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
--                           int reg_off, const void *vaddr, int size,
--                           bool flow_control)
-+static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
-+                                int reg_off, const void *vaddr, int size,
-+                                bool flow_control)
- {
-       struct desc_info *desc;
-       struct dma_async_tx_descriptor *dma_desc;
-@@ -1069,15 +1068,15 @@ static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
- }
- /*
-- * read_reg_dma:      prepares a descriptor to read a given number of
-+ * qcom_read_reg_dma: prepares a descriptor to read a given number of
-  *                    contiguous registers to the reg_read_buf pointer
-  *
-  * @first:            offset of the first register in the contiguous block
-  * @num_regs:         number of registers to read
-  * @flags:            flags to control DMA descriptor preparation
-  */
--static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
--                      int num_regs, unsigned int flags)
-+static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
-+                           int num_regs, unsigned int flags)
- {
-       bool flow_control = false;
-       void *vaddr;
-@@ -1089,18 +1088,18 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
-               first = dev_cmd_reg_addr(nandc, first);
-       if (nandc->props->supports_bam)
--              return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
-+              return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
-                                            num_regs, flags);
-       if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
-               flow_control = true;
--      return prep_adm_dma_desc(nandc, true, first, vaddr,
-+      return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
-                                num_regs * sizeof(u32), flow_control);
- }
- /*
-- * write_reg_dma:     prepares a descriptor to write a given number of
-+ * qcom_write_reg_dma:        prepares a descriptor to write a given number of
-  *                    contiguous registers
-  *
-  * @vaddr:            contiguous memory from where register value will
-@@ -1109,8 +1108,8 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
-  * @num_regs:         number of registers to write
-  * @flags:            flags to control DMA descriptor preparation
-  */
--static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
--                       int first, int num_regs, unsigned int flags)
-+static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
-+                            int first, int num_regs, unsigned int flags)
- {
-       bool flow_control = false;
-@@ -1124,18 +1123,18 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
-               first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
-       if (nandc->props->supports_bam)
--              return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
-+              return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
-                                            num_regs, flags);
-       if (first == NAND_FLASH_CMD)
-               flow_control = true;
--      return prep_adm_dma_desc(nandc, false, first, vaddr,
-+      return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
-                                num_regs * sizeof(u32), flow_control);
- }
- /*
-- * read_data_dma:     prepares a DMA descriptor to transfer data from the
-+ * qcom_read_data_dma:        prepares a DMA descriptor to transfer data from the
-  *                    controller's internal buffer to the buffer 'vaddr'
-  *
-  * @reg_off:          offset within the controller's data buffer
-@@ -1143,17 +1142,17 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
-  * @size:             DMA transaction size in bytes
-  * @flags:            flags to control DMA descriptor preparation
-  */
--static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
--                       const u8 *vaddr, int size, unsigned int flags)
-+static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-+                            const u8 *vaddr, int size, unsigned int flags)
- {
-       if (nandc->props->supports_bam)
--              return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
-+              return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
--      return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
-+      return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
- }
- /*
-- * write_data_dma:    prepares a DMA descriptor to transfer data from
-+ * qcom_write_data_dma:       prepares a DMA descriptor to transfer data from
-  *                    'vaddr' to the controller's internal buffer
-  *
-  * @reg_off:          offset within the controller's data buffer
-@@ -1161,13 +1160,13 @@ static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-  * @size:             DMA transaction size in bytes
-  * @flags:            flags to control DMA descriptor preparation
-  */
--static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
--                        const u8 *vaddr, int size, unsigned int flags)
-+static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-+                             const u8 *vaddr, int size, unsigned int flags)
- {
-       if (nandc->props->supports_bam)
--              return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
-+              return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
--      return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
-+      return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
- }
- /*
-@@ -1178,14 +1177,14 @@ static void config_nand_page_read(struct nand_chip *chip)
- {
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
--      write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
--      write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+      qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-+      qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-       if (!nandc->props->qpic_version2)
--              write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
--      write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
--                    NAND_ERASED_CW_DETECT_CFG, 1, 0);
--      write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
--                    NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
-+              qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
-+      qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
-+                         NAND_ERASED_CW_DETECT_CFG, 1, 0);
-+      qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
-+                         NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
- }
- /*
-@@ -1204,17 +1203,17 @@ config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw)
-               reg = &nandc->regs->read_location_last0;
-       if (nandc->props->supports_bam)
--              write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
-+              qcom_write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-       if (use_ecc) {
--              read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
--              read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
--                           NAND_BAM_NEXT_SGL);
-+              qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
-+              qcom_read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
-+                                NAND_BAM_NEXT_SGL);
-       } else {
--              read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-+              qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-       }
- }
-@@ -1238,11 +1237,11 @@ static void config_nand_page_write(struct nand_chip *chip)
- {
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
--      write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
--      write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+      qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-+      qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-       if (!nandc->props->qpic_version2)
--              write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
--                            NAND_BAM_NEXT_SGL);
-+              qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
-+                                 NAND_BAM_NEXT_SGL);
- }
- /*
-@@ -1253,17 +1252,18 @@ static void config_nand_cw_write(struct nand_chip *chip)
- {
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
--      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
--      read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-+      qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
--      write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
-+      qcom_write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
-+                         NAND_BAM_NEXT_SGL);
- }
- /* helpers to submit/free our list of dma descriptors */
--static int submit_descs(struct qcom_nand_controller *nandc)
-+static int qcom_submit_descs(struct qcom_nand_controller *nandc)
- {
-       struct desc_info *desc, *n;
-       dma_cookie_t cookie = 0;
-@@ -1272,21 +1272,21 @@ static int submit_descs(struct qcom_nand_controller *nandc)
-       if (nandc->props->supports_bam) {
-               if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
--                      ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
-                       if (ret)
-                               goto err_unmap_free_desc;
-               }
-               if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
--                      ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
--                                                 DMA_PREP_INTERRUPT);
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-+                                                        DMA_PREP_INTERRUPT);
-                       if (ret)
-                               goto err_unmap_free_desc;
-               }
-               if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
--                      ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
--                                                 DMA_PREP_CMD);
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-+                                                        DMA_PREP_CMD);
-                       if (ret)
-                               goto err_unmap_free_desc;
-               }
-@@ -1296,7 +1296,7 @@ static int submit_descs(struct qcom_nand_controller *nandc)
-               cookie = dmaengine_submit(desc->dma_desc);
-       if (nandc->props->supports_bam) {
--              bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
-+              bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
-               bam_txn->last_cmd_desc->callback_param = bam_txn;
-               dma_async_issue_pending(nandc->tx_chan);
-@@ -1314,7 +1314,7 @@ static int submit_descs(struct qcom_nand_controller *nandc)
- err_unmap_free_desc:
-       /*
-        * Unmap the dma sg_list and free the desc allocated by both
--       * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
-+       * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
-        */
-       list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
-               list_del(&desc->node);
-@@ -1333,10 +1333,10 @@ static int submit_descs(struct qcom_nand_controller *nandc)
- }
- /* reset the register read buffer for next NAND operation */
--static void clear_read_regs(struct qcom_nand_controller *nandc)
-+static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
- {
-       nandc->reg_read_pos = 0;
--      nandc_dev_to_mem(nandc, false);
-+      qcom_nandc_dev_to_mem(nandc, false);
- }
- /*
-@@ -1400,7 +1400,7 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
-       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-       int i;
--      nandc_dev_to_mem(nandc, true);
-+      qcom_nandc_dev_to_mem(nandc, true);
-       for (i = 0; i < cw_cnt; i++) {
-               u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
-@@ -1427,13 +1427,13 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
-       nand_read_page_op(chip, page, 0, NULL, 0);
-       nandc->buf_count = 0;
-       nandc->buf_start = 0;
--      clear_read_regs(nandc);
-+      qcom_clear_read_regs(nandc);
-       host->use_ecc = false;
-       if (nandc->props->qpic_version2)
-               raw_cw = ecc->steps - 1;
--      clear_bam_transaction(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       set_address(host, host->cw_size * cw, page);
-       update_rw_regs(host, 1, true, raw_cw);
-       config_nand_page_read(chip);
-@@ -1466,18 +1466,18 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
-       config_nand_cw_read(chip, false, raw_cw);
--      read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
-+      qcom_read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
-       reg_off += data_size1;
--      read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
-+      qcom_read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
-       reg_off += oob_size1;
--      read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
-+      qcom_read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
-       reg_off += data_size2;
--      read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
-+      qcom_read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
-               return ret;
-@@ -1575,7 +1575,7 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
-       u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
-       buf = (struct read_stats *)nandc->reg_read_buf;
--      nandc_dev_to_mem(nandc, true);
-+      qcom_nandc_dev_to_mem(nandc, true);
-       for (i = 0; i < ecc->steps; i++, buf++) {
-               u32 flash, buffer, erased_cw;
-@@ -1704,8 +1704,8 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
-               config_nand_cw_read(chip, true, i);
-               if (data_buf)
--                      read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
--                                    data_size, 0);
-+                      qcom_read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
-+                                         data_size, 0);
-               /*
-                * when ecc is enabled, the controller doesn't read the real
-@@ -1720,8 +1720,8 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
-                       for (j = 0; j < host->bbm_size; j++)
-                               *oob_buf++ = 0xff;
--                      read_data_dma(nandc, FLASH_BUF_ACC + data_size,
--                                    oob_buf, oob_size, 0);
-+                      qcom_read_data_dma(nandc, FLASH_BUF_ACC + data_size,
-+                                         oob_buf, oob_size, 0);
-               }
-               if (data_buf)
-@@ -1730,7 +1730,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
-                       oob_buf += oob_size;
-       }
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure to read page/oob\n");
-               return ret;
-@@ -1751,7 +1751,7 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
-       int size;
-       int ret;
--      clear_read_regs(nandc);
-+      qcom_clear_read_regs(nandc);
-       size = host->use_ecc ? host->cw_data : host->cw_size;
-@@ -1763,9 +1763,9 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
-       config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
--      read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
-+      qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret)
-               dev_err(nandc->dev, "failed to copy last codeword\n");
-@@ -1851,14 +1851,14 @@ static int qcom_nandc_read_page(struct nand_chip *chip, u8 *buf,
-       nandc->buf_count = 0;
-       nandc->buf_start = 0;
-       host->use_ecc = true;
--      clear_read_regs(nandc);
-+      qcom_clear_read_regs(nandc);
-       set_address(host, 0, page);
-       update_rw_regs(host, ecc->steps, true, 0);
-       data_buf = buf;
-       oob_buf = oob_required ? chip->oob_poi : NULL;
--      clear_bam_transaction(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       return read_page_ecc(host, data_buf, oob_buf, page);
- }
-@@ -1899,8 +1899,8 @@ static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
-       if (host->nr_boot_partitions)
-               qcom_nandc_codeword_fixup(host, page);
--      clear_read_regs(nandc);
--      clear_bam_transaction(nandc);
-+      qcom_clear_read_regs(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       host->use_ecc = true;
-       set_address(host, 0, page);
-@@ -1927,8 +1927,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
-       set_address(host, 0, page);
-       nandc->buf_count = 0;
-       nandc->buf_start = 0;
--      clear_read_regs(nandc);
--      clear_bam_transaction(nandc);
-+      qcom_clear_read_regs(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       data_buf = (u8 *)buf;
-       oob_buf = chip->oob_poi;
-@@ -1949,8 +1949,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
-                       oob_size = ecc->bytes;
-               }
--              write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
--                             i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
-+              qcom_write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
-+                                  i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
-               /*
-                * when ECC is enabled, we don't really need to write anything
-@@ -1962,8 +1962,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
-               if (qcom_nandc_is_last_cw(ecc, i)) {
-                       oob_buf += host->bbm_size;
--                      write_data_dma(nandc, FLASH_BUF_ACC + data_size,
--                                     oob_buf, oob_size, 0);
-+                      qcom_write_data_dma(nandc, FLASH_BUF_ACC + data_size,
-+                                          oob_buf, oob_size, 0);
-               }
-               config_nand_cw_write(chip);
-@@ -1972,7 +1972,7 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
-               oob_buf += oob_size;
-       }
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure to write page\n");
-               return ret;
-@@ -1997,8 +1997,8 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
-               qcom_nandc_codeword_fixup(host, page);
-       nand_prog_page_begin_op(chip, page, 0, NULL, 0);
--      clear_read_regs(nandc);
--      clear_bam_transaction(nandc);
-+      qcom_clear_read_regs(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       data_buf = (u8 *)buf;
-       oob_buf = chip->oob_poi;
-@@ -2024,28 +2024,28 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
-                       oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
-               }
--              write_data_dma(nandc, reg_off, data_buf, data_size1,
--                             NAND_BAM_NO_EOT);
-+              qcom_write_data_dma(nandc, reg_off, data_buf, data_size1,
-+                                  NAND_BAM_NO_EOT);
-               reg_off += data_size1;
-               data_buf += data_size1;
--              write_data_dma(nandc, reg_off, oob_buf, oob_size1,
--                             NAND_BAM_NO_EOT);
-+              qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size1,
-+                                  NAND_BAM_NO_EOT);
-               reg_off += oob_size1;
-               oob_buf += oob_size1;
--              write_data_dma(nandc, reg_off, data_buf, data_size2,
--                             NAND_BAM_NO_EOT);
-+              qcom_write_data_dma(nandc, reg_off, data_buf, data_size2,
-+                                  NAND_BAM_NO_EOT);
-               reg_off += data_size2;
-               data_buf += data_size2;
--              write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
-+              qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
-               oob_buf += oob_size2;
-               config_nand_cw_write(chip);
-       }
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure to write raw page\n");
-               return ret;
-@@ -2075,7 +2075,7 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
-               qcom_nandc_codeword_fixup(host, page);
-       host->use_ecc = true;
--      clear_bam_transaction(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       /* calculate the data and oob size for the last codeword/step */
-       data_size = ecc->size - ((ecc->steps - 1) << 2);
-@@ -2090,11 +2090,11 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
-       update_rw_regs(host, 1, false, 0);
-       config_nand_page_write(chip);
--      write_data_dma(nandc, FLASH_BUF_ACC,
--                     nandc->data_buffer, data_size + oob_size, 0);
-+      qcom_write_data_dma(nandc, FLASH_BUF_ACC,
-+                          nandc->data_buffer, data_size + oob_size, 0);
-       config_nand_cw_write(chip);
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure to write oob\n");
-               return ret;
-@@ -2121,7 +2121,7 @@ static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
-        */
-       host->use_ecc = false;
--      clear_bam_transaction(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       ret = copy_last_cw(host, page);
-       if (ret)
-               goto err;
-@@ -2148,8 +2148,8 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
-       struct nand_ecc_ctrl *ecc = &chip->ecc;
-       int page, ret;
--      clear_read_regs(nandc);
--      clear_bam_transaction(nandc);
-+      qcom_clear_read_regs(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       /*
-        * to mark the BBM as bad, we flash the entire last codeword with 0s.
-@@ -2166,11 +2166,11 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
-       update_rw_regs(host, 1, false, ecc->steps - 1);
-       config_nand_page_write(chip);
--      write_data_dma(nandc, FLASH_BUF_ACC,
--                     nandc->data_buffer, host->cw_size, 0);
-+      qcom_write_data_dma(nandc, FLASH_BUF_ACC,
-+                          nandc->data_buffer, host->cw_size, 0);
-       config_nand_cw_write(chip);
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure to update BBM\n");
-               return ret;
-@@ -2410,14 +2410,14 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
-       mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
-       /* Free the initially allocated BAM transaction for reading the ONFI params */
-       if (nandc->props->supports_bam)
--              free_bam_transaction(nandc);
-+              qcom_free_bam_transaction(nandc);
-       nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
-                                    cwperpage);
-       /* Now allocate the BAM transaction based on updated max_cwperpage */
-       if (nandc->props->supports_bam) {
--              nandc->bam_txn = alloc_bam_transaction(nandc);
-+              nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
-               if (!nandc->bam_txn) {
-                       dev_err(nandc->dev,
-                               "failed to allocate bam transaction\n");
-@@ -2617,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms)
-       unsigned long start = jiffies + msecs_to_jiffies(time_ms);
-       u32 flash;
--      nandc_dev_to_mem(nandc, true);
-+      qcom_nandc_dev_to_mem(nandc, true);
-       do {
-               flash = le32_to_cpu(nandc->reg_read_buf[0]);
-@@ -2657,23 +2657,23 @@ static int qcom_read_status_exec(struct nand_chip *chip,
-       nandc->buf_start = 0;
-       host->use_ecc = false;
--      clear_read_regs(nandc);
--      clear_bam_transaction(nandc);
-+      qcom_clear_read_regs(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       nandc->regs->cmd = q_op.cmd_reg;
-       nandc->regs->exec = cpu_to_le32(1);
--      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
--      read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure in submitting status descriptor\n");
-               goto err_out;
-       }
--      nandc_dev_to_mem(nandc, true);
-+      qcom_nandc_dev_to_mem(nandc, true);
-       for (i = 0; i < num_cw; i++) {
-               flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
-@@ -2714,8 +2714,8 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
-       nandc->buf_start = 0;
-       host->use_ecc = false;
--      clear_read_regs(nandc);
--      clear_bam_transaction(nandc);
-+      qcom_clear_read_regs(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       nandc->regs->cmd = q_op.cmd_reg;
-       nandc->regs->addr0 = q_op.addr1_reg;
-@@ -2723,12 +2723,12 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
-       nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
-       nandc->regs->exec = cpu_to_le32(1);
--      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
--      read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
-+      qcom_read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure in submitting read id descriptor\n");
-               goto err_out;
-@@ -2738,7 +2738,7 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
-       op_id = q_op.data_instr_idx;
-       len = nand_subop_get_data_len(subop, op_id);
--      nandc_dev_to_mem(nandc, true);
-+      qcom_nandc_dev_to_mem(nandc, true);
-       memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
- err_out:
-@@ -2774,20 +2774,20 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
-       nandc->buf_start = 0;
-       host->use_ecc = false;
--      clear_read_regs(nandc);
--      clear_bam_transaction(nandc);
-+      qcom_clear_read_regs(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       nandc->regs->cmd = q_op.cmd_reg;
-       nandc->regs->exec = cpu_to_le32(1);
--      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
-       if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
--              write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
-+              qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
--      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
--      read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-+      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+      qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure in submitting misc descriptor\n");
-               goto err_out;
-@@ -2820,8 +2820,8 @@ static int qcom_param_page_type_exec(struct nand_chip *chip,  const struct nand_
-       nandc->buf_count = 0;
-       nandc->buf_start = 0;
-       host->use_ecc = false;
--      clear_read_regs(nandc);
--      clear_bam_transaction(nandc);
-+      qcom_clear_read_regs(nandc);
-+      qcom_clear_bam_transaction(nandc);
-       nandc->regs->cmd = q_op.cmd_reg;
-       nandc->regs->addr0 = 0;
-@@ -2864,8 +2864,8 @@ static int qcom_param_page_type_exec(struct nand_chip *chip,  const struct nand_
-       nandc_set_read_loc(chip, 0, 0, 0, len, 1);
-       if (!nandc->props->qpic_version2) {
--              write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
--              write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
-+              qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
-+              qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
-       }
-       nandc->buf_count = len;
-@@ -2873,17 +2873,17 @@ static int qcom_param_page_type_exec(struct nand_chip *chip,  const struct nand_
-       config_nand_single_cw_page_read(chip, false, 0);
--      read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
--                    nandc->buf_count, 0);
-+      qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
-+                         nandc->buf_count, 0);
-       /* restore CMD1 and VLD regs */
-       if (!nandc->props->qpic_version2) {
--              write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
--              write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
--                            NAND_BAM_NEXT_SGL);
-+              qcom_write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
-+              qcom_write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
-+                                 NAND_BAM_NEXT_SGL);
-       }
--      ret = submit_descs(nandc);
-+      ret = qcom_submit_descs(nandc);
-       if (ret) {
-               dev_err(nandc->dev, "failure in submitting param page descriptor\n");
-               goto err_out;
-@@ -3067,7 +3067,7 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
-                * maximum codeword size
-                */
-               nandc->max_cwperpage = 1;
--              nandc->bam_txn = alloc_bam_transaction(nandc);
-+              nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
-               if (!nandc->bam_txn) {
-                       dev_err(nandc->dev,
-                               "failed to allocate bam transaction\n");
--- 
-2.47.1
-
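The renames above turn the QPIC BAM/ADM DMA helpers into exported, qcom_-prefixed functions, and the patch below moves them into a shared drivers/mtd/nand/qpic_common.c so that both the raw NAND driver and the qpic SPI-NAND driver can queue descriptors through one common path. As a rough illustration of how a consumer is expected to drive that exported API (a minimal sketch only: it assumes the register offsets, opcodes such as OP_CHECK_STATUS, and the controller struct layout land in <linux/mtd/nand-qpic-common.h> as the diffstat below indicates, and example_read_flash_status() is a hypothetical helper, not part of any patch):

#include <linux/mtd/nand-qpic-common.h>

/* Read NAND_FLASH_STATUS once, following the same descriptor sequence the
 * reworked qcom_read_status_exec() uses: stage register writes, queue a
 * register read, submit everything, then sync the DMA buffer back.
 */
static int example_read_flash_status(struct qcom_nand_controller *nandc,
                                     u32 *status)
{
        int ret;

        /* reset the register-read buffer and the BAM transaction indexes */
        qcom_clear_read_regs(nandc);
        qcom_clear_bam_transaction(nandc);

        /* stage the command and exec register values to be written via DMA */
        nandc->regs->cmd = cpu_to_le32(OP_CHECK_STATUS);
        nandc->regs->exec = cpu_to_le32(1);

        qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1,
                           NAND_BAM_NEXT_SGL);
        qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1,
                           NAND_BAM_NEXT_SGL);
        qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

        /* submit all queued descriptors and wait for the DMA to complete */
        ret = qcom_submit_descs(nandc);
        if (ret)
                return ret;

        /* make the register-read buffer visible to the CPU again */
        qcom_nandc_dev_to_mem(nandc, true);
        *status = le32_to_cpu(nandc->reg_read_buf[0]);

        return 0;
}

The call pattern is the same one every op in the raw driver follows after the rename: clear, stage, queue, submit, sync; only the staged register values differ per operation.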
diff --git a/target/linux/qualcommbe/patches-6.6/016-03-v6.14-mtd-nand-Add-qpic_common-API-file.patch b/target/linux/qualcommbe/patches-6.6/016-03-v6.14-mtd-nand-Add-qpic_common-API-file.patch
deleted file mode 100644 (file)
index 8506fba..0000000
+++ /dev/null
@@ -1,2449 +0,0 @@
-From fdf3ee5c6e5278dab4f60b998b47ed2d510bf80f Mon Sep 17 00:00:00 2001
-From: Md Sadre Alam <quic_mdalam@quicinc.com>
-Date: Wed, 20 Nov 2024 14:45:02 +0530
-Subject: [PATCH 3/4] mtd: nand: Add qpic_common API file
-
-Add qpic_common.c file which hold all the common
-qpic APIs which will be used by both qpic raw nand
-driver and qpic spi nand driver.
-
-Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
-Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
----
- drivers/mtd/nand/Makefile            |    2 +-
- drivers/mtd/nand/qpic_common.c       |  759 ++++++++++++++++++
- drivers/mtd/nand/raw/qcom_nandc.c    | 1092 +-------------------------
- include/linux/mtd/nand-qpic-common.h |  468 +++++++++++
- 4 files changed, 1240 insertions(+), 1081 deletions(-)
- create mode 100644 drivers/mtd/nand/qpic_common.c
- create mode 100644 include/linux/mtd/nand-qpic-common.h
-
-diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
-index 19e1291ac4d5..da1586a36574 100644
---- a/drivers/mtd/nand/Makefile
-+++ b/drivers/mtd/nand/Makefile
-@@ -3,7 +3,7 @@
- obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
- obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
- obj-$(CONFIG_MTD_NAND_MTK_BMT)        += mtk_bmt.o mtk_bmt_v2.o mtk_bmt_bbt.o mtk_bmt_nmbm.o
--
-+obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
- obj-y += onenand/
- obj-y += raw/
- obj-y += spi/
-diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c
-new file mode 100644
-index 000000000000..8abbb960a7ce
---- /dev/null
-+++ b/drivers/mtd/nand/qpic_common.c
-@@ -0,0 +1,759 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
-+ */
-+#include <linux/clk.h>
-+#include <linux/delay.h>
-+#include <linux/dmaengine.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/dma/qcom_adm.h>
-+#include <linux/dma/qcom_bam_dma.h>
-+#include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/platform_device.h>
-+#include <linux/slab.h>
-+#include <linux/mtd/nand-qpic-common.h>
-+
-+/**
-+ * qcom_free_bam_transaction() - Frees the BAM transaction memory
-+ * @nandc: qpic nand controller
-+ *
-+ * This function frees the bam transaction memory
-+ */
-+void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
-+{
-+      struct bam_transaction *bam_txn = nandc->bam_txn;
-+
-+      kfree(bam_txn);
-+}
-+EXPORT_SYMBOL(qcom_free_bam_transaction);
-+
-+/**
-+ * qcom_alloc_bam_transaction() - allocate BAM transaction
-+ * @nandc: qpic nand controller
-+ *
-+ * This function will allocate and initialize the BAM transaction structure
-+ */
-+struct bam_transaction *
-+qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
-+{
-+      struct bam_transaction *bam_txn;
-+      size_t bam_txn_size;
-+      unsigned int num_cw = nandc->max_cwperpage;
-+      void *bam_txn_buf;
-+
-+      bam_txn_size =
-+              sizeof(*bam_txn) + num_cw *
-+              ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
-+              (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
-+              (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
-+
-+      bam_txn_buf = kzalloc(bam_txn_size, GFP_KERNEL);
-+      if (!bam_txn_buf)
-+              return NULL;
-+
-+      bam_txn = bam_txn_buf;
-+      bam_txn_buf += sizeof(*bam_txn);
-+
-+      bam_txn->bam_ce = bam_txn_buf;
-+      bam_txn_buf +=
-+              sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
-+
-+      bam_txn->cmd_sgl = bam_txn_buf;
-+      bam_txn_buf +=
-+              sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
-+
-+      bam_txn->data_sgl = bam_txn_buf;
-+
-+      init_completion(&bam_txn->txn_done);
-+
-+      return bam_txn;
-+}
-+EXPORT_SYMBOL(qcom_alloc_bam_transaction);
-+
-+/**
-+ * qcom_clear_bam_transaction() - Clears the BAM transaction
-+ * @nandc: qpic nand controller
-+ *
-+ * This function will clear the BAM transaction indexes.
-+ */
-+void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
-+{
-+      struct bam_transaction *bam_txn = nandc->bam_txn;
-+
-+      if (!nandc->props->supports_bam)
-+              return;
-+
-+      memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
-+      bam_txn->last_data_desc = NULL;
-+
-+      sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
-+                    QPIC_PER_CW_CMD_SGL);
-+      sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
-+                    QPIC_PER_CW_DATA_SGL);
-+
-+      reinit_completion(&bam_txn->txn_done);
-+}
-+EXPORT_SYMBOL(qcom_clear_bam_transaction);
-+
-+/**
-+ * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion
-+ * @data: data pointer
-+ *
-+ * This function is a callback for DMA descriptor completion
-+ */
-+void qcom_qpic_bam_dma_done(void *data)
-+{
-+      struct bam_transaction *bam_txn = data;
-+
-+      complete(&bam_txn->txn_done);
-+}
-+EXPORT_SYMBOL(qcom_qpic_bam_dma_done);
-+
-+/**
-+ * qcom_nandc_dev_to_mem() - Check for dma sync for cpu or device
-+ * @nandc: qpic nand controller
-+ * @is_cpu: cpu or Device
-+ *
-+ * This function will check for dma sync for cpu or device
-+ */
-+inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
-+{
-+      if (!nandc->props->supports_bam)
-+              return;
-+
-+      if (is_cpu)
-+              dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
-+                                      MAX_REG_RD *
-+                                      sizeof(*nandc->reg_read_buf),
-+                                      DMA_FROM_DEVICE);
-+      else
-+              dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
-+                                         MAX_REG_RD *
-+                                         sizeof(*nandc->reg_read_buf),
-+                                         DMA_FROM_DEVICE);
-+}
-+EXPORT_SYMBOL(qcom_nandc_dev_to_mem);
-+
-+/**
-+ * qcom_prepare_bam_async_desc() - Prepare DMA descriptor
-+ * @nandc: qpic nand controller
-+ * @chan: dma channel
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function maps the scatter gather list for DMA transfer and forms the
-+ * DMA descriptor for BAM.This descriptor will be added in the NAND DMA
-+ * descriptor queue which will be submitted to DMA engine.
-+ */
-+int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
-+                              struct dma_chan *chan, unsigned long flags)
-+{
-+      struct desc_info *desc;
-+      struct scatterlist *sgl;
-+      unsigned int sgl_cnt;
-+      int ret;
-+      struct bam_transaction *bam_txn = nandc->bam_txn;
-+      enum dma_transfer_direction dir_eng;
-+      struct dma_async_tx_descriptor *dma_desc;
-+
-+      desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-+      if (!desc)
-+              return -ENOMEM;
-+
-+      if (chan == nandc->cmd_chan) {
-+              sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
-+              sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
-+              bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
-+              dir_eng = DMA_MEM_TO_DEV;
-+              desc->dir = DMA_TO_DEVICE;
-+      } else if (chan == nandc->tx_chan) {
-+              sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
-+              sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
-+              bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
-+              dir_eng = DMA_MEM_TO_DEV;
-+              desc->dir = DMA_TO_DEVICE;
-+      } else {
-+              sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
-+              sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
-+              bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
-+              dir_eng = DMA_DEV_TO_MEM;
-+              desc->dir = DMA_FROM_DEVICE;
-+      }
-+
-+      sg_mark_end(sgl + sgl_cnt - 1);
-+      ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
-+      if (ret == 0) {
-+              dev_err(nandc->dev, "failure in mapping desc\n");
-+              kfree(desc);
-+              return -ENOMEM;
-+      }
-+
-+      desc->sgl_cnt = sgl_cnt;
-+      desc->bam_sgl = sgl;
-+
-+      dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
-+                                         flags);
-+
-+      if (!dma_desc) {
-+              dev_err(nandc->dev, "failure in prep desc\n");
-+              dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
-+              kfree(desc);
-+              return -EINVAL;
-+      }
-+
-+      desc->dma_desc = dma_desc;
-+
-+      /* update last data/command descriptor */
-+      if (chan == nandc->cmd_chan)
-+              bam_txn->last_cmd_desc = dma_desc;
-+      else
-+              bam_txn->last_data_desc = dma_desc;
-+
-+      list_add_tail(&desc->node, &nandc->desc_list);
-+
-+      return 0;
-+}
-+EXPORT_SYMBOL(qcom_prepare_bam_async_desc);
-+
-+/**
-+ * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA
-+ * @nandc: qpic nand controller
-+ * @read: read or write type
-+ * @reg_off: offset within the controller's data buffer
-+ * @vaddr: virtual address of the buffer we want to write to
-+ * @size: DMA transaction size in bytes
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares the command descriptor for BAM DMA
-+ * which will be used for NAND register reads and writes.
-+ */
-+int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
-+                             int reg_off, const void *vaddr,
-+                             int size, unsigned int flags)
-+{
-+      int bam_ce_size;
-+      int i, ret;
-+      struct bam_cmd_element *bam_ce_buffer;
-+      struct bam_transaction *bam_txn = nandc->bam_txn;
-+
-+      bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
-+
-+      /* fill the command desc */
-+      for (i = 0; i < size; i++) {
-+              if (read)
-+                      bam_prep_ce(&bam_ce_buffer[i],
-+                                  nandc_reg_phys(nandc, reg_off + 4 * i),
-+                                  BAM_READ_COMMAND,
-+                                  reg_buf_dma_addr(nandc,
-+                                                   (__le32 *)vaddr + i));
-+              else
-+                      bam_prep_ce_le32(&bam_ce_buffer[i],
-+                                       nandc_reg_phys(nandc, reg_off + 4 * i),
-+                                       BAM_WRITE_COMMAND,
-+                                       *((__le32 *)vaddr + i));
-+      }
-+
-+      bam_txn->bam_ce_pos += size;
-+
-+      /* use the separate sgl after this command */
-+      if (flags & NAND_BAM_NEXT_SGL) {
-+              bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
-+              bam_ce_size = (bam_txn->bam_ce_pos -
-+                              bam_txn->bam_ce_start) *
-+                              sizeof(struct bam_cmd_element);
-+              sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
-+                         bam_ce_buffer, bam_ce_size);
-+              bam_txn->cmd_sgl_pos++;
-+              bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
-+
-+              if (flags & NAND_BAM_NWD) {
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-+                                                        DMA_PREP_FENCE | DMA_PREP_CMD);
-+                      if (ret)
-+                              return ret;
-+              }
-+      }
-+
-+      return 0;
-+}
-+EXPORT_SYMBOL(qcom_prep_bam_dma_desc_cmd);
-+
-+/**
-+ * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA
-+ * @nandc: qpic nand controller
-+ * @read: read or write type
-+ * @vaddr: virtual address of the buffer we want to write to
-+ * @size: DMA transaction size in bytes
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares the data descriptor for BAM DMA which
-+ * will be used for NAND data reads and writes.
-+ */
-+int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
-+                              const void *vaddr, int size, unsigned int flags)
-+{
-+      int ret;
-+      struct bam_transaction *bam_txn = nandc->bam_txn;
-+
-+      if (read) {
-+              sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
-+                         vaddr, size);
-+              bam_txn->rx_sgl_pos++;
-+      } else {
-+              sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
-+                         vaddr, size);
-+              bam_txn->tx_sgl_pos++;
-+
-+              /*
-+               * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
-+               * is not set, form the DMA descriptor
-+               */
-+              if (!(flags & NAND_BAM_NO_EOT)) {
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-+                                                        DMA_PREP_INTERRUPT);
-+                      if (ret)
-+                              return ret;
-+              }
-+      }
-+
-+      return 0;
-+}
-+EXPORT_SYMBOL(qcom_prep_bam_dma_desc_data);
-+
-+/**
-+ * qcom_prep_adm_dma_desc() - Prepare descriptor for adma
-+ * @nandc: qpic nand controller
-+ * @read: read or write type
-+ * @reg_off: offset within the controller's data buffer
-+ * @vaddr: virtual address of the buffer we want to write to
-+ * @size: adm dma transaction size in bytes
-+ * @flow_control: flow controller
-+ *
-+ * This function will prepare descriptor for adma
-+ */
-+int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
-+                         int reg_off, const void *vaddr, int size,
-+                         bool flow_control)
-+{
-+      struct qcom_adm_peripheral_config periph_conf = {};
-+      struct dma_async_tx_descriptor *dma_desc;
-+      struct dma_slave_config slave_conf = {0};
-+      enum dma_transfer_direction dir_eng;
-+      struct desc_info *desc;
-+      struct scatterlist *sgl;
-+      int ret;
-+
-+      desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-+      if (!desc)
-+              return -ENOMEM;
-+
-+      sgl = &desc->adm_sgl;
-+
-+      sg_init_one(sgl, vaddr, size);
-+
-+      if (read) {
-+              dir_eng = DMA_DEV_TO_MEM;
-+              desc->dir = DMA_FROM_DEVICE;
-+      } else {
-+              dir_eng = DMA_MEM_TO_DEV;
-+              desc->dir = DMA_TO_DEVICE;
-+      }
-+
-+      ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
-+      if (!ret) {
-+              ret = -ENOMEM;
-+              goto err;
-+      }
-+
-+      slave_conf.device_fc = flow_control;
-+      if (read) {
-+              slave_conf.src_maxburst = 16;
-+              slave_conf.src_addr = nandc->base_dma + reg_off;
-+              if (nandc->data_crci) {
-+                      periph_conf.crci = nandc->data_crci;
-+                      slave_conf.peripheral_config = &periph_conf;
-+                      slave_conf.peripheral_size = sizeof(periph_conf);
-+              }
-+      } else {
-+              slave_conf.dst_maxburst = 16;
-+              slave_conf.dst_addr = nandc->base_dma + reg_off;
-+              if (nandc->cmd_crci) {
-+                      periph_conf.crci = nandc->cmd_crci;
-+                      slave_conf.peripheral_config = &periph_conf;
-+                      slave_conf.peripheral_size = sizeof(periph_conf);
-+              }
-+      }
-+
-+      ret = dmaengine_slave_config(nandc->chan, &slave_conf);
-+      if (ret) {
-+              dev_err(nandc->dev, "failed to configure dma channel\n");
-+              goto err;
-+      }
-+
-+      dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
-+      if (!dma_desc) {
-+              dev_err(nandc->dev, "failed to prepare desc\n");
-+              ret = -EINVAL;
-+              goto err;
-+      }
-+
-+      desc->dma_desc = dma_desc;
-+
-+      list_add_tail(&desc->node, &nandc->desc_list);
-+
-+      return 0;
-+err:
-+      kfree(desc);
-+
-+      return ret;
-+}
-+EXPORT_SYMBOL(qcom_prep_adm_dma_desc);
-+
-+/**
-+ * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer
-+ * @nandc: qpic nand controller
-+ * @first: offset of the first register in the contiguous block
-+ * @num_regs: number of registers to read
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares a descriptor to read a given number of
-+ * contiguous registers to the reg_read_buf pointer.
-+ */
-+int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
-+                    int num_regs, unsigned int flags)
-+{
-+      bool flow_control = false;
-+      void *vaddr;
-+
-+      vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
-+      nandc->reg_read_pos += num_regs;
-+
-+      if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
-+              first = dev_cmd_reg_addr(nandc, first);
-+
-+      if (nandc->props->supports_bam)
-+              return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
-+                                           num_regs, flags);
-+
-+      if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
-+              flow_control = true;
-+
-+      return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
-+                                    num_regs * sizeof(u32), flow_control);
-+}
-+EXPORT_SYMBOL(qcom_read_reg_dma);
-+
-+/**
-+ * qcom_write_reg_dma() - write a given number of registers
-+ * @nandc: qpic nand controller
-+ * @vaddr: contiguous memory from where register value will
-+ *       be written
-+ * @first: offset of the first register in the contiguous block
-+ * @num_regs: number of registers to write
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares a descriptor to write a given number of
-+ * contiguous registers
-+ */
-+int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
-+                     int first, int num_regs, unsigned int flags)
-+{
-+      bool flow_control = false;
-+
-+      if (first == NAND_EXEC_CMD)
-+              flags |= NAND_BAM_NWD;
-+
-+      if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
-+              first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
-+
-+      if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
-+              first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
-+
-+      if (nandc->props->supports_bam)
-+              return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
-+                                                num_regs, flags);
-+
-+      if (first == NAND_FLASH_CMD)
-+              flow_control = true;
-+
-+      return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
-+                                    num_regs * sizeof(u32), flow_control);
-+}
-+EXPORT_SYMBOL(qcom_write_reg_dma);
-+
-+/**
-+ * qcom_read_data_dma() - transfer data
-+ * @nandc: qpic nand controller
-+ * @reg_off: offset within the controller's data buffer
-+ * @vaddr: virtual address of the buffer we want to write to
-+ * @size: DMA transaction size in bytes
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares a DMA descriptor to transfer data from the
-+ * controller's internal buffer to the buffer 'vaddr'
-+ */
-+int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-+                     const u8 *vaddr, int size, unsigned int flags)
-+{
-+      if (nandc->props->supports_bam)
-+              return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
-+
-+      return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
-+}
-+EXPORT_SYMBOL(qcom_read_data_dma);
-+
-+/**
-+ * qcom_write_data_dma() - transfer data
-+ * @nandc: qpic nand controller
-+ * @reg_off: offset within the controller's data buffer
-+ * @vaddr: virtual address of the buffer we want to read from
-+ * @size: DMA transaction size in bytes
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares a DMA descriptor to transfer data from
-+ * 'vaddr' to the controller's internal buffer
-+ */
-+int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-+                      const u8 *vaddr, int size, unsigned int flags)
-+{
-+      if (nandc->props->supports_bam)
-+              return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
-+
-+      return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
-+}
-+EXPORT_SYMBOL(qcom_write_data_dma);
-+
-+/**
-+ * qcom_submit_descs() - submit dma descriptor
-+ * @nandc: qpic nand controller
-+ *
-+ * This function will submit all the prepared dma descriptor
-+ * cmd or data descriptor
-+ */
-+int qcom_submit_descs(struct qcom_nand_controller *nandc)
-+{
-+      struct desc_info *desc, *n;
-+      dma_cookie_t cookie = 0;
-+      struct bam_transaction *bam_txn = nandc->bam_txn;
-+      int ret = 0;
-+
-+      if (nandc->props->supports_bam) {
-+              if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
-+                      if (ret)
-+                              goto err_unmap_free_desc;
-+              }
-+
-+              if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-+                                                        DMA_PREP_INTERRUPT);
-+                      if (ret)
-+                              goto err_unmap_free_desc;
-+              }
-+
-+              if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
-+                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-+                                                        DMA_PREP_CMD);
-+                      if (ret)
-+                              goto err_unmap_free_desc;
-+              }
-+      }
-+
-+      list_for_each_entry(desc, &nandc->desc_list, node)
-+              cookie = dmaengine_submit(desc->dma_desc);
-+
-+      if (nandc->props->supports_bam) {
-+              bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
-+              bam_txn->last_cmd_desc->callback_param = bam_txn;
-+
-+              dma_async_issue_pending(nandc->tx_chan);
-+              dma_async_issue_pending(nandc->rx_chan);
-+              dma_async_issue_pending(nandc->cmd_chan);
-+
-+              if (!wait_for_completion_timeout(&bam_txn->txn_done,
-+                                               QPIC_NAND_COMPLETION_TIMEOUT))
-+                      ret = -ETIMEDOUT;
-+      } else {
-+              if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
-+                      ret = -ETIMEDOUT;
-+      }
-+
-+err_unmap_free_desc:
-+      /*
-+       * Unmap the dma sg_list and free the desc allocated by both
-+       * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
-+       */
-+      list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
-+              list_del(&desc->node);
-+
-+              if (nandc->props->supports_bam)
-+                      dma_unmap_sg(nandc->dev, desc->bam_sgl,
-+                                   desc->sgl_cnt, desc->dir);
-+              else
-+                      dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
-+                                   desc->dir);
-+
-+              kfree(desc);
-+      }
-+
-+      return ret;
-+}
-+EXPORT_SYMBOL(qcom_submit_descs);
-+
-+/**
-+ * qcom_clear_read_regs() - reset the read register buffer
-+ * @nandc: qpic nand controller
-+ *
-+ * This function reset the register read buffer for next NAND operation
-+ */
-+void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
-+{
-+      nandc->reg_read_pos = 0;
-+      qcom_nandc_dev_to_mem(nandc, false);
-+}
-+EXPORT_SYMBOL(qcom_clear_read_regs);
-+
-+/**
-+ * qcom_nandc_unalloc() - unallocate qpic nand controller
-+ * @nandc: qpic nand controller
-+ *
-+ * This function will unallocate memory alloacted for qpic nand controller
-+ */
-+void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
-+{
-+      if (nandc->props->supports_bam) {
-+              if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
-+                      dma_unmap_single(nandc->dev, nandc->reg_read_dma,
-+                                       MAX_REG_RD *
-+                                       sizeof(*nandc->reg_read_buf),
-+                                       DMA_FROM_DEVICE);
-+
-+              if (nandc->tx_chan)
-+                      dma_release_channel(nandc->tx_chan);
-+
-+              if (nandc->rx_chan)
-+                      dma_release_channel(nandc->rx_chan);
-+
-+              if (nandc->cmd_chan)
-+                      dma_release_channel(nandc->cmd_chan);
-+      } else {
-+              if (nandc->chan)
-+                      dma_release_channel(nandc->chan);
-+      }
-+}
-+EXPORT_SYMBOL(qcom_nandc_unalloc);
-+
-+/**
-+ * qcom_nandc_alloc() - Allocate qpic nand controller
-+ * @nandc: qpic nand controller
-+ *
-+ * This function will allocate memory for qpic nand controller
-+ */
-+int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
-+{
-+      int ret;
-+
-+      ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
-+      if (ret) {
-+              dev_err(nandc->dev, "failed to set DMA mask\n");
-+              return ret;
-+      }
-+
-+      /*
-+       * we use the internal buffer for reading ONFI params, reading small
-+       * data like ID and status, and preforming read-copy-write operations
-+       * when writing to a codeword partially. 532 is the maximum possible
-+       * size of a codeword for our nand controller
-+       */
-+      nandc->buf_size = 532;
-+
-+      nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
-+      if (!nandc->data_buffer)
-+              return -ENOMEM;
-+
-+      nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
-+      if (!nandc->regs)
-+              return -ENOMEM;
-+
-+      nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
-+                                         sizeof(*nandc->reg_read_buf),
-+                                         GFP_KERNEL);
-+      if (!nandc->reg_read_buf)
-+              return -ENOMEM;
-+
-+      if (nandc->props->supports_bam) {
-+              nandc->reg_read_dma =
-+                      dma_map_single(nandc->dev, nandc->reg_read_buf,
-+                                     MAX_REG_RD *
-+                                     sizeof(*nandc->reg_read_buf),
-+                                     DMA_FROM_DEVICE);
-+              if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
-+                      dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
-+                      return -EIO;
-+              }
-+
-+              nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
-+              if (IS_ERR(nandc->tx_chan)) {
-+                      ret = PTR_ERR(nandc->tx_chan);
-+                      nandc->tx_chan = NULL;
-+                      dev_err_probe(nandc->dev, ret,
-+                                    "tx DMA channel request failed\n");
-+                      goto unalloc;
-+              }
-+
-+              nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
-+              if (IS_ERR(nandc->rx_chan)) {
-+                      ret = PTR_ERR(nandc->rx_chan);
-+                      nandc->rx_chan = NULL;
-+                      dev_err_probe(nandc->dev, ret,
-+                                    "rx DMA channel request failed\n");
-+                      goto unalloc;
-+              }
-+
-+              nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
-+              if (IS_ERR(nandc->cmd_chan)) {
-+                      ret = PTR_ERR(nandc->cmd_chan);
-+                      nandc->cmd_chan = NULL;
-+                      dev_err_probe(nandc->dev, ret,
-+                                    "cmd DMA channel request failed\n");
-+                      goto unalloc;
-+              }
-+
-+              /*
-+               * Initially allocate BAM transaction to read ONFI param page.
-+               * After detecting all the devices, this BAM transaction will
-+               * be freed and the next BAM transaction will be allocated with
-+               * maximum codeword size
-+               */
-+              nandc->max_cwperpage = 1;
-+              nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
-+              if (!nandc->bam_txn) {
-+                      dev_err(nandc->dev,
-+                              "failed to allocate bam transaction\n");
-+                      ret = -ENOMEM;
-+                      goto unalloc;
-+              }
-+      } else {
-+              nandc->chan = dma_request_chan(nandc->dev, "rxtx");
-+              if (IS_ERR(nandc->chan)) {
-+                      ret = PTR_ERR(nandc->chan);
-+                      nandc->chan = NULL;
-+                      dev_err_probe(nandc->dev, ret,
-+                                    "rxtx DMA channel request failed\n");
-+                      return ret;
-+              }
-+      }
-+
-+      INIT_LIST_HEAD(&nandc->desc_list);
-+      INIT_LIST_HEAD(&nandc->host_list);
-+
-+      return 0;
-+unalloc:
-+      qcom_nandc_unalloc(nandc);
-+      return ret;
-+}
-+EXPORT_SYMBOL(qcom_nandc_alloc);
-+
-+MODULE_DESCRIPTION("QPIC controller common api");
-+MODULE_LICENSE("GPL");
-diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
-index 6da5d23d2c8b..dcb62fd19dd7 100644
---- a/drivers/mtd/nand/raw/qcom_nandc.c
-+++ b/drivers/mtd/nand/raw/qcom_nandc.c
-@@ -15,417 +15,7 @@
- #include <linux/of.h>
- #include <linux/platform_device.h>
- #include <linux/slab.h>
--
--/* NANDc reg offsets */
--#define       NAND_FLASH_CMD                  0x00
--#define       NAND_ADDR0                      0x04
--#define       NAND_ADDR1                      0x08
--#define       NAND_FLASH_CHIP_SELECT          0x0c
--#define       NAND_EXEC_CMD                   0x10
--#define       NAND_FLASH_STATUS               0x14
--#define       NAND_BUFFER_STATUS              0x18
--#define       NAND_DEV0_CFG0                  0x20
--#define       NAND_DEV0_CFG1                  0x24
--#define       NAND_DEV0_ECC_CFG               0x28
--#define       NAND_AUTO_STATUS_EN             0x2c
--#define       NAND_DEV1_CFG0                  0x30
--#define       NAND_DEV1_CFG1                  0x34
--#define       NAND_READ_ID                    0x40
--#define       NAND_READ_STATUS                0x44
--#define       NAND_DEV_CMD0                   0xa0
--#define       NAND_DEV_CMD1                   0xa4
--#define       NAND_DEV_CMD2                   0xa8
--#define       NAND_DEV_CMD_VLD                0xac
--#define       SFLASHC_BURST_CFG               0xe0
--#define       NAND_ERASED_CW_DETECT_CFG       0xe8
--#define       NAND_ERASED_CW_DETECT_STATUS    0xec
--#define       NAND_EBI2_ECC_BUF_CFG           0xf0
--#define       FLASH_BUF_ACC                   0x100
--
--#define       NAND_CTRL                       0xf00
--#define       NAND_VERSION                    0xf08
--#define       NAND_READ_LOCATION_0            0xf20
--#define       NAND_READ_LOCATION_1            0xf24
--#define       NAND_READ_LOCATION_2            0xf28
--#define       NAND_READ_LOCATION_3            0xf2c
--#define       NAND_READ_LOCATION_LAST_CW_0    0xf40
--#define       NAND_READ_LOCATION_LAST_CW_1    0xf44
--#define       NAND_READ_LOCATION_LAST_CW_2    0xf48
--#define       NAND_READ_LOCATION_LAST_CW_3    0xf4c
--
--/* dummy register offsets, used by qcom_write_reg_dma */
--#define       NAND_DEV_CMD1_RESTORE           0xdead
--#define       NAND_DEV_CMD_VLD_RESTORE        0xbeef
--
--/* NAND_FLASH_CMD bits */
--#define       PAGE_ACC                        BIT(4)
--#define       LAST_PAGE                       BIT(5)
--
--/* NAND_FLASH_CHIP_SELECT bits */
--#define       NAND_DEV_SEL                    0
--#define       DM_EN                           BIT(2)
--
--/* NAND_FLASH_STATUS bits */
--#define       FS_OP_ERR                       BIT(4)
--#define       FS_READY_BSY_N                  BIT(5)
--#define       FS_MPU_ERR                      BIT(8)
--#define       FS_DEVICE_STS_ERR               BIT(16)
--#define       FS_DEVICE_WP                    BIT(23)
--
--/* NAND_BUFFER_STATUS bits */
--#define       BS_UNCORRECTABLE_BIT            BIT(8)
--#define       BS_CORRECTABLE_ERR_MSK          0x1f
--
--/* NAND_DEVn_CFG0 bits */
--#define       DISABLE_STATUS_AFTER_WRITE      4
--#define       CW_PER_PAGE                     6
--#define       UD_SIZE_BYTES                   9
--#define       UD_SIZE_BYTES_MASK              GENMASK(18, 9)
--#define       ECC_PARITY_SIZE_BYTES_RS        19
--#define       SPARE_SIZE_BYTES                23
--#define       SPARE_SIZE_BYTES_MASK           GENMASK(26, 23)
--#define       NUM_ADDR_CYCLES                 27
--#define       STATUS_BFR_READ                 30
--#define       SET_RD_MODE_AFTER_STATUS        31
--
--/* NAND_DEVn_CFG0 bits */
--#define       DEV0_CFG1_ECC_DISABLE           0
--#define       WIDE_FLASH                      1
--#define       NAND_RECOVERY_CYCLES            2
--#define       CS_ACTIVE_BSY                   5
--#define       BAD_BLOCK_BYTE_NUM              6
--#define       BAD_BLOCK_IN_SPARE_AREA         16
--#define       WR_RD_BSY_GAP                   17
--#define       ENABLE_BCH_ECC                  27
--
--/* NAND_DEV0_ECC_CFG bits */
--#define       ECC_CFG_ECC_DISABLE             0
--#define       ECC_SW_RESET                    1
--#define       ECC_MODE                        4
--#define       ECC_PARITY_SIZE_BYTES_BCH       8
--#define       ECC_NUM_DATA_BYTES              16
--#define       ECC_NUM_DATA_BYTES_MASK         GENMASK(25, 16)
--#define       ECC_FORCE_CLK_OPEN              30
--
--/* NAND_DEV_CMD1 bits */
--#define       READ_ADDR                       0
--
--/* NAND_DEV_CMD_VLD bits */
--#define       READ_START_VLD                  BIT(0)
--#define       READ_STOP_VLD                   BIT(1)
--#define       WRITE_START_VLD                 BIT(2)
--#define       ERASE_START_VLD                 BIT(3)
--#define       SEQ_READ_START_VLD              BIT(4)
--
--/* NAND_EBI2_ECC_BUF_CFG bits */
--#define       NUM_STEPS                       0
--
--/* NAND_ERASED_CW_DETECT_CFG bits */
--#define       ERASED_CW_ECC_MASK              1
--#define       AUTO_DETECT_RES                 0
--#define       MASK_ECC                        BIT(ERASED_CW_ECC_MASK)
--#define       RESET_ERASED_DET                BIT(AUTO_DETECT_RES)
--#define       ACTIVE_ERASED_DET               (0 << AUTO_DETECT_RES)
--#define       CLR_ERASED_PAGE_DET             (RESET_ERASED_DET | MASK_ECC)
--#define       SET_ERASED_PAGE_DET             (ACTIVE_ERASED_DET | MASK_ECC)
--
--/* NAND_ERASED_CW_DETECT_STATUS bits */
--#define       PAGE_ALL_ERASED                 BIT(7)
--#define       CODEWORD_ALL_ERASED             BIT(6)
--#define       PAGE_ERASED                     BIT(5)
--#define       CODEWORD_ERASED                 BIT(4)
--#define       ERASED_PAGE                     (PAGE_ALL_ERASED | PAGE_ERASED)
--#define       ERASED_CW                       (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
--
--/* NAND_READ_LOCATION_n bits */
--#define READ_LOCATION_OFFSET          0
--#define READ_LOCATION_SIZE            16
--#define READ_LOCATION_LAST            31
--
--/* Version Mask */
--#define       NAND_VERSION_MAJOR_MASK         0xf0000000
--#define       NAND_VERSION_MAJOR_SHIFT        28
--#define       NAND_VERSION_MINOR_MASK         0x0fff0000
--#define       NAND_VERSION_MINOR_SHIFT        16
--
--/* NAND OP_CMDs */
--#define       OP_PAGE_READ                    0x2
--#define       OP_PAGE_READ_WITH_ECC           0x3
--#define       OP_PAGE_READ_WITH_ECC_SPARE     0x4
--#define       OP_PAGE_READ_ONFI_READ          0x5
--#define       OP_PROGRAM_PAGE                 0x6
--#define       OP_PAGE_PROGRAM_WITH_ECC        0x7
--#define       OP_PROGRAM_PAGE_SPARE           0x9
--#define       OP_BLOCK_ERASE                  0xa
--#define       OP_CHECK_STATUS                 0xc
--#define       OP_FETCH_ID                     0xb
--#define       OP_RESET_DEVICE                 0xd
--
--/* Default Value for NAND_DEV_CMD_VLD */
--#define NAND_DEV_CMD_VLD_VAL          (READ_START_VLD | WRITE_START_VLD | \
--                                       ERASE_START_VLD | SEQ_READ_START_VLD)
--
--/* NAND_CTRL bits */
--#define       BAM_MODE_EN                     BIT(0)
--
--/*
-- * the NAND controller performs reads/writes with ECC in 516 byte chunks.
-- * the driver calls the chunks 'step' or 'codeword' interchangeably
-- */
--#define       NANDC_STEP_SIZE                 512
--
--/*
-- * the largest page size we support is 8K, this will have 16 steps/codewords
-- * of 512 bytes each
-- */
--#define       MAX_NUM_STEPS                   (SZ_8K / NANDC_STEP_SIZE)
--
--/* we read at most 3 registers per codeword scan */
--#define       MAX_REG_RD                      (3 * MAX_NUM_STEPS)
--
--/* ECC modes supported by the controller */
--#define       ECC_NONE        BIT(0)
--#define       ECC_RS_4BIT     BIT(1)
--#define       ECC_BCH_4BIT    BIT(2)
--#define       ECC_BCH_8BIT    BIT(3)
--
--/*
-- * Returns the actual register address for all NAND_DEV_ registers
-- * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
-- */
--#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
--
--/* Returns the NAND register physical address */
--#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
--
--/* Returns the dma address for reg read buffer */
--#define reg_buf_dma_addr(chip, vaddr) \
--      ((chip)->reg_read_dma + \
--      ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
--
--#define QPIC_PER_CW_CMD_ELEMENTS      32
--#define QPIC_PER_CW_CMD_SGL           32
--#define QPIC_PER_CW_DATA_SGL          8
--
--#define QPIC_NAND_COMPLETION_TIMEOUT  msecs_to_jiffies(2000)
--
--/*
-- * Flags used in DMA descriptor preparation helper functions
-- * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
-- */
--/* Don't set the EOT in current tx BAM sgl */
--#define NAND_BAM_NO_EOT                       BIT(0)
--/* Set the NWD flag in current BAM sgl */
--#define NAND_BAM_NWD                  BIT(1)
--/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
--#define NAND_BAM_NEXT_SGL             BIT(2)
--/*
-- * Erased codeword status is being used two times in single transfer so this
-- * flag will determine the current value of erased codeword status register
-- */
--#define NAND_ERASED_CW_SET            BIT(4)
--
--#define MAX_ADDRESS_CYCLE             5
--
--/*
-- * This data type corresponds to the BAM transaction which will be used for all
-- * NAND transfers.
-- * @bam_ce - the array of BAM command elements
-- * @cmd_sgl - sgl for NAND BAM command pipe
-- * @data_sgl - sgl for NAND BAM consumer/producer pipe
-- * @last_data_desc - last DMA desc in data channel (tx/rx).
-- * @last_cmd_desc - last DMA desc in command channel.
-- * @txn_done - completion for NAND transfer.
-- * @bam_ce_pos - the index in bam_ce which is available for next sgl
-- * @bam_ce_start - the index in bam_ce which marks the start position ce
-- *               for current sgl. It will be used for size calculation
-- *               for current sgl
-- * @cmd_sgl_pos - current index in command sgl.
-- * @cmd_sgl_start - start index in command sgl.
-- * @tx_sgl_pos - current index in data sgl for tx.
-- * @tx_sgl_start - start index in data sgl for tx.
-- * @rx_sgl_pos - current index in data sgl for rx.
-- * @rx_sgl_start - start index in data sgl for rx.
-- */
--struct bam_transaction {
--      struct bam_cmd_element *bam_ce;
--      struct scatterlist *cmd_sgl;
--      struct scatterlist *data_sgl;
--      struct dma_async_tx_descriptor *last_data_desc;
--      struct dma_async_tx_descriptor *last_cmd_desc;
--      struct completion txn_done;
--      u32 bam_ce_pos;
--      u32 bam_ce_start;
--      u32 cmd_sgl_pos;
--      u32 cmd_sgl_start;
--      u32 tx_sgl_pos;
--      u32 tx_sgl_start;
--      u32 rx_sgl_pos;
--      u32 rx_sgl_start;
--};
--
--/*
-- * This data type corresponds to the nand dma descriptor
-- * @dma_desc - low level DMA engine descriptor
-- * @list - list for desc_info
-- *
-- * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
-- *          ADM
-- * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
-- * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
-- * @dir - DMA transfer direction
-- */
--struct desc_info {
--      struct dma_async_tx_descriptor *dma_desc;
--      struct list_head node;
--
--      union {
--              struct scatterlist adm_sgl;
--              struct {
--                      struct scatterlist *bam_sgl;
--                      int sgl_cnt;
--              };
--      };
--      enum dma_data_direction dir;
--};
--
--/*
-- * holds the current register values that we want to write. acts as a contiguous
-- * chunk of memory which we use to write the controller registers through DMA.
-- */
--struct nandc_regs {
--      __le32 cmd;
--      __le32 addr0;
--      __le32 addr1;
--      __le32 chip_sel;
--      __le32 exec;
--
--      __le32 cfg0;
--      __le32 cfg1;
--      __le32 ecc_bch_cfg;
--
--      __le32 clrflashstatus;
--      __le32 clrreadstatus;
--
--      __le32 cmd1;
--      __le32 vld;
--
--      __le32 orig_cmd1;
--      __le32 orig_vld;
--
--      __le32 ecc_buf_cfg;
--      __le32 read_location0;
--      __le32 read_location1;
--      __le32 read_location2;
--      __le32 read_location3;
--      __le32 read_location_last0;
--      __le32 read_location_last1;
--      __le32 read_location_last2;
--      __le32 read_location_last3;
--
--      __le32 erased_cw_detect_cfg_clr;
--      __le32 erased_cw_detect_cfg_set;
--};
--
--/*
-- * NAND controller data struct
-- *
-- * @dev:                      parent device
-- *
-- * @base:                     MMIO base
-- *
-- * @core_clk:                 controller clock
-- * @aon_clk:                  another controller clock
-- *
-- * @regs:                     a contiguous chunk of memory for DMA register
-- *                            writes. contains the register values to be
-- *                            written to controller
-- *
-- * @props:                    properties of current NAND controller,
-- *                            initialized via DT match data
-- *
-- * @controller:                       base controller structure
-- * @host_list:                        list containing all the chips attached to the
-- *                            controller
-- *
-- * @chan:                     dma channel
-- * @cmd_crci:                 ADM DMA CRCI for command flow control
-- * @data_crci:                        ADM DMA CRCI for data flow control
-- *
-- * @desc_list:                        DMA descriptor list (list of desc_infos)
-- *
-- * @data_buffer:              our local DMA buffer for page read/writes,
-- *                            used when we can't use the buffer provided
-- *                            by upper layers directly
-- * @reg_read_buf:             local buffer for reading back registers via DMA
-- *
-- * @base_phys:                        physical base address of controller registers
-- * @base_dma:                 dma base address of controller registers
-- * @reg_read_dma:             contains dma address for register read buffer
-- *
-- * @buf_size/count/start:     markers for chip->legacy.read_buf/write_buf
-- *                            functions
-- * @max_cwperpage:            maximum QPIC codewords required. calculated
-- *                            from all connected NAND devices pagesize
-- *
-- * @reg_read_pos:             marker for data read in reg_read_buf
-- *
-- * @cmd1/vld:                 some fixed controller register values
-- *
-- * @exec_opwrite:             flag to select correct number of code word
-- *                            while reading status
-- */
--struct qcom_nand_controller {
--      struct device *dev;
--
--      void __iomem *base;
--
--      struct clk *core_clk;
--      struct clk *aon_clk;
--
--      struct nandc_regs *regs;
--      struct bam_transaction *bam_txn;
--
--      const struct qcom_nandc_props *props;
--
--      struct nand_controller controller;
--      struct list_head host_list;
--
--      union {
--              /* will be used only by QPIC for BAM DMA */
--              struct {
--                      struct dma_chan *tx_chan;
--                      struct dma_chan *rx_chan;
--                      struct dma_chan *cmd_chan;
--              };
--
--              /* will be used only by EBI2 for ADM DMA */
--              struct {
--                      struct dma_chan *chan;
--                      unsigned int cmd_crci;
--                      unsigned int data_crci;
--              };
--      };
--
--      struct list_head desc_list;
--
--      u8              *data_buffer;
--      __le32          *reg_read_buf;
--
--      phys_addr_t base_phys;
--      dma_addr_t base_dma;
--      dma_addr_t reg_read_dma;
--
--      int             buf_size;
--      int             buf_count;
--      int             buf_start;
--      unsigned int    max_cwperpage;
--
--      int reg_read_pos;
--
--      u32 cmd1, vld;
--      bool exec_opwrite;
--};
-+#include <linux/mtd/nand-qpic-common.h>
- /*
-  * NAND special boot partitions
-@@ -530,97 +120,6 @@ struct qcom_nand_host {
-       bool bch_enabled;
- };
--/*
-- * This data type corresponds to the NAND controller properties which varies
-- * among different NAND controllers.
-- * @ecc_modes - ecc mode for NAND
-- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
-- * @supports_bam - whether NAND controller is using Bus Access Manager (BAM)
-- * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
-- * @qpic_version2 - flag to indicate QPIC IP version 2
-- * @use_codeword_fixup - whether NAND has different layout for boot partitions
-- */
--struct qcom_nandc_props {
--      u32 ecc_modes;
--      u32 dev_cmd_reg_start;
--      bool supports_bam;
--      bool nandc_part_of_qpic;
--      bool qpic_version2;
--      bool use_codeword_fixup;
--};
--
--/* Frees the BAM transaction memory */
--static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
--{
--      struct bam_transaction *bam_txn = nandc->bam_txn;
--
--      devm_kfree(nandc->dev, bam_txn);
--}
--
--/* Allocates and Initializes the BAM transaction */
--static struct bam_transaction *
--qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
--{
--      struct bam_transaction *bam_txn;
--      size_t bam_txn_size;
--      unsigned int num_cw = nandc->max_cwperpage;
--      void *bam_txn_buf;
--
--      bam_txn_size =
--              sizeof(*bam_txn) + num_cw *
--              ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
--              (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
--              (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
--
--      bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
--      if (!bam_txn_buf)
--              return NULL;
--
--      bam_txn = bam_txn_buf;
--      bam_txn_buf += sizeof(*bam_txn);
--
--      bam_txn->bam_ce = bam_txn_buf;
--      bam_txn_buf +=
--              sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
--
--      bam_txn->cmd_sgl = bam_txn_buf;
--      bam_txn_buf +=
--              sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
--
--      bam_txn->data_sgl = bam_txn_buf;
--
--      init_completion(&bam_txn->txn_done);
--
--      return bam_txn;
--}
--
--/* Clears the BAM transaction indexes */
--static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
--{
--      struct bam_transaction *bam_txn = nandc->bam_txn;
--
--      if (!nandc->props->supports_bam)
--              return;
--
--      memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
--      bam_txn->last_data_desc = NULL;
--
--      sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
--                    QPIC_PER_CW_CMD_SGL);
--      sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
--                    QPIC_PER_CW_DATA_SGL);
--
--      reinit_completion(&bam_txn->txn_done);
--}
--
--/* Callback for DMA descriptor completion */
--static void qcom_qpic_bam_dma_done(void *data)
--{
--      struct bam_transaction *bam_txn = data;
--
--      complete(&bam_txn->txn_done);
--}
--
- static struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
- {
-       return container_of(chip, struct qcom_nand_host, chip);
-@@ -629,8 +128,8 @@ static struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
- static struct qcom_nand_controller *
- get_qcom_nand_controller(struct nand_chip *chip)
- {
--      return container_of(chip->controller, struct qcom_nand_controller,
--                          controller);
-+      return (struct qcom_nand_controller *)
-+              ((u8 *)chip->controller - sizeof(struct qcom_nand_controller));
- }
- static u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
-@@ -644,23 +143,6 @@ static void nandc_write(struct qcom_nand_controller *nandc, int offset,
-       iowrite32(val, nandc->base + offset);
- }
--static void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
--{
--      if (!nandc->props->supports_bam)
--              return;
--
--      if (is_cpu)
--              dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
--                                      MAX_REG_RD *
--                                      sizeof(*nandc->reg_read_buf),
--                                      DMA_FROM_DEVICE);
--      else
--              dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
--                                         MAX_REG_RD *
--                                         sizeof(*nandc->reg_read_buf),
--                                         DMA_FROM_DEVICE);
--}
--
- /* Helper to check whether this is the last CW or not */
- static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
- {
-@@ -819,356 +301,6 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i
-                                  host->cw_data : host->cw_size, 1);
- }
--/*
-- * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
-- * for BAM. This descriptor will be added in the NAND DMA descriptor queue
-- * which will be submitted to DMA engine.
-- */
--static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
--                                     struct dma_chan *chan,
--                                     unsigned long flags)
--{
--      struct desc_info *desc;
--      struct scatterlist *sgl;
--      unsigned int sgl_cnt;
--      int ret;
--      struct bam_transaction *bam_txn = nandc->bam_txn;
--      enum dma_transfer_direction dir_eng;
--      struct dma_async_tx_descriptor *dma_desc;
--
--      desc = kzalloc(sizeof(*desc), GFP_KERNEL);
--      if (!desc)
--              return -ENOMEM;
--
--      if (chan == nandc->cmd_chan) {
--              sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
--              sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
--              bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
--              dir_eng = DMA_MEM_TO_DEV;
--              desc->dir = DMA_TO_DEVICE;
--      } else if (chan == nandc->tx_chan) {
--              sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
--              sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
--              bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
--              dir_eng = DMA_MEM_TO_DEV;
--              desc->dir = DMA_TO_DEVICE;
--      } else {
--              sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
--              sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
--              bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
--              dir_eng = DMA_DEV_TO_MEM;
--              desc->dir = DMA_FROM_DEVICE;
--      }
--
--      sg_mark_end(sgl + sgl_cnt - 1);
--      ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
--      if (ret == 0) {
--              dev_err(nandc->dev, "failure in mapping desc\n");
--              kfree(desc);
--              return -ENOMEM;
--      }
--
--      desc->sgl_cnt = sgl_cnt;
--      desc->bam_sgl = sgl;
--
--      dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
--                                         flags);
--
--      if (!dma_desc) {
--              dev_err(nandc->dev, "failure in prep desc\n");
--              dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
--              kfree(desc);
--              return -EINVAL;
--      }
--
--      desc->dma_desc = dma_desc;
--
--      /* update last data/command descriptor */
--      if (chan == nandc->cmd_chan)
--              bam_txn->last_cmd_desc = dma_desc;
--      else
--              bam_txn->last_data_desc = dma_desc;
--
--      list_add_tail(&desc->node, &nandc->desc_list);
--
--      return 0;
--}
--
--/*
-- * Prepares the command descriptor for BAM DMA which will be used for NAND
-- * register reads and writes. The command descriptor requires the command
-- * to be formed in command element type so this function uses the command
-- * element from bam transaction ce array and fills the same with required
-- * data. A single SGL can contain multiple command elements so
-- * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
-- * after the current command element.
-- */
--static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
--                                    int reg_off, const void *vaddr,
--                                    int size, unsigned int flags)
--{
--      int bam_ce_size;
--      int i, ret;
--      struct bam_cmd_element *bam_ce_buffer;
--      struct bam_transaction *bam_txn = nandc->bam_txn;
--
--      bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
--
--      /* fill the command desc */
--      for (i = 0; i < size; i++) {
--              if (read)
--                      bam_prep_ce(&bam_ce_buffer[i],
--                                  nandc_reg_phys(nandc, reg_off + 4 * i),
--                                  BAM_READ_COMMAND,
--                                  reg_buf_dma_addr(nandc,
--                                                   (__le32 *)vaddr + i));
--              else
--                      bam_prep_ce_le32(&bam_ce_buffer[i],
--                                       nandc_reg_phys(nandc, reg_off + 4 * i),
--                                       BAM_WRITE_COMMAND,
--                                       *((__le32 *)vaddr + i));
--      }
--
--      bam_txn->bam_ce_pos += size;
--
--      /* use the separate sgl after this command */
--      if (flags & NAND_BAM_NEXT_SGL) {
--              bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
--              bam_ce_size = (bam_txn->bam_ce_pos -
--                              bam_txn->bam_ce_start) *
--                              sizeof(struct bam_cmd_element);
--              sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
--                         bam_ce_buffer, bam_ce_size);
--              bam_txn->cmd_sgl_pos++;
--              bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
--
--              if (flags & NAND_BAM_NWD) {
--                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
--                                                        DMA_PREP_FENCE |
--                                                        DMA_PREP_CMD);
--                      if (ret)
--                              return ret;
--              }
--      }
--
--      return 0;
--}
--
--/*
-- * Prepares the data descriptor for BAM DMA which will be used for NAND
-- * data reads and writes.
-- */
--static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
--                                     const void *vaddr, int size, unsigned int flags)
--{
--      int ret;
--      struct bam_transaction *bam_txn = nandc->bam_txn;
--
--      if (read) {
--              sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
--                         vaddr, size);
--              bam_txn->rx_sgl_pos++;
--      } else {
--              sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
--                         vaddr, size);
--              bam_txn->tx_sgl_pos++;
--
--              /*
--               * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
--               * is not set, form the DMA descriptor
--               */
--              if (!(flags & NAND_BAM_NO_EOT)) {
--                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
--                                                        DMA_PREP_INTERRUPT);
--                      if (ret)
--                              return ret;
--              }
--      }
--
--      return 0;
--}
--
--static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
--                                int reg_off, const void *vaddr, int size,
--                                bool flow_control)
--{
--      struct desc_info *desc;
--      struct dma_async_tx_descriptor *dma_desc;
--      struct scatterlist *sgl;
--      struct dma_slave_config slave_conf;
--      struct qcom_adm_peripheral_config periph_conf = {};
--      enum dma_transfer_direction dir_eng;
--      int ret;
--
--      desc = kzalloc(sizeof(*desc), GFP_KERNEL);
--      if (!desc)
--              return -ENOMEM;
--
--      sgl = &desc->adm_sgl;
--
--      sg_init_one(sgl, vaddr, size);
--
--      if (read) {
--              dir_eng = DMA_DEV_TO_MEM;
--              desc->dir = DMA_FROM_DEVICE;
--      } else {
--              dir_eng = DMA_MEM_TO_DEV;
--              desc->dir = DMA_TO_DEVICE;
--      }
--
--      ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
--      if (ret == 0) {
--              ret = -ENOMEM;
--              goto err;
--      }
--
--      memset(&slave_conf, 0x00, sizeof(slave_conf));
--
--      slave_conf.device_fc = flow_control;
--      if (read) {
--              slave_conf.src_maxburst = 16;
--              slave_conf.src_addr = nandc->base_dma + reg_off;
--              if (nandc->data_crci) {
--                      periph_conf.crci = nandc->data_crci;
--                      slave_conf.peripheral_config = &periph_conf;
--                      slave_conf.peripheral_size = sizeof(periph_conf);
--              }
--      } else {
--              slave_conf.dst_maxburst = 16;
--              slave_conf.dst_addr = nandc->base_dma + reg_off;
--              if (nandc->cmd_crci) {
--                      periph_conf.crci = nandc->cmd_crci;
--                      slave_conf.peripheral_config = &periph_conf;
--                      slave_conf.peripheral_size = sizeof(periph_conf);
--              }
--      }
--
--      ret = dmaengine_slave_config(nandc->chan, &slave_conf);
--      if (ret) {
--              dev_err(nandc->dev, "failed to configure dma channel\n");
--              goto err;
--      }
--
--      dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
--      if (!dma_desc) {
--              dev_err(nandc->dev, "failed to prepare desc\n");
--              ret = -EINVAL;
--              goto err;
--      }
--
--      desc->dma_desc = dma_desc;
--
--      list_add_tail(&desc->node, &nandc->desc_list);
--
--      return 0;
--err:
--      kfree(desc);
--
--      return ret;
--}
--
--/*
-- * qcom_read_reg_dma: prepares a descriptor to read a given number of
-- *                    contiguous registers to the reg_read_buf pointer
-- *
-- * @first:            offset of the first register in the contiguous block
-- * @num_regs:         number of registers to read
-- * @flags:            flags to control DMA descriptor preparation
-- */
--static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
--                           int num_regs, unsigned int flags)
--{
--      bool flow_control = false;
--      void *vaddr;
--
--      vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
--      nandc->reg_read_pos += num_regs;
--
--      if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
--              first = dev_cmd_reg_addr(nandc, first);
--
--      if (nandc->props->supports_bam)
--              return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
--                                           num_regs, flags);
--
--      if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
--              flow_control = true;
--
--      return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
--                               num_regs * sizeof(u32), flow_control);
--}
--
--/*
-- * qcom_write_reg_dma:        prepares a descriptor to write a given number of
-- *                    contiguous registers
-- *
-- * @vaddr:            contiguous memory from where register value will
-- *                    be written
-- * @first:            offset of the first register in the contiguous block
-- * @num_regs:         number of registers to write
-- * @flags:            flags to control DMA descriptor preparation
-- */
--static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
--                            int first, int num_regs, unsigned int flags)
--{
--      bool flow_control = false;
--
--      if (first == NAND_EXEC_CMD)
--              flags |= NAND_BAM_NWD;
--
--      if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
--              first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
--
--      if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
--              first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
--
--      if (nandc->props->supports_bam)
--              return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
--                                           num_regs, flags);
--
--      if (first == NAND_FLASH_CMD)
--              flow_control = true;
--
--      return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
--                               num_regs * sizeof(u32), flow_control);
--}
--
--/*
-- * qcom_read_data_dma:        prepares a DMA descriptor to transfer data from the
-- *                    controller's internal buffer to the buffer 'vaddr'
-- *
-- * @reg_off:          offset within the controller's data buffer
-- * @vaddr:            virtual address of the buffer we want to write to
-- * @size:             DMA transaction size in bytes
-- * @flags:            flags to control DMA descriptor preparation
-- */
--static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
--                            const u8 *vaddr, int size, unsigned int flags)
--{
--      if (nandc->props->supports_bam)
--              return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
--
--      return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
--}
--
--/*
-- * qcom_write_data_dma:       prepares a DMA descriptor to transfer data from
-- *                    'vaddr' to the controller's internal buffer
-- *
-- * @reg_off:          offset within the controller's data buffer
-- * @vaddr:            virtual address of the buffer we want to read from
-- * @size:             DMA transaction size in bytes
-- * @flags:            flags to control DMA descriptor preparation
-- */
--static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
--                             const u8 *vaddr, int size, unsigned int flags)
--{
--      if (nandc->props->supports_bam)
--              return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
--
--      return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
--}
--
- /*
-  * Helper to prepare DMA descriptors for configuring registers
-  * before reading a NAND page.
-@@ -1262,83 +394,6 @@ static void config_nand_cw_write(struct nand_chip *chip)
-                          NAND_BAM_NEXT_SGL);
- }
--/* helpers to submit/free our list of dma descriptors */
--static int qcom_submit_descs(struct qcom_nand_controller *nandc)
--{
--      struct desc_info *desc, *n;
--      dma_cookie_t cookie = 0;
--      struct bam_transaction *bam_txn = nandc->bam_txn;
--      int ret = 0;
--
--      if (nandc->props->supports_bam) {
--              if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
--                      ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
--                      if (ret)
--                              goto err_unmap_free_desc;
--              }
--
--              if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
--                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
--                                                        DMA_PREP_INTERRUPT);
--                      if (ret)
--                              goto err_unmap_free_desc;
--              }
--
--              if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
--                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
--                                                        DMA_PREP_CMD);
--                      if (ret)
--                              goto err_unmap_free_desc;
--              }
--      }
--
--      list_for_each_entry(desc, &nandc->desc_list, node)
--              cookie = dmaengine_submit(desc->dma_desc);
--
--      if (nandc->props->supports_bam) {
--              bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
--              bam_txn->last_cmd_desc->callback_param = bam_txn;
--
--              dma_async_issue_pending(nandc->tx_chan);
--              dma_async_issue_pending(nandc->rx_chan);
--              dma_async_issue_pending(nandc->cmd_chan);
--
--              if (!wait_for_completion_timeout(&bam_txn->txn_done,
--                                               QPIC_NAND_COMPLETION_TIMEOUT))
--                      ret = -ETIMEDOUT;
--      } else {
--              if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
--                      ret = -ETIMEDOUT;
--      }
--
--err_unmap_free_desc:
--      /*
--       * Unmap the dma sg_list and free the desc allocated by both
--       * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
--       */
--      list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
--              list_del(&desc->node);
--
--              if (nandc->props->supports_bam)
--                      dma_unmap_sg(nandc->dev, desc->bam_sgl,
--                                   desc->sgl_cnt, desc->dir);
--              else
--                      dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
--                                   desc->dir);
--
--              kfree(desc);
--      }
--
--      return ret;
--}
--
--/* reset the register read buffer for next NAND operation */
--static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
--{
--      nandc->reg_read_pos = 0;
--      qcom_nandc_dev_to_mem(nandc, false);
--}
--
- /*
-  * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
-  * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
-@@ -2967,141 +2022,14 @@ static const struct nand_controller_ops qcom_nandc_ops = {
-       .exec_op = qcom_nand_exec_op,
- };
--static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
--{
--      if (nandc->props->supports_bam) {
--              if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
--                      dma_unmap_single(nandc->dev, nandc->reg_read_dma,
--                                       MAX_REG_RD *
--                                       sizeof(*nandc->reg_read_buf),
--                                       DMA_FROM_DEVICE);
--
--              if (nandc->tx_chan)
--                      dma_release_channel(nandc->tx_chan);
--
--              if (nandc->rx_chan)
--                      dma_release_channel(nandc->rx_chan);
--
--              if (nandc->cmd_chan)
--                      dma_release_channel(nandc->cmd_chan);
--      } else {
--              if (nandc->chan)
--                      dma_release_channel(nandc->chan);
--      }
--}
--
--static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
--{
--      int ret;
--
--      ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
--      if (ret) {
--              dev_err(nandc->dev, "failed to set DMA mask\n");
--              return ret;
--      }
--
--      /*
--       * we use the internal buffer for reading ONFI params, reading small
--       * data like ID and status, and preforming read-copy-write operations
--       * when writing to a codeword partially. 532 is the maximum possible
--       * size of a codeword for our nand controller
--       */
--      nandc->buf_size = 532;
--
--      nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
--      if (!nandc->data_buffer)
--              return -ENOMEM;
--
--      nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
--      if (!nandc->regs)
--              return -ENOMEM;
--
--      nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
--                                         sizeof(*nandc->reg_read_buf),
--                                         GFP_KERNEL);
--      if (!nandc->reg_read_buf)
--              return -ENOMEM;
--
--      if (nandc->props->supports_bam) {
--              nandc->reg_read_dma =
--                      dma_map_single(nandc->dev, nandc->reg_read_buf,
--                                     MAX_REG_RD *
--                                     sizeof(*nandc->reg_read_buf),
--                                     DMA_FROM_DEVICE);
--              if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
--                      dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
--                      return -EIO;
--              }
--
--              nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
--              if (IS_ERR(nandc->tx_chan)) {
--                      ret = PTR_ERR(nandc->tx_chan);
--                      nandc->tx_chan = NULL;
--                      dev_err_probe(nandc->dev, ret,
--                                    "tx DMA channel request failed\n");
--                      goto unalloc;
--              }
--
--              nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
--              if (IS_ERR(nandc->rx_chan)) {
--                      ret = PTR_ERR(nandc->rx_chan);
--                      nandc->rx_chan = NULL;
--                      dev_err_probe(nandc->dev, ret,
--                                    "rx DMA channel request failed\n");
--                      goto unalloc;
--              }
--
--              nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
--              if (IS_ERR(nandc->cmd_chan)) {
--                      ret = PTR_ERR(nandc->cmd_chan);
--                      nandc->cmd_chan = NULL;
--                      dev_err_probe(nandc->dev, ret,
--                                    "cmd DMA channel request failed\n");
--                      goto unalloc;
--              }
--
--              /*
--               * Initially allocate BAM transaction to read ONFI param page.
--               * After detecting all the devices, this BAM transaction will
--               * be freed and the next BAM transaction will be allocated with
--               * maximum codeword size
--               */
--              nandc->max_cwperpage = 1;
--              nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
--              if (!nandc->bam_txn) {
--                      dev_err(nandc->dev,
--                              "failed to allocate bam transaction\n");
--                      ret = -ENOMEM;
--                      goto unalloc;
--              }
--      } else {
--              nandc->chan = dma_request_chan(nandc->dev, "rxtx");
--              if (IS_ERR(nandc->chan)) {
--                      ret = PTR_ERR(nandc->chan);
--                      nandc->chan = NULL;
--                      dev_err_probe(nandc->dev, ret,
--                                    "rxtx DMA channel request failed\n");
--                      return ret;
--              }
--      }
--
--      INIT_LIST_HEAD(&nandc->desc_list);
--      INIT_LIST_HEAD(&nandc->host_list);
--
--      nand_controller_init(&nandc->controller);
--      nandc->controller.ops = &qcom_nandc_ops;
--
--      return 0;
--unalloc:
--      qcom_nandc_unalloc(nandc);
--      return ret;
--}
--
- /* one time setup of a few nand controller registers */
- static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
- {
-       u32 nand_ctrl;
-+      nand_controller_init(nandc->controller);
-+      nandc->controller->ops = &qcom_nandc_ops;
-+
-       /* kill onenand */
-       if (!nandc->props->nandc_part_of_qpic)
-               nandc_write(nandc, SFLASHC_BURST_CFG, 0);
-@@ -3240,7 +2168,7 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
-       chip->legacy.block_bad          = qcom_nandc_block_bad;
-       chip->legacy.block_markbad      = qcom_nandc_block_markbad;
--      chip->controller = &nandc->controller;
-+      chip->controller = nandc->controller;
-       chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
-                        NAND_SKIP_BBTSCAN;
-@@ -3323,17 +2251,21 @@ static int qcom_nandc_parse_dt(struct platform_device *pdev)
- static int qcom_nandc_probe(struct platform_device *pdev)
- {
-       struct qcom_nand_controller *nandc;
-+      struct nand_controller *controller;
-       const void *dev_data;
-       struct device *dev = &pdev->dev;
-       struct resource *res;
-       int ret;
--      nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
-+      nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc) + sizeof(*controller),
-+                           GFP_KERNEL);
-       if (!nandc)
-               return -ENOMEM;
-+      controller = (struct nand_controller *)&nandc[1];
-       platform_set_drvdata(pdev, nandc);
-       nandc->dev = dev;
-+      nandc->controller = controller;
-       dev_data = of_device_get_match_data(dev);
-       if (!dev_data) {
-diff --git a/include/linux/mtd/nand-qpic-common.h b/include/linux/mtd/nand-qpic-common.h
-new file mode 100644
-index 000000000000..425994429387
---- /dev/null
-+++ b/include/linux/mtd/nand-qpic-common.h
-@@ -0,0 +1,468 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+/*
-+ * QCOM QPIC common APIs header file
-+ *
-+ * Copyright (c) 2023 Qualcomm Inc.
-+ * Authors:   Md sadre Alam   <quic_mdalam@quicinc.com>
-+ *
-+ */
-+#ifndef __MTD_NAND_QPIC_COMMON_H__
-+#define __MTD_NAND_QPIC_COMMON_H__
-+
-+/* NANDc reg offsets */
-+#define       NAND_FLASH_CMD                  0x00
-+#define       NAND_ADDR0                      0x04
-+#define       NAND_ADDR1                      0x08
-+#define       NAND_FLASH_CHIP_SELECT          0x0c
-+#define       NAND_EXEC_CMD                   0x10
-+#define       NAND_FLASH_STATUS               0x14
-+#define       NAND_BUFFER_STATUS              0x18
-+#define       NAND_DEV0_CFG0                  0x20
-+#define       NAND_DEV0_CFG1                  0x24
-+#define       NAND_DEV0_ECC_CFG               0x28
-+#define       NAND_AUTO_STATUS_EN             0x2c
-+#define       NAND_DEV1_CFG0                  0x30
-+#define       NAND_DEV1_CFG1                  0x34
-+#define       NAND_READ_ID                    0x40
-+#define       NAND_READ_STATUS                0x44
-+#define       NAND_DEV_CMD0                   0xa0
-+#define       NAND_DEV_CMD1                   0xa4
-+#define       NAND_DEV_CMD2                   0xa8
-+#define       NAND_DEV_CMD_VLD                0xac
-+#define       SFLASHC_BURST_CFG               0xe0
-+#define       NAND_ERASED_CW_DETECT_CFG       0xe8
-+#define       NAND_ERASED_CW_DETECT_STATUS    0xec
-+#define       NAND_EBI2_ECC_BUF_CFG           0xf0
-+#define       FLASH_BUF_ACC                   0x100
-+
-+#define       NAND_CTRL                       0xf00
-+#define       NAND_VERSION                    0xf08
-+#define       NAND_READ_LOCATION_0            0xf20
-+#define       NAND_READ_LOCATION_1            0xf24
-+#define       NAND_READ_LOCATION_2            0xf28
-+#define       NAND_READ_LOCATION_3            0xf2c
-+#define       NAND_READ_LOCATION_LAST_CW_0    0xf40
-+#define       NAND_READ_LOCATION_LAST_CW_1    0xf44
-+#define       NAND_READ_LOCATION_LAST_CW_2    0xf48
-+#define       NAND_READ_LOCATION_LAST_CW_3    0xf4c
-+
-+/* dummy register offsets, used by qcom_write_reg_dma */
-+#define       NAND_DEV_CMD1_RESTORE           0xdead
-+#define       NAND_DEV_CMD_VLD_RESTORE        0xbeef
-+
-+/* NAND_FLASH_CMD bits */
-+#define       PAGE_ACC                        BIT(4)
-+#define       LAST_PAGE                       BIT(5)
-+
-+/* NAND_FLASH_CHIP_SELECT bits */
-+#define       NAND_DEV_SEL                    0
-+#define       DM_EN                           BIT(2)
-+
-+/* NAND_FLASH_STATUS bits */
-+#define       FS_OP_ERR                       BIT(4)
-+#define       FS_READY_BSY_N                  BIT(5)
-+#define       FS_MPU_ERR                      BIT(8)
-+#define       FS_DEVICE_STS_ERR               BIT(16)
-+#define       FS_DEVICE_WP                    BIT(23)
-+
-+/* NAND_BUFFER_STATUS bits */
-+#define       BS_UNCORRECTABLE_BIT            BIT(8)
-+#define       BS_CORRECTABLE_ERR_MSK          0x1f
-+
-+/* NAND_DEVn_CFG0 bits */
-+#define       DISABLE_STATUS_AFTER_WRITE      4
-+#define       CW_PER_PAGE                     6
-+#define       UD_SIZE_BYTES                   9
-+#define       UD_SIZE_BYTES_MASK              GENMASK(18, 9)
-+#define       ECC_PARITY_SIZE_BYTES_RS        19
-+#define       SPARE_SIZE_BYTES                23
-+#define       SPARE_SIZE_BYTES_MASK           GENMASK(26, 23)
-+#define       NUM_ADDR_CYCLES                 27
-+#define       STATUS_BFR_READ                 30
-+#define       SET_RD_MODE_AFTER_STATUS        31
-+
-+/* NAND_DEVn_CFG0 bits */
-+#define       DEV0_CFG1_ECC_DISABLE           0
-+#define       WIDE_FLASH                      1
-+#define       NAND_RECOVERY_CYCLES            2
-+#define       CS_ACTIVE_BSY                   5
-+#define       BAD_BLOCK_BYTE_NUM              6
-+#define       BAD_BLOCK_IN_SPARE_AREA         16
-+#define       WR_RD_BSY_GAP                   17
-+#define       ENABLE_BCH_ECC                  27
-+
-+/* NAND_DEV0_ECC_CFG bits */
-+#define       ECC_CFG_ECC_DISABLE             0
-+#define       ECC_SW_RESET                    1
-+#define       ECC_MODE                        4
-+#define       ECC_PARITY_SIZE_BYTES_BCH       8
-+#define       ECC_NUM_DATA_BYTES              16
-+#define       ECC_NUM_DATA_BYTES_MASK         GENMASK(25, 16)
-+#define       ECC_FORCE_CLK_OPEN              30
-+
-+/* NAND_DEV_CMD1 bits */
-+#define       READ_ADDR                       0
-+
-+/* NAND_DEV_CMD_VLD bits */
-+#define       READ_START_VLD                  BIT(0)
-+#define       READ_STOP_VLD                   BIT(1)
-+#define       WRITE_START_VLD                 BIT(2)
-+#define       ERASE_START_VLD                 BIT(3)
-+#define       SEQ_READ_START_VLD              BIT(4)
-+
-+/* NAND_EBI2_ECC_BUF_CFG bits */
-+#define       NUM_STEPS                       0
-+
-+/* NAND_ERASED_CW_DETECT_CFG bits */
-+#define       ERASED_CW_ECC_MASK              1
-+#define       AUTO_DETECT_RES                 0
-+#define       MASK_ECC                        BIT(ERASED_CW_ECC_MASK)
-+#define       RESET_ERASED_DET                BIT(AUTO_DETECT_RES)
-+#define       ACTIVE_ERASED_DET               (0 << AUTO_DETECT_RES)
-+#define       CLR_ERASED_PAGE_DET             (RESET_ERASED_DET | MASK_ECC)
-+#define       SET_ERASED_PAGE_DET             (ACTIVE_ERASED_DET | MASK_ECC)
-+
-+/* NAND_ERASED_CW_DETECT_STATUS bits */
-+#define       PAGE_ALL_ERASED                 BIT(7)
-+#define       CODEWORD_ALL_ERASED             BIT(6)
-+#define       PAGE_ERASED                     BIT(5)
-+#define       CODEWORD_ERASED                 BIT(4)
-+#define       ERASED_PAGE                     (PAGE_ALL_ERASED | PAGE_ERASED)
-+#define       ERASED_CW                       (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
-+
-+/* NAND_READ_LOCATION_n bits */
-+#define READ_LOCATION_OFFSET          0
-+#define READ_LOCATION_SIZE            16
-+#define READ_LOCATION_LAST            31
-+
-+/* Version Mask */
-+#define       NAND_VERSION_MAJOR_MASK         0xf0000000
-+#define       NAND_VERSION_MAJOR_SHIFT        28
-+#define       NAND_VERSION_MINOR_MASK         0x0fff0000
-+#define       NAND_VERSION_MINOR_SHIFT        16
-+
-+/* NAND OP_CMDs */
-+#define       OP_PAGE_READ                    0x2
-+#define       OP_PAGE_READ_WITH_ECC           0x3
-+#define       OP_PAGE_READ_WITH_ECC_SPARE     0x4
-+#define       OP_PAGE_READ_ONFI_READ          0x5
-+#define       OP_PROGRAM_PAGE                 0x6
-+#define       OP_PAGE_PROGRAM_WITH_ECC        0x7
-+#define       OP_PROGRAM_PAGE_SPARE           0x9
-+#define       OP_BLOCK_ERASE                  0xa
-+#define       OP_CHECK_STATUS                 0xc
-+#define       OP_FETCH_ID                     0xb
-+#define       OP_RESET_DEVICE                 0xd
-+
-+/* Default Value for NAND_DEV_CMD_VLD */
-+#define NAND_DEV_CMD_VLD_VAL          (READ_START_VLD | WRITE_START_VLD | \
-+                                       ERASE_START_VLD | SEQ_READ_START_VLD)
-+
-+/* NAND_CTRL bits */
-+#define       BAM_MODE_EN                     BIT(0)
-+
-+/*
-+ * the NAND controller performs reads/writes with ECC in 516 byte chunks.
-+ * the driver calls the chunks 'step' or 'codeword' interchangeably
-+ */
-+#define       NANDC_STEP_SIZE                 512
-+
-+/*
-+ * the largest page size we support is 8K, this will have 16 steps/codewords
-+ * of 512 bytes each
-+ */
-+#define       MAX_NUM_STEPS                   (SZ_8K / NANDC_STEP_SIZE)
-+
-+/* we read at most 3 registers per codeword scan */
-+#define       MAX_REG_RD                      (3 * MAX_NUM_STEPS)
-+
-+/* ECC modes supported by the controller */
-+#define       ECC_NONE        BIT(0)
-+#define       ECC_RS_4BIT     BIT(1)
-+#define       ECC_BCH_4BIT    BIT(2)
-+#define       ECC_BCH_8BIT    BIT(3)
-+
-+/*
-+ * Returns the actual register address for all NAND_DEV_ registers
-+ * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
-+ */
-+#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
-+
-+/* Returns the NAND register physical address */
-+#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
-+
-+/* Returns the dma address for reg read buffer */
-+#define reg_buf_dma_addr(chip, vaddr) \
-+      ((chip)->reg_read_dma + \
-+      ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
-+
-+#define QPIC_PER_CW_CMD_ELEMENTS      32
-+#define QPIC_PER_CW_CMD_SGL           32
-+#define QPIC_PER_CW_DATA_SGL          8
-+
-+#define QPIC_NAND_COMPLETION_TIMEOUT  msecs_to_jiffies(2000)
-+
-+/*
-+ * Flags used in DMA descriptor preparation helper functions
-+ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
-+ */
-+/* Don't set the EOT in current tx BAM sgl */
-+#define NAND_BAM_NO_EOT                       BIT(0)
-+/* Set the NWD flag in current BAM sgl */
-+#define NAND_BAM_NWD                  BIT(1)
-+/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
-+#define NAND_BAM_NEXT_SGL             BIT(2)
-+/*
-+ * Erased codeword status is being used two times in single transfer so this
-+ * flag will determine the current value of erased codeword status register
-+ */
-+#define NAND_ERASED_CW_SET            BIT(4)
-+
-+#define MAX_ADDRESS_CYCLE             5
-+
-+/*
-+ * This data type corresponds to the BAM transaction which will be used for all
-+ * NAND transfers.
-+ * @bam_ce - the array of BAM command elements
-+ * @cmd_sgl - sgl for NAND BAM command pipe
-+ * @data_sgl - sgl for NAND BAM consumer/producer pipe
-+ * @last_data_desc - last DMA desc in data channel (tx/rx).
-+ * @last_cmd_desc - last DMA desc in command channel.
-+ * @txn_done - completion for NAND transfer.
-+ * @bam_ce_pos - the index in bam_ce which is available for next sgl
-+ * @bam_ce_start - the index in bam_ce which marks the start position ce
-+ *               for current sgl. It will be used for size calculation
-+ *               for current sgl
-+ * @cmd_sgl_pos - current index in command sgl.
-+ * @cmd_sgl_start - start index in command sgl.
-+ * @tx_sgl_pos - current index in data sgl for tx.
-+ * @tx_sgl_start - start index in data sgl for tx.
-+ * @rx_sgl_pos - current index in data sgl for rx.
-+ * @rx_sgl_start - start index in data sgl for rx.
-+ */
-+struct bam_transaction {
-+      struct bam_cmd_element *bam_ce;
-+      struct scatterlist *cmd_sgl;
-+      struct scatterlist *data_sgl;
-+      struct dma_async_tx_descriptor *last_data_desc;
-+      struct dma_async_tx_descriptor *last_cmd_desc;
-+      struct completion txn_done;
-+      u32 bam_ce_pos;
-+      u32 bam_ce_start;
-+      u32 cmd_sgl_pos;
-+      u32 cmd_sgl_start;
-+      u32 tx_sgl_pos;
-+      u32 tx_sgl_start;
-+      u32 rx_sgl_pos;
-+      u32 rx_sgl_start;
-+};
-+
-+/*
-+ * This data type corresponds to the nand dma descriptor
-+ * @dma_desc - low level DMA engine descriptor
-+ * @list - list for desc_info
-+ *
-+ * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
-+ *          ADM
-+ * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
-+ * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
-+ * @dir - DMA transfer direction
-+ */
-+struct desc_info {
-+      struct dma_async_tx_descriptor *dma_desc;
-+      struct list_head node;
-+
-+      union {
-+              struct scatterlist adm_sgl;
-+              struct {
-+                      struct scatterlist *bam_sgl;
-+                      int sgl_cnt;
-+              };
-+      };
-+      enum dma_data_direction dir;
-+};
-+
-+/*
-+ * holds the current register values that we want to write. acts as a contiguous
-+ * chunk of memory which we use to write the controller registers through DMA.
-+ */
-+struct nandc_regs {
-+      __le32 cmd;
-+      __le32 addr0;
-+      __le32 addr1;
-+      __le32 chip_sel;
-+      __le32 exec;
-+
-+      __le32 cfg0;
-+      __le32 cfg1;
-+      __le32 ecc_bch_cfg;
-+
-+      __le32 clrflashstatus;
-+      __le32 clrreadstatus;
-+
-+      __le32 cmd1;
-+      __le32 vld;
-+
-+      __le32 orig_cmd1;
-+      __le32 orig_vld;
-+
-+      __le32 ecc_buf_cfg;
-+      __le32 read_location0;
-+      __le32 read_location1;
-+      __le32 read_location2;
-+      __le32 read_location3;
-+      __le32 read_location_last0;
-+      __le32 read_location_last1;
-+      __le32 read_location_last2;
-+      __le32 read_location_last3;
-+
-+      __le32 erased_cw_detect_cfg_clr;
-+      __le32 erased_cw_detect_cfg_set;
-+};
-+
-+/*
-+ * NAND controller data struct
-+ *
-+ * @dev:                      parent device
-+ *
-+ * @base:                     MMIO base
-+ *
-+ * @core_clk:                 controller clock
-+ * @aon_clk:                  another controller clock
-+ *
-+ * @regs:                     a contiguous chunk of memory for DMA register
-+ *                            writes. contains the register values to be
-+ *                            written to controller
-+ *
-+ * @props:                    properties of current NAND controller,
-+ *                            initialized via DT match data
-+ *
-+ * @controller:                       base controller structure
-+ * @host_list:                        list containing all the chips attached to the
-+ *                            controller
-+ *
-+ * @chan:                     dma channel
-+ * @cmd_crci:                 ADM DMA CRCI for command flow control
-+ * @data_crci:                        ADM DMA CRCI for data flow control
-+ *
-+ * @desc_list:                        DMA descriptor list (list of desc_infos)
-+ *
-+ * @data_buffer:              our local DMA buffer for page read/writes,
-+ *                            used when we can't use the buffer provided
-+ *                            by upper layers directly
-+ * @reg_read_buf:             local buffer for reading back registers via DMA
-+ *
-+ * @base_phys:                        physical base address of controller registers
-+ * @base_dma:                 dma base address of controller registers
-+ * @reg_read_dma:             contains dma address for register read buffer
-+ *
-+ * @buf_size/count/start:     markers for chip->legacy.read_buf/write_buf
-+ *                            functions
-+ * @max_cwperpage:            maximum QPIC codewords required. calculated
-+ *                            from all connected NAND devices pagesize
-+ *
-+ * @reg_read_pos:             marker for data read in reg_read_buf
-+ *
-+ * @cmd1/vld:                 some fixed controller register values
-+ *
-+ * @exec_opwrite:             flag to select correct number of code word
-+ *                            while reading status
-+ */
-+struct qcom_nand_controller {
-+      struct device *dev;
-+
-+      void __iomem *base;
-+
-+      struct clk *core_clk;
-+      struct clk *aon_clk;
-+
-+      struct nandc_regs *regs;
-+      struct bam_transaction *bam_txn;
-+
-+      const struct qcom_nandc_props *props;
-+
-+      struct nand_controller *controller;
-+      struct list_head host_list;
-+
-+      union {
-+              /* will be used only by QPIC for BAM DMA */
-+              struct {
-+                      struct dma_chan *tx_chan;
-+                      struct dma_chan *rx_chan;
-+                      struct dma_chan *cmd_chan;
-+              };
-+
-+              /* will be used only by EBI2 for ADM DMA */
-+              struct {
-+                      struct dma_chan *chan;
-+                      unsigned int cmd_crci;
-+                      unsigned int data_crci;
-+              };
-+      };
-+
-+      struct list_head desc_list;
-+
-+      u8              *data_buffer;
-+      __le32          *reg_read_buf;
-+
-+      phys_addr_t base_phys;
-+      dma_addr_t base_dma;
-+      dma_addr_t reg_read_dma;
-+
-+      int             buf_size;
-+      int             buf_count;
-+      int             buf_start;
-+      unsigned int    max_cwperpage;
-+
-+      int reg_read_pos;
-+
-+      u32 cmd1, vld;
-+      bool exec_opwrite;
-+};
-+
-+/*
-+ * This data type corresponds to the NAND controller properties which varies
-+ * among different NAND controllers.
-+ * @ecc_modes - ecc mode for NAND
-+ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
-+ * @supports_bam - whether NAND controller is using BAM
-+ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
-+ * @qpic_version2 - flag to indicate QPIC IP version 2
-+ * @use_codeword_fixup - whether NAND has different layout for boot partitions
-+ */
-+struct qcom_nandc_props {
-+      u32 ecc_modes;
-+      u32 dev_cmd_reg_start;
-+      bool supports_bam;
-+      bool nandc_part_of_qpic;
-+      bool qpic_version2;
-+      bool use_codeword_fixup;
-+};
-+
-+void qcom_free_bam_transaction(struct qcom_nand_controller *nandc);
-+struct bam_transaction *qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc);
-+void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc);
-+void qcom_qpic_bam_dma_done(void *data);
-+void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu);
-+int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
-+                              struct dma_chan *chan, unsigned long flags);
-+int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
-+                             int reg_off, const void *vaddr, int size, unsigned int flags);
-+int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
-+                              const void *vaddr, int size, unsigned int flags);
-+int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, int reg_off,
-+                         const void *vaddr, int size, bool flow_control);
-+int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first, int num_regs,
-+                    unsigned int flags);
-+int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, int first,
-+                     int num_regs, unsigned int flags);
-+int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
-+                     int size, unsigned int flags);
-+int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
-+                      int size, unsigned int flags);
-+int qcom_submit_descs(struct qcom_nand_controller *nandc);
-+void qcom_clear_read_regs(struct qcom_nand_controller *nandc);
-+void qcom_nandc_unalloc(struct qcom_nand_controller *nandc);
-+int qcom_nandc_alloc(struct qcom_nand_controller *nandc);
-+#endif
-+
--- 
-2.47.1
-
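The cleanup quoted above carves the generic struct nand_controller out of the same allocation as struct qcom_nand_controller (devm_kzalloc of both, with the controller placed immediately after the wrapper) and recovers the wrapper in get_qcom_nand_controller() with plain pointer arithmetic. Below is a minimal stand-alone C sketch of that layout, added here only for illustration: the fake_* struct names are simplified stand-ins, not the kernel definitions, and the sketch is not driver code.

/*
 * Sketch of the single-allocation layout the patched probe relies on:
 * the generic controller sits right after the wrapper, so the wrapper
 * can be recovered by subtracting sizeof(wrapper) from the controller
 * pointer, as the patched get_qcom_nand_controller() does.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_nand_controller {		/* stand-in for struct nand_controller */
	int dummy;
};

struct fake_qcom_nand_controller {	/* stand-in for struct qcom_nand_controller */
	const char *dev_name;
	struct fake_nand_controller *controller;
};

static struct fake_qcom_nand_controller *
get_wrapper(struct fake_nand_controller *controller)
{
	/* same pointer math as the patched get_qcom_nand_controller() */
	return (struct fake_qcom_nand_controller *)
		((char *)controller - sizeof(struct fake_qcom_nand_controller));
}

int main(void)
{
	/* one allocation holding wrapper + controller, as in the probe hunk */
	struct fake_qcom_nand_controller *nandc =
		calloc(1, sizeof(*nandc) + sizeof(struct fake_nand_controller));
	if (!nandc)
		return 1;

	nandc->controller = (struct fake_nand_controller *)&nandc[1];
	nandc->dev_name = "qcom-nandc";

	/* recovering the wrapper from the inner controller pointer works */
	printf("%s\n", get_wrapper(nandc->controller)->dev_name);
	free(nandc);
	return 0;
}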
diff --git a/target/linux/qualcommbe/patches-6.6/016-04-v6.14-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch b/target/linux/qualcommbe/patches-6.6/016-04-v6.14-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch
deleted file mode 100644 (file)
index 71f25e2..0000000
+++ /dev/null
@@ -1,204 +0,0 @@
-From 0c08080fd71cd5dd59643104b39d3c89d793ab3c Mon Sep 17 00:00:00 2001
-From: Md Sadre Alam <quic_mdalam@quicinc.com>
-Date: Wed, 20 Nov 2024 14:45:03 +0530
-Subject: [PATCH 4/4] mtd: rawnand: qcom: use FIELD_PREP and GENMASK
-
-Use the bitfield macro FIELD_PREP, and GENMASK to
-do the shift and mask in one go. This makes the code
-more readable.
-
-Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
-Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
-Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
----
- drivers/mtd/nand/raw/qcom_nandc.c    | 97 ++++++++++++++--------------
- include/linux/mtd/nand-qpic-common.h | 31 +++++----
- 2 files changed, 67 insertions(+), 61 deletions(-)
-
-diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
-index dcb62fd19dd7..d2d2aeee42a7 100644
---- a/drivers/mtd/nand/raw/qcom_nandc.c
-+++ b/drivers/mtd/nand/raw/qcom_nandc.c
-@@ -281,7 +281,7 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i
-                               (num_cw - 1) << CW_PER_PAGE);
-               cfg1 = cpu_to_le32(host->cfg1_raw);
--              ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
-+              ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
-       }
-       nandc->regs->cmd = cmd;
-@@ -1494,42 +1494,41 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
-       host->cw_size = host->cw_data + ecc->bytes;
-       bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
--      host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
--                              | host->cw_data << UD_SIZE_BYTES
--                              | 0 << DISABLE_STATUS_AFTER_WRITE
--                              | 5 << NUM_ADDR_CYCLES
--                              | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
--                              | 0 << STATUS_BFR_READ
--                              | 1 << SET_RD_MODE_AFTER_STATUS
--                              | host->spare_bytes << SPARE_SIZE_BYTES;
--
--      host->cfg1 = 7 << NAND_RECOVERY_CYCLES
--                              | 0 <<  CS_ACTIVE_BSY
--                              | bad_block_byte << BAD_BLOCK_BYTE_NUM
--                              | 0 << BAD_BLOCK_IN_SPARE_AREA
--                              | 2 << WR_RD_BSY_GAP
--                              | wide_bus << WIDE_FLASH
--                              | host->bch_enabled << ENABLE_BCH_ECC;
--
--      host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
--                              | host->cw_size << UD_SIZE_BYTES
--                              | 5 << NUM_ADDR_CYCLES
--                              | 0 << SPARE_SIZE_BYTES;
--
--      host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
--                              | 0 << CS_ACTIVE_BSY
--                              | 17 << BAD_BLOCK_BYTE_NUM
--                              | 1 << BAD_BLOCK_IN_SPARE_AREA
--                              | 2 << WR_RD_BSY_GAP
--                              | wide_bus << WIDE_FLASH
--                              | 1 << DEV0_CFG1_ECC_DISABLE;
--
--      host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
--                              | 0 << ECC_SW_RESET
--                              | host->cw_data << ECC_NUM_DATA_BYTES
--                              | 1 << ECC_FORCE_CLK_OPEN
--                              | ecc_mode << ECC_MODE
--                              | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
-+      host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
-+                   FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data) |
-+                   FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 0) |
-+                   FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
-+                   FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, host->ecc_bytes_hw) |
-+                   FIELD_PREP(STATUS_BFR_READ, 0) |
-+                   FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
-+                   FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes);
-+
-+      host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
-+                   FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
-+                   FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
-+                   FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
-+                   FIELD_PREP(WIDE_FLASH, wide_bus) |
-+                   FIELD_PREP(ENABLE_BCH_ECC, host->bch_enabled);
-+
-+      host->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
-+                       FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_size) |
-+                       FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
-+                       FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
-+
-+      host->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
-+                       FIELD_PREP(CS_ACTIVE_BSY, 0) |
-+                       FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
-+                       FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
-+                       FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
-+                       FIELD_PREP(WIDE_FLASH, wide_bus) |
-+                       FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
-+
-+      host->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !host->bch_enabled) |
-+                          FIELD_PREP(ECC_SW_RESET, 0) |
-+                          FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data) |
-+                          FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
-+                          FIELD_PREP(ECC_MODE_MASK, ecc_mode) |
-+                          FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw);
-       if (!nandc->props->qpic_version2)
-               host->ecc_buf_cfg = 0x203 << NUM_STEPS;
-@@ -1882,21 +1881,21 @@ static int qcom_param_page_type_exec(struct nand_chip *chip,  const struct nand_
-       nandc->regs->addr0 = 0;
-       nandc->regs->addr1 = 0;
--      nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE |
--                                      512 << UD_SIZE_BYTES |
--                                      5 << NUM_ADDR_CYCLES |
--                                      0 << SPARE_SIZE_BYTES);
-+      host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
-+                   FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
-+                   FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
-+                   FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
--      nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES |
--                                      0 << CS_ACTIVE_BSY |
--                                      17 << BAD_BLOCK_BYTE_NUM |
--                                      1 << BAD_BLOCK_IN_SPARE_AREA |
--                                      2 << WR_RD_BSY_GAP |
--                                      0 << WIDE_FLASH |
--                                      1 << DEV0_CFG1_ECC_DISABLE);
-+      host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
-+                   FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
-+                   FIELD_PREP(CS_ACTIVE_BSY, 0) |
-+                   FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
-+                   FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
-+                   FIELD_PREP(WIDE_FLASH, 0) |
-+                   FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
-       if (!nandc->props->qpic_version2)
--              nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
-+              nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
-       /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
-       if (!nandc->props->qpic_version2) {
-diff --git a/include/linux/mtd/nand-qpic-common.h b/include/linux/mtd/nand-qpic-common.h
-index 425994429387..e79c79775eb8 100644
---- a/include/linux/mtd/nand-qpic-common.h
-+++ b/include/linux/mtd/nand-qpic-common.h
-@@ -70,35 +70,42 @@
- #define       BS_CORRECTABLE_ERR_MSK          0x1f
- /* NAND_DEVn_CFG0 bits */
--#define       DISABLE_STATUS_AFTER_WRITE      4
-+#define       DISABLE_STATUS_AFTER_WRITE      BIT(4)
- #define       CW_PER_PAGE                     6
-+#define       CW_PER_PAGE_MASK                GENMASK(8, 6)
- #define       UD_SIZE_BYTES                   9
- #define       UD_SIZE_BYTES_MASK              GENMASK(18, 9)
--#define       ECC_PARITY_SIZE_BYTES_RS        19
-+#define       ECC_PARITY_SIZE_BYTES_RS        GENMASK(22, 19)
- #define       SPARE_SIZE_BYTES                23
- #define       SPARE_SIZE_BYTES_MASK           GENMASK(26, 23)
- #define       NUM_ADDR_CYCLES                 27
--#define       STATUS_BFR_READ                 30
--#define       SET_RD_MODE_AFTER_STATUS        31
-+#define       NUM_ADDR_CYCLES_MASK            GENMASK(29, 27)
-+#define       STATUS_BFR_READ                 BIT(30)
-+#define       SET_RD_MODE_AFTER_STATUS        BIT(31)
- /* NAND_DEVn_CFG0 bits */
--#define       DEV0_CFG1_ECC_DISABLE           0
--#define       WIDE_FLASH                      1
-+#define       DEV0_CFG1_ECC_DISABLE           BIT(0)
-+#define       WIDE_FLASH                      BIT(1)
- #define       NAND_RECOVERY_CYCLES            2
--#define       CS_ACTIVE_BSY                   5
-+#define       NAND_RECOVERY_CYCLES_MASK       GENMASK(4, 2)
-+#define       CS_ACTIVE_BSY                   BIT(5)
- #define       BAD_BLOCK_BYTE_NUM              6
--#define       BAD_BLOCK_IN_SPARE_AREA         16
-+#define       BAD_BLOCK_BYTE_NUM_MASK         GENMASK(15, 6)
-+#define       BAD_BLOCK_IN_SPARE_AREA         BIT(16)
- #define       WR_RD_BSY_GAP                   17
--#define       ENABLE_BCH_ECC                  27
-+#define       WR_RD_BSY_GAP_MASK              GENMASK(22, 17)
-+#define       ENABLE_BCH_ECC                  BIT(27)
- /* NAND_DEV0_ECC_CFG bits */
--#define       ECC_CFG_ECC_DISABLE             0
--#define       ECC_SW_RESET                    1
-+#define       ECC_CFG_ECC_DISABLE             BIT(0)
-+#define       ECC_SW_RESET                    BIT(1)
- #define       ECC_MODE                        4
-+#define       ECC_MODE_MASK                   GENMASK(5, 4)
- #define       ECC_PARITY_SIZE_BYTES_BCH       8
-+#define       ECC_PARITY_SIZE_BYTES_BCH_MASK  GENMASK(12, 8)
- #define       ECC_NUM_DATA_BYTES              16
- #define       ECC_NUM_DATA_BYTES_MASK         GENMASK(25, 16)
--#define       ECC_FORCE_CLK_OPEN              30
-+#define       ECC_FORCE_CLK_OPEN              BIT(30)
- /* NAND_DEV_CMD1 bits */
- #define       READ_ADDR                       0
--- 
-2.47.1
-
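The removed qualcommbe copy above (now carried under backport-6.6) converts the open-coded shift-and-mask register setup to FIELD_PREP()/GENMASK(). The stand-alone sketch below shows the equivalence; it uses simplified local re-implementations of the two macros purely for illustration (the kernel versions live in <linux/bitfield.h> and <linux/bits.h>), demonstrating that the new form yields the same register value as the old shifts.

/*
 * Stand-alone sketch: FIELD_PREP(GENMASK(hi, lo), val) produces the same
 * register value as the old open-coded "val << shift" style. The *_U32
 * macros below are simplified re-implementations, not the kernel ones.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK_U32(hi, lo) \
	(((~0u) >> (31 - (hi))) & ((~0u) << (lo)))

/* shift the value into the field described by the mask */
#define FIELD_PREP_U32(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define CW_PER_PAGE		6			/* old-style shift */
#define CW_PER_PAGE_MASK	GENMASK_U32(8, 6)	/* new-style mask  */

int main(void)
{
	unsigned int cwperpage = 4;	/* example: 4 codewords per page */

	uint32_t old_style = (cwperpage - 1) << CW_PER_PAGE;
	uint32_t new_style = FIELD_PREP_U32(CW_PER_PAGE_MASK, cwperpage - 1);

	assert(old_style == new_style);
	printf("cfg0 CW_PER_PAGE field: 0x%08x\n", new_style);
	return 0;
}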
diff --git a/target/linux/qualcommbe/patches-6.6/017-v6.,14-mtd-rawnand-qcom-Fix-build-issue-on-x86-architecture.patch b/target/linux/qualcommbe/patches-6.6/017-v6.,14-mtd-rawnand-qcom-Fix-build-issue-on-x86-architecture.patch
deleted file mode 100644 (file)
index 5c3fcc4..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-From b9371866799d67a80be0ea9e01bd41987db22f26 Mon Sep 17 00:00:00 2001
-From: Md Sadre Alam <quic_mdalam@quicinc.com>
-Date: Mon, 6 Jan 2025 18:45:58 +0530
-Subject: [PATCH] mtd: rawnand: qcom: Fix build issue on x86 architecture
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Fix a buffer overflow issue in qcom_clear_bam_transaction by using
-struct_group to group related fields and avoid FORTIFY_SOURCE warnings.
-
-On x86 architecture, the following error occurs due to warnings being
-treated as errors:
-
-In function ‘fortify_memset_chk’,
-    inlined from ‘qcom_clear_bam_transaction’ at
-drivers/mtd/nand/qpic_common.c:88:2:
-./include/linux/fortify-string.h:480:25: error: call to ‘__write_overflow_field’
-declared with attribute warning: detected write beyond size of field
-(1st parameter); maybe use struct_group()? [-Werror=attribute-warning]
-  480 |                         __write_overflow_field(p_size_field, size);
-      |                         ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-  LD [M]  drivers/mtd/nand/nandcore.o
-  CC [M]  drivers/w1/masters/mxc_w1.o
-cc1: all warnings being treated as errors
-
-This patch addresses the issue by grouping the related fields in
-struct bam_transaction using struct_group and updating the memset call
-accordingly.
-
-Fixes: 8c52932da5e6 ("mtd: rawnand: qcom: cleanup qcom_nandc driver")
-Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
-Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
----
- drivers/mtd/nand/qpic_common.c       |  2 +-
- include/linux/mtd/nand-qpic-common.h | 19 +++++++++++--------
- 2 files changed, 12 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c
-index 8abbb960a7ce..e0ed25b5afea 100644
---- a/drivers/mtd/nand/qpic_common.c
-+++ b/drivers/mtd/nand/qpic_common.c
-@@ -85,7 +85,7 @@ void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
-       if (!nandc->props->supports_bam)
-               return;
--      memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
-+      memset(&bam_txn->bam_positions, 0, sizeof(bam_txn->bam_positions));
-       bam_txn->last_data_desc = NULL;
-       sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
-diff --git a/include/linux/mtd/nand-qpic-common.h b/include/linux/mtd/nand-qpic-common.h
-index e79c79775eb8..4d9b736ff8b7 100644
---- a/include/linux/mtd/nand-qpic-common.h
-+++ b/include/linux/mtd/nand-qpic-common.h
-@@ -254,14 +254,17 @@ struct bam_transaction {
-       struct dma_async_tx_descriptor *last_data_desc;
-       struct dma_async_tx_descriptor *last_cmd_desc;
-       struct completion txn_done;
--      u32 bam_ce_pos;
--      u32 bam_ce_start;
--      u32 cmd_sgl_pos;
--      u32 cmd_sgl_start;
--      u32 tx_sgl_pos;
--      u32 tx_sgl_start;
--      u32 rx_sgl_pos;
--      u32 rx_sgl_start;
-+      struct_group(bam_positions,
-+              u32 bam_ce_pos;
-+              u32 bam_ce_start;
-+              u32 cmd_sgl_pos;
-+              u32 cmd_sgl_start;
-+              u32 tx_sgl_pos;
-+              u32 tx_sgl_start;
-+              u32 rx_sgl_pos;
-+              u32 rx_sgl_start;
-+
-+      );
- };
- /*
--- 
-2.47.1
-
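The build fix quoted above silences the FORTIFY_SOURCE overflow warning by wrapping the bam_* position counters in struct_group(), so qcom_clear_bam_transaction() can size a single memset() from the group. Below is a stand-alone sketch of the idea; STRUCT_GROUP() is a simplified local re-implementation (the kernel macro lives in <linux/stddef.h>) and fake_bam_transaction is an illustrative stand-in, not the driver structure.

/*
 * Sketch: grouping related members in a tagged union keeps them addressable
 * by their original names while letting one memset() clear exactly the group.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STRUCT_GROUP(NAME, MEMBERS)		\
	union {					\
		struct { MEMBERS };		\
		struct { MEMBERS } NAME;	\
	}

struct fake_bam_transaction {		/* stand-in, not the driver struct */
	void *last_data_desc;
	STRUCT_GROUP(bam_positions,
		uint32_t bam_ce_pos;
		uint32_t bam_ce_start;
		uint32_t cmd_sgl_pos;
		uint32_t cmd_sgl_start;
		uint32_t tx_sgl_pos;
		uint32_t tx_sgl_start;
		uint32_t rx_sgl_pos;
		uint32_t rx_sgl_start;
	);
};

int main(void)
{
	struct fake_bam_transaction txn;

	txn.bam_ce_pos = 5;		/* members stay addressable by name */
	txn.rx_sgl_start = 7;

	/* one memset sized from the group, as in qcom_clear_bam_transaction() */
	memset(&txn.bam_positions, 0, sizeof(txn.bam_positions));

	printf("%u %u\n", (unsigned)txn.bam_ce_pos, (unsigned)txn.rx_sgl_start);
	return 0;
}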