1 From f7704275957852cd4c4632d6da126979ef24b83a Mon Sep 17 00:00:00 2001
2 From: Weijie Gao <weijie.gao@mediatek.com>
3 Date: Tue, 2 Mar 2021 16:58:01 +0800
4 Subject: [PATCH 36/71] drivers: mtd: add support for MediaTek SPI-NAND flash
7 Add an MTD driver for the MediaTek SPI-NAND flash controller
9 This driver is written from scratch and uses the standard MTD framework
10 rather than the NAND framework, which applies only to raw parallel NAND
11 flashes. This keeps the driver's binary size small.
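
As a usage sketch (the "spi-nand0" name follows the MTD name prefix
registered by this driver; the calls are the standard U-Boot MTD API),
a consumer reads the flash through the generic MTD interface:

    struct mtd_info *mtd = get_mtd_device_nm("spi-nand0");
    size_t retlen;
    u8 buf[2048];

    if (!IS_ERR_OR_NULL(mtd))
        mtd_read(mtd, 0, sizeof(buf), &retlen, buf);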
13 Signed-off-by: Weijie Gao <weijie.gao@mediatek.com>
15 drivers/mtd/Kconfig | 2 +
16 drivers/mtd/Makefile | 2 +
17 drivers/mtd/mtk-snand/Kconfig | 21 +
18 drivers/mtd/mtk-snand/Makefile | 11 +
19 drivers/mtd/mtk-snand/mtk-snand-def.h | 271 ++++
20 drivers/mtd/mtk-snand/mtk-snand-ecc.c | 411 ++++++
21 drivers/mtd/mtk-snand/mtk-snand-ids.c | 515 +++++++
22 drivers/mtd/mtk-snand/mtk-snand-mtd.c | 535 +++++++
23 drivers/mtd/mtk-snand/mtk-snand-os.c | 39 +
24 drivers/mtd/mtk-snand/mtk-snand-os.h | 120 ++
25 drivers/mtd/mtk-snand/mtk-snand.c | 1933 +++++++++++++++++++++++++
26 drivers/mtd/mtk-snand/mtk-snand.h | 77 +
27 12 files changed, 3937 insertions(+)
28 create mode 100644 drivers/mtd/mtk-snand/Kconfig
29 create mode 100644 drivers/mtd/mtk-snand/Makefile
30 create mode 100644 drivers/mtd/mtk-snand/mtk-snand-def.h
31 create mode 100644 drivers/mtd/mtk-snand/mtk-snand-ecc.c
32 create mode 100644 drivers/mtd/mtk-snand/mtk-snand-ids.c
33 create mode 100644 drivers/mtd/mtk-snand/mtk-snand-mtd.c
34 create mode 100644 drivers/mtd/mtk-snand/mtk-snand-os.c
35 create mode 100644 drivers/mtd/mtk-snand/mtk-snand-os.h
36 create mode 100644 drivers/mtd/mtk-snand/mtk-snand.c
37 create mode 100644 drivers/mtd/mtk-snand/mtk-snand.h
39 --- a/drivers/mtd/Kconfig
40 +++ b/drivers/mtd/Kconfig
41 @@ -238,6 +238,8 @@ config SYS_MAX_FLASH_BANKS_DETECT
42 to reduce the effective number of flash bank, between 0 and
43 CONFIG_SYS_MAX_FLASH_BANKS
45 +source "drivers/mtd/mtk-snand/Kconfig"
47 source "drivers/mtd/nand/Kconfig"
49 config SYS_NAND_MAX_OOBFREE
50 --- a/drivers/mtd/Makefile
51 +++ b/drivers/mtd/Makefile
52 @@ -39,3 +39,5 @@ obj-$(CONFIG_$(SPL_TPL_)SPI_FLASH_SUPPOR
53 obj-$(CONFIG_SPL_UBI) += ubispl/
57 +obj-$(CONFIG_MTK_SPI_NAND) += mtk-snand/
59 +++ b/drivers/mtd/mtk-snand/Kconfig
62 +# Copyright (C) 2020 MediaTek Inc. All rights reserved.
63 +# Author: Weijie Gao <weijie.gao@mediatek.com>
65 +# SPDX-License-Identifier: GPL-2.0
69 + tristate "MediaTek SPI NAND flash controller driver"
70 + depends on !MTD_SPI_NAND
72 + This option enables access to SPI-NAND flashes through the
73 + MediaTek SPI NAND Flash Controller.
75 +config MTK_SPI_NAND_MTD
76 + tristate "MTD support for MediaTek SPI NAND flash controller"
78 + depends on MTK_SPI_NAND
80 + This option enables access to SPI-NAND flashes through the
81 + MTD interface of the MediaTek SPI NAND Flash Controller.
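+# Example (illustrative) defconfig fragment enabling both options:
+#   CONFIG_MTK_SPI_NAND=y
+#   CONFIG_MTK_SPI_NAND_MTD=y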
83 +++ b/drivers/mtd/mtk-snand/Makefile
86 +# Copyright (C) 2020 MediaTek Inc. All rights reserved.
87 +# Author: Weijie Gao <weijie.gao@mediatek.com>
89 +# SPDX-License-Identifier: GPL-2.0
92 +obj-y += mtk-snand.o mtk-snand-ecc.o mtk-snand-ids.o mtk-snand-os.o
93 +obj-$(CONFIG_MTK_SPI_NAND_MTD) += mtk-snand-mtd.o
95 +ccflags-y += -DPRIVATE_MTK_SNAND_HEADER
97 +++ b/drivers/mtd/mtk-snand/mtk-snand-def.h
99 +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
101 + * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
103 + * Author: Weijie Gao <weijie.gao@mediatek.com>
106 +#ifndef _MTK_SNAND_DEF_H_
107 +#define _MTK_SNAND_DEF_H_
109 +#include "mtk-snand-os.h"
111 +#ifdef PRIVATE_MTK_SNAND_HEADER
112 +#include "mtk-snand.h"
114 +#include <mtk-snand.h>
117 +struct mtk_snand_plat_dev;
119 +enum snand_flash_io {
129 +#define SPI_IO_1_1_1 BIT(SNAND_IO_1_1_1)
130 +#define SPI_IO_1_1_2 BIT(SNAND_IO_1_1_2)
131 +#define SPI_IO_1_2_2 BIT(SNAND_IO_1_2_2)
132 +#define SPI_IO_1_1_4 BIT(SNAND_IO_1_1_4)
133 +#define SPI_IO_1_4_4 BIT(SNAND_IO_1_4_4)
135 +struct snand_opcode {
140 +struct snand_io_cap {
142 + struct snand_opcode opcodes[__SNAND_IO_MAX];
145 +#define SNAND_OP(_io, _opcode, _dummy) [_io] = { .opcode = (_opcode), \
146 + .dummy = (_dummy) }
148 +#define SNAND_IO_CAP(_name, _caps, ...) \
149 + struct snand_io_cap _name = { .caps = (_caps), \
150 + .opcodes = { __VA_ARGS__ } }
152 +#define SNAND_MAX_ID_LEN 4
154 +enum snand_id_type {
156 + SNAND_ID_ADDR = SNAND_ID_DYMMY,
159 + __SNAND_ID_TYPE_MAX
163 + uint8_t type; /* enum snand_id_type */
165 + uint8_t id[SNAND_MAX_ID_LEN];
168 +#define SNAND_ID(_type, ...) \
169 + { .type = (_type), .id = { __VA_ARGS__ }, \
170 + .len = sizeof((uint8_t[]) { __VA_ARGS__ }) }
172 +struct snand_mem_org {
174 + uint16_t sparesize;
175 + uint16_t pages_per_block;
176 + uint16_t blocks_per_die;
177 + uint16_t planes_per_die;
181 +#define SNAND_MEMORG(_ps, _ss, _ppb, _bpd, _ppd, _nd) \
182 + { .pagesize = (_ps), .sparesize = (_ss), .pages_per_block = (_ppb), \
183 + .blocks_per_die = (_bpd), .planes_per_die = (_ppd), .ndies = (_nd) }
185 +typedef int (*snand_select_die_t)(struct mtk_snand *snf, uint32_t dieidx);
187 +struct snand_flash_info {
189 + struct snand_id id;
190 + const struct snand_mem_org memorg;
191 + const struct snand_io_cap *cap_rd;
192 + const struct snand_io_cap *cap_pl;
193 + snand_select_die_t select_die;
196 +#define SNAND_INFO(_model, _id, _memorg, _cap_rd, _cap_pl, ...) \
197 + { .model = (_model), .id = _id, .memorg = _memorg, \
198 + .cap_rd = (_cap_rd), .cap_pl = (_cap_pl), __VA_ARGS__ }
200 +const struct snand_flash_info *snand_flash_id_lookup(enum snand_id_type type,
201 + const uint8_t *id);
203 +struct mtk_snand_soc_data {
204 + uint16_t sector_size;
205 + uint16_t max_sectors;
207 + uint16_t fdm_ecc_size;
208 + uint16_t fifo_size;
211 + bool empty_page_check;
212 + uint32_t mastersta_mask;
214 + const uint8_t *spare_sizes;
215 + uint32_t num_spare_size;
217 + uint16_t latch_lat;
218 + uint16_t sample_delay;
225 +struct mtk_ecc_soc_data {
226 + const uint8_t *ecc_caps;
227 + uint32_t num_ecc_cap;
228 + const uint32_t *regs;
229 + uint16_t mode_shift;
230 + uint8_t errnum_bits;
231 + uint8_t errnum_shift;
235 + struct mtk_snand_plat_dev *pdev;
237 + void __iomem *nfi_base;
238 + void __iomem *ecc_base;
240 + enum mtk_snand_soc soc;
241 + const struct mtk_snand_soc_data *nfi_soc;
242 + const struct mtk_ecc_soc_data *ecc_soc;
243 + bool snfi_quad_spi;
249 + uint32_t erasesize;
250 + uint32_t writesize;
254 + snand_select_die_t select_die;
256 + uint8_t opcode_rfc;
262 + uint32_t writesize_mask;
263 + uint32_t writesize_shift;
264 + uint32_t erasesize_mask;
265 + uint32_t erasesize_shift;
267 + uint32_t die_shift;
269 + uint32_t spare_per_sector;
270 + uint32_t raw_sector_size;
271 + uint32_t ecc_strength;
272 + uint32_t ecc_steps;
273 + uint32_t ecc_bytes;
274 + uint32_t ecc_parity_bits;
276 + uint8_t *page_cache; /* Used by read/write page */
277 + uint8_t *buf_cache; /* Used by block bad/markbad & auto_oob */
278 + int *sect_bf; /* Used by ECC correction */
281 +enum mtk_snand_log_category {
287 + __SNAND_LOG_CAT_MAX
290 +int mtk_ecc_setup(struct mtk_snand *snf, void *fmdaddr, uint32_t max_ecc_bytes,
291 + uint32_t msg_size);
292 +int mtk_snand_ecc_encoder_start(struct mtk_snand *snf);
293 +void mtk_snand_ecc_encoder_stop(struct mtk_snand *snf);
294 +int mtk_snand_ecc_decoder_start(struct mtk_snand *snf);
295 +void mtk_snand_ecc_decoder_stop(struct mtk_snand *snf);
296 +int mtk_ecc_wait_decoder_done(struct mtk_snand *snf);
297 +int mtk_ecc_check_decode_error(struct mtk_snand *snf);
298 +int mtk_ecc_fixup_empty_sector(struct mtk_snand *snf, uint32_t sect);
300 +int mtk_snand_mac_io(struct mtk_snand *snf, const uint8_t *out, uint32_t outlen,
301 + uint8_t *in, uint32_t inlen);
302 +int mtk_snand_set_feature(struct mtk_snand *snf, uint32_t addr, uint32_t val);
304 +int mtk_snand_log(struct mtk_snand_plat_dev *pdev,
305 + enum mtk_snand_log_category cat, const char *fmt, ...);
307 +#define snand_log_nfi(pdev, fmt, ...) \
308 + mtk_snand_log(pdev, SNAND_LOG_NFI, fmt, ##__VA_ARGS__)
310 +#define snand_log_snfi(pdev, fmt, ...) \
311 + mtk_snand_log(pdev, SNAND_LOG_SNFI, fmt, ##__VA_ARGS__)
313 +#define snand_log_ecc(pdev, fmt, ...) \
314 + mtk_snand_log(pdev, SNAND_LOG_ECC, fmt, ##__VA_ARGS__)
316 +#define snand_log_chip(pdev, fmt, ...) \
317 + mtk_snand_log(pdev, SNAND_LOG_CHIP, fmt, ##__VA_ARGS__)
320 +static inline int mtk_snand_ffs64(uint64_t x)
325 + if (!(x & 0xffffffff))
326 + return ffs((uint32_t)(x >> 32)) + 32;
328 + return ffs((uint32_t)(x & 0xffffffff));
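+/* Example: mtk_snand_ffs64(0x100000000ULL) == 33, i.e. 64-bit ffs() semantics */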
331 +/* NFI dummy commands */
332 +#define NFI_CMD_DUMMY_READ 0x00
333 +#define NFI_CMD_DUMMY_WRITE 0x80
335 +/* SPI-NAND opcodes */
336 +#define SNAND_CMD_RESET 0xff
337 +#define SNAND_CMD_BLOCK_ERASE 0xd8
338 +#define SNAND_CMD_READ_FROM_CACHE_QUAD 0xeb
339 +#define SNAND_CMD_WINBOND_SELECT_DIE 0xc2
340 +#define SNAND_CMD_READ_FROM_CACHE_DUAL 0xbb
341 +#define SNAND_CMD_READID 0x9f
342 +#define SNAND_CMD_READ_FROM_CACHE_X4 0x6b
343 +#define SNAND_CMD_READ_FROM_CACHE_X2 0x3b
344 +#define SNAND_CMD_PROGRAM_LOAD_X4 0x32
345 +#define SNAND_CMD_SET_FEATURE 0x1f
346 +#define SNAND_CMD_READ_TO_CACHE 0x13
347 +#define SNAND_CMD_PROGRAM_EXECUTE 0x10
348 +#define SNAND_CMD_GET_FEATURE 0x0f
349 +#define SNAND_CMD_READ_FROM_CACHE 0x0b
350 +#define SNAND_CMD_WRITE_ENABLE 0x06
351 +#define SNAND_CMD_PROGRAM_LOAD 0x02
353 +/* SPI-NAND feature addresses */
354 +#define SNAND_FEATURE_MICRON_DIE_ADDR 0xd0
355 +#define SNAND_MICRON_DIE_SEL_1 BIT(6)
357 +#define SNAND_FEATURE_STATUS_ADDR 0xc0
358 +#define SNAND_STATUS_OIP BIT(0)
359 +#define SNAND_STATUS_WEL BIT(1)
360 +#define SNAND_STATUS_ERASE_FAIL BIT(2)
361 +#define SNAND_STATUS_PROGRAM_FAIL BIT(3)
363 +#define SNAND_FEATURE_CONFIG_ADDR 0xb0
364 +#define SNAND_FEATURE_QUAD_ENABLE BIT(0)
365 +#define SNAND_FEATURE_ECC_EN BIT(4)
367 +#define SNAND_FEATURE_PROTECT_ADDR 0xa0
369 +#endif /* _MTK_SNAND_DEF_H_ */
371 +++ b/drivers/mtd/mtk-snand/mtk-snand-ecc.c
373 +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
375 + * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
377 + * Author: Weijie Gao <weijie.gao@mediatek.com>
380 +#include "mtk-snand-def.h"
383 +#define ECC_ENCCON 0x000
384 +#define ENC_EN BIT(0)
386 +#define ECC_ENCCNFG 0x004
388 +#define ENC_BURST_EN BIT(8)
389 +#define ENC_TNUM_S 0
391 +#define ECC_ENCIDLE 0x00c
392 +#define ENC_IDLE BIT(0)
394 +#define ECC_DECCON 0x100
395 +#define DEC_EN BIT(0)
397 +#define ECC_DECCNFG 0x104
398 +#define DEC_EMPTY_EN BIT(31)
400 +#define DEC_CON_S 12
401 +#define DEC_CON_CORRECT 3
402 +#define DEC_BURST_EN BIT(8)
403 +#define DEC_TNUM_S 0
405 +#define ECC_DECIDLE 0x10c
406 +#define DEC_IDLE BIT(0)
408 +#define ECC_DECENUM0 0x114
409 +#define ECC_DECENUM(n) (ECC_DECENUM0 + (n) * 4)
411 +/* ECC_ENCIDLE & ECC_DECIDLE */
412 +#define ECC_IDLE BIT(0)
414 +/* ENC_MODE & DEC_MODE */
415 +#define ECC_MODE_NFI 1
417 +#define ECC_TIMEOUT 500000
419 +static const uint8_t mt7622_ecc_caps[] = { 4, 6, 8, 10, 12 };
421 +static const uint8_t mt7981_ecc_caps[] = {
422 + 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24
425 +static const uint8_t mt7986_ecc_caps[] = {
426 + 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24
429 +static const uint32_t mt7622_ecc_regs[] = {
430 + [ECC_DECDONE] = 0x11c,
433 +static const uint32_t mt7981_ecc_regs[] = {
434 + [ECC_DECDONE] = 0x124,
437 +static const uint32_t mt7986_ecc_regs[] = {
438 + [ECC_DECDONE] = 0x124,
441 +static const struct mtk_ecc_soc_data mtk_ecc_socs[__SNAND_SOC_MAX] = {
442 + [SNAND_SOC_MT7622] = {
443 + .ecc_caps = mt7622_ecc_caps,
444 + .num_ecc_cap = ARRAY_SIZE(mt7622_ecc_caps),
445 + .regs = mt7622_ecc_regs,
450 + [SNAND_SOC_MT7629] = {
451 + .ecc_caps = mt7622_ecc_caps,
452 + .num_ecc_cap = ARRAY_SIZE(mt7622_ecc_caps),
453 + .regs = mt7622_ecc_regs,
458 + [SNAND_SOC_MT7981] = {
459 + .ecc_caps = mt7981_ecc_caps,
460 + .num_ecc_cap = ARRAY_SIZE(mt7981_ecc_caps),
461 + .regs = mt7981_ecc_regs,
466 + [SNAND_SOC_MT7986] = {
467 + .ecc_caps = mt7986_ecc_caps,
468 + .num_ecc_cap = ARRAY_SIZE(mt7986_ecc_caps),
469 + .regs = mt7986_ecc_regs,
476 +static inline uint32_t ecc_read32(struct mtk_snand *snf, uint32_t reg)
478 + return readl(snf->ecc_base + reg);
481 +static inline void ecc_write32(struct mtk_snand *snf, uint32_t reg,
484 + writel(val, snf->ecc_base + reg);
487 +static inline void ecc_write16(struct mtk_snand *snf, uint32_t reg,
490 + writew(val, snf->ecc_base + reg);
493 +static int mtk_ecc_poll(struct mtk_snand *snf, uint32_t reg, uint32_t bits)
497 + return read16_poll_timeout(snf->ecc_base + reg, val, (val & bits), 0,
501 +static int mtk_ecc_wait_idle(struct mtk_snand *snf, uint32_t reg)
505 + ret = mtk_ecc_poll(snf, reg, ECC_IDLE);
507 + snand_log_ecc(snf->pdev, "ECC engine is busy\n");
514 +int mtk_ecc_setup(struct mtk_snand *snf, void *fmdaddr, uint32_t max_ecc_bytes,
517 + int i; uint32_t val, ecc_msg_bits, ecc_strength;
520 + snf->ecc_soc = &mtk_ecc_socs[snf->soc];
522 + snf->ecc_parity_bits = fls(1 + 8 * msg_size);
523 + ecc_strength = max_ecc_bytes * 8 / snf->ecc_parity_bits;
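+ /*
+  * Worked example (illustrative numbers, not from a specific SoC): a
+  * 512-byte message gives fls(1 + 8 * 512) = fls(4097) = 13 parity
+  * bits per correctable error; with e.g. 26 spare bytes available for
+  * parity, ecc_strength = 26 * 8 / 13 = 16.
+  */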
525 + for (i = snf->ecc_soc->num_ecc_cap - 1; i >= 0; i--) {
526 + if (snf->ecc_soc->ecc_caps[i] <= ecc_strength)
530 + if (unlikely(i < 0)) {
531 + snand_log_ecc(snf->pdev, "Page size %u+%u is not supported\n",
532 + snf->writesize, snf->oobsize);
536 + snf->ecc_strength = snf->ecc_soc->ecc_caps[i];
537 + snf->ecc_bytes = DIV_ROUND_UP(snf->ecc_strength * snf->ecc_parity_bits,
540 + /* Encoder config */
541 + ecc_write16(snf, ECC_ENCCON, 0);
542 + ret = mtk_ecc_wait_idle(snf, ECC_ENCIDLE);
546 + ecc_msg_bits = msg_size * 8;
547 + val = (ecc_msg_bits << ENC_MS_S) |
548 + (ECC_MODE_NFI << snf->ecc_soc->mode_shift) | i;
549 + ecc_write32(snf, ECC_ENCCNFG, val);
551 + /* Decoder config */
552 + ecc_write16(snf, ECC_DECCON, 0);
553 + ret = mtk_ecc_wait_idle(snf, ECC_DECIDLE);
557 + ecc_msg_bits += snf->ecc_strength * snf->ecc_parity_bits;
558 + val = DEC_EMPTY_EN | (ecc_msg_bits << DEC_CS_S) |
559 + (DEC_CON_CORRECT << DEC_CON_S) |
560 + (ECC_MODE_NFI << snf->ecc_soc->mode_shift) | i;
561 + ecc_write32(snf, ECC_DECCNFG, val);
566 +int mtk_snand_ecc_encoder_start(struct mtk_snand *snf)
570 + ret = mtk_ecc_wait_idle(snf, ECC_ENCIDLE);
572 + ecc_write16(snf, ECC_ENCCON, 0);
573 + mtk_ecc_wait_idle(snf, ECC_ENCIDLE);
576 + ecc_write16(snf, ECC_ENCCON, ENC_EN);
581 +void mtk_snand_ecc_encoder_stop(struct mtk_snand *snf)
583 + mtk_ecc_wait_idle(snf, ECC_ENCIDLE);
584 + ecc_write16(snf, ECC_ENCCON, 0);
587 +int mtk_snand_ecc_decoder_start(struct mtk_snand *snf)
591 + ret = mtk_ecc_wait_idle(snf, ECC_DECIDLE);
593 + ecc_write16(snf, ECC_DECCON, 0);
594 + mtk_ecc_wait_idle(snf, ECC_DECIDLE);
597 + ecc_write16(snf, ECC_DECCON, DEC_EN);
602 +void mtk_snand_ecc_decoder_stop(struct mtk_snand *snf)
604 + mtk_ecc_wait_idle(snf, ECC_DECIDLE);
605 + ecc_write16(snf, ECC_DECCON, 0);
608 +int mtk_ecc_wait_decoder_done(struct mtk_snand *snf)
610 + uint16_t val, step_mask = (1 << snf->ecc_steps) - 1;
611 + uint32_t reg = snf->ecc_soc->regs[ECC_DECDONE];
614 + ret = read16_poll_timeout(snf->ecc_base + reg, val,
615 + (val & step_mask) == step_mask, 0,
618 + snand_log_ecc(snf->pdev, "ECC decoder is busy\n");
623 +int mtk_ecc_check_decode_error(struct mtk_snand *snf)
625 + uint32_t i, regi, fi, errnum;
626 + uint32_t errnum_shift = snf->ecc_soc->errnum_shift;
627 + uint32_t errnum_mask = (1 << snf->ecc_soc->errnum_bits) - 1;
630 + for (i = 0; i < snf->ecc_steps; i++) {
634 + errnum = ecc_read32(snf, ECC_DECENUM(regi));
635 + errnum = (errnum >> (fi * errnum_shift)) & errnum_mask;
637 + if (errnum <= snf->ecc_strength) {
638 + snf->sect_bf[i] = errnum;
640 + snf->sect_bf[i] = -1;
648 +static int mtk_ecc_check_buf_bitflips(struct mtk_snand *snf, const void *buf,
649 + size_t len, uint32_t bitflips)
651 + const uint8_t *buf8 = buf;
652 + const uint32_t *buf32;
653 + uint32_t d, weight;
655 + while (len && ((uintptr_t)buf8) % sizeof(uint32_t)) {
656 + weight = hweight8(*buf8);
657 + bitflips += BITS_PER_BYTE - weight;
661 + if (bitflips > snf->ecc_strength)
665 + buf32 = (const uint32_t *)buf8;
666 + while (len >= sizeof(uint32_t)) {
670 + weight = hweight32(d);
671 + bitflips += sizeof(uint32_t) * BITS_PER_BYTE - weight;
675 + len -= sizeof(uint32_t);
677 + if (bitflips > snf->ecc_strength)
681 + buf8 = (const uint8_t *)buf32;
683 + weight = hweight8(*buf8);
684 + bitflips += BITS_PER_BYTE - weight;
688 + if (bitflips > snf->ecc_strength)
695 +static int mtk_ecc_check_parity_bitflips(struct mtk_snand *snf, const void *buf,
696 + uint32_t bits, uint32_t bitflips)
705 + rc = mtk_ecc_check_buf_bitflips(snf, buf, len, bitflips);
706 + if (!bits || rc < 0)
711 + /* We want a precise count of bits */
712 + b = ((const uint8_t *)buf)[len];
713 + for (i = 0; i < bits; i++) {
718 + if (bitflips > snf->ecc_strength)
724 +static void mtk_ecc_reset_parity(void *buf, uint32_t bits)
731 + memset(buf, 0xff, len);
733 + /* Only reset bits protected by ECC to 1 */
735 + ((uint8_t *)buf)[len] |= GENMASK(bits - 1, 0);
738 +int mtk_ecc_fixup_empty_sector(struct mtk_snand *snf, uint32_t sect)
740 + uint32_t ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
741 + uint8_t *oob = snf->page_cache + snf->writesize;
742 + uint8_t *data_ptr, *fdm_ptr, *ecc_ptr;
743 + int bitflips = 0, ecc_bits, parity_bits;
745 + parity_bits = fls(snf->nfi_soc->sector_size * 8);
746 + ecc_bits = snf->ecc_strength * parity_bits;
748 + data_ptr = snf->page_cache + sect * snf->nfi_soc->sector_size;
749 + fdm_ptr = oob + sect * snf->nfi_soc->fdm_size;
750 + ecc_ptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size +
754 + * Check whether DATA + FDM + ECC of a sector contains correctable
757 + bitflips = mtk_ecc_check_buf_bitflips(snf, data_ptr,
758 + snf->nfi_soc->sector_size,
763 + bitflips = mtk_ecc_check_buf_bitflips(snf, fdm_ptr,
764 + snf->nfi_soc->fdm_ecc_size,
769 + bitflips = mtk_ecc_check_parity_bitflips(snf, ecc_ptr, ecc_bits,
777 + /* Reset the data of this sector to 0xff */
778 + memset(data_ptr, 0xff, snf->nfi_soc->sector_size);
779 + memset(fdm_ptr, 0xff, snf->nfi_soc->fdm_ecc_size);
780 + mtk_ecc_reset_parity(ecc_ptr, ecc_bits);
785 +++ b/drivers/mtd/mtk-snand/mtk-snand-ids.c
787 +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
789 + * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
791 + * Author: Weijie Gao <weijie.gao@mediatek.com>
794 +#include "mtk-snand-def.h"
796 +static int mtk_snand_winbond_select_die(struct mtk_snand *snf, uint32_t dieidx);
797 +static int mtk_snand_micron_select_die(struct mtk_snand *snf, uint32_t dieidx);
799 +#define SNAND_MEMORG_512M_2K_64 SNAND_MEMORG(2048, 64, 64, 512, 1, 1)
800 +#define SNAND_MEMORG_1G_2K_64 SNAND_MEMORG(2048, 64, 64, 1024, 1, 1)
801 +#define SNAND_MEMORG_2G_2K_64 SNAND_MEMORG(2048, 64, 64, 2048, 1, 1)
802 +#define SNAND_MEMORG_2G_2K_120 SNAND_MEMORG(2048, 120, 64, 2048, 1, 1)
803 +#define SNAND_MEMORG_4G_2K_64 SNAND_MEMORG(2048, 64, 64, 4096, 1, 1)
804 +#define SNAND_MEMORG_1G_2K_120 SNAND_MEMORG(2048, 120, 64, 1024, 1, 1)
805 +#define SNAND_MEMORG_1G_2K_128 SNAND_MEMORG(2048, 128, 64, 1024, 1, 1)
806 +#define SNAND_MEMORG_2G_2K_128 SNAND_MEMORG(2048, 128, 64, 2048, 1, 1)
807 +#define SNAND_MEMORG_4G_2K_128 SNAND_MEMORG(2048, 128, 64, 4096, 1, 1)
808 +#define SNAND_MEMORG_4G_4K_240 SNAND_MEMORG(4096, 240, 64, 2048, 1, 1)
809 +#define SNAND_MEMORG_4G_4K_256 SNAND_MEMORG(4096, 256, 64, 2048, 1, 1)
810 +#define SNAND_MEMORG_8G_4K_256 SNAND_MEMORG(4096, 256, 64, 4096, 1, 1)
811 +#define SNAND_MEMORG_2G_2K_64_2P SNAND_MEMORG(2048, 64, 64, 2048, 2, 1)
812 +#define SNAND_MEMORG_2G_2K_64_2D SNAND_MEMORG(2048, 64, 64, 1024, 1, 2)
813 +#define SNAND_MEMORG_2G_2K_128_2P SNAND_MEMORG(2048, 128, 64, 2048, 2, 1)
814 +#define SNAND_MEMORG_4G_2K_64_2P SNAND_MEMORG(2048, 64, 64, 4096, 2, 1)
815 +#define SNAND_MEMORG_4G_2K_128_2P_2D SNAND_MEMORG(2048, 128, 64, 2048, 2, 2)
816 +#define SNAND_MEMORG_8G_4K_256_2D SNAND_MEMORG(4096, 256, 64, 2048, 1, 2)
818 +static const SNAND_IO_CAP(snand_cap_read_from_cache_quad,
819 + SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2 | SPI_IO_1_1_4 |
821 + SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_READ_FROM_CACHE, 8),
822 + SNAND_OP(SNAND_IO_1_1_2, SNAND_CMD_READ_FROM_CACHE_X2, 8),
823 + SNAND_OP(SNAND_IO_1_2_2, SNAND_CMD_READ_FROM_CACHE_DUAL, 4),
824 + SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_READ_FROM_CACHE_X4, 8),
825 + SNAND_OP(SNAND_IO_1_4_4, SNAND_CMD_READ_FROM_CACHE_QUAD, 4));
827 +static const SNAND_IO_CAP(snand_cap_read_from_cache_quad_q2d,
828 + SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2 | SPI_IO_1_1_4 |
830 + SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_READ_FROM_CACHE, 8),
831 + SNAND_OP(SNAND_IO_1_1_2, SNAND_CMD_READ_FROM_CACHE_X2, 8),
832 + SNAND_OP(SNAND_IO_1_2_2, SNAND_CMD_READ_FROM_CACHE_DUAL, 4),
833 + SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_READ_FROM_CACHE_X4, 8),
834 + SNAND_OP(SNAND_IO_1_4_4, SNAND_CMD_READ_FROM_CACHE_QUAD, 2));
836 +static const SNAND_IO_CAP(snand_cap_read_from_cache_quad_a8d,
837 + SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2 | SPI_IO_1_1_4 |
839 + SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_READ_FROM_CACHE, 8),
840 + SNAND_OP(SNAND_IO_1_1_2, SNAND_CMD_READ_FROM_CACHE_X2, 8),
841 + SNAND_OP(SNAND_IO_1_2_2, SNAND_CMD_READ_FROM_CACHE_DUAL, 8),
842 + SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_READ_FROM_CACHE_X4, 8),
843 + SNAND_OP(SNAND_IO_1_4_4, SNAND_CMD_READ_FROM_CACHE_QUAD, 8));
845 +static const SNAND_IO_CAP(snand_cap_read_from_cache_x4,
846 + SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_1_4,
847 + SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_READ_FROM_CACHE, 8),
848 + SNAND_OP(SNAND_IO_1_1_2, SNAND_CMD_READ_FROM_CACHE_X2, 8),
849 + SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_READ_FROM_CACHE_X4, 8));
851 +static const SNAND_IO_CAP(snand_cap_read_from_cache_x4_only,
852 + SPI_IO_1_1_1 | SPI_IO_1_1_4,
853 + SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_READ_FROM_CACHE, 8),
854 + SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_READ_FROM_CACHE_X4, 8));
856 +static const SNAND_IO_CAP(snand_cap_program_load_x1,
858 + SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_PROGRAM_LOAD, 0));
860 +static const SNAND_IO_CAP(snand_cap_program_load_x4,
861 + SPI_IO_1_1_1 | SPI_IO_1_1_4,
862 + SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_PROGRAM_LOAD, 0),
863 + SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_PROGRAM_LOAD_X4, 0));
865 +static const struct snand_flash_info snand_flash_ids[] = {
866 + SNAND_INFO("W25N512GV", SNAND_ID(SNAND_ID_DYMMY, 0xef, 0xaa, 0x20),
867 + SNAND_MEMORG_512M_2K_64,
868 + &snand_cap_read_from_cache_quad,
869 + &snand_cap_program_load_x4),
870 + SNAND_INFO("W25N01GV", SNAND_ID(SNAND_ID_DYMMY, 0xef, 0xaa, 0x21),
871 + SNAND_MEMORG_1G_2K_64,
872 + &snand_cap_read_from_cache_quad,
873 + &snand_cap_program_load_x4),
874 + SNAND_INFO("W25M02GV", SNAND_ID(SNAND_ID_DYMMY, 0xef, 0xab, 0x21),
875 + SNAND_MEMORG_2G_2K_64_2D,
876 + &snand_cap_read_from_cache_quad,
877 + &snand_cap_program_load_x4,
878 + mtk_snand_winbond_select_die),
879 + SNAND_INFO("W25N01KV", SNAND_ID(SNAND_ID_DYMMY, 0xef, 0xae, 0x21),
880 + SNAND_MEMORG_1G_2K_64,
881 + &snand_cap_read_from_cache_quad,
882 + &snand_cap_program_load_x4),
883 + SNAND_INFO("W25N02KV", SNAND_ID(SNAND_ID_DYMMY, 0xef, 0xaa, 0x22),
884 + SNAND_MEMORG_2G_2K_128,
885 + &snand_cap_read_from_cache_quad,
886 + &snand_cap_program_load_x4),
888 + SNAND_INFO("GD5F1GQ4UAWxx", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0x10),
889 + SNAND_MEMORG_1G_2K_64,
890 + &snand_cap_read_from_cache_quad_q2d,
891 + &snand_cap_program_load_x4),
892 + SNAND_INFO("GD5F1GQ4UExIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xd1),
893 + SNAND_MEMORG_1G_2K_128,
894 + &snand_cap_read_from_cache_quad_q2d,
895 + &snand_cap_program_load_x4),
896 + SNAND_INFO("GD5F1GQ4UExxH", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xd9),
897 + SNAND_MEMORG_1G_2K_64,
898 + &snand_cap_read_from_cache_quad_q2d,
899 + &snand_cap_program_load_x4),
900 + SNAND_INFO("GD5F1GQ4xAYIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xf1),
901 + SNAND_MEMORG_1G_2K_64,
902 + &snand_cap_read_from_cache_quad_q2d,
903 + &snand_cap_program_load_x4),
904 + SNAND_INFO("GD5F1GQ5UExxG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0x51),
905 + SNAND_MEMORG_1G_2K_128,
906 + &snand_cap_read_from_cache_quad,
907 + &snand_cap_program_load_x4),
908 + SNAND_INFO("GD5F2GQ4UExIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xd2),
909 + SNAND_MEMORG_2G_2K_128,
910 + &snand_cap_read_from_cache_quad_q2d,
911 + &snand_cap_program_load_x4),
912 + SNAND_INFO("GD5F2GQ5UExxH", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0x32),
913 + SNAND_MEMORG_2G_2K_64,
914 + &snand_cap_read_from_cache_quad_a8d,
915 + &snand_cap_program_load_x4),
916 + SNAND_INFO("GD5F2GQ4xAYIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xf2),
917 + SNAND_MEMORG_2G_2K_64,
918 + &snand_cap_read_from_cache_quad_q2d,
919 + &snand_cap_program_load_x4),
920 + SNAND_INFO("GD5F4GQ4UBxIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xd4),
921 + SNAND_MEMORG_4G_4K_256,
922 + &snand_cap_read_from_cache_quad_q2d,
923 + &snand_cap_program_load_x4),
924 + SNAND_INFO("GD5F4GQ4xAYIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xf4),
925 + SNAND_MEMORG_4G_2K_64,
926 + &snand_cap_read_from_cache_quad_q2d,
927 + &snand_cap_program_load_x4),
928 + SNAND_INFO("GD5F2GQ5UExxG", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x52),
929 + SNAND_MEMORG_2G_2K_128,
930 + &snand_cap_read_from_cache_quad_a8d,
931 + &snand_cap_program_load_x4),
932 + SNAND_INFO("GD5F4GQ4UCxIG", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0xb4),
933 + SNAND_MEMORG_4G_4K_256,
934 + &snand_cap_read_from_cache_quad_q2d,
935 + &snand_cap_program_load_x4),
937 + SNAND_INFO("MX35LF1GE4AB", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x12),
938 + SNAND_MEMORG_1G_2K_64,
939 + &snand_cap_read_from_cache_x4,
940 + &snand_cap_program_load_x4),
941 + SNAND_INFO("MX35LF1G24AD", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x14),
942 + SNAND_MEMORG_1G_2K_128,
943 + &snand_cap_read_from_cache_quad,
944 + &snand_cap_program_load_x4),
945 + SNAND_INFO("MX31LF1GE4BC", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x1e),
946 + SNAND_MEMORG_1G_2K_64,
947 + &snand_cap_read_from_cache_x4,
948 + &snand_cap_program_load_x4),
949 + SNAND_INFO("MX35LF2GE4AB", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x22),
950 + SNAND_MEMORG_2G_2K_64,
951 + &snand_cap_read_from_cache_x4,
952 + &snand_cap_program_load_x4),
953 + SNAND_INFO("MX35LF2G24AD", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x24),
954 + SNAND_MEMORG_2G_2K_128,
955 + &snand_cap_read_from_cache_quad,
956 + &snand_cap_program_load_x4),
957 + SNAND_INFO("MX35LF2GE4AD", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x26),
958 + SNAND_MEMORG_2G_2K_128,
959 + &snand_cap_read_from_cache_x4,
960 + &snand_cap_program_load_x4),
961 + SNAND_INFO("MX35LF2G14AC", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x20),
962 + SNAND_MEMORG_2G_2K_64,
963 + &snand_cap_read_from_cache_x4,
964 + &snand_cap_program_load_x4),
965 + SNAND_INFO("MX35LF4G24AD", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x35),
966 + SNAND_MEMORG_4G_4K_256,
967 + &snand_cap_read_from_cache_quad,
968 + &snand_cap_program_load_x4),
969 + SNAND_INFO("MX35LF4GE4AD", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x37),
970 + SNAND_MEMORG_4G_4K_256,
971 + &snand_cap_read_from_cache_x4,
972 + &snand_cap_program_load_x4),
974 + SNAND_INFO("MT29F1G01AAADD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x12),
975 + SNAND_MEMORG_1G_2K_64,
976 + &snand_cap_read_from_cache_x4,
977 + &snand_cap_program_load_x1),
978 + SNAND_INFO("MT29F1G01ABAFD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x14),
979 + SNAND_MEMORG_1G_2K_128,
980 + &snand_cap_read_from_cache_quad,
981 + &snand_cap_program_load_x4),
982 + SNAND_INFO("MT29F2G01AAAED", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x9f),
983 + SNAND_MEMORG_2G_2K_64_2P,
984 + &snand_cap_read_from_cache_x4,
985 + &snand_cap_program_load_x1),
986 + SNAND_INFO("MT29F2G01ABAGD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x24),
987 + SNAND_MEMORG_2G_2K_128_2P,
988 + &snand_cap_read_from_cache_quad,
989 + &snand_cap_program_load_x4),
990 + SNAND_INFO("MT29F4G01AAADD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x32),
991 + SNAND_MEMORG_4G_2K_64_2P,
992 + &snand_cap_read_from_cache_x4,
993 + &snand_cap_program_load_x1),
994 + SNAND_INFO("MT29F4G01ABAFD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x34),
995 + SNAND_MEMORG_4G_4K_256,
996 + &snand_cap_read_from_cache_quad,
997 + &snand_cap_program_load_x4),
998 + SNAND_INFO("MT29F4G01ADAGD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x36),
999 + SNAND_MEMORG_4G_2K_128_2P_2D,
1000 + &snand_cap_read_from_cache_quad,
1001 + &snand_cap_program_load_x4,
1002 + mtk_snand_micron_select_die),
1003 + SNAND_INFO("MT29F8G01ADAFD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x46),
1004 + SNAND_MEMORG_8G_4K_256_2D,
1005 + &snand_cap_read_from_cache_quad,
1006 + &snand_cap_program_load_x4,
1007 + mtk_snand_micron_select_die),
1009 + SNAND_INFO("TC58CVG0S3HRAIG", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xc2),
1010 + SNAND_MEMORG_1G_2K_128,
1011 + &snand_cap_read_from_cache_x4,
1012 + &snand_cap_program_load_x1),
1013 + SNAND_INFO("TC58CVG1S3HRAIG", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xcb),
1014 + SNAND_MEMORG_2G_2K_128,
1015 + &snand_cap_read_from_cache_x4,
1016 + &snand_cap_program_load_x1),
1017 + SNAND_INFO("TC58CVG2S0HRAIG", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xcd),
1018 + SNAND_MEMORG_4G_4K_256,
1019 + &snand_cap_read_from_cache_x4,
1020 + &snand_cap_program_load_x1),
1021 + SNAND_INFO("TC58CVG0S3HRAIJ", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xe2),
1022 + SNAND_MEMORG_1G_2K_128,
1023 + &snand_cap_read_from_cache_x4,
1024 + &snand_cap_program_load_x4),
1025 + SNAND_INFO("TC58CVG1S3HRAIJ", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xeb),
1026 + SNAND_MEMORG_2G_2K_128,
1027 + &snand_cap_read_from_cache_x4,
1028 + &snand_cap_program_load_x4),
1029 + SNAND_INFO("TC58CVG2S0HRAIJ", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xed),
1030 + SNAND_MEMORG_4G_4K_256,
1031 + &snand_cap_read_from_cache_x4,
1032 + &snand_cap_program_load_x4),
1033 + SNAND_INFO("TH58CVG3S0HRAIJ", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xe4),
1034 + SNAND_MEMORG_8G_4K_256,
1035 + &snand_cap_read_from_cache_x4,
1036 + &snand_cap_program_load_x4),
1038 + SNAND_INFO("F50L512M41A", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x20),
1039 + SNAND_MEMORG_512M_2K_64,
1040 + &snand_cap_read_from_cache_x4,
1041 + &snand_cap_program_load_x4),
1042 + SNAND_INFO("F50L1G41A", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x21),
1043 + SNAND_MEMORG_1G_2K_64,
1044 + &snand_cap_read_from_cache_x4,
1045 + &snand_cap_program_load_x4),
1046 + SNAND_INFO("F50L1G41LB", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x01),
1047 + SNAND_MEMORG_1G_2K_64,
1048 + &snand_cap_read_from_cache_quad,
1049 + &snand_cap_program_load_x4),
1050 + SNAND_INFO("F50L2G41LB", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x0a),
1051 + SNAND_MEMORG_2G_2K_64_2D,
1052 + &snand_cap_read_from_cache_quad,
1053 + &snand_cap_program_load_x4,
1054 + mtk_snand_winbond_select_die),
1056 + SNAND_INFO("CS11G0T0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x00),
1057 + SNAND_MEMORG_1G_2K_128,
1058 + &snand_cap_read_from_cache_quad_q2d,
1059 + &snand_cap_program_load_x4),
1060 + SNAND_INFO("CS11G0G0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x10),
1061 + SNAND_MEMORG_1G_2K_128,
1062 + &snand_cap_read_from_cache_quad_q2d,
1063 + &snand_cap_program_load_x4),
1064 + SNAND_INFO("CS11G0S0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x20),
1065 + SNAND_MEMORG_1G_2K_64,
1066 + &snand_cap_read_from_cache_quad_q2d,
1067 + &snand_cap_program_load_x4),
1068 + SNAND_INFO("CS11G1T0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x01),
1069 + SNAND_MEMORG_2G_2K_128,
1070 + &snand_cap_read_from_cache_quad_q2d,
1071 + &snand_cap_program_load_x4),
1072 + SNAND_INFO("CS11G1S0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x21),
1073 + SNAND_MEMORG_2G_2K_64,
1074 + &snand_cap_read_from_cache_quad_q2d,
1075 + &snand_cap_program_load_x4),
1076 + SNAND_INFO("CS11G2T0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x02),
1077 + SNAND_MEMORG_4G_2K_128,
1078 + &snand_cap_read_from_cache_quad_q2d,
1079 + &snand_cap_program_load_x4),
1080 + SNAND_INFO("CS11G2S0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x22),
1081 + SNAND_MEMORG_4G_2K_64,
1082 + &snand_cap_read_from_cache_quad_q2d,
1083 + &snand_cap_program_load_x4),
1085 + SNAND_INFO("EM73B044VCA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x01),
1086 + SNAND_MEMORG_512M_2K_64,
1087 + &snand_cap_read_from_cache_quad_q2d,
1088 + &snand_cap_program_load_x4),
1089 + SNAND_INFO("EM73C044SNB", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x11),
1090 + SNAND_MEMORG_1G_2K_120,
1091 + &snand_cap_read_from_cache_quad_q2d,
1092 + &snand_cap_program_load_x4),
1093 + SNAND_INFO("EM73C044SNF", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x09),
1094 + SNAND_MEMORG_1G_2K_128,
1095 + &snand_cap_read_from_cache_quad_q2d,
1096 + &snand_cap_program_load_x4),
1097 + SNAND_INFO("EM73C044VCA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x18),
1098 + SNAND_MEMORG_1G_2K_64,
1099 + &snand_cap_read_from_cache_quad_q2d,
1100 + &snand_cap_program_load_x4),
1101 + SNAND_INFO("EM73C044SNA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x19),
1102 + SNAND_MEMORG(2048, 64, 128, 512, 1, 1),
1103 + &snand_cap_read_from_cache_quad_q2d,
1104 + &snand_cap_program_load_x4),
1105 + SNAND_INFO("EM73C044VCD", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1c),
1106 + SNAND_MEMORG_1G_2K_64,
1107 + &snand_cap_read_from_cache_quad_q2d,
1108 + &snand_cap_program_load_x4),
1109 + SNAND_INFO("EM73C044SND", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1d),
1110 + SNAND_MEMORG_1G_2K_64,
1111 + &snand_cap_read_from_cache_quad_q2d,
1112 + &snand_cap_program_load_x4),
1113 + SNAND_INFO("EM73D044SND", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1e),
1114 + SNAND_MEMORG_2G_2K_64,
1115 + &snand_cap_read_from_cache_quad_q2d,
1116 + &snand_cap_program_load_x4),
1117 + SNAND_INFO("EM73C044VCC", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x22),
1118 + SNAND_MEMORG_1G_2K_64,
1119 + &snand_cap_read_from_cache_quad_q2d,
1120 + &snand_cap_program_load_x4),
1121 + SNAND_INFO("EM73C044VCF", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x25),
1122 + SNAND_MEMORG_1G_2K_64,
1123 + &snand_cap_read_from_cache_quad_q2d,
1124 + &snand_cap_program_load_x4),
1125 + SNAND_INFO("EM73C044SNC", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x31),
1126 + SNAND_MEMORG_1G_2K_128,
1127 + &snand_cap_read_from_cache_quad_q2d,
1128 + &snand_cap_program_load_x4),
1129 + SNAND_INFO("EM73D044SNC", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x0a),
1130 + SNAND_MEMORG_2G_2K_120,
1131 + &snand_cap_read_from_cache_quad_q2d,
1132 + &snand_cap_program_load_x4),
1133 + SNAND_INFO("EM73D044SNA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x12),
1134 + SNAND_MEMORG_2G_2K_128,
1135 + &snand_cap_read_from_cache_quad_q2d,
1136 + &snand_cap_program_load_x4),
1137 + SNAND_INFO("EM73D044SNF", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x10),
1138 + SNAND_MEMORG_2G_2K_128,
1139 + &snand_cap_read_from_cache_quad_q2d,
1140 + &snand_cap_program_load_x4),
1141 + SNAND_INFO("EM73D044VCA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x13),
1142 + SNAND_MEMORG_2G_2K_128,
1143 + &snand_cap_read_from_cache_quad_q2d,
1144 + &snand_cap_program_load_x4),
1145 + SNAND_INFO("EM73D044VCB", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x14),
1146 + SNAND_MEMORG_2G_2K_64,
1147 + &snand_cap_read_from_cache_quad_q2d,
1148 + &snand_cap_program_load_x4),
1149 + SNAND_INFO("EM73D044VCD", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x17),
1150 + SNAND_MEMORG_2G_2K_128,
1151 + &snand_cap_read_from_cache_quad_q2d,
1152 + &snand_cap_program_load_x4),
1153 + SNAND_INFO("EM73D044VCH", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1b),
1154 + SNAND_MEMORG_2G_2K_64,
1155 + &snand_cap_read_from_cache_quad_q2d,
1156 + &snand_cap_program_load_x4),
1157 + SNAND_INFO("EM73D044SND", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1d),
1158 + SNAND_MEMORG_2G_2K_64,
1159 + &snand_cap_read_from_cache_quad_q2d,
1160 + &snand_cap_program_load_x4),
1161 + SNAND_INFO("EM73D044VCG", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1f),
1162 + SNAND_MEMORG_2G_2K_64,
1163 + &snand_cap_read_from_cache_quad_q2d,
1164 + &snand_cap_program_load_x4),
1165 + SNAND_INFO("EM73D044VCE", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x20),
1166 + SNAND_MEMORG_2G_2K_64,
1167 + &snand_cap_read_from_cache_quad_q2d,
1168 + &snand_cap_program_load_x4),
1169 + SNAND_INFO("EM73D044VCL", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x2e),
1170 + SNAND_MEMORG_2G_2K_128,
1171 + &snand_cap_read_from_cache_quad_q2d,
1172 + &snand_cap_program_load_x4),
1173 + SNAND_INFO("EM73D044SNB", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x32),
1174 + SNAND_MEMORG_2G_2K_128,
1175 + &snand_cap_read_from_cache_quad_q2d,
1176 + &snand_cap_program_load_x4),
1177 + SNAND_INFO("EM73E044SNA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x03),
1178 + SNAND_MEMORG_4G_4K_256,
1179 + &snand_cap_read_from_cache_quad_q2d,
1180 + &snand_cap_program_load_x4),
1181 + SNAND_INFO("EM73E044SND", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x0b),
1182 + SNAND_MEMORG_4G_4K_240,
1183 + &snand_cap_read_from_cache_quad_q2d,
1184 + &snand_cap_program_load_x4),
1185 + SNAND_INFO("EM73E044SNB", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x23),
1186 + SNAND_MEMORG_4G_4K_256,
1187 + &snand_cap_read_from_cache_quad_q2d,
1188 + &snand_cap_program_load_x4),
1189 + SNAND_INFO("EM73E044VCA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x2c),
1190 + SNAND_MEMORG_4G_4K_256,
1191 + &snand_cap_read_from_cache_quad_q2d,
1192 + &snand_cap_program_load_x4),
1193 + SNAND_INFO("EM73E044VCB", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x2f),
1194 + SNAND_MEMORG_4G_2K_128,
1195 + &snand_cap_read_from_cache_quad_q2d,
1196 + &snand_cap_program_load_x4),
1197 + SNAND_INFO("EM73F044SNA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x24),
1198 + SNAND_MEMORG_8G_4K_256,
1199 + &snand_cap_read_from_cache_quad_q2d,
1200 + &snand_cap_program_load_x4),
1201 + SNAND_INFO("EM73F044VCA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x2d),
1202 + SNAND_MEMORG_8G_4K_256,
1203 + &snand_cap_read_from_cache_quad_q2d,
1204 + &snand_cap_program_load_x4),
1205 + SNAND_INFO("EM73E044SNE", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x0e),
1206 + SNAND_MEMORG_8G_4K_256,
1207 + &snand_cap_read_from_cache_quad_q2d,
1208 + &snand_cap_program_load_x4),
1209 + SNAND_INFO("EM73C044SNG", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x0c),
1210 + SNAND_MEMORG_1G_2K_120,
1211 + &snand_cap_read_from_cache_quad_q2d,
1212 + &snand_cap_program_load_x4),
1213 + SNAND_INFO("EM73D044VCN", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x0f),
1214 + SNAND_MEMORG_2G_2K_64,
1215 + &snand_cap_read_from_cache_quad_q2d,
1216 + &snand_cap_program_load_x4),
1218 + SNAND_INFO("FM35Q1GA", SNAND_ID(SNAND_ID_DYMMY, 0xe5, 0x71),
1219 + SNAND_MEMORG_1G_2K_64,
1220 + &snand_cap_read_from_cache_x4_only,
1221 + &snand_cap_program_load_x4),
1223 + SNAND_INFO("PN26G01A", SNAND_ID(SNAND_ID_DYMMY, 0xa1, 0xe1),
1224 + SNAND_MEMORG_1G_2K_128,
1225 + &snand_cap_read_from_cache_quad_q2d,
1226 + &snand_cap_program_load_x4),
1227 + SNAND_INFO("PN26G02A", SNAND_ID(SNAND_ID_DYMMY, 0xa1, 0xe2),
1228 + SNAND_MEMORG_2G_2K_128,
1229 + &snand_cap_read_from_cache_quad_q2d,
1230 + &snand_cap_program_load_x4),
1232 + SNAND_INFO("IS37SML01G1", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x21),
1233 + SNAND_MEMORG_1G_2K_64,
1234 + &snand_cap_read_from_cache_x4,
1235 + &snand_cap_program_load_x4),
1237 + SNAND_INFO("ATO25D1GA", SNAND_ID(SNAND_ID_DYMMY, 0x9b, 0x12),
1238 + SNAND_MEMORG_1G_2K_64,
1239 + &snand_cap_read_from_cache_x4_only,
1240 + &snand_cap_program_load_x4),
1242 + SNAND_INFO("HYF1GQ4U", SNAND_ID(SNAND_ID_DYMMY, 0xc9, 0x51),
1243 + SNAND_MEMORG_1G_2K_128,
1244 + &snand_cap_read_from_cache_quad_q2d,
1245 + &snand_cap_program_load_x4),
1246 + SNAND_INFO("HYF2GQ4U", SNAND_ID(SNAND_ID_DYMMY, 0xc9, 0x52),
1247 + SNAND_MEMORG_2G_2K_128,
1248 + &snand_cap_read_from_cache_quad_q2d,
1249 + &snand_cap_program_load_x4),
1252 +static int mtk_snand_winbond_select_die(struct mtk_snand *snf, uint32_t dieidx)
1257 + snand_log_chip(snf->pdev, "Invalid die index %u\n", dieidx);
1261 + op[0] = SNAND_CMD_WINBOND_SELECT_DIE;
1262 + op[1] = (uint8_t)dieidx;
1264 + return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
1267 +static int mtk_snand_micron_select_die(struct mtk_snand *snf, uint32_t dieidx)
1272 + snand_log_chip(snf->pdev, "Invalid die index %u\n", dieidx);
1276 + ret = mtk_snand_set_feature(snf, SNAND_FEATURE_MICRON_DIE_ADDR,
1277 + SNAND_MICRON_DIE_SEL_1);
1279 + snand_log_chip(snf->pdev,
1280 + "Failed to set die selection feature\n");
1287 +const struct snand_flash_info *snand_flash_id_lookup(enum snand_id_type type,
1288 + const uint8_t *id)
1290 + const struct snand_id *fid;
1293 + for (i = 0; i < ARRAY_SIZE(snand_flash_ids); i++) {
1294 + if (snand_flash_ids[i].id.type != type)
1297 + fid = &snand_flash_ids[i].id;
1298 + if (memcmp(fid->id, id, fid->len))
1301 + return &snand_flash_ids[i];
1307 +++ b/drivers/mtd/mtk-snand/mtk-snand-mtd.c
1309 +// SPDX-License-Identifier: GPL-2.0
1311 + * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
1313 + * Author: Weijie Gao <weijie.gao@mediatek.com>
1316 +#include <common.h>
1318 +#include <malloc.h>
1319 +#include <mapmem.h>
1320 +#include <linux/mtd/mtd.h>
1321 +#include <watchdog.h>
1323 +#include "mtk-snand.h"
1325 +struct mtk_snand_mtd {
1326 + struct udevice *dev;
1327 + struct mtk_snand *snf;
1328 + struct mtk_snand_chip_info cinfo;
1329 + uint8_t *page_cache;
1332 +static const char snand_mtd_name_prefix[] = "spi-nand";
1334 +static u32 snandidx;
1336 +static inline struct mtk_snand_mtd *mtd_to_msm(struct mtd_info *mtd)
1341 +static int mtk_snand_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
1343 + struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
1344 + u64 start_addr, end_addr;
1347 + /* Do not allow write past end of device */
1348 + if ((instr->addr + instr->len) > mtd->size) {
1349 + pr_debug("%s: attempt to erase beyond end of device\n",
1354 + start_addr = instr->addr & (~mtd->erasesize_mask);
1355 + end_addr = instr->addr + instr->len;
1356 + if (end_addr & mtd->erasesize_mask) {
1357 + end_addr = (end_addr + mtd->erasesize_mask) &
1358 + (~mtd->erasesize_mask);
1361 + instr->state = MTD_ERASING;
1363 + while (start_addr < end_addr) {
1366 + if (mtk_snand_block_isbad(msm->snf, start_addr)) {
1367 + if (!instr->scrub) {
1368 + instr->fail_addr = start_addr;
1374 + ret = mtk_snand_erase_block(msm->snf, start_addr);
1376 + instr->fail_addr = start_addr;
1380 + start_addr += mtd->erasesize;
1384 + instr->state = MTD_ERASE_DONE;
1386 + instr->state = MTD_ERASE_FAILED;
1393 +static int mtk_snand_mtd_read_data(struct mtk_snand_mtd *msm, uint64_t addr,
1394 + struct mtd_oob_ops *ops)
1396 + struct mtd_info *mtd = dev_get_uclass_priv(msm->dev);
1397 + size_t len, ooblen, maxooblen, chklen;
1398 + uint32_t col, ooboffs;
1399 + uint8_t *datcache, *oobcache;
1400 + bool ecc_failed = false, raw = ops->mode == MTD_OPS_RAW;
1401 + int ret, max_bitflips = 0;
1403 + col = addr & mtd->writesize_mask;
1404 + addr &= ~mtd->writesize_mask;
1405 + maxooblen = mtd_oobavail(mtd, ops);
1406 + ooboffs = ops->ooboffs;
1407 + ooblen = ops->ooblen;
1410 + datcache = len ? msm->page_cache : NULL;
1411 + oobcache = ooblen ? msm->page_cache + mtd->writesize : NULL;
1413 + ops->oobretlen = 0;
1416 + while (len || ooblen) {
1419 + if (ops->mode == MTD_OPS_AUTO_OOB)
1420 + ret = mtk_snand_read_page_auto_oob(msm->snf, addr,
1421 + datcache, oobcache, maxooblen, NULL, raw);
1423 + ret = mtk_snand_read_page(msm->snf, addr, datcache,
1426 + if (ret < 0 && ret != -EBADMSG)
1429 + if (ret == -EBADMSG) {
1430 + mtd->ecc_stats.failed++;
1431 + ecc_failed = true;
1433 + mtd->ecc_stats.corrected += ret;
1434 + max_bitflips = max_t(int, ret, max_bitflips);
1442 + chklen = mtd->writesize - col;
1446 + memcpy(ops->datbuf + ops->retlen, datcache + col,
1449 + col = 0; /* (col + chklen) % mtd->writesize */
1450 + ops->retlen += chklen;
1455 + chklen = maxooblen - ooboffs;
1456 + if (chklen > ooblen)
1459 + memcpy(ops->oobbuf + ops->oobretlen, oobcache + ooboffs,
1462 + ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
1463 + ops->oobretlen += chklen;
1466 + addr += mtd->writesize;
1469 + return ecc_failed ? -EBADMSG : max_bitflips;
1472 +static int mtk_snand_mtd_read_oob(struct mtd_info *mtd, loff_t from,
1473 + struct mtd_oob_ops *ops)
1475 + struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
1476 + uint32_t maxooblen;
1478 + if (!ops->oobbuf && !ops->datbuf) {
1479 + if (ops->ooblen || ops->len)
1485 + switch (ops->mode) {
1486 + case MTD_OPS_PLACE_OOB:
1487 + case MTD_OPS_AUTO_OOB:
1491 + pr_debug("%s: unsupported oob mode: %u\n", __func__, ops->mode);
1495 + maxooblen = mtd_oobavail(mtd, ops);
1497 + /* Do not allow read past end of device */
1498 + if (ops->datbuf && (from + ops->len) > mtd->size) {
1499 + pr_debug("%s: attempt to read beyond end of device\n",
1504 + if (unlikely(ops->ooboffs >= maxooblen)) {
1505 + pr_debug("%s: attempt to start read outside oob\n",
1510 + if (unlikely(from >= mtd->size ||
1511 + ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
1512 + (from >> mtd->writesize_shift)) * maxooblen)) {
1513 + pr_debug("%s: attempt to read beyond end of device\n",
1518 + return mtk_snand_mtd_read_data(msm, from, ops);
1521 +static int mtk_snand_mtd_write_data(struct mtk_snand_mtd *msm, uint64_t addr,
1522 + struct mtd_oob_ops *ops)
1524 + struct mtd_info *mtd = dev_get_uclass_priv(msm->dev);
1525 + size_t len, ooblen, maxooblen, chklen, oobwrlen;
1526 + uint32_t col, ooboffs;
1527 + uint8_t *datcache, *oobcache;
1528 + bool raw = ops->mode == MTD_OPS_RAW;
1531 + col = addr & mtd->writesize_mask;
1532 + addr &= ~mtd->writesize_mask;
1533 + maxooblen = mtd_oobavail(mtd, ops);
1534 + ooboffs = ops->ooboffs;
1535 + ooblen = ops->ooblen;
1538 + datcache = len ? msm->page_cache : NULL;
1539 + oobcache = ooblen ? msm->page_cache + mtd->writesize : NULL;
1541 + ops->oobretlen = 0;
1544 + while (len || ooblen) {
1549 + chklen = mtd->writesize - col;
1553 + memset(datcache, 0xff, col);
1554 + memcpy(datcache + col, ops->datbuf + ops->retlen,
1556 + memset(datcache + col + chklen, 0xff,
1557 + mtd->writesize - col - chklen);
1559 + col = 0; /* (col + chklen) % mtd->writesize */
1560 + ops->retlen += chklen;
1566 + chklen = maxooblen - ooboffs;
1567 + if (chklen > ooblen)
1570 + memset(oobcache, 0xff, ooboffs);
1571 + memcpy(oobcache + ooboffs,
1572 + ops->oobbuf + ops->oobretlen, chklen);
1573 + memset(oobcache + ooboffs + chklen, 0xff,
1574 + mtd->oobsize - ooboffs - chklen);
1575 + oobwrlen = chklen + ooboffs;
1577 + ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
1578 + ops->oobretlen += chklen;
1581 + if (ops->mode == MTD_OPS_AUTO_OOB)
1582 + ret = mtk_snand_write_page_auto_oob(msm->snf, addr,
1583 + datcache, oobcache, oobwrlen, NULL, raw);
1585 + ret = mtk_snand_write_page(msm->snf, addr, datcache,
1591 + addr += mtd->writesize;
1597 +static int mtk_snand_mtd_write_oob(struct mtd_info *mtd, loff_t to,
1598 + struct mtd_oob_ops *ops)
1600 + struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
1601 + uint32_t maxooblen;
1603 + if (!ops->oobbuf && !ops->datbuf) {
1604 + if (ops->ooblen || ops->len)
1610 + switch (ops->mode) {
1611 + case MTD_OPS_PLACE_OOB:
1612 + case MTD_OPS_AUTO_OOB:
1616 + pr_debug("%s: unsupported oob mode: %u\n", __func__, ops->mode);
1620 + maxooblen = mtd_oobavail(mtd, ops);
1622 + /* Do not allow write past end of device */
1623 + if (ops->datbuf && (to + ops->len) > mtd->size) {
1624 + pr_debug("%s: attempt to write beyond end of device\n",
1629 + if (unlikely(ops->ooboffs >= maxooblen)) {
1630 + pr_debug("%s: attempt to start write outside oob\n",
1635 + if (unlikely(to >= mtd->size ||
1636 + ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
1637 + (to >> mtd->writesize_shift)) * maxooblen)) {
1638 + pr_debug("%s: attempt to write beyond end of device\n",
1643 + return mtk_snand_mtd_write_data(msm, to, ops);
1646 +static int mtk_snand_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
1647 + size_t *retlen, u_char *buf)
1649 + struct mtd_oob_ops ops = {
1650 + .mode = MTD_OPS_PLACE_OOB,
1656 + ret = mtk_snand_mtd_read_oob(mtd, from, &ops);
1659 + *retlen = ops.retlen;
1664 +static int mtk_snand_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
1665 + size_t *retlen, const u_char *buf)
1667 + struct mtd_oob_ops ops = {
1668 + .mode = MTD_OPS_PLACE_OOB,
1669 + .datbuf = (void *)buf,
1674 + ret = mtk_snand_mtd_write_oob(mtd, to, &ops);
1677 + *retlen = ops.retlen;
1682 +static int mtk_snand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
1684 + struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
1686 + return mtk_snand_block_isbad(msm->snf, offs);
1689 +static int mtk_snand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
1691 + struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
1693 + return mtk_snand_block_markbad(msm->snf, offs);
1696 +static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section,
1697 + struct mtd_oob_region *oobecc)
1699 + struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
1704 + oobecc->offset = msm->cinfo.fdm_size * msm->cinfo.num_sectors;
1705 + oobecc->length = mtd->oobsize - oobecc->offset;
1710 +static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section,
1711 + struct mtd_oob_region *oobfree)
1713 + struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
1715 + if (section >= msm->cinfo.num_sectors)
1718 + oobfree->length = msm->cinfo.fdm_size - 1;
1719 + oobfree->offset = section * msm->cinfo.fdm_size + 1;
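+ /*
+  * Illustration (2048+64 page, 4 sectors, 8-byte FDM): byte 0 of each
+  * sector's FDM area holds the bad block marker, so the free OOB
+  * regions are bytes 1-7, 9-15, 17-23 and 25-31, with the ECC parity
+  * bytes following the FDM areas.
+  */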
1724 +static const struct mtd_ooblayout_ops mtk_snand_ooblayout = {
1725 + .ecc = mtk_snand_ooblayout_ecc,
1726 + .rfree = mtk_snand_ooblayout_free,
1729 +static int mtk_snand_mtd_probe(struct udevice *dev)
1731 + struct mtk_snand_mtd *msm = dev_get_priv(dev);
1732 + struct mtd_info *mtd = dev_get_uclass_priv(dev);
1733 + struct mtk_snand_platdata mtk_snand_pdata = {};
1738 + base = dev_read_addr_name(dev, "nfi");
1739 + if (base == FDT_ADDR_T_NONE)
1741 + mtk_snand_pdata.nfi_base = map_sysmem(base, 0);
1743 + base = dev_read_addr_name(dev, "ecc");
1744 + if (base == FDT_ADDR_T_NONE)
1746 + mtk_snand_pdata.ecc_base = map_sysmem(base, 0);
1748 + mtk_snand_pdata.soc = dev_get_driver_data(dev);
1749 + mtk_snand_pdata.quad_spi = dev_read_bool(dev, "quad-spi");
1751 + ret = mtk_snand_init(NULL, &mtk_snand_pdata, &msm->snf);
1755 + mtk_snand_get_chip_info(msm->snf, &msm->cinfo);
1757 + msm->page_cache = malloc(msm->cinfo.pagesize + msm->cinfo.sparesize);
1758 + if (!msm->page_cache) {
1759 + printf("%s: failed to allocate memory for page cache\n",
1765 + namelen = sizeof(snand_mtd_name_prefix) + 12;
1767 + mtd->name = malloc(namelen);
1769 + printf("%s: failed to allocate memory for MTD name\n",
1777 + snprintf(mtd->name, namelen, "%s%u", snand_mtd_name_prefix, snandidx++);
1781 + mtd->type = MTD_NANDFLASH;
1782 + mtd->flags = MTD_CAP_NANDFLASH;
1784 + mtd->size = msm->cinfo.chipsize;
1785 + mtd->erasesize = msm->cinfo.blocksize;
1786 + mtd->writesize = msm->cinfo.pagesize;
1787 + mtd->writebufsize = mtd->writesize;
1788 + mtd->oobsize = msm->cinfo.sparesize;
1789 + mtd->oobavail = msm->cinfo.num_sectors * (msm->cinfo.fdm_size - 1);
1791 + mtd->ooblayout = &mtk_snand_ooblayout;
1793 + mtd->ecc_strength = msm->cinfo.ecc_strength;
1794 + mtd->bitflip_threshold = (mtd->ecc_strength * 3) / 4;
1795 + mtd->ecc_step_size = msm->cinfo.sector_size;
1797 + mtd->_read = mtk_snand_mtd_read;
1798 + mtd->_write = mtk_snand_mtd_write;
1799 + mtd->_erase = mtk_snand_mtd_erase;
1800 + mtd->_read_oob = mtk_snand_mtd_read_oob;
1801 + mtd->_write_oob = mtk_snand_mtd_write_oob;
1802 + mtd->_block_isbad = mtk_snand_mtd_block_isbad;
1803 + mtd->_block_markbad = mtk_snand_mtd_block_markbad;
1805 + ret = add_mtd_device(mtd);
1807 + printf("%s: failed to add SPI-NAND MTD device\n", __func__);
1812 + printf("SPI-NAND: %s (%lluMB)\n", msm->cinfo.model,
1813 + msm->cinfo.chipsize >> 20);
1821 + free(msm->page_cache);
1824 + mtk_snand_cleanup(msm->snf);
1829 +static const struct udevice_id mtk_snand_ids[] = {
1830 + { .compatible = "mediatek,mt7622-snand", .data = SNAND_SOC_MT7622 },
1831 + { .compatible = "mediatek,mt7629-snand", .data = SNAND_SOC_MT7629 },
1832 + { .compatible = "mediatek,mt7981-snand", .data = SNAND_SOC_MT7981 },
1833 + { .compatible = "mediatek,mt7986-snand", .data = SNAND_SOC_MT7986 },
1834 + { /* sentinel */ },
1837 +U_BOOT_DRIVER(spinand) = {
1838 + .name = "mtk-snand",
1840 + .of_match = mtk_snand_ids,
1841 + .priv_auto = sizeof(struct mtk_snand_mtd),
1842 + .probe = mtk_snand_mtd_probe,
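+/*
+ * Illustrative device tree node consumed by this driver; the "nfi"/"ecc"
+ * reg names and the "quad-spi" property match what the probe function
+ * reads, while the unit address and reg values are only an example:
+ *
+ *	snand@1100d000 {
+ *		compatible = "mediatek,mt7622-snand";
+ *		reg = <0 0x1100d000 0 0x1000>, <0 0x1100e000 0 0x1000>;
+ *		reg-names = "nfi", "ecc";
+ *		quad-spi;
+ *	};
+ */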
1845 +++ b/drivers/mtd/mtk-snand/mtk-snand-os.c
1847 +// SPDX-License-Identifier: GPL-2.0
1849 + * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
1851 + * Author: Weijie Gao <weijie.gao@mediatek.com>
1854 +#include "mtk-snand-def.h"
1856 +int mtk_snand_log(struct mtk_snand_plat_dev *pdev,
1857 + enum mtk_snand_log_category cat, const char *fmt, ...)
1859 + const char *catname = "";
1864 + case SNAND_LOG_NFI:
1865 + catname = "NFI: ";
1867 + case SNAND_LOG_SNFI:
1868 + catname = "SNFI: ";
1870 + case SNAND_LOG_ECC:
1871 + catname = "ECC: ";
1877 + puts("SPI-NAND: ");
1880 + va_start(ap, fmt);
1881 + ret = vprintf(fmt, ap);
1887 +++ b/drivers/mtd/mtk-snand/mtk-snand-os.h
1889 +/* SPDX-License-Identifier: GPL-2.0 */
1891 + * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
1893 + * Author: Weijie Gao <weijie.gao@mediatek.com>
1896 +#ifndef _MTK_SNAND_OS_H_
1897 +#define _MTK_SNAND_OS_H_
1899 +#include <common.h>
1900 +#include <cpu_func.h>
1903 +#include <malloc.h>
1904 +#include <stdbool.h>
1905 +#include <stdarg.h>
1906 +#include <linux/types.h>
1907 +#include <asm/io.h>
1908 +#include <linux/bitops.h>
1909 +#include <linux/sizes.h>
1910 +#include <linux/iopoll.h>
1912 +#ifndef ARCH_DMA_MINALIGN
1913 +#define ARCH_DMA_MINALIGN 64
1916 +struct mtk_snand_plat_dev {
1920 +/* Polling helpers */
1921 +#define read16_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
1922 + readw_poll_timeout((addr), (val), (cond), (timeout_us))
1924 +#define read32_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
1925 + readl_poll_timeout((addr), (val), (cond), (timeout_us))
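+/*
+ * Note: U-Boot's readw/readl_poll_timeout() take no sleep argument, so
+ * the sleep_us parameter of these wrappers is accepted only for API
+ * compatibility and is ignored.
+ */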
1927 +/* Timer helpers */
1928 +typedef uint64_t mtk_snand_time_t;
1930 +static inline mtk_snand_time_t timer_get_ticks(void)
1932 + return get_ticks();
1935 +static inline mtk_snand_time_t timer_time_to_tick(uint32_t timeout_us)
1937 + return usec_to_tick(timeout_us);
1940 +static inline bool timer_is_timeout(mtk_snand_time_t start_tick,
1941 + mtk_snand_time_t timeout_tick)
1943 + return get_ticks() - start_tick > timeout_tick;
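+/*
+ * Usage sketch for the helpers above (condition_met() is hypothetical):
+ *
+ *	mtk_snand_time_t start = timer_get_ticks();
+ *	mtk_snand_time_t timeout = timer_time_to_tick(1000000);
+ *
+ *	while (!condition_met()) {
+ *		if (timer_is_timeout(start, timeout))
+ *			return -ETIMEDOUT;
+ *	}
+ */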
1946 +/* Memory helpers */
1947 +static inline void *generic_mem_alloc(struct mtk_snand_plat_dev *pdev,
1950 + return calloc(1, size);
1953 +static inline void generic_mem_free(struct mtk_snand_plat_dev *pdev, void *ptr)
1958 +static inline void *dma_mem_alloc(struct mtk_snand_plat_dev *pdev, size_t size)
1960 + return memalign(ARCH_DMA_MINALIGN, size);
1963 +static inline void dma_mem_free(struct mtk_snand_plat_dev *pdev, void *ptr)
1968 +static inline int dma_mem_map(struct mtk_snand_plat_dev *pdev, void *vaddr,
1969 + uintptr_t *dma_addr, size_t size, bool to_device)
1971 + size_t cachelen = roundup(size, ARCH_DMA_MINALIGN);
1972 + uintptr_t endaddr = (uintptr_t)vaddr + cachelen;
1975 + flush_dcache_range((uintptr_t)vaddr, endaddr);
1977 + invalidate_dcache_range((uintptr_t)vaddr, endaddr);
1979 + *dma_addr = (uintptr_t)vaddr;
1984 +static inline void dma_mem_unmap(struct mtk_snand_plat_dev *pdev,
1985 + uintptr_t dma_addr, size_t size,
1990 +/* Interrupt helpers */
1991 +static inline void irq_completion_done(struct mtk_snand_plat_dev *pdev)
1995 +static inline void irq_completion_init(struct mtk_snand_plat_dev *pdev)
1999 +static inline int irq_completion_wait(struct mtk_snand_plat_dev *pdev,
2000 + void __iomem *reg, uint32_t bit,
2001 + uint32_t timeout_us)
2005 + return read32_poll_timeout(reg, val, val & bit, 0, timeout_us);
2008 +#endif /* _MTK_SNAND_OS_H_ */
2010 +++ b/drivers/mtd/mtk-snand/mtk-snand.c
2012 +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2014 + * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
2016 + * Author: Weijie Gao <weijie.gao@mediatek.com>
2019 +#include "mtk-snand-def.h"
2021 +/* NFI registers */
2022 +#define NFI_CNFG 0x000
2023 +#define CNFG_OP_MODE_S 12
2024 +#define CNFG_OP_MODE_CUST 6
2025 +#define CNFG_OP_MODE_PROGRAM 3
2026 +#define CNFG_AUTO_FMT_EN BIT(9)
2027 +#define CNFG_HW_ECC_EN BIT(8)
2028 +#define CNFG_DMA_BURST_EN BIT(2)
2029 +#define CNFG_READ_MODE BIT(1)
2030 +#define CNFG_DMA_MODE BIT(0)
2032 +#define NFI_PAGEFMT 0x0004
2033 +#define NFI_SPARE_SIZE_LS_S 16
2034 +#define NFI_FDM_ECC_NUM_S 12
2035 +#define NFI_FDM_NUM_S 8
2036 +#define NFI_SPARE_SIZE_S 4
2037 +#define NFI_SEC_SEL_512 BIT(2)
2038 +#define NFI_PAGE_SIZE_S 0
2039 +#define NFI_PAGE_SIZE_512_2K 0
2040 +#define NFI_PAGE_SIZE_2K_4K 1
2041 +#define NFI_PAGE_SIZE_4K_8K 2
2042 +#define NFI_PAGE_SIZE_8K_16K 3
2044 +#define NFI_CON 0x008
2045 +#define CON_SEC_NUM_S 12
2046 +#define CON_BWR BIT(9)
2047 +#define CON_BRD BIT(8)
2048 +#define CON_NFI_RST BIT(1)
2049 +#define CON_FIFO_FLUSH BIT(0)
2051 +#define NFI_INTR_EN 0x010
2052 +#define NFI_INTR_STA 0x014
2053 +#define NFI_IRQ_INTR_EN BIT(31)
2054 +#define NFI_IRQ_CUS_READ BIT(8)
2055 +#define NFI_IRQ_CUS_PG BIT(7)
2057 +#define NFI_CMD 0x020
2059 +#define NFI_STRDATA 0x040
2060 +#define STR_DATA BIT(0)
2062 +#define NFI_STA 0x060
2063 +#define NFI_NAND_FSM GENMASK(28, 24)
2064 +#define NFI_FSM GENMASK(19, 16)
2065 +#define READ_EMPTY BIT(12)
2067 +#define NFI_FIFOSTA 0x064
2068 +#define FIFO_WR_REMAIN_S 8
2069 +#define FIFO_RD_REMAIN_S 0
2071 +#define NFI_ADDRCNTR 0x070
2072 +#define SEC_CNTR GENMASK(16, 12)
2073 +#define SEC_CNTR_S 12
2074 +#define NFI_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)
2076 +#define NFI_STRADDR 0x080
2078 +#define NFI_BYTELEN 0x084
2079 +#define BUS_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)
2081 +#define NFI_FDM0L 0x0a0
2082 +#define NFI_FDM0M 0x0a4
2083 +#define NFI_FDML(n) (NFI_FDM0L + (n) * 8)
2084 +#define NFI_FDMM(n) (NFI_FDM0M + (n) * 8)
2086 +#define NFI_DEBUG_CON1 0x220
2087 +#define WBUF_EN BIT(2)
2089 +#define NFI_MASTERSTA 0x224
2090 +#define MAS_ADDR GENMASK(11, 9)
2091 +#define MAS_RD GENMASK(8, 6)
2092 +#define MAS_WR GENMASK(5, 3)
2093 +#define MAS_RDDLY GENMASK(2, 0)
2094 +#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
2095 +#define AHB_BUS_BUSY BIT(1)
2096 +#define BUS_BUSY BIT(0)
2097 +#define NFI_MASTERSTA_MASK_7981 (AHB_BUS_BUSY | BUS_BUSY)
2098 +#define NFI_MASTERSTA_MASK_7986 (AHB_BUS_BUSY | BUS_BUSY)
2100 +/* SNFI registers */
2101 +#define SNF_MAC_CTL 0x500
2102 +#define MAC_XIO_SEL BIT(4)
2103 +#define SF_MAC_EN BIT(3)
2104 +#define SF_TRIG BIT(2)
2105 +#define WIP_READY BIT(1)
2106 +#define WIP				   BIT(0)
2108 +#define SNF_MAC_OUTL 0x504
2109 +#define SNF_MAC_INL 0x508
2111 +#define SNF_RD_CTL2 0x510
2112 +#define DATA_READ_DUMMY_S 8
2113 +#define DATA_READ_CMD_S 0
2115 +#define SNF_RD_CTL3 0x514
2117 +#define SNF_PG_CTL1 0x524
2118 +#define PG_LOAD_CMD_S 8
2120 +#define SNF_PG_CTL2 0x528
2122 +#define SNF_MISC_CTL 0x538
2123 +#define SW_RST BIT(28)
2124 +#define FIFO_RD_LTC_S 25
2125 +#define PG_LOAD_X4_EN BIT(20)
2126 +#define DATA_READ_MODE_S 16
2127 +#define DATA_READ_MODE GENMASK(18, 16)
2128 +#define DATA_READ_MODE_X1 0
2129 +#define DATA_READ_MODE_X2 1
2130 +#define DATA_READ_MODE_X4 2
2131 +#define DATA_READ_MODE_DUAL 5
2132 +#define DATA_READ_MODE_QUAD 6
2133 +#define LATCH_LAT_S 8
2134 +#define LATCH_LAT GENMASK(9, 8)
2135 +#define PG_LOAD_CUSTOM_EN BIT(7)
2136 +#define DATARD_CUSTOM_EN BIT(6)
2137 +#define CS_DESELECT_CYC_S 0
2139 +#define SNF_MISC_CTL2 0x53c
2140 +#define PROGRAM_LOAD_BYTE_NUM_S 16
2141 +#define READ_DATA_BYTE_NUM_S 11
2143 +#define SNF_DLY_CTL3 0x548
2144 +#define SFCK_SAM_DLY_S 0
2146 +#define SNF_STA_CTL1 0x550
2147 +#define CUS_PG_DONE BIT(28)
2148 +#define CUS_READ_DONE BIT(27)
2149 +#define SPI_STATE_S 0
2150 +#define SPI_STATE GENMASK(3, 0)
2152 +#define SNF_CFG 0x55c
2153 +#define SPI_MODE BIT(0)
2155 +#define SNF_GPRAM 0x800
2156 +#define SNF_GPRAM_SIZE 0xa0
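SNF_GPRAM is the staging buffer for programmed-IO commands; mtk_snand_mac_io() below rejects any transaction whose outgoing plus incoming bytes exceed SNF_GPRAM_SIZE (0xa0, i.e. 160 bytes). For example, a GET FEATURE cycle comfortably fits (this mirrors mtk_snand_get_feature() later in this file):

	uint8_t op[2] = { SNAND_CMD_GET_FEATURE, SNAND_FEATURE_STATUS_ADDR };
	uint8_t status;

	/* 2 bytes out + 1 byte in = 3 <= 160: accepted by mtk_snand_mac_io() */
	mtk_snand_mac_io(snf, op, sizeof(op), &status, 1);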
2158 +#define SNFI_POLL_INTERVAL 1000000
2160 +static const uint8_t mt7622_spare_sizes[] = { 16, 26, 27, 28 };
2162 +static const uint8_t mt7981_spare_sizes[] = {
2163 + 16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64,
2167 +static const uint8_t mt7986_spare_sizes[] = {
2168 + 16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64,
2172 +static const struct mtk_snand_soc_data mtk_snand_socs[__SNAND_SOC_MAX] = {
2173 + [SNAND_SOC_MT7622] = {
2174 + .sector_size = 512,
2177 + .fdm_ecc_size = 1,
2179 + .bbm_swap = false,
2180 + .empty_page_check = false,
2181 + .mastersta_mask = NFI_MASTERSTA_MASK_7622,
2182 + .spare_sizes = mt7622_spare_sizes,
2183 + .num_spare_size = ARRAY_SIZE(mt7622_spare_sizes),
2185 + .sample_delay = 40
2187 + [SNAND_SOC_MT7629] = {
2188 + .sector_size = 512,
2191 + .fdm_ecc_size = 1,
2194 + .empty_page_check = false,
2195 + .mastersta_mask = NFI_MASTERSTA_MASK_7622,
2196 + .spare_sizes = mt7622_spare_sizes,
2197 + .num_spare_size = ARRAY_SIZE(mt7622_spare_sizes),
2199 + .sample_delay = 40
2201 + [SNAND_SOC_MT7981] = {
2202 + .sector_size = 1024,
2203 + .max_sectors = 16,
2205 + .fdm_ecc_size = 1,
2208 + .empty_page_check = true,
2209 + .mastersta_mask = NFI_MASTERSTA_MASK_7981,
2210 + .spare_sizes = mt7981_spare_sizes,
2211 + .num_spare_size = ARRAY_SIZE(mt7981_spare_sizes),
2213 + .sample_delay = 40
2215 + [SNAND_SOC_MT7986] = {
2216 + .sector_size = 1024,
2217 + .max_sectors = 16,
2219 + .fdm_ecc_size = 1,
2222 + .empty_page_check = true,
2223 + .mastersta_mask = NFI_MASTERSTA_MASK_7986,
2224 + .spare_sizes = mt7986_spare_sizes,
2225 + .num_spare_size = ARRAY_SIZE(mt7986_spare_sizes),
2227 +		.sample_delay = 40
2228 +	},
2229 +};
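Everything below derives the per-chip geometry from this table. As an illustrative example only, a hypothetical 2048+64 flash on MT7622 (512-byte sectors) works out to:

	uint32_t ecc_steps = 2048 / 512;		   /* 4 sectors per page */
	uint32_t spare_per_sector = 64 / ecc_steps;	   /* 16 bytes/sector */
	uint32_t raw_sector_size = 512 + spare_per_sector; /* 528 bytes on flash */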
2231 +static inline uint32_t nfi_read32(struct mtk_snand *snf, uint32_t reg)
2233 + return readl(snf->nfi_base + reg);
2236 +static inline void nfi_write32(struct mtk_snand *snf, uint32_t reg,
2239 + writel(val, snf->nfi_base + reg);
2242 +static inline void nfi_write16(struct mtk_snand *snf, uint32_t reg,
2245 + writew(val, snf->nfi_base + reg);
2248 +static inline void nfi_rmw32(struct mtk_snand *snf, uint32_t reg, uint32_t clr,
2253 + val = readl(snf->nfi_base + reg);
2256 + writel(val, snf->nfi_base + reg);
2259 +static void nfi_write_data(struct mtk_snand *snf, uint32_t reg,
2260 + const uint8_t *data, uint32_t len)
2262 + uint32_t i, val = 0, es = sizeof(uint32_t);
2264 + for (i = reg; i < reg + len; i++) {
2265 + val |= ((uint32_t)*data++) << (8 * (i % es));
2267 + if (i % es == es - 1 || i == reg + len - 1) {
2268 + nfi_write32(snf, i & ~(es - 1), val);
2274 +static void nfi_read_data(struct mtk_snand *snf, uint32_t reg, uint8_t *data,
2277 + uint32_t i, val = 0, es = sizeof(uint32_t);
2279 + for (i = reg; i < reg + len; i++) {
2280 + if (i == reg || i % es == 0)
2281 + val = nfi_read32(snf, i & ~(es - 1));
2283 +		*data++ = (uint8_t)(val >> (8 * (i % es)));
2284 +	}
2285 +}
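nfi_write_data()/nfi_read_data() exist because the GPRAM window is accessed as 32-bit words: they pack and unpack an arbitrary byte stream, flushing a word whenever it fills or the data ends. For instance, staging the hypothetical 3-byte sequence {0x1f, 0xb0, 0x10} at SNF_GPRAM results in a single register write:

	static const uint8_t seq[3] = { 0x1f, 0xb0, 0x10 };

	nfi_write_data(snf, SNF_GPRAM, seq, sizeof(seq));
	/* little-endian packing: nfi_write32(snf, SNF_GPRAM, 0x0010b01f) */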
2287 +static inline void do_bm_swap(uint8_t *bm1, uint8_t *bm2)
2289 + uint8_t tmp = *bm1;
2294 +static void mtk_snand_bm_swap_raw(struct mtk_snand *snf)
2296 + uint32_t fdm_bbm_pos;
2298 + if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
2301 + fdm_bbm_pos = (snf->ecc_steps - 1) * snf->raw_sector_size +
2302 + snf->nfi_soc->sector_size;
2303 + do_bm_swap(&snf->page_cache[fdm_bbm_pos],
2304 + &snf->page_cache[snf->writesize]);
2307 +static void mtk_snand_bm_swap(struct mtk_snand *snf)
2309 + uint32_t buf_bbm_pos, fdm_bbm_pos;
2311 + if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
2314 + buf_bbm_pos = snf->writesize -
2315 + (snf->ecc_steps - 1) * snf->spare_per_sector;
2316 + fdm_bbm_pos = snf->writesize +
2317 + (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
2318 + do_bm_swap(&snf->page_cache[fdm_bbm_pos],
2319 + &snf->page_cache[buf_bbm_pos]);
2322 +static void mtk_snand_fdm_bm_swap_raw(struct mtk_snand *snf)
2324 + uint32_t fdm_bbm_pos1, fdm_bbm_pos2;
2326 + if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
2329 + fdm_bbm_pos1 = snf->nfi_soc->sector_size;
2330 + fdm_bbm_pos2 = (snf->ecc_steps - 1) * snf->raw_sector_size +
2331 + snf->nfi_soc->sector_size;
2332 + do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
2333 + &snf->page_cache[fdm_bbm_pos2]);
2336 +static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
2338 + uint32_t fdm_bbm_pos1, fdm_bbm_pos2;
2340 + if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
2343 + fdm_bbm_pos1 = snf->writesize;
2344 + fdm_bbm_pos2 = snf->writesize +
2345 + (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
2346 + do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
2347 +		   &snf->page_cache[fdm_bbm_pos2]);
2348 +}
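The swap helpers above exist because the factory bad-block marker (BBM) sits in the first OOB byte of the physical page, which the controller's sector-interleaved layout buries inside the last sector's FDM area. An illustrative position calculation, assuming a hypothetical 2048+64 page with 4 sectors and raw_sector_size = 528:

	/* mtk_snand_bm_swap_raw(): the FDM byte of the last raw sector... */
	uint32_t fdm_bbm_pos = (4 - 1) * 528 + 512;	/* = 2096 */
	/* ...is exchanged with the byte at snf->writesize (= 2048) */
	do_bm_swap(&snf->page_cache[fdm_bbm_pos], &snf->page_cache[2048]);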
2350 +static int mtk_nfi_reset(struct mtk_snand *snf)
2352 + uint32_t val, fifo_mask;
2355 + nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);
2357 + ret = read16_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
2358 + !(val & snf->nfi_soc->mastersta_mask), 0,
2359 + SNFI_POLL_INTERVAL);
2361 + snand_log_nfi(snf->pdev,
2362 + "NFI master is still busy after reset\n");
2366 + ret = read32_poll_timeout(snf->nfi_base + NFI_STA, val,
2367 + !(val & (NFI_FSM | NFI_NAND_FSM)), 0,
2368 + SNFI_POLL_INTERVAL);
2370 + snand_log_nfi(snf->pdev, "Failed to reset NFI\n");
2374 + fifo_mask = ((snf->nfi_soc->fifo_size - 1) << FIFO_RD_REMAIN_S) |
2375 + ((snf->nfi_soc->fifo_size - 1) << FIFO_WR_REMAIN_S);
2376 + ret = read16_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
2377 + !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
2379 + snand_log_nfi(snf->pdev, "NFI FIFOs are not empty\n");
2386 +static int mtk_snand_mac_reset(struct mtk_snand *snf)
2391 + nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);
2393 + ret = read32_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
2394 + !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
2396 + snand_log_snfi(snf->pdev, "Failed to reset SNFI MAC\n");
2398 + nfi_write32(snf, SNF_MISC_CTL, (2 << FIFO_RD_LTC_S) |
2399 + (10 << CS_DESELECT_CYC_S) | (snf->nfi_soc->latch_lat << LATCH_LAT_S));
2404 +static int mtk_snand_mac_trigger(struct mtk_snand *snf, uint32_t outlen,
2410 + nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
2411 + nfi_write32(snf, SNF_MAC_OUTL, outlen);
2412 + nfi_write32(snf, SNF_MAC_INL, inlen);
2414 + nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);
2416 + ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
2417 + val & WIP_READY, 0, SNFI_POLL_INTERVAL);
2419 + snand_log_snfi(snf->pdev, "Timed out waiting for WIP_READY\n");
2423 + ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
2424 + !(val & WIP), 0, SNFI_POLL_INTERVAL);
2426 + snand_log_snfi(snf->pdev,
2427 + "Timed out waiting for WIP cleared\n");
2431 + nfi_write32(snf, SNF_MAC_CTL, 0);
2436 +int mtk_snand_mac_io(struct mtk_snand *snf, const uint8_t *out, uint32_t outlen,
2437 + uint8_t *in, uint32_t inlen)
2441 + if (outlen + inlen > SNF_GPRAM_SIZE)
2444 + mtk_snand_mac_reset(snf);
2446 + nfi_write_data(snf, SNF_GPRAM, out, outlen);
2448 + ret = mtk_snand_mac_trigger(snf, outlen, inlen);
2455 + nfi_read_data(snf, SNF_GPRAM + outlen, in, inlen);
2460 +static int mtk_snand_get_feature(struct mtk_snand *snf, uint32_t addr)
2462 + uint8_t op[2], val;
2465 + op[0] = SNAND_CMD_GET_FEATURE;
2466 + op[1] = (uint8_t)addr;
2468 + ret = mtk_snand_mac_io(snf, op, sizeof(op), &val, 1);
2475 +int mtk_snand_set_feature(struct mtk_snand *snf, uint32_t addr, uint32_t val)
2479 + op[0] = SNAND_CMD_SET_FEATURE;
2480 + op[1] = (uint8_t)addr;
2481 + op[2] = (uint8_t)val;
2483 + return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
2486 +static int mtk_snand_poll_status(struct mtk_snand *snf, uint32_t wait_us)
2489 + mtk_snand_time_t time_start, tmo;
2491 + time_start = timer_get_ticks();
2492 + tmo = timer_time_to_tick(wait_us);
2495 + val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
2496 + if (!(val & SNAND_STATUS_OIP))
2497 + return val & (SNAND_STATUS_ERASE_FAIL |
2498 + SNAND_STATUS_PROGRAM_FAIL);
2499 + } while (!timer_is_timeout(time_start, tmo));
2501 +	return -ETIMEDOUT;
2502 +}
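mtk_snand_poll_status() is the common wait primitive after RESET, PROGRAM EXECUTE and BLOCK ERASE: it spins on the status feature register until the OIP bit clears, then returns the fail bits, or -ETIMEDOUT. A caller-side sketch (the error handling shown is illustrative, not lifted from this driver):

	int st = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);

	if (st < 0)
		return st;			/* chip never cleared OIP */
	if (st & SNAND_STATUS_ERASE_FAIL)
		return -EIO;			/* operation failed */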
2504 +int mtk_snand_chip_reset(struct mtk_snand *snf)
2506 + uint8_t op = SNAND_CMD_RESET;
2509 + ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
2513 + ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
2520 +static int mtk_snand_config_feature(struct mtk_snand *snf, uint8_t clr,
2526 + val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
2528 + snand_log_chip(snf->pdev,
2529 + "Failed to get configuration feature\n");
2533 + newval = (val & (~clr)) | set;
2535 + if (newval == val)
2538 + ret = mtk_snand_set_feature(snf, SNAND_FEATURE_CONFIG_ADDR,
2541 + snand_log_chip(snf->pdev,
2542 + "Failed to set configuration feature\n");
2546 + val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
2548 + snand_log_chip(snf->pdev,
2549 + "Failed to get configuration feature\n");
2553 + if (newval != val)
2559 +static int mtk_snand_ondie_ecc_control(struct mtk_snand *snf, bool enable)
2564 + ret = mtk_snand_config_feature(snf, 0, SNAND_FEATURE_ECC_EN);
2566 + ret = mtk_snand_config_feature(snf, SNAND_FEATURE_ECC_EN, 0);
2569 + snand_log_chip(snf->pdev, "Failed to %s On-Die ECC engine\n",
2570 + enable ? "enable" : "disable");
2576 +static int mtk_snand_qspi_control(struct mtk_snand *snf, bool enable)
2581 + ret = mtk_snand_config_feature(snf, 0,
2582 + SNAND_FEATURE_QUAD_ENABLE);
2584 + ret = mtk_snand_config_feature(snf,
2585 + SNAND_FEATURE_QUAD_ENABLE, 0);
2589 + snand_log_chip(snf->pdev, "Failed to %s quad spi\n",
2590 + enable ? "enable" : "disable");
2596 +static int mtk_snand_unlock(struct mtk_snand *snf)
2600 + ret = mtk_snand_set_feature(snf, SNAND_FEATURE_PROTECT_ADDR, 0);
2602 + snand_log_chip(snf->pdev, "Failed to set protection feature\n");
2609 +static int mtk_snand_write_enable(struct mtk_snand *snf)
2611 + uint8_t op = SNAND_CMD_WRITE_ENABLE;
2614 + ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
2618 + val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
2622 + if (val & SNAND_STATUS_WEL)
2625 + snand_log_chip(snf->pdev, "Failed to send write-enable command\n");
2630 +static int mtk_snand_select_die(struct mtk_snand *snf, uint32_t dieidx)
2632 + if (!snf->select_die)
2635 + return snf->select_die(snf, dieidx);
2638 +static uint64_t mtk_snand_select_die_address(struct mtk_snand *snf,
2643 + if (!snf->select_die)
2646 + dieidx = addr >> snf->die_shift;
2648 + mtk_snand_select_die(snf, dieidx);
2650 + return addr & snf->die_mask;
2653 +static uint32_t mtk_snand_get_plane_address(struct mtk_snand *snf,
2656 + uint32_t pages_per_block;
2658 + pages_per_block = 1 << (snf->erasesize_shift - snf->writesize_shift);
2660 + if (page & pages_per_block)
2661 + return 1 << (snf->writesize_shift + 1);
2666 +static int mtk_snand_page_op(struct mtk_snand *snf, uint32_t page, uint8_t cmd)
2671 + op[1] = (page >> 16) & 0xff;
2672 + op[2] = (page >> 8) & 0xff;
2673 + op[3] = page & 0xff;
2675 + return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
2678 +static void mtk_snand_read_fdm(struct mtk_snand *snf, uint8_t *buf)
2680 + uint32_t vall, valm;
2681 + uint8_t *oobptr = buf;
2684 + for (i = 0; i < snf->ecc_steps; i++) {
2685 + vall = nfi_read32(snf, NFI_FDML(i));
2686 + valm = nfi_read32(snf, NFI_FDMM(i));
2688 + for (j = 0; j < snf->nfi_soc->fdm_size; j++)
2689 + oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
2691 + oobptr += snf->nfi_soc->fdm_size;
2695 +static int mtk_snand_read_ecc_parity(struct mtk_snand *snf, uint32_t page,
2696 + uint32_t sect, uint8_t *oob)
2698 + uint32_t ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
2699 + uint32_t coladdr, raw_offs, offs;
2702 + if (sizeof(op) + ecc_bytes > SNF_GPRAM_SIZE) {
2703 + snand_log_snfi(snf->pdev,
2704 + "ECC parity size does not fit the GPRAM\n");
2708 + raw_offs = sect * snf->raw_sector_size + snf->nfi_soc->sector_size +
2709 + snf->nfi_soc->fdm_size;
2710 + offs = snf->ecc_steps * snf->nfi_soc->fdm_size + sect * ecc_bytes;
2712 + /* Column address with plane bit */
2713 + coladdr = raw_offs | mtk_snand_get_plane_address(snf, page);
2715 + op[0] = SNAND_CMD_READ_FROM_CACHE;
2716 + op[1] = (coladdr >> 8) & 0xff;
2717 + op[2] = coladdr & 0xff;
2720 +	return mtk_snand_mac_io(snf, op, sizeof(op), oob + offs, ecc_bytes);
2721 +}
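The two offsets computed above address different layouts: raw_offs is the on-flash position of a sector's parity bytes, while offs is the position in the repacked OOB buffer (all FDM bytes first, then all parity bytes). An illustrative evaluation for sect = 2, assuming 512-byte sectors, 8-byte FDM, 16-byte spare and 4 ECC steps:

	uint32_t ecc_bytes = 16 - 8;		/* 8 parity bytes per sector */
	uint32_t raw_offs = 2 * 528 + 512 + 8;	/* = 1576, on-flash offset */
	uint32_t offs = 4 * 8 + 2 * ecc_bytes;	/* = 48, offset inside oob */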
2723 +static int mtk_snand_check_ecc_result(struct mtk_snand *snf, uint32_t page)
2725 + uint8_t *oob = snf->page_cache + snf->writesize;
2726 + int i, rc, ret = 0, max_bitflips = 0;
2728 + for (i = 0; i < snf->ecc_steps; i++) {
2729 + if (snf->sect_bf[i] >= 0) {
2730 + if (snf->sect_bf[i] > max_bitflips)
2731 + max_bitflips = snf->sect_bf[i];
2735 + rc = mtk_snand_read_ecc_parity(snf, page, i, oob);
2739 + rc = mtk_ecc_fixup_empty_sector(snf, i);
2743 + snand_log_ecc(snf->pdev,
2744 + "Uncorrectable bitflips in page %u sect %u\n",
2747 + snf->sect_bf[i] = rc;
2749 + if (snf->sect_bf[i] > max_bitflips)
2750 + max_bitflips = snf->sect_bf[i];
2752 + snand_log_ecc(snf->pdev,
2753 + "%u bitflip%s corrected in page %u sect %u\n",
2754 + rc, rc > 1 ? "s" : "", page, i);
2756 + snf->sect_bf[i] = 0;
2760 + return ret ? ret : max_bitflips;
2763 +static int mtk_snand_read_cache(struct mtk_snand *snf, uint32_t page, bool raw)
2765 + uint32_t coladdr, rwbytes, mode, len, val;
2766 + uintptr_t dma_addr;
2769 + /* Column address with plane bit */
2770 + coladdr = mtk_snand_get_plane_address(snf, page);
2772 + mtk_snand_mac_reset(snf);
2773 + mtk_nfi_reset(snf);
2775 + /* Command and dummy cycles */
2776 + nfi_write32(snf, SNF_RD_CTL2,
2777 + ((uint32_t)snf->dummy_rfc << DATA_READ_DUMMY_S) |
2778 + (snf->opcode_rfc << DATA_READ_CMD_S));
2780 + /* Column address */
2781 + nfi_write32(snf, SNF_RD_CTL3, coladdr);
2783 + /* Set read mode */
2784 + mode = (uint32_t)snf->mode_rfc << DATA_READ_MODE_S;
2785 + nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE,
2786 + mode | DATARD_CUSTOM_EN | (snf->nfi_soc->latch_lat << LATCH_LAT_S));
2788 + /* Set bytes to read */
2789 + rwbytes = snf->ecc_steps * snf->raw_sector_size;
2790 + nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
2793 + /* NFI read prepare */
2794 + mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
2795 + nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) |
2796 + CNFG_DMA_BURST_EN | CNFG_READ_MODE | CNFG_DMA_MODE | mode);
2798 + nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));
2800 + /* Prepare for DMA read */
2801 + len = snf->writesize + snf->oobsize;
2802 + ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, false);
2804 + snand_log_nfi(snf->pdev,
2805 + "DMA map from device failed with %d\n", ret);
2809 + nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);
2812 + mtk_snand_ecc_decoder_start(snf);
2814 + /* Prepare for custom read interrupt */
2815 + nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
2816 + irq_completion_init(snf->pdev);
2818 + /* Trigger NFI into custom mode */
2819 + nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);
2821 + /* Start DMA read */
2822 + nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
2823 + nfi_write16(snf, NFI_STRDATA, STR_DATA);
2825 + /* Wait for operation finished */
2826 + ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
2827 + CUS_READ_DONE, SNFI_POLL_INTERVAL);
2829 + snand_log_nfi(snf->pdev,
2830 + "DMA timed out for reading from cache\n");
2834 +	/* Wait for BUS_SEC_CNTR to return the expected value */
2835 + ret = read32_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
2836 + BUS_SEC_CNTR(val) >= snf->ecc_steps,
2837 + 0, SNFI_POLL_INTERVAL);
2839 + snand_log_nfi(snf->pdev,
2840 + "Timed out waiting for BUS_SEC_CNTR\n");
2844 + /* Wait for bus becoming idle */
2845 + ret = read32_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
2846 + !(val & snf->nfi_soc->mastersta_mask),
2847 + 0, SNFI_POLL_INTERVAL);
2849 + snand_log_nfi(snf->pdev,
2850 + "Timed out waiting for bus becoming idle\n");
2855 + ret = mtk_ecc_wait_decoder_done(snf);
2859 + mtk_snand_read_fdm(snf, snf->page_cache + snf->writesize);
2861 + mtk_ecc_check_decode_error(snf);
2862 + mtk_snand_ecc_decoder_stop(snf);
2864 + ret = mtk_snand_check_ecc_result(snf, page);
2869 + dma_mem_unmap(snf->pdev, dma_addr, len, false);
2872 + nfi_write32(snf, NFI_CON, 0);
2873 + nfi_write16(snf, NFI_CNFG, 0);
2875 + /* Clear SNF done flag */
2876 + nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
2877 + nfi_write32(snf, SNF_STA_CTL1, 0);
2879 + /* Disable interrupt */
2880 + nfi_read32(snf, NFI_INTR_STA);
2881 + nfi_write32(snf, NFI_INTR_EN, 0);
2883 + nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN | LATCH_LAT, 0);
2888 +static void mtk_snand_from_raw_page(struct mtk_snand *snf, void *buf, void *oob)
2890 + uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
2891 + uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
2892 + uint8_t *bufptr = buf, *oobptr = oob, *raw_sector;
2894 + for (i = 0; i < snf->ecc_steps; i++) {
2895 + raw_sector = snf->page_cache + i * snf->raw_sector_size;
2898 + memcpy(bufptr, raw_sector, snf->nfi_soc->sector_size);
2899 + bufptr += snf->nfi_soc->sector_size;
2902 + raw_sector += snf->nfi_soc->sector_size;
2905 + memcpy(oobptr, raw_sector, snf->nfi_soc->fdm_size);
2906 + oobptr += snf->nfi_soc->fdm_size;
2907 + raw_sector += snf->nfi_soc->fdm_size;
2909 + memcpy(eccptr, raw_sector, ecc_bytes);
2910 + eccptr += ecc_bytes;
2915 +static int mtk_snand_do_read_page(struct mtk_snand *snf, uint64_t addr,
2916 + void *buf, void *oob, bool raw, bool format)
2918 + uint64_t die_addr;
2919 + uint32_t page, dly_ctrl3;
2920 + int ret, retry_cnt = 0;
2922 + die_addr = mtk_snand_select_die_address(snf, addr);
2923 + page = die_addr >> snf->writesize_shift;
2925 + dly_ctrl3 = nfi_read32(snf, SNF_DLY_CTL3);
2927 + ret = mtk_snand_page_op(snf, page, SNAND_CMD_READ_TO_CACHE);
2931 + ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
2933 + snand_log_chip(snf->pdev, "Read to cache command timed out\n");
2938 + ret = mtk_snand_read_cache(snf, page, raw);
2939 + if (ret < 0 && ret != -EBADMSG)
2942 + if (ret == -EBADMSG && retry_cnt < 16) {
2943 + nfi_write32(snf, SNF_DLY_CTL3, retry_cnt * 2);
2949 +	if (ret == -EBADMSG) {
2950 + nfi_write32(snf, SNF_DLY_CTL3, dly_ctrl3);
2951 + snand_log_chip(snf->pdev,
2952 + "NFI calibration failed. Original sample delay: 0x%x\n",
2955 + snand_log_chip(snf->pdev,
2956 + "NFI calibration passed. New sample delay: 0x%x\n",
2957 + nfi_read32(snf, SNF_DLY_CTL3));
2963 + mtk_snand_bm_swap_raw(snf);
2964 + mtk_snand_fdm_bm_swap_raw(snf);
2965 + mtk_snand_from_raw_page(snf, buf, oob);
2968 + memcpy(buf, snf->page_cache, snf->writesize);
2971 + memset(oob, 0xff, snf->oobsize);
2972 + memcpy(oob, snf->page_cache + snf->writesize,
2973 + snf->ecc_steps * snf->spare_per_sector);
2977 + mtk_snand_bm_swap(snf);
2978 + mtk_snand_fdm_bm_swap(snf);
2981 + memcpy(buf, snf->page_cache, snf->writesize);
2984 + memset(oob, 0xff, snf->oobsize);
2985 + memcpy(oob, snf->page_cache + snf->writesize,
2986 + snf->ecc_steps * snf->nfi_soc->fdm_size);
2993 +int mtk_snand_read_page(struct mtk_snand *snf, uint64_t addr, void *buf,
2994 + void *oob, bool raw)
2996 + if (!snf || (!buf && !oob))
2999 + if (addr >= snf->size)
3002 +	return mtk_snand_do_read_page(snf, addr, buf, oob, raw, true);
3003 +}
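The -EBADMSG handling inside mtk_snand_do_read_page() doubles as read-timing calibration: each retry bumps the SFCK sample delay in SNF_DLY_CTL3 by 2 until the page decodes or 16 settings have been tried, and the original delay is restored only on failure. Restated as a plain loop (an equivalent sketch of the goto-based flow above, using the same local variables):

	for (retry_cnt = 0; retry_cnt < 16; retry_cnt++) {
		nfi_write32(snf, SNF_DLY_CTL3, retry_cnt * 2);
		if (mtk_snand_read_cache(snf, page, raw) != -EBADMSG)
			break;		/* found a delay that decodes cleanly */
	}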
3005 +static void mtk_snand_write_fdm(struct mtk_snand *snf, const uint8_t *buf)
3007 + uint32_t vall, valm, fdm_size = snf->nfi_soc->fdm_size;
3008 + const uint8_t *oobptr = buf;
3011 + for (i = 0; i < snf->ecc_steps; i++) {
3015 + for (j = 0; j < 8; j++) {
3017 + vall |= (j < fdm_size ? oobptr[j] : 0xff)
3020 + valm |= (j < fdm_size ? oobptr[j] : 0xff)
3024 + nfi_write32(snf, NFI_FDML(i), vall);
3025 + nfi_write32(snf, NFI_FDMM(i), valm);
3027 + oobptr += fdm_size;
3031 +static int mtk_snand_program_load(struct mtk_snand *snf, uint32_t page,
3034 + uint32_t coladdr, rwbytes, mode, len, val;
3035 + uintptr_t dma_addr;
3038 + /* Column address with plane bit */
3039 + coladdr = mtk_snand_get_plane_address(snf, page);
3041 + mtk_snand_mac_reset(snf);
3042 + mtk_nfi_reset(snf);
3044 + /* Write FDM registers if necessary */
3046 + mtk_snand_write_fdm(snf, snf->page_cache + snf->writesize);
3049 + nfi_write32(snf, SNF_PG_CTL1, (snf->opcode_pl << PG_LOAD_CMD_S));
3051 + /* Column address */
3052 + nfi_write32(snf, SNF_PG_CTL2, coladdr);
3054 + /* Set write mode */
3055 + mode = snf->mode_pl ? PG_LOAD_X4_EN : 0;
3056 + nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN, mode | PG_LOAD_CUSTOM_EN);
3058 + /* Set bytes to write */
3059 + rwbytes = snf->ecc_steps * snf->raw_sector_size;
3060 + nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
3063 + /* NFI write prepare */
3064 + mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
3065 + nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
3066 + CNFG_DMA_BURST_EN | CNFG_DMA_MODE | mode);
3068 + nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));
3070 + /* Prepare for DMA write */
3071 + len = snf->writesize + snf->oobsize;
3072 + ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, true);
3074 + snand_log_nfi(snf->pdev,
3075 + "DMA map to device failed with %d\n", ret);
3079 + nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);
3082 + mtk_snand_ecc_encoder_start(snf);
3084 + /* Prepare for custom write interrupt */
3085 + nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
3086 + irq_completion_init(snf->pdev);
3088 + /* Trigger NFI into custom mode */
3089 + nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);
3091 + /* Start DMA write */
3092 + nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
3093 + nfi_write16(snf, NFI_STRDATA, STR_DATA);
3095 + /* Wait for operation finished */
3096 + ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
3097 + CUS_PG_DONE, SNFI_POLL_INTERVAL);
3099 + snand_log_nfi(snf->pdev,
3100 + "DMA timed out for program load\n");
3104 +	/* Wait for NFI_SEC_CNTR to return the expected value */
3105 + ret = read32_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
3106 + NFI_SEC_CNTR(val) >= snf->ecc_steps,
3107 + 0, SNFI_POLL_INTERVAL);
3109 + snand_log_nfi(snf->pdev,
3110 + "Timed out waiting for BUS_SEC_CNTR\n");
3115 + mtk_snand_ecc_encoder_stop(snf);
3119 + dma_mem_unmap(snf->pdev, dma_addr, len, true);
3122 + nfi_write32(snf, NFI_CON, 0);
3123 + nfi_write16(snf, NFI_CNFG, 0);
3125 + /* Clear SNF done flag */
3126 + nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
3127 + nfi_write32(snf, SNF_STA_CTL1, 0);
3129 + /* Disable interrupt */
3130 + nfi_read32(snf, NFI_INTR_STA);
3131 + nfi_write32(snf, NFI_INTR_EN, 0);
3133 + nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);
3138 +static void mtk_snand_to_raw_page(struct mtk_snand *snf,
3139 + const void *buf, const void *oob,
3142 + uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
3143 + const uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
3144 + const uint8_t *bufptr = buf, *oobptr = oob;
3145 + uint8_t *raw_sector;
3147 + memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
3148 + for (i = 0; i < snf->ecc_steps; i++) {
3149 + raw_sector = snf->page_cache + i * snf->raw_sector_size;
3152 + memcpy(raw_sector, bufptr, snf->nfi_soc->sector_size);
3153 + bufptr += snf->nfi_soc->sector_size;
3156 + raw_sector += snf->nfi_soc->sector_size;
3159 + memcpy(raw_sector, oobptr, snf->nfi_soc->fdm_size);
3160 + oobptr += snf->nfi_soc->fdm_size;
3161 + raw_sector += snf->nfi_soc->fdm_size;
3164 + memset(raw_sector, 0xff, ecc_bytes);
3166 + memcpy(raw_sector, eccptr, ecc_bytes);
3167 + eccptr += ecc_bytes;
3172 +static bool mtk_snand_is_empty_page(struct mtk_snand *snf, const void *buf,
3175 + const uint8_t *p = buf;
3179 + for (i = 0; i < snf->writesize; i++) {
3186 + for (j = 0; j < snf->ecc_steps; j++) {
3187 + p = oob + j * snf->nfi_soc->fdm_size;
3189 + for (i = 0; i < snf->nfi_soc->fdm_ecc_size; i++) {
3199 +static int mtk_snand_do_write_page(struct mtk_snand *snf, uint64_t addr,
3200 + const void *buf, const void *oob,
3201 + bool raw, bool format)
3203 + uint64_t die_addr;
3204 + bool empty_ecc = false;
3208 + die_addr = mtk_snand_select_die_address(snf, addr);
3209 + page = die_addr >> snf->writesize_shift;
3211 + if (!raw && mtk_snand_is_empty_page(snf, buf, oob)) {
3213 +	 * If the page data to be ECC-protected is entirely 0xff,
3214 +	 * switch to raw write mode
3219 + /* fill ecc parity code region with 0xff */
3225 + mtk_snand_to_raw_page(snf, buf, oob, empty_ecc);
3226 + mtk_snand_fdm_bm_swap_raw(snf);
3227 + mtk_snand_bm_swap_raw(snf);
3229 + memset(snf->page_cache, 0xff,
3230 + snf->writesize + snf->oobsize);
3233 + memcpy(snf->page_cache, buf, snf->writesize);
3236 + memcpy(snf->page_cache + snf->writesize, oob,
3237 + snf->ecc_steps * snf->spare_per_sector);
3241 + memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
3243 + memcpy(snf->page_cache, buf, snf->writesize);
3246 + memcpy(snf->page_cache + snf->writesize, oob,
3247 + snf->ecc_steps * snf->nfi_soc->fdm_size);
3250 + mtk_snand_fdm_bm_swap(snf);
3251 + mtk_snand_bm_swap(snf);
3254 + ret = mtk_snand_write_enable(snf);
3258 + ret = mtk_snand_program_load(snf, page, raw);
3262 + ret = mtk_snand_page_op(snf, page, SNAND_CMD_PROGRAM_EXECUTE);
3266 + ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
3268 + snand_log_chip(snf->pdev,
3269 + "Page program command timed out on page %u\n",
3274 + if (ret & SNAND_STATUS_PROGRAM_FAIL) {
3275 + snand_log_chip(snf->pdev,
3276 + "Page program failed on page %u\n", page);
3283 +int mtk_snand_write_page(struct mtk_snand *snf, uint64_t addr, const void *buf,
3284 + const void *oob, bool raw)
3286 + if (!snf || (!buf && !oob))
3289 + if (addr >= snf->size)
3292 + return mtk_snand_do_write_page(snf, addr, buf, oob, raw, true);
3295 +int mtk_snand_erase_block(struct mtk_snand *snf, uint64_t addr)
3297 + uint64_t die_addr;
3298 + uint32_t page, block;
3304 + if (addr >= snf->size)
3307 + die_addr = mtk_snand_select_die_address(snf, addr);
3308 + block = die_addr >> snf->erasesize_shift;
3309 + page = block << (snf->erasesize_shift - snf->writesize_shift);
3311 + ret = mtk_snand_write_enable(snf);
3315 + ret = mtk_snand_page_op(snf, page, SNAND_CMD_BLOCK_ERASE);
3319 + ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
3321 + snand_log_chip(snf->pdev,
3322 + "Block erase command timed out on block %u\n",
3327 + if (ret & SNAND_STATUS_ERASE_FAIL) {
3328 + snand_log_chip(snf->pdev,
3329 + "Block erase failed on block %u\n", block);
3336 +static int mtk_snand_block_isbad_std(struct mtk_snand *snf, uint64_t addr)
3340 + ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
3342 + if (ret && ret != -EBADMSG)
3345 + return snf->buf_cache[0] != 0xff;
3348 +static int mtk_snand_block_isbad_mtk(struct mtk_snand *snf, uint64_t addr)
3352 + ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
3354 + if (ret && ret != -EBADMSG)
3357 + return snf->buf_cache[0] != 0xff;
3360 +int mtk_snand_block_isbad(struct mtk_snand *snf, uint64_t addr)
3365 + if (addr >= snf->size)
3368 + addr &= ~snf->erasesize_mask;
3370 + if (snf->nfi_soc->bbm_swap)
3371 + return mtk_snand_block_isbad_std(snf, addr);
3373 + return mtk_snand_block_isbad_mtk(snf, addr);
3376 +static int mtk_snand_block_markbad_std(struct mtk_snand *snf, uint64_t addr)
3378 + /* Standard BBM position */
3379 + memset(snf->buf_cache, 0xff, snf->oobsize);
3380 + snf->buf_cache[0] = 0;
3382 + return mtk_snand_do_write_page(snf, addr, NULL, snf->buf_cache, true,
3386 +static int mtk_snand_block_markbad_mtk(struct mtk_snand *snf, uint64_t addr)
3388 + /* Write the whole page with zeros */
3389 + memset(snf->buf_cache, 0, snf->writesize + snf->oobsize);
3391 + return mtk_snand_do_write_page(snf, addr, snf->buf_cache,
3392 + snf->buf_cache + snf->writesize, true,
3396 +int mtk_snand_block_markbad(struct mtk_snand *snf, uint64_t addr)
3401 + if (addr >= snf->size)
3404 + addr &= ~snf->erasesize_mask;
3406 + if (snf->nfi_soc->bbm_swap)
3407 + return mtk_snand_block_markbad_std(snf, addr);
3409 + return mtk_snand_block_markbad_mtk(snf, addr);
3412 +int mtk_snand_fill_oob(struct mtk_snand *snf, uint8_t *oobraw,
3413 + const uint8_t *oobbuf, size_t ooblen)
3415 + size_t len = ooblen, sect_fdm_len;
3416 + const uint8_t *oob = oobbuf;
3417 + uint32_t step = 0;
3419 + if (!snf || !oobraw || !oob)
3422 + while (len && step < snf->ecc_steps) {
3423 + sect_fdm_len = snf->nfi_soc->fdm_size - 1;
3424 + if (sect_fdm_len > len)
3425 + sect_fdm_len = len;
3427 + memcpy(oobraw + step * snf->nfi_soc->fdm_size + 1, oob,
3430 + len -= sect_fdm_len;
3431 + oob += sect_fdm_len;
3438 +int mtk_snand_transfer_oob(struct mtk_snand *snf, uint8_t *oobbuf,
3439 + size_t ooblen, const uint8_t *oobraw)
3441 + size_t len = ooblen, sect_fdm_len;
3442 + uint8_t *oob = oobbuf;
3443 + uint32_t step = 0;
3445 + if (!snf || !oobraw || !oob)
3448 + while (len && step < snf->ecc_steps) {
3449 + sect_fdm_len = snf->nfi_soc->fdm_size - 1;
3450 + if (sect_fdm_len > len)
3451 + sect_fdm_len = len;
3453 +		memcpy(oob, oobraw + step * snf->nfi_soc->fdm_size + 1,
3454 +		       sect_fdm_len);
3455 +
3456 +		len -= sect_fdm_len;
3457 +		oob += sect_fdm_len;
3458 +		step++;
3459 +	}
3460 +
3461 +	return len;
3462 +}
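Both helpers honour the FDM layout: byte 0 of each per-sector FDM area is reserved for the bad-block marker, so only fdm_size - 1 bytes per sector carry user OOB data. An illustrative round trip, assuming fdm_size = 8 and 4 sectors (28 usable bytes):

	uint8_t oobraw[4 * 8];			/* raw per-sector FDM areas */
	uint8_t user_oob[4 * (8 - 1)];		/* linear user OOB view */

	memset(oobraw, 0xff, sizeof(oobraw));	/* keeps the BBM bytes 0xff */
	mtk_snand_fill_oob(snf, oobraw, user_oob, sizeof(user_oob));
	mtk_snand_transfer_oob(snf, user_oob, sizeof(user_oob), oobraw);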
3464 +int mtk_snand_read_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
3465 + void *buf, void *oob, size_t ooblen,
3466 + size_t *actualooblen, bool raw)
3468 + int ret, oobremain;
3474 + return mtk_snand_read_page(snf, addr, buf, NULL, raw);
3476 + ret = mtk_snand_read_page(snf, addr, buf, snf->buf_cache, raw);
3477 + if (ret && ret != -EBADMSG) {
3479 + *actualooblen = 0;
3483 + oobremain = mtk_snand_transfer_oob(snf, oob, ooblen, snf->buf_cache);
3485 + *actualooblen = ooblen - oobremain;
3490 +int mtk_snand_write_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
3491 + const void *buf, const void *oob,
3492 + size_t ooblen, size_t *actualooblen, bool raw)
3500 + return mtk_snand_write_page(snf, addr, buf, NULL, raw);
3502 + memset(snf->buf_cache, 0xff, snf->oobsize);
3503 + oobremain = mtk_snand_fill_oob(snf, snf->buf_cache, oob, ooblen);
3505 + *actualooblen = ooblen - oobremain;
3507 + return mtk_snand_write_page(snf, addr, buf, snf->buf_cache, raw);
3510 +int mtk_snand_get_chip_info(struct mtk_snand *snf,
3511 + struct mtk_snand_chip_info *info)
3513 + if (!snf || !info)
3516 + info->model = snf->model;
3517 + info->chipsize = snf->size;
3518 + info->blocksize = snf->erasesize;
3519 + info->pagesize = snf->writesize;
3520 + info->sparesize = snf->oobsize;
3521 + info->spare_per_sector = snf->spare_per_sector;
3522 + info->fdm_size = snf->nfi_soc->fdm_size;
3523 + info->fdm_ecc_size = snf->nfi_soc->fdm_ecc_size;
3524 + info->num_sectors = snf->ecc_steps;
3525 + info->sector_size = snf->nfi_soc->sector_size;
3526 + info->ecc_strength = snf->ecc_strength;
3527 + info->ecc_bytes = snf->ecc_bytes;
3532 +int mtk_snand_irq_process(struct mtk_snand *snf)
3534 + uint32_t sta, ien;
3539 + sta = nfi_read32(snf, NFI_INTR_STA);
3540 + ien = nfi_read32(snf, NFI_INTR_EN);
3545 + nfi_write32(snf, NFI_INTR_EN, 0);
3546 + irq_completion_done(snf->pdev);
3551 +static int mtk_snand_select_spare_per_sector(struct mtk_snand *snf)
3553 + uint32_t spare_per_step = snf->oobsize / snf->ecc_steps;
3557 + * If we're using the 1KB sector size, HW will automatically
3558 + * double the spare size. So we should only use half of the value.
3560 + if (snf->nfi_soc->sector_size == 1024)
3563 + spare_per_step /= mul;
3565 + for (i = snf->nfi_soc->num_spare_size - 1; i >= 0; i--) {
3566 + if (snf->nfi_soc->spare_sizes[i] <= spare_per_step) {
3567 + snf->spare_per_sector = snf->nfi_soc->spare_sizes[i];
3568 + snf->spare_per_sector *= mul;
3573 +	snand_log_nfi(snf->pdev,
3574 +		      "Page size %u+%u is not supported\n", snf->writesize,
3575 +		      snf->oobsize);
3576 +
3577 +	return -ENOTSUPP;
3578 +}
3580 +static int mtk_snand_pagefmt_setup(struct mtk_snand *snf)
3582 +	uint32_t spare_size_shift, pagesize_idx, sector_size_512;
3583 +	int spare_size_idx;
3585 + if (snf->nfi_soc->sector_size == 512) {
3586 + sector_size_512 = NFI_SEC_SEL_512;
3587 + spare_size_shift = NFI_SPARE_SIZE_S;
3589 + sector_size_512 = 0;
3590 + spare_size_shift = NFI_SPARE_SIZE_LS_S;
3593 + switch (snf->writesize) {
3595 + pagesize_idx = NFI_PAGE_SIZE_512_2K;
3598 + if (snf->nfi_soc->sector_size == 512)
3599 + pagesize_idx = NFI_PAGE_SIZE_2K_4K;
3601 + pagesize_idx = NFI_PAGE_SIZE_512_2K;
3604 + if (snf->nfi_soc->sector_size == 512)
3605 + pagesize_idx = NFI_PAGE_SIZE_4K_8K;
3607 + pagesize_idx = NFI_PAGE_SIZE_2K_4K;
3610 + if (snf->nfi_soc->sector_size == 512)
3611 + pagesize_idx = NFI_PAGE_SIZE_8K_16K;
3613 + pagesize_idx = NFI_PAGE_SIZE_4K_8K;
3616 + pagesize_idx = NFI_PAGE_SIZE_8K_16K;
3619 + snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
3624 + spare_size_idx = mtk_snand_select_spare_per_sector(snf);
3625 + if (unlikely(spare_size_idx < 0))
3628 + snf->raw_sector_size = snf->nfi_soc->sector_size +
3629 + snf->spare_per_sector;
3631 + /* Setup page format */
3632 + nfi_write32(snf, NFI_PAGEFMT,
3633 + (snf->nfi_soc->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
3634 + (snf->nfi_soc->fdm_size << NFI_FDM_NUM_S) |
3635 + (spare_size_idx << spare_size_shift) |
3636 + (pagesize_idx << NFI_PAGE_SIZE_S) |
3642 +static enum snand_flash_io mtk_snand_select_opcode(struct mtk_snand *snf,
3643 + uint32_t snfi_caps, uint8_t *opcode,
3645 + const struct snand_io_cap *op_cap)
3649 + caps = snfi_caps & op_cap->caps;
3653 + *opcode = op_cap->opcodes[i - 1].opcode;
3655 + *dummy = op_cap->opcodes[i - 1].dummy;
3659 + return __SNAND_IO_MAX;
3662 +static int mtk_snand_select_opcode_rfc(struct mtk_snand *snf,
3663 + uint32_t snfi_caps,
3664 + const struct snand_io_cap *op_cap)
3666 + enum snand_flash_io idx;
3668 + static const uint8_t rfc_modes[__SNAND_IO_MAX] = {
3669 + [SNAND_IO_1_1_1] = DATA_READ_MODE_X1,
3670 + [SNAND_IO_1_1_2] = DATA_READ_MODE_X2,
3671 + [SNAND_IO_1_2_2] = DATA_READ_MODE_DUAL,
3672 + [SNAND_IO_1_1_4] = DATA_READ_MODE_X4,
3673 + [SNAND_IO_1_4_4] = DATA_READ_MODE_QUAD,
3676 + idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_rfc,
3677 + &snf->dummy_rfc, op_cap);
3678 + if (idx >= __SNAND_IO_MAX) {
3679 + snand_log_snfi(snf->pdev,
3680 + "No capable opcode for read from cache\n");
3684 + snf->mode_rfc = rfc_modes[idx];
3686 + if (idx == SNAND_IO_1_1_4 || idx == SNAND_IO_1_4_4)
3687 + snf->quad_spi_op = true;
3692 +static int mtk_snand_select_opcode_pl(struct mtk_snand *snf, uint32_t snfi_caps,
3693 + const struct snand_io_cap *op_cap)
3695 + enum snand_flash_io idx;
3697 + static const uint8_t pl_modes[__SNAND_IO_MAX] = {
3698 + [SNAND_IO_1_1_1] = 0,
3699 + [SNAND_IO_1_1_4] = 1,
3702 + idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_pl,
3704 + if (idx >= __SNAND_IO_MAX) {
3705 + snand_log_snfi(snf->pdev,
3706 + "No capable opcode for program load\n");
3710 + snf->mode_pl = pl_modes[idx];
3712 + if (idx == SNAND_IO_1_1_4)
3713 + snf->quad_spi_op = true;
3718 +static int mtk_snand_setup(struct mtk_snand *snf,
3719 + const struct snand_flash_info *snand_info)
3721 + const struct snand_mem_org *memorg = &snand_info->memorg;
3722 + uint32_t i, msg_size, snfi_caps;
3725 + /* Calculate flash memory organization */
3726 + snf->model = snand_info->model;
3727 + snf->writesize = memorg->pagesize;
3728 + snf->oobsize = memorg->sparesize;
3729 + snf->erasesize = snf->writesize * memorg->pages_per_block;
3730 + snf->die_size = (uint64_t)snf->erasesize * memorg->blocks_per_die;
3731 + snf->size = snf->die_size * memorg->ndies;
3732 + snf->num_dies = memorg->ndies;
3734 + snf->writesize_mask = snf->writesize - 1;
3735 + snf->erasesize_mask = snf->erasesize - 1;
3736 + snf->die_mask = snf->die_size - 1;
3738 + snf->writesize_shift = ffs(snf->writesize) - 1;
3739 + snf->erasesize_shift = ffs(snf->erasesize) - 1;
3740 + snf->die_shift = mtk_snand_ffs64(snf->die_size) - 1;
3742 + snf->select_die = snand_info->select_die;
3744 + /* Determine opcodes for read from cache/program load */
3745 + snfi_caps = SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2;
3746 + if (snf->snfi_quad_spi)
3747 + snfi_caps |= SPI_IO_1_1_4 | SPI_IO_1_4_4;
3749 + ret = mtk_snand_select_opcode_rfc(snf, snfi_caps, snand_info->cap_rd);
3753 + ret = mtk_snand_select_opcode_pl(snf, snfi_caps, snand_info->cap_pl);
3757 + /* ECC and page format */
3758 + snf->ecc_steps = snf->writesize / snf->nfi_soc->sector_size;
3759 + if (snf->ecc_steps > snf->nfi_soc->max_sectors) {
3760 + snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
3765 + ret = mtk_snand_pagefmt_setup(snf);
3769 + msg_size = snf->nfi_soc->sector_size + snf->nfi_soc->fdm_ecc_size;
3770 + ret = mtk_ecc_setup(snf, snf->nfi_base + NFI_FDM0L,
3771 + snf->spare_per_sector - snf->nfi_soc->fdm_size,
3776 + nfi_write16(snf, NFI_CNFG, 0);
3778 + /* Tuning options */
3779 + nfi_write16(snf, NFI_DEBUG_CON1, WBUF_EN);
3780 + nfi_write32(snf, SNF_DLY_CTL3, (snf->nfi_soc->sample_delay << SFCK_SAM_DLY_S));
3783 + nfi_read32(snf, NFI_INTR_STA);
3784 + nfi_write32(snf, NFI_INTR_EN, 0);
3786 + /* Clear SNF done flag */
3787 + nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE | CUS_PG_DONE);
3788 + nfi_write32(snf, SNF_STA_CTL1, 0);
3790 + /* Initialization on all dies */
3791 + for (i = 0; i < snf->num_dies; i++) {
3792 + mtk_snand_select_die(snf, i);
3794 + /* Disable On-Die ECC engine */
3795 + ret = mtk_snand_ondie_ecc_control(snf, false);
3799 + /* Disable block protection */
3800 + mtk_snand_unlock(snf);
3802 + /* Enable/disable quad-spi */
3803 + mtk_snand_qspi_control(snf, snf->quad_spi_op);
3806 + mtk_snand_select_die(snf, 0);
3811 +static int mtk_snand_id_probe(struct mtk_snand *snf,
3812 + const struct snand_flash_info **snand_info)
3814 + uint8_t id[4], op[2];
3817 + /* Read SPI-NAND JEDEC ID, OP + dummy/addr + ID */
3818 + op[0] = SNAND_CMD_READID;
3820 + ret = mtk_snand_mac_io(snf, op, 2, id, sizeof(id));
3824 + *snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
3828 + /* Read SPI-NAND JEDEC ID, OP + ID */
3829 + op[0] = SNAND_CMD_READID;
3830 + ret = mtk_snand_mac_io(snf, op, 1, id, sizeof(id));
3834 +	*snand_info = snand_flash_id_lookup(SNAND_ID_DIRECT, id);
3838 + snand_log_chip(snf->pdev,
3839 + "Unrecognized SPI-NAND ID: %02x %02x %02x %02x\n",
3840 + id[0], id[1], id[2], id[3]);
3845 +int mtk_snand_init(void *dev, const struct mtk_snand_platdata *pdata,
3846 + struct mtk_snand **psnf)
3848 + const struct snand_flash_info *snand_info;
3849 + uint32_t rawpage_size, sect_bf_size;
3850 + struct mtk_snand tmpsnf, *snf;
3853 + if (!pdata || !psnf)
3856 + if (pdata->soc >= __SNAND_SOC_MAX) {
3857 + snand_log_chip(dev, "Invalid SOC %u for MTK-SNAND\n",
3862 + /* Dummy instance only for initial reset and id probe */
3863 + tmpsnf.nfi_base = pdata->nfi_base;
3864 + tmpsnf.ecc_base = pdata->ecc_base;
3865 + tmpsnf.soc = pdata->soc;
3866 + tmpsnf.nfi_soc = &mtk_snand_socs[pdata->soc];
3867 + tmpsnf.pdev = dev;
3869 + /* Switch to SNFI mode */
3870 + writel(SPI_MODE, tmpsnf.nfi_base + SNF_CFG);
3872 + /* Reset SNFI & NFI */
3873 + mtk_snand_mac_reset(&tmpsnf);
3874 + mtk_nfi_reset(&tmpsnf);
3876 + /* Reset SPI-NAND chip */
3877 + ret = mtk_snand_chip_reset(&tmpsnf);
3879 + snand_log_chip(dev, "Failed to reset SPI-NAND chip\n");
3883 + /* Probe SPI-NAND flash by JEDEC ID */
3884 + ret = mtk_snand_id_probe(&tmpsnf, &snand_info);
3888 + rawpage_size = snand_info->memorg.pagesize +
3889 + snand_info->memorg.sparesize;
3891 + sect_bf_size = mtk_snand_socs[pdata->soc].max_sectors *
3892 + sizeof(*snf->sect_bf);
3894 + /* Allocate memory for instance and cache */
3895 + snf = generic_mem_alloc(dev,
3896 + sizeof(*snf) + rawpage_size + sect_bf_size);
3898 + snand_log_chip(dev, "Failed to allocate memory for instance\n");
3902 + snf->sect_bf = (int *)((uintptr_t)snf + sizeof(*snf));
3903 + snf->buf_cache = (uint8_t *)((uintptr_t)snf->sect_bf + sect_bf_size);
3905 + /* Allocate memory for DMA buffer */
3906 + snf->page_cache = dma_mem_alloc(dev, rawpage_size);
3907 + if (!snf->page_cache) {
3908 + generic_mem_free(dev, snf);
3909 + snand_log_chip(dev,
3910 + "Failed to allocate memory for DMA buffer\n");
3914 + /* Fill up instance */
3916 + snf->nfi_base = pdata->nfi_base;
3917 + snf->ecc_base = pdata->ecc_base;
3918 + snf->soc = pdata->soc;
3919 + snf->nfi_soc = &mtk_snand_socs[pdata->soc];
3920 + snf->snfi_quad_spi = pdata->quad_spi;
3922 + /* Initialize SNFI & ECC engine */
3923 + ret = mtk_snand_setup(snf, snand_info);
3925 + dma_mem_free(dev, snf->page_cache);
3926 + generic_mem_free(dev, snf);
3935 +int mtk_snand_cleanup(struct mtk_snand *snf)
3940 + dma_mem_free(snf->pdev, snf->page_cache);
3941 + generic_mem_free(snf->pdev, snf);
3946 +++ b/drivers/mtd/mtk-snand/mtk-snand.h
3948 +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
3950 + * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
3952 + * Author: Weijie Gao <weijie.gao@mediatek.com>
3955 +#ifndef _MTK_SNAND_H_
3956 +#define _MTK_SNAND_H_
3958 +#ifndef PRIVATE_MTK_SNAND_HEADER
3959 +#include <stddef.h>
3960 +#include <stdint.h>
3961 +#include <stdbool.h>
3964 +enum mtk_snand_soc {
3972 +struct mtk_snand_platdata {
3975 + enum mtk_snand_soc soc;
3979 +struct mtk_snand_chip_info {
3980 + const char *model;
3981 + uint64_t chipsize;
3982 + uint32_t blocksize;
3983 + uint32_t pagesize;
3984 + uint32_t sparesize;
3985 + uint32_t spare_per_sector;
3986 + uint32_t fdm_size;
3987 + uint32_t fdm_ecc_size;
3988 + uint32_t num_sectors;
3989 + uint32_t sector_size;
3990 + uint32_t ecc_strength;
3991 + uint32_t ecc_bytes;
3995 +struct snand_flash_info;
3997 +int mtk_snand_init(void *dev, const struct mtk_snand_platdata *pdata,
3998 + struct mtk_snand **psnf);
3999 +int mtk_snand_cleanup(struct mtk_snand *snf);
4001 +int mtk_snand_chip_reset(struct mtk_snand *snf);
4002 +int mtk_snand_read_page(struct mtk_snand *snf, uint64_t addr, void *buf,
4003 + void *oob, bool raw);
4004 +int mtk_snand_write_page(struct mtk_snand *snf, uint64_t addr, const void *buf,
4005 + const void *oob, bool raw);
4006 +int mtk_snand_erase_block(struct mtk_snand *snf, uint64_t addr);
4007 +int mtk_snand_block_isbad(struct mtk_snand *snf, uint64_t addr);
4008 +int mtk_snand_block_markbad(struct mtk_snand *snf, uint64_t addr);
4009 +int mtk_snand_fill_oob(struct mtk_snand *snf, uint8_t *oobraw,
4010 + const uint8_t *oobbuf, size_t ooblen);
4011 +int mtk_snand_transfer_oob(struct mtk_snand *snf, uint8_t *oobbuf,
4012 + size_t ooblen, const uint8_t *oobraw);
4013 +int mtk_snand_read_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
4014 + void *buf, void *oob, size_t ooblen,
4015 + size_t *actualooblen, bool raw);
4016 +int mtk_snand_write_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
4017 + const void *buf, const void *oob,
4018 + size_t ooblen, size_t *actualooblen,
4020 +int mtk_snand_get_chip_info(struct mtk_snand *snf,
4021 + struct mtk_snand_chip_info *info);
4022 +int mtk_snand_irq_process(struct mtk_snand *snf);
4024 +#endif /* _MTK_SNAND_H_ */
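The declarations above are the whole contract between this core and its users (the mtd glue in mtk-snand-mtd.c, or an SPL loader). A hedged end-to-end sketch; the register bases are placeholders rather than real SoC addresses, and dev and page_buf are assumed to be supplied by the caller:

	struct mtk_snand_platdata pdata = {
		.nfi_base = (void *)0x1100d000,		/* placeholder */
		.ecc_base = (void *)0x1100e000,		/* placeholder */
		.soc = SNAND_SOC_MT7622,
		.quad_spi = true,
	};
	struct mtk_snand_chip_info info;
	struct mtk_snand *snf;

	if (!mtk_snand_init(dev, &pdata, &snf)) {
		mtk_snand_get_chip_info(snf, &info);
		mtk_snand_read_page(snf, 0, page_buf, NULL, false);
		mtk_snand_cleanup(snf);
	}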