Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/Kconfig                |    1
-rw-r--r--  drivers/mtd/mtdcore.c              |    4
-rw-r--r--  drivers/mtd/nand/Kconfig           |   42
-rw-r--r--  drivers/mtd/nand/Makefile          |    2
-rw-r--r--  drivers/mtd/nand/am335x_spl_bch.c  |   12
-rw-r--r--  drivers/mtd/nand/atmel_nand.c      |   22
-rw-r--r--  drivers/mtd/nand/atmel_nand_ecc.h  |    3
-rw-r--r--  drivers/mtd/nand/davinci_nand.c    |    1
-rw-r--r--  drivers/mtd/nand/denali.c          | 1205
-rw-r--r--  drivers/mtd/nand/denali.h          |  467
-rw-r--r--  drivers/mtd/nand/denali_spl.c      |  231
-rw-r--r--  drivers/mtd/nand/fsl_elbc_nand.c   |    8
-rw-r--r--  drivers/mtd/nand/fsl_ifc_nand.c    |   21
-rw-r--r--  drivers/mtd/nand/nand_base.c       |    4
-rw-r--r--  drivers/mtd/spi/sandbox.c          |  114
-rw-r--r--  drivers/mtd/spi/sf_params.c        |    4
-rw-r--r--  drivers/mtd/spi/spi_spl_load.c     |    6
17 files changed, 2035 insertions(+), 112 deletions(-)
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
new file mode 100644
index 0000000..415ab4e
--- /dev/null
+++ b/drivers/mtd/Kconfig
@@ -0,0 +1 @@
+source "drivers/mtd/nand/Kconfig"
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index e0b7e3a..cb27ff2 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -803,7 +803,7 @@ void mtd_get_len_incl_bad(struct mtd_info *mtd, uint64_t offset,
 	*truncated = 0;
 	*len_incl_bad = 0;
 
-	if (!mtd->block_isbad) {
+	if (!mtd->_block_isbad) {
 		*len_incl_bad = length;
 		return;
 	}
@@ -819,7 +819,7 @@ void mtd_get_len_incl_bad(struct mtd_info *mtd, uint64_t offset,
 
 		block_len = mtd->erasesize - (offset & (mtd->erasesize - 1));
 
-		if (!mtd->block_isbad(mtd, offset & ~(mtd->erasesize - 1)))
+		if (!mtd->_block_isbad(mtd, offset & ~(mtd->erasesize - 1)))
 			len_excl_bad += block_len;
 
 		*len_incl_bad += block_len;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
new file mode 100644
index 0000000..75c2c06
--- /dev/null
+++ b/drivers/mtd/nand/Kconfig
@@ -0,0 +1,42 @@
+menu "NAND Device Support"
+
+if !SPL_BUILD
+
+config NAND_DENALI
+	bool "Support Denali NAND controller"
+	help
+	  Enable support for the Denali NAND controller.
+
+config SYS_NAND_DENALI_64BIT
+	bool "Use 64-bit variant of Denali NAND controller"
+	depends on NAND_DENALI
+	help
+	  The Denali NAND controller IP has some variations in terms of
+	  the bus interface.  The DMA setup sequence is completely different
+	  between the 32-bit and 64-bit AXI bus variants.
+
+	  If your Denali NAND controller is the 64-bit variant, say Y.
+	  Otherwise (32-bit), say N.
+
+config NAND_DENALI_SPARE_AREA_SKIP_BYTES
+	int "Number of bytes skipped in OOB area"
+	depends on NAND_DENALI
+	range 0 63
+	help
+	  This option specifies the number of bytes to skip from the beginning
+	  of the OOB area before the last ECC sector data starts.  This is
+	  potentially used to preserve the bad block marker in the OOB area.
+
+endif
+
+if SPL_BUILD
+
+config SPL_NAND_DENALI
+	bool "Support Denali NAND controller for SPL"
+	help
+	  This is a small implementation of the Denali NAND controller
+	  for use in SPL.
+ +endif + +endmenu diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index bf1312a..47eb34f 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -12,6 +12,7 @@ NORMAL_DRIVERS=y endif obj-$(CONFIG_SPL_NAND_AM33XX_BCH) += am335x_spl_bch.o +obj-$(CONFIG_SPL_NAND_DENALI) += denali_spl.o obj-$(CONFIG_SPL_NAND_DOCG4) += docg4_spl.o obj-$(CONFIG_SPL_NAND_SIMPLE) += nand_spl_simple.o obj-$(CONFIG_SPL_NAND_LOAD) += nand_spl_load.o @@ -42,6 +43,7 @@ obj-$(CONFIG_NAND_ECC_BCH) += nand_bch.o obj-$(CONFIG_NAND_ATMEL) += atmel_nand.o obj-$(CONFIG_DRIVER_NAND_BFIN) += bfin_nand.o obj-$(CONFIG_NAND_DAVINCI) += davinci_nand.o +obj-$(CONFIG_NAND_DENALI) += denali.o obj-$(CONFIG_NAND_FSL_ELBC) += fsl_elbc_nand.o obj-$(CONFIG_NAND_FSL_IFC) += fsl_ifc_nand.o obj-$(CONFIG_NAND_FSL_UPM) += fsl_upm.o diff --git a/drivers/mtd/nand/am335x_spl_bch.c b/drivers/mtd/nand/am335x_spl_bch.c index ce65d8e..bf8b2ee 100644 --- a/drivers/mtd/nand/am335x_spl_bch.c +++ b/drivers/mtd/nand/am335x_spl_bch.c @@ -64,14 +64,18 @@ static int nand_command(int block, int page, uint32_t offs, NAND_CTRL_ALE | NAND_CTRL_CHANGE); /* A[7:0] */ hwctrl(&nand_info[0], (offs >> 8) & 0xff, NAND_CTRL_ALE); /* A[11:9] */ /* Row address */ - hwctrl(&nand_info[0], (page_addr & 0xff), NAND_CTRL_ALE); /* A[19:12] */ - hwctrl(&nand_info[0], ((page_addr >> 8) & 0xff), + if (cmd != NAND_CMD_RNDOUT) { + hwctrl(&nand_info[0], (page_addr & 0xff), + NAND_CTRL_ALE); /* A[19:12] */ + hwctrl(&nand_info[0], ((page_addr >> 8) & 0xff), NAND_CTRL_ALE); /* A[27:20] */ #ifdef CONFIG_SYS_NAND_5_ADDR_CYCLE - /* One more address cycle for devices > 128MiB */ - hwctrl(&nand_info[0], (page_addr >> 16) & 0x0f, + /* One more address cycle for devices > 128MiB */ + hwctrl(&nand_info[0], (page_addr >> 16) & 0x0f, NAND_CTRL_ALE); /* A[31:28] */ #endif + } + hwctrl(&nand_info[0], NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); if (cmd == NAND_CMD_READ0) { diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index e73834d..9114a86 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -164,7 +164,7 @@ static void pmecc_gen_syndrome(struct mtd_info *mtd, int sector) /* Fill odd syndromes */ for (i = 0; i < host->pmecc_corr_cap; i++) { - value = readl(&host->pmecc->rem_port[sector].rem[i / 2]); + value = pmecc_readl(host->pmecc, rem_port[sector].rem[i / 2]); if (i & 1) value >>= 16; value &= 0xffff; @@ -392,10 +392,11 @@ static int pmecc_err_location(struct mtd_info *mtd) int16_t *smu = host->pmecc_smu; int timeout = PMECC_MAX_TIMEOUT_US; - writel(PMERRLOC_DISABLE, &host->pmerrloc->eldis); + pmecc_writel(host->pmerrloc, eldis, PMERRLOC_DISABLE); for (i = 0; i <= host->pmecc_lmu[cap + 1] >> 1; i++) { - writel(smu[(cap + 1) * num + i], &host->pmerrloc->sigma[i]); + pmecc_writel(host->pmerrloc, sigma[i], + smu[(cap + 1) * num + i]); err_nbr++; } @@ -403,12 +404,12 @@ static int pmecc_err_location(struct mtd_info *mtd) if (sector_size == 1024) val |= PMERRLOC_ELCFG_SECTOR_1024; - writel(val, &host->pmerrloc->elcfg); - writel(sector_size * 8 + host->pmecc_degree * cap, - &host->pmerrloc->elen); + pmecc_writel(host->pmerrloc, elcfg, val); + pmecc_writel(host->pmerrloc, elen, + sector_size * 8 + host->pmecc_degree * cap); while (--timeout) { - if (readl(&host->pmerrloc->elisr) & PMERRLOC_CALC_DONE) + if (pmecc_readl(host->pmerrloc, elisr) & PMERRLOC_CALC_DONE) break; WATCHDOG_RESET(); udelay(1); @@ -419,7 +420,7 @@ static int pmecc_err_location(struct mtd_info *mtd) return -1; } - roots_nbr = 
(readl(&host->pmerrloc->elisr) & PMERRLOC_ERR_NUM_MASK) + roots_nbr = (pmecc_readl(host->pmerrloc, elisr) & PMERRLOC_ERR_NUM_MASK) >> 8; /* Number of roots == degree of smu hence <= cap */ if (roots_nbr == host->pmecc_lmu[cap + 1] >> 1) @@ -443,7 +444,7 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc, sector_size = host->pmecc_sector_size; while (err_nbr) { - tmp = readl(&host->pmerrloc->el[i]) - 1; + tmp = pmecc_readl(host->pmerrloc, el[i]) - 1; byte_pos = tmp / 8; bit_pos = tmp % 8; @@ -597,7 +598,7 @@ static int atmel_nand_pmecc_write_page(struct mtd_info *mtd, pos = i * host->pmecc_bytes_per_sector + j; chip->oob_poi[eccpos[pos]] = - readb(&host->pmecc->ecc_port[i].ecc[j]); + pmecc_readb(host->pmecc, ecc_port[i].ecc[j]); } } chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); @@ -881,6 +882,7 @@ static int atmel_pmecc_nand_init_params(struct nand_chip *nand, return -ENOMEM; } + nand->options |= NAND_NO_SUBPAGE_WRITE; nand->ecc.read_page = atmel_nand_pmecc_read_page; nand->ecc.write_page = atmel_nand_pmecc_write_page; nand->ecc.strength = cap; diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h index 55d7711..92d4ec5 100644 --- a/drivers/mtd/nand/atmel_nand_ecc.h +++ b/drivers/mtd/nand/atmel_nand_ecc.h @@ -34,6 +34,9 @@ #define pmecc_readl(addr, reg) \ readl(&addr->reg) +#define pmecc_readb(addr, reg) \ + readb(&addr->reg) + #define pmecc_writel(addr, reg, value) \ writel((value), &addr->reg) diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index 02a1130..41689b5 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -363,6 +363,7 @@ static struct nand_ecclayout nand_keystone_rbl_4bit_layout_oobfirst = { * @raw: use _raw version of write_page */ static int nand_davinci_write_page(struct mtd_info *mtd, struct nand_chip *chip, + uint32_t offset, int data_len, const uint8_t *buf, int oob_required, int page, int cached, int raw) { diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c new file mode 100644 index 0000000..ba3de1a --- /dev/null +++ b/drivers/mtd/nand/denali.c @@ -0,0 +1,1205 @@ +/* + * Copyright (C) 2014 Panasonic Corporation + * Copyright (C) 2013-2014, Altera Corporation <www.altera.com> + * Copyright (C) 2009-2010, Intel Corporation and its suppliers. + * + * SPDX-License-Identifier: GPL-2.0+ + */ + +#include <common.h> +#include <malloc.h> +#include <nand.h> +#include <asm/errno.h> +#include <asm/io.h> + +#include "denali.h" + +#define NAND_DEFAULT_TIMINGS -1 + +static int onfi_timing_mode = NAND_DEFAULT_TIMINGS; + +/* We define a macro here that combines all interrupts this driver uses into + * a single constant value, for convenience. */ +#define DENALI_IRQ_ALL (INTR_STATUS__DMA_CMD_COMP | \ + INTR_STATUS__ECC_TRANSACTION_DONE | \ + INTR_STATUS__ECC_ERR | \ + INTR_STATUS__PROGRAM_FAIL | \ + INTR_STATUS__LOAD_COMP | \ + INTR_STATUS__PROGRAM_COMP | \ + INTR_STATUS__TIME_OUT | \ + INTR_STATUS__ERASE_FAIL | \ + INTR_STATUS__RST_COMP | \ + INTR_STATUS__ERASE_COMP | \ + INTR_STATUS__ECC_UNCOR_ERR | \ + INTR_STATUS__INT_ACT | \ + INTR_STATUS__LOCKED_BLK) + +/* indicates whether or not the internal value for the flash bank is + * valid or not */ +#define CHIP_SELECT_INVALID -1 + +#define SUPPORT_8BITECC 1 + +/* + * this macro allows us to convert from an MTD structure to our own + * device context (denali) structure. 
+ */ +#define mtd_to_denali(m) (((struct nand_chip *)mtd->priv)->priv) + +/* These constants are defined by the driver to enable common driver + * configuration options. */ +#define SPARE_ACCESS 0x41 +#define MAIN_ACCESS 0x42 +#define MAIN_SPARE_ACCESS 0x43 + +#define DENALI_UNLOCK_START 0x10 +#define DENALI_UNLOCK_END 0x11 +#define DENALI_LOCK 0x21 +#define DENALI_LOCK_TIGHT 0x31 +#define DENALI_BUFFER_LOAD 0x60 +#define DENALI_BUFFER_WRITE 0x62 + +#define DENALI_READ 0 +#define DENALI_WRITE 0x100 + +/* types of device accesses. We can issue commands and get status */ +#define COMMAND_CYCLE 0 +#define ADDR_CYCLE 1 +#define STATUS_CYCLE 2 + +/* this is a helper macro that allows us to + * format the bank into the proper bits for the controller */ +#define BANK(x) ((x) << 24) + +/* Interrupts are cleared by writing a 1 to the appropriate status bit */ +static inline void clear_interrupt(struct denali_nand_info *denali, + uint32_t irq_mask) +{ + uint32_t intr_status_reg; + + intr_status_reg = INTR_STATUS(denali->flash_bank); + + writel(irq_mask, denali->flash_reg + intr_status_reg); +} + +static uint32_t read_interrupt_status(struct denali_nand_info *denali) +{ + uint32_t intr_status_reg; + + intr_status_reg = INTR_STATUS(denali->flash_bank); + + return readl(denali->flash_reg + intr_status_reg); +} + +static void clear_interrupts(struct denali_nand_info *denali) +{ + uint32_t status; + + status = read_interrupt_status(denali); + clear_interrupt(denali, status); + + denali->irq_status = 0; +} + +static void denali_irq_enable(struct denali_nand_info *denali, + uint32_t int_mask) +{ + int i; + + for (i = 0; i < denali->max_banks; ++i) + writel(int_mask, denali->flash_reg + INTR_EN(i)); +} + +static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask) +{ + unsigned long timeout = 1000000; + uint32_t intr_status; + + do { + intr_status = read_interrupt_status(denali) & DENALI_IRQ_ALL; + if (intr_status & irq_mask) { + denali->irq_status &= ~irq_mask; + /* our interrupt was detected */ + break; + } + udelay(1); + timeout--; + } while (timeout != 0); + + if (timeout == 0) { + /* timeout */ + printf("Denali timeout with interrupt status %08x\n", + read_interrupt_status(denali)); + intr_status = 0; + } + return intr_status; +} + +/* + * Certain operations for the denali NAND controller use an indexed mode to + * read/write data. The operation is performed by writing the address value + * of the command to the device memory followed by the data. This function + * abstracts this common operation. +*/ +static void index_addr(struct denali_nand_info *denali, + uint32_t address, uint32_t data) +{ + writel(address, denali->flash_mem + INDEX_CTRL_REG); + writel(data, denali->flash_mem + INDEX_DATA_REG); +} + +/* Perform an indexed read of the device */ +static void index_addr_read_data(struct denali_nand_info *denali, + uint32_t address, uint32_t *pdata) +{ + writel(address, denali->flash_mem + INDEX_CTRL_REG); + *pdata = readl(denali->flash_mem + INDEX_DATA_REG); +} + +/* We need to buffer some data for some of the NAND core routines. + * The operations manage buffering that data. 
*/ +static void reset_buf(struct denali_nand_info *denali) +{ + denali->buf.head = 0; + denali->buf.tail = 0; +} + +static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte) +{ + denali->buf.buf[denali->buf.tail++] = byte; +} + +/* resets a specific device connected to the core */ +static void reset_bank(struct denali_nand_info *denali) +{ + uint32_t irq_status; + uint32_t irq_mask = INTR_STATUS__RST_COMP | + INTR_STATUS__TIME_OUT; + + clear_interrupts(denali); + + writel(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET); + + irq_status = wait_for_irq(denali, irq_mask); + if (irq_status & INTR_STATUS__TIME_OUT) + debug("reset bank failed.\n"); +} + +/* Reset the flash controller */ +static uint32_t denali_nand_reset(struct denali_nand_info *denali) +{ + uint32_t i; + + for (i = 0; i < denali->max_banks; i++) + writel(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT, + denali->flash_reg + INTR_STATUS(i)); + + for (i = 0; i < denali->max_banks; i++) { + writel(1 << i, denali->flash_reg + DEVICE_RESET); + while (!(readl(denali->flash_reg + INTR_STATUS(i)) & + (INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT))) + if (readl(denali->flash_reg + INTR_STATUS(i)) & + INTR_STATUS__TIME_OUT) + debug("NAND Reset operation timed out on bank" + " %d\n", i); + } + + for (i = 0; i < denali->max_banks; i++) + writel(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT, + denali->flash_reg + INTR_STATUS(i)); + + return 0; +} + +/* + * this routine calculates the ONFI timing values for a given mode and + * programs the clocking register accordingly. The mode is determined by + * the get_onfi_nand_para routine. + */ +static void nand_onfi_timing_set(struct denali_nand_info *denali, + uint16_t mode) +{ + uint32_t trea[6] = {40, 30, 25, 20, 20, 16}; + uint32_t trp[6] = {50, 25, 17, 15, 12, 10}; + uint32_t treh[6] = {30, 15, 15, 10, 10, 7}; + uint32_t trc[6] = {100, 50, 35, 30, 25, 20}; + uint32_t trhoh[6] = {0, 15, 15, 15, 15, 15}; + uint32_t trloh[6] = {0, 0, 0, 0, 5, 5}; + uint32_t tcea[6] = {100, 45, 30, 25, 25, 25}; + uint32_t tadl[6] = {200, 100, 100, 100, 70, 70}; + uint32_t trhw[6] = {200, 100, 100, 100, 100, 100}; + uint32_t trhz[6] = {200, 100, 100, 100, 100, 100}; + uint32_t twhr[6] = {120, 80, 80, 60, 60, 60}; + uint32_t tcs[6] = {70, 35, 25, 25, 20, 15}; + + uint32_t tclsrising = 1; + uint32_t data_invalid_rhoh, data_invalid_rloh, data_invalid; + uint32_t dv_window = 0; + uint32_t en_lo, en_hi; + uint32_t acc_clks; + uint32_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt; + + en_lo = DIV_ROUND_UP(trp[mode], CLK_X); + en_hi = DIV_ROUND_UP(treh[mode], CLK_X); + if ((en_hi * CLK_X) < (treh[mode] + 2)) + en_hi++; + + if ((en_lo + en_hi) * CLK_X < trc[mode]) + en_lo += DIV_ROUND_UP((trc[mode] - (en_lo + en_hi) * CLK_X), + CLK_X); + + if ((en_lo + en_hi) < CLK_MULTI) + en_lo += CLK_MULTI - en_lo - en_hi; + + while (dv_window < 8) { + data_invalid_rhoh = en_lo * CLK_X + trhoh[mode]; + + data_invalid_rloh = (en_lo + en_hi) * CLK_X + trloh[mode]; + + data_invalid = + data_invalid_rhoh < + data_invalid_rloh ? 
data_invalid_rhoh : data_invalid_rloh; + + dv_window = data_invalid - trea[mode]; + + if (dv_window < 8) + en_lo++; + } + + acc_clks = DIV_ROUND_UP(trea[mode], CLK_X); + + while (((acc_clks * CLK_X) - trea[mode]) < 3) + acc_clks++; + + if ((data_invalid - acc_clks * CLK_X) < 2) + debug("%s, Line %d: Warning!\n", __FILE__, __LINE__); + + addr_2_data = DIV_ROUND_UP(tadl[mode], CLK_X); + re_2_we = DIV_ROUND_UP(trhw[mode], CLK_X); + re_2_re = DIV_ROUND_UP(trhz[mode], CLK_X); + we_2_re = DIV_ROUND_UP(twhr[mode], CLK_X); + cs_cnt = DIV_ROUND_UP((tcs[mode] - trp[mode]), CLK_X); + if (!tclsrising) + cs_cnt = DIV_ROUND_UP(tcs[mode], CLK_X); + if (cs_cnt == 0) + cs_cnt = 1; + + if (tcea[mode]) { + while (((cs_cnt * CLK_X) + trea[mode]) < tcea[mode]) + cs_cnt++; + } + + /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */ + if ((readl(denali->flash_reg + MANUFACTURER_ID) == 0) && + (readl(denali->flash_reg + DEVICE_ID) == 0x88)) + acc_clks = 6; + + writel(acc_clks, denali->flash_reg + ACC_CLKS); + writel(re_2_we, denali->flash_reg + RE_2_WE); + writel(re_2_re, denali->flash_reg + RE_2_RE); + writel(we_2_re, denali->flash_reg + WE_2_RE); + writel(addr_2_data, denali->flash_reg + ADDR_2_DATA); + writel(en_lo, denali->flash_reg + RDWR_EN_LO_CNT); + writel(en_hi, denali->flash_reg + RDWR_EN_HI_CNT); + writel(cs_cnt, denali->flash_reg + CS_SETUP_CNT); +} + +/* queries the NAND device to see what ONFI modes it supports. */ +static uint32_t get_onfi_nand_para(struct denali_nand_info *denali) +{ + int i; + /* + * we needn't to do a reset here because driver has already + * reset all the banks before + */ + if (!(readl(denali->flash_reg + ONFI_TIMING_MODE) & + ONFI_TIMING_MODE__VALUE)) + return -EIO; + + for (i = 5; i > 0; i--) { + if (readl(denali->flash_reg + ONFI_TIMING_MODE) & + (0x01 << i)) + break; + } + + nand_onfi_timing_set(denali, i); + + /* By now, all the ONFI devices we know support the page cache */ + /* rw feature. 
So here we enable the pipeline_rw_ahead feature */ + return 0; +} + +static void get_samsung_nand_para(struct denali_nand_info *denali, + uint8_t device_id) +{ + if (device_id == 0xd3) { /* Samsung K9WAG08U1A */ + /* Set timing register values according to datasheet */ + writel(5, denali->flash_reg + ACC_CLKS); + writel(20, denali->flash_reg + RE_2_WE); + writel(12, denali->flash_reg + WE_2_RE); + writel(14, denali->flash_reg + ADDR_2_DATA); + writel(3, denali->flash_reg + RDWR_EN_LO_CNT); + writel(2, denali->flash_reg + RDWR_EN_HI_CNT); + writel(2, denali->flash_reg + CS_SETUP_CNT); + } +} + +static void get_toshiba_nand_para(struct denali_nand_info *denali) +{ + uint32_t tmp; + + /* Workaround to fix a controller bug which reports a wrong */ + /* spare area size for some kind of Toshiba NAND device */ + if ((readl(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) && + (readl(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) { + writel(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); + tmp = readl(denali->flash_reg + DEVICES_CONNECTED) * + readl(denali->flash_reg + DEVICE_SPARE_AREA_SIZE); + writel(tmp, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); + } +} + +static void get_hynix_nand_para(struct denali_nand_info *denali, + uint8_t device_id) +{ + uint32_t main_size, spare_size; + + switch (device_id) { + case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */ + case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */ + writel(128, denali->flash_reg + PAGES_PER_BLOCK); + writel(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE); + writel(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); + main_size = 4096 * + readl(denali->flash_reg + DEVICES_CONNECTED); + spare_size = 224 * + readl(denali->flash_reg + DEVICES_CONNECTED); + writel(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE); + writel(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); + writel(0, denali->flash_reg + DEVICE_WIDTH); + break; + default: + debug("Spectra: Unknown Hynix NAND (Device ID: 0x%x)." + "Will use default parameter values instead.\n", + device_id); + } +} + +/* + * determines how many NAND chips are connected to the controller. Note for + * Intel CE4100 devices we don't support more than one device. + */ +static void find_valid_banks(struct denali_nand_info *denali) +{ + uint32_t id[denali->max_banks]; + int i; + + denali->total_used_banks = 1; + for (i = 0; i < denali->max_banks; i++) { + index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90); + index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0); + index_addr_read_data(denali, + (uint32_t)(MODE_11 | (i << 24) | 2), + &id[i]); + + if (i == 0) { + if (!(id[i] & 0x0ff)) + break; + } else { + if ((id[i] & 0x0ff) == (id[0] & 0x0ff)) + denali->total_used_banks++; + else + break; + } + } +} + +/* + * Use the configuration feature register to determine the maximum number of + * banks that the hardware supports. + */ +static void detect_max_banks(struct denali_nand_info *denali) +{ + uint32_t features = readl(denali->flash_reg + FEATURES); + denali->max_banks = 2 << (features & FEATURES__N_BANKS); +} + +static void detect_partition_feature(struct denali_nand_info *denali) +{ + /* + * For MRST platform, denali->fwblks represent the + * number of blocks firmware is taken, + * FW is in protect partition and MTD driver has no + * permission to access it. So let driver know how many + * blocks it can't touch. 
+ */ + if (readl(denali->flash_reg + FEATURES) & FEATURES__PARTITION) { + if ((readl(denali->flash_reg + PERM_SRC_ID(1)) & + PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) { + denali->fwblks = + ((readl(denali->flash_reg + MIN_MAX_BANK(1)) & + MIN_MAX_BANK__MIN_VALUE) * + denali->blksperchip) + + + (readl(denali->flash_reg + MIN_BLK_ADDR(1)) & + MIN_BLK_ADDR__VALUE); + } else { + denali->fwblks = SPECTRA_START_BLOCK; + } + } else { + denali->fwblks = SPECTRA_START_BLOCK; + } +} + +static uint32_t denali_nand_timing_set(struct denali_nand_info *denali) +{ + uint32_t id_bytes[5], addr; + uint8_t i, maf_id, device_id; + + /* Use read id method to get device ID and other + * params. For some NAND chips, controller can't + * report the correct device ID by reading from + * DEVICE_ID register + * */ + addr = (uint32_t)MODE_11 | BANK(denali->flash_bank); + index_addr(denali, (uint32_t)addr | 0, 0x90); + index_addr(denali, (uint32_t)addr | 1, 0); + for (i = 0; i < 5; i++) + index_addr_read_data(denali, addr | 2, &id_bytes[i]); + maf_id = id_bytes[0]; + device_id = id_bytes[1]; + + if (readl(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) & + ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */ + if (get_onfi_nand_para(denali)) + return -EIO; + } else if (maf_id == 0xEC) { /* Samsung NAND */ + get_samsung_nand_para(denali, device_id); + } else if (maf_id == 0x98) { /* Toshiba NAND */ + get_toshiba_nand_para(denali); + } else if (maf_id == 0xAD) { /* Hynix NAND */ + get_hynix_nand_para(denali, device_id); + } + + find_valid_banks(denali); + + detect_partition_feature(denali); + + /* If the user specified to override the default timings + * with a specific ONFI mode, we apply those changes here. + */ + if (onfi_timing_mode != NAND_DEFAULT_TIMINGS) + nand_onfi_timing_set(denali, onfi_timing_mode); + + return 0; +} + +/* validation function to verify that the controlling software is making + * a valid request + */ +static inline bool is_flash_bank_valid(int flash_bank) +{ + return flash_bank >= 0 && flash_bank < 4; +} + +static void denali_irq_init(struct denali_nand_info *denali) +{ + uint32_t int_mask = 0; + int i; + + /* Disable global interrupts */ + writel(0, denali->flash_reg + GLOBAL_INT_ENABLE); + + int_mask = DENALI_IRQ_ALL; + + /* Clear all status bits */ + for (i = 0; i < denali->max_banks; ++i) + writel(0xFFFF, denali->flash_reg + INTR_STATUS(i)); + + denali_irq_enable(denali, int_mask); +} + +/* This helper function setups the registers for ECC and whether or not + * the spare area will be transferred. */ +static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en, + bool transfer_spare) +{ + int ecc_en_flag = 0, transfer_spare_flag = 0; + + /* set ECC, transfer spare bits if needed */ + ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0; + transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0; + + /* Enable spare area/ECC per user's request. */ + writel(ecc_en_flag, denali->flash_reg + ECC_ENABLE); + /* applicable for MAP01 only */ + writel(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG); +} + +/* sends a pipeline command operation to the controller. See the Denali NAND + * controller's user guide for more information (section 4.2.3.6). 
+ */ +static int denali_send_pipeline_cmd(struct denali_nand_info *denali, + bool ecc_en, bool transfer_spare, + int access_type, int op) +{ + uint32_t addr, cmd, irq_status; + static uint32_t page_count = 1; + + setup_ecc_for_xfer(denali, ecc_en, transfer_spare); + + /* clear interrupts */ + clear_interrupts(denali); + + addr = BANK(denali->flash_bank) | denali->page; + + /* setup the acccess type */ + cmd = MODE_10 | addr; + index_addr(denali, cmd, access_type); + + /* setup the pipeline command */ + index_addr(denali, cmd, 0x2000 | op | page_count); + + cmd = MODE_01 | addr; + writel(cmd, denali->flash_mem + INDEX_CTRL_REG); + + if (op == DENALI_READ) { + /* wait for command to be accepted */ + irq_status = wait_for_irq(denali, INTR_STATUS__LOAD_COMP); + + if (irq_status == 0) + return -EIO; + } + + return 0; +} + +/* helper function that simply writes a buffer to the flash */ +static int write_data_to_flash_mem(struct denali_nand_info *denali, + const uint8_t *buf, int len) +{ + uint32_t i = 0, *buf32; + + /* verify that the len is a multiple of 4. see comment in + * read_data_from_flash_mem() */ + BUG_ON((len % 4) != 0); + + /* write the data to the flash memory */ + buf32 = (uint32_t *)buf; + for (i = 0; i < len / 4; i++) + writel(*buf32++, denali->flash_mem + INDEX_DATA_REG); + return i * 4; /* intent is to return the number of bytes read */ +} + +/* helper function that simply reads a buffer from the flash */ +static int read_data_from_flash_mem(struct denali_nand_info *denali, + uint8_t *buf, int len) +{ + uint32_t i, *buf32; + + /* + * we assume that len will be a multiple of 4, if not + * it would be nice to know about it ASAP rather than + * have random failures... + * This assumption is based on the fact that this + * function is designed to be used to read flash pages, + * which are typically multiples of 4... 
+ */ + + BUG_ON((len % 4) != 0); + + /* transfer the data from the flash */ + buf32 = (uint32_t *)buf; + for (i = 0; i < len / 4; i++) + *buf32++ = readl(denali->flash_mem + INDEX_DATA_REG); + + return i * 4; /* intent is to return the number of bytes read */ +} + +static void denali_mode_main_access(struct denali_nand_info *denali) +{ + uint32_t addr, cmd; + + addr = BANK(denali->flash_bank) | denali->page; + cmd = MODE_10 | addr; + index_addr(denali, cmd, MAIN_ACCESS); +} + +static void denali_mode_main_spare_access(struct denali_nand_info *denali) +{ + uint32_t addr, cmd; + + addr = BANK(denali->flash_bank) | denali->page; + cmd = MODE_10 | addr; + index_addr(denali, cmd, MAIN_SPARE_ACCESS); +} + +/* writes OOB data to the device */ +static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + uint32_t irq_status; + uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP | + INTR_STATUS__PROGRAM_FAIL; + int status = 0; + + denali->page = page; + + if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS, + DENALI_WRITE) == 0) { + write_data_to_flash_mem(denali, buf, mtd->oobsize); + + /* wait for operation to complete */ + irq_status = wait_for_irq(denali, irq_mask); + + if (irq_status == 0) { + dev_err(denali->dev, "OOB write failed\n"); + status = -EIO; + } + } else { + printf("unable to send pipeline command\n"); + status = -EIO; + } + return status; +} + +/* reads OOB data from the device */ +static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + uint32_t irq_mask = INTR_STATUS__LOAD_COMP, + irq_status = 0, addr = 0x0, cmd = 0x0; + + denali->page = page; + + if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS, + DENALI_READ) == 0) { + read_data_from_flash_mem(denali, buf, mtd->oobsize); + + /* wait for command to be accepted + * can always use status0 bit as the mask is identical for each + * bank. */ + irq_status = wait_for_irq(denali, irq_mask); + + if (irq_status == 0) + printf("page on OOB timeout %d\n", denali->page); + + /* We set the device back to MAIN_ACCESS here as I observed + * instability with the controller if you do a block erase + * and the last transaction was a SPARE_ACCESS. Block erase + * is reliable (according to the MTD test infrastructure) + * if you are in MAIN_ACCESS. + */ + addr = BANK(denali->flash_bank) | denali->page; + cmd = MODE_10 | addr; + index_addr(denali, cmd, MAIN_ACCESS); + } +} + +/* this function examines buffers to see if they contain data that + * indicate that the buffer is part of an erased region of flash. 
+ */ +static bool is_erased(uint8_t *buf, int len) +{ + int i = 0; + for (i = 0; i < len; i++) + if (buf[i] != 0xFF) + return false; + return true; +} + +/* programs the controller to either enable/disable DMA transfers */ +static void denali_enable_dma(struct denali_nand_info *denali, bool en) +{ + uint32_t reg_val = 0x0; + + if (en) + reg_val = DMA_ENABLE__FLAG; + + writel(reg_val, denali->flash_reg + DMA_ENABLE); + readl(denali->flash_reg + DMA_ENABLE); +} + +/* setups the HW to perform the data DMA */ +static void denali_setup_dma(struct denali_nand_info *denali, int op) +{ + uint32_t mode; + const int page_count = 1; + uint32_t addr = (uint32_t)denali->buf.dma_buf; + + flush_dcache_range(addr, addr + sizeof(denali->buf.dma_buf)); + +/* For Denali controller that is 64 bit bus IP core */ +#ifdef CONFIG_SYS_NAND_DENALI_64BIT + mode = MODE_10 | BANK(denali->flash_bank) | denali->page; + + /* DMA is a three step process */ + + /* 1. setup transfer type, interrupt when complete, + burst len = 64 bytes, the number of pages */ + index_addr(denali, mode, 0x01002000 | (64 << 16) | op | page_count); + + /* 2. set memory low address bits 31:0 */ + index_addr(denali, mode, addr); + + /* 3. set memory high address bits 64:32 */ + index_addr(denali, mode, 0); +#else + mode = MODE_10 | BANK(denali->flash_bank); + + /* DMA is a four step process */ + + /* 1. setup transfer type and # of pages */ + index_addr(denali, mode | denali->page, 0x2000 | op | page_count); + + /* 2. set memory high address bits 23:8 */ + index_addr(denali, mode | ((uint32_t)(addr >> 16) << 8), 0x2200); + + /* 3. set memory low address bits 23:8 */ + index_addr(denali, mode | ((uint32_t)addr << 8), 0x2300); + + /* 4. interrupt when complete, burst len = 64 bytes*/ + index_addr(denali, mode | 0x14000, 0x2400); +#endif +} + +/* Common DMA function */ +static uint32_t denali_dma_configuration(struct denali_nand_info *denali, + uint32_t ops, bool raw_xfer, + uint32_t irq_mask, int oob_required) +{ + uint32_t irq_status = 0; + /* setup_ecc_for_xfer(bool ecc_en, bool transfer_spare) */ + setup_ecc_for_xfer(denali, !raw_xfer, oob_required); + + /* clear any previous interrupt flags */ + clear_interrupts(denali); + + /* enable the DMA */ + denali_enable_dma(denali, true); + + /* setup the DMA */ + denali_setup_dma(denali, ops); + + /* wait for operation to complete */ + irq_status = wait_for_irq(denali, irq_mask); + + /* if ECC fault happen, seems we need delay before turning off DMA. 
+ * If not, the controller will go into non responsive condition */ + if (irq_status & INTR_STATUS__ECC_UNCOR_ERR) + udelay(100); + + /* disable the DMA */ + denali_enable_dma(denali, false); + + return irq_status; +} + +static int write_page(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, bool raw_xfer, int oob_required) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + + uint32_t irq_status = 0; + uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP; + + denali->status = 0; + + /* copy buffer into DMA buffer */ + memcpy(denali->buf.dma_buf, buf, mtd->writesize); + + /* need extra memcpy for raw transfer */ + if (raw_xfer) + memcpy(denali->buf.dma_buf + mtd->writesize, + chip->oob_poi, mtd->oobsize); + + /* setting up DMA */ + irq_status = denali_dma_configuration(denali, DENALI_WRITE, raw_xfer, + irq_mask, oob_required); + + /* if timeout happen, error out */ + if (!(irq_status & INTR_STATUS__DMA_CMD_COMP)) { + debug("DMA timeout for denali write_page\n"); + denali->status = NAND_STATUS_FAIL; + return -EIO; + } + + if (irq_status & INTR_STATUS__LOCKED_BLK) { + debug("Failed as write to locked block\n"); + denali->status = NAND_STATUS_FAIL; + return -EIO; + } + return 0; +} + +/* NAND core entry points */ + +/* + * this is the callback that the NAND core calls to write a page. Since + * writing a page with ECC or without is similar, all the work is done + * by write_page above. + */ +static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + + /* + * for regular page writes, we let HW handle all the ECC + * data written to the device. + */ + if (oob_required) + /* switch to main + spare access */ + denali_mode_main_spare_access(denali); + else + /* switch to main access only */ + denali_mode_main_access(denali); + + return write_page(mtd, chip, buf, false, oob_required); +} + +/* + * This is the callback that the NAND core calls to write a page without ECC. + * raw access is similar to ECC page writes, so all the work is done in the + * write_page() function above. + */ +static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + + /* + * for raw page writes, we want to disable ECC and simply write + * whatever data is in the buffer. 
+ */ + + if (oob_required) + /* switch to main + spare access */ + denali_mode_main_spare_access(denali); + else + /* switch to main access only */ + denali_mode_main_access(denali); + + return write_page(mtd, chip, buf, true, oob_required); +} + +static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, + int page) +{ + return write_oob_data(mtd, chip->oob_poi, page); +} + +/* raw include ECC value and all the spare area */ +static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + + uint32_t irq_status, irq_mask = INTR_STATUS__DMA_CMD_COMP; + + if (denali->page != page) { + debug("Missing NAND_CMD_READ0 command\n"); + return -EIO; + } + + if (oob_required) + /* switch to main + spare access */ + denali_mode_main_spare_access(denali); + else + /* switch to main access only */ + denali_mode_main_access(denali); + + /* setting up the DMA where ecc_enable is false */ + irq_status = denali_dma_configuration(denali, DENALI_READ, true, + irq_mask, oob_required); + + /* if timeout happen, error out */ + if (!(irq_status & INTR_STATUS__DMA_CMD_COMP)) { + debug("DMA timeout for denali_read_page_raw\n"); + return -EIO; + } + + /* splitting the content to destination buffer holder */ + memcpy(chip->oob_poi, (denali->buf.dma_buf + mtd->writesize), + mtd->oobsize); + memcpy(buf, denali->buf.dma_buf, mtd->writesize); + + return 0; +} + +static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + uint32_t irq_status, irq_mask = INTR_STATUS__DMA_CMD_COMP; + + if (denali->page != page) { + debug("Missing NAND_CMD_READ0 command\n"); + return -EIO; + } + + if (oob_required) + /* switch to main + spare access */ + denali_mode_main_spare_access(denali); + else + /* switch to main access only */ + denali_mode_main_access(denali); + + /* setting up the DMA where ecc_enable is true */ + irq_status = denali_dma_configuration(denali, DENALI_READ, false, + irq_mask, oob_required); + + memcpy(buf, denali->buf.dma_buf, mtd->writesize); + + /* check whether any ECC error */ + if (irq_status & INTR_STATUS__ECC_UNCOR_ERR) { + /* is the ECC cause by erase page, check using read_page_raw */ + debug(" Uncorrected ECC detected\n"); + denali_read_page_raw(mtd, chip, buf, oob_required, + denali->page); + + if (is_erased(buf, mtd->writesize) == true && + is_erased(chip->oob_poi, mtd->oobsize) == true) { + debug(" ECC error cause by erased block\n"); + /* false alarm, return the 0xFF */ + } else { + return -EIO; + } + } + memcpy(buf, denali->buf.dma_buf, mtd->writesize); + return 0; +} + +static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip, + int page) +{ + read_oob_data(mtd, chip->oob_poi, page); + + return 0; +} + +static uint8_t denali_read_byte(struct mtd_info *mtd) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + uint32_t addr, result; + + addr = (uint32_t)MODE_11 | BANK(denali->flash_bank); + index_addr_read_data(denali, addr | 2, &result); + return (uint8_t)result & 0xFF; +} + +static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + uint32_t i, addr, result; + + /* delay for tR (data transfer from Flash array to data register) */ + udelay(25); + + /* ensure device completed else additional delay and polling */ + wait_for_irq(denali, INTR_STATUS__INT_ACT); + 
+ addr = (uint32_t)MODE_11 | BANK(denali->flash_bank); + for (i = 0; i < len; i++) { + index_addr_read_data(denali, (uint32_t)addr | 2, &result); + write_byte_to_buf(denali, result); + } + memcpy(buf, denali->buf.buf, len); +} + +static void denali_select_chip(struct mtd_info *mtd, int chip) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + + denali->flash_bank = chip; +} + +static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + int status = denali->status; + denali->status = 0; + + return status; +} + +static void denali_erase(struct mtd_info *mtd, int page) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + uint32_t cmd, irq_status; + + /* clear interrupts */ + clear_interrupts(denali); + + /* setup page read request for access type */ + cmd = MODE_10 | BANK(denali->flash_bank) | page; + index_addr(denali, cmd, 0x1); + + /* wait for erase to complete or failure to occur */ + irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP | + INTR_STATUS__ERASE_FAIL); + + if (irq_status & INTR_STATUS__ERASE_FAIL || + irq_status & INTR_STATUS__LOCKED_BLK) + denali->status = NAND_STATUS_FAIL; + else + denali->status = 0; +} + +static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col, + int page) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + uint32_t addr; + + switch (cmd) { + case NAND_CMD_PAGEPROG: + break; + case NAND_CMD_STATUS: + addr = MODE_11 | BANK(denali->flash_bank); + index_addr(denali, addr | 0, cmd); + break; + case NAND_CMD_PARAM: + clear_interrupts(denali); + case NAND_CMD_READID: + reset_buf(denali); + /* sometimes ManufactureId read from register is not right + * e.g. some of Micron MT29F32G08QAA MLC NAND chips + * So here we send READID cmd to NAND insteand + * */ + addr = MODE_11 | BANK(denali->flash_bank); + index_addr(denali, addr | 0, cmd); + index_addr(denali, addr | 1, col & 0xFF); + break; + case NAND_CMD_READ0: + case NAND_CMD_SEQIN: + denali->page = page; + break; + case NAND_CMD_RESET: + reset_bank(denali); + break; + case NAND_CMD_READOOB: + /* TODO: Read OOB data */ + break; + case NAND_CMD_ERASE1: + /* + * supporting block erase only, not multiblock erase as + * it will cross plane and software need complex calculation + * to identify the block count for the cross plane + */ + denali_erase(mtd, page); + break; + case NAND_CMD_ERASE2: + /* nothing to do here as it was done during NAND_CMD_ERASE1 */ + break; + case NAND_CMD_UNLOCK1: + addr = MODE_10 | BANK(denali->flash_bank) | page; + index_addr(denali, addr | 0, DENALI_UNLOCK_START); + break; + case NAND_CMD_UNLOCK2: + addr = MODE_10 | BANK(denali->flash_bank) | page; + index_addr(denali, addr | 0, DENALI_UNLOCK_END); + break; + case NAND_CMD_LOCK: + addr = MODE_10 | BANK(denali->flash_bank); + index_addr(denali, addr | 0, DENALI_LOCK); + break; + default: + printf(": unsupported command received 0x%x\n", cmd); + break; + } +} +/* end NAND core entry points */ + +/* Initialization code to bring the device up to a known good state */ +static void denali_hw_init(struct denali_nand_info *denali) +{ + /* + * tell driver how many bit controller will skip before writing + * ECC code in OOB. 
This is normally used for bad block marker + */ + writel(CONFIG_NAND_DENALI_SPARE_AREA_SKIP_BYTES, + denali->flash_reg + SPARE_AREA_SKIP_BYTES); + detect_max_banks(denali); + denali_nand_reset(denali); + writel(0x0F, denali->flash_reg + RB_PIN_ENABLED); + writel(CHIP_EN_DONT_CARE__FLAG, + denali->flash_reg + CHIP_ENABLE_DONT_CARE); + writel(0xffff, denali->flash_reg + SPARE_AREA_MARKER); + + /* Should set value for these registers when init */ + writel(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES); + writel(1, denali->flash_reg + ECC_ENABLE); + denali_nand_timing_set(denali); + denali_irq_init(denali); +} + +static struct nand_ecclayout nand_oob; + +static int denali_nand_init(struct nand_chip *nand) +{ + struct denali_nand_info *denali; + + denali = malloc(sizeof(*denali)); + if (!denali) + return -ENOMEM; + + nand->priv = denali; + + denali->flash_reg = (void __iomem *)CONFIG_SYS_NAND_REGS_BASE; + denali->flash_mem = (void __iomem *)CONFIG_SYS_NAND_DATA_BASE; + +#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT + /* check whether flash got BBT table (located at end of flash). As we + * use NAND_BBT_NO_OOB, the BBT page will start with + * bbt_pattern. We will have mirror pattern too */ + nand->bbt_options |= NAND_BBT_USE_FLASH; + /* + * We are using main + spare with ECC support. As BBT need ECC support, + * we need to ensure BBT code don't write to OOB for the BBT pattern. + * All BBT info will be stored into data area with ECC support. + */ + nand->bbt_options |= NAND_BBT_NO_OOB; +#endif + + nand->ecc.mode = NAND_ECC_HW; + nand->ecc.size = CONFIG_NAND_DENALI_ECC_SIZE; + nand->ecc.read_oob = denali_read_oob; + nand->ecc.write_oob = denali_write_oob; + nand->ecc.read_page = denali_read_page; + nand->ecc.read_page_raw = denali_read_page_raw; + nand->ecc.write_page = denali_write_page; + nand->ecc.write_page_raw = denali_write_page_raw; + /* + * Tell driver the ecc strength. This register may be already set + * correctly. So we read this value out. + */ + nand->ecc.strength = readl(denali->flash_reg + ECC_CORRECTION); + switch (nand->ecc.size) { + case 512: + nand->ecc.bytes = (nand->ecc.strength * 13 + 15) / 16 * 2; + break; + case 1024: + nand->ecc.bytes = (nand->ecc.strength * 14 + 15) / 16 * 2; + break; + default: + pr_err("Unsupported ECC size\n"); + return -EINVAL; + } + nand_oob.eccbytes = nand->ecc.bytes; + nand->ecc.layout = &nand_oob; + + /* Set address of hardware control function */ + nand->cmdfunc = denali_cmdfunc; + nand->read_byte = denali_read_byte; + nand->read_buf = denali_read_buf; + nand->select_chip = denali_select_chip; + nand->waitfunc = denali_waitfunc; + denali_hw_init(denali); + return 0; +} + +int board_nand_init(struct nand_chip *chip) +{ + return denali_nand_init(chip); +} diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h new file mode 100644 index 0000000..3277da7 --- /dev/null +++ b/drivers/mtd/nand/denali.h @@ -0,0 +1,467 @@ +/* + * Copyright (C) 2013-2014 Altera Corporation <www.altera.com> + * Copyright (C) 2009-2010, Intel Corporation and its suppliers. 
+ * + * SPDX-License-Identifier: GPL-2.0+ + */ + +#include <linux/mtd/nand.h> + +#define DEVICE_RESET 0x0 +#define DEVICE_RESET__BANK0 0x0001 +#define DEVICE_RESET__BANK1 0x0002 +#define DEVICE_RESET__BANK2 0x0004 +#define DEVICE_RESET__BANK3 0x0008 + +#define TRANSFER_SPARE_REG 0x10 +#define TRANSFER_SPARE_REG__FLAG 0x0001 + +#define LOAD_WAIT_CNT 0x20 +#define LOAD_WAIT_CNT__VALUE 0xffff + +#define PROGRAM_WAIT_CNT 0x30 +#define PROGRAM_WAIT_CNT__VALUE 0xffff + +#define ERASE_WAIT_CNT 0x40 +#define ERASE_WAIT_CNT__VALUE 0xffff + +#define INT_MON_CYCCNT 0x50 +#define INT_MON_CYCCNT__VALUE 0xffff + +#define RB_PIN_ENABLED 0x60 +#define RB_PIN_ENABLED__BANK0 0x0001 +#define RB_PIN_ENABLED__BANK1 0x0002 +#define RB_PIN_ENABLED__BANK2 0x0004 +#define RB_PIN_ENABLED__BANK3 0x0008 + +#define MULTIPLANE_OPERATION 0x70 +#define MULTIPLANE_OPERATION__FLAG 0x0001 + +#define MULTIPLANE_READ_ENABLE 0x80 +#define MULTIPLANE_READ_ENABLE__FLAG 0x0001 + +#define COPYBACK_DISABLE 0x90 +#define COPYBACK_DISABLE__FLAG 0x0001 + +#define CACHE_WRITE_ENABLE 0xa0 +#define CACHE_WRITE_ENABLE__FLAG 0x0001 + +#define CACHE_READ_ENABLE 0xb0 +#define CACHE_READ_ENABLE__FLAG 0x0001 + +#define PREFETCH_MODE 0xc0 +#define PREFETCH_MODE__PREFETCH_EN 0x0001 +#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0 + +#define CHIP_ENABLE_DONT_CARE 0xd0 +#define CHIP_EN_DONT_CARE__FLAG 0x01 + +#define ECC_ENABLE 0xe0 +#define ECC_ENABLE__FLAG 0x0001 + +#define GLOBAL_INT_ENABLE 0xf0 +#define GLOBAL_INT_EN_FLAG 0x01 + +#define WE_2_RE 0x100 +#define WE_2_RE__VALUE 0x003f + +#define ADDR_2_DATA 0x110 +#define ADDR_2_DATA__VALUE 0x003f + +#define RE_2_WE 0x120 +#define RE_2_WE__VALUE 0x003f + +#define ACC_CLKS 0x130 +#define ACC_CLKS__VALUE 0x000f + +#define NUMBER_OF_PLANES 0x140 +#define NUMBER_OF_PLANES__VALUE 0x0007 + +#define PAGES_PER_BLOCK 0x150 +#define PAGES_PER_BLOCK__VALUE 0xffff + +#define DEVICE_WIDTH 0x160 +#define DEVICE_WIDTH__VALUE 0x0003 + +#define DEVICE_MAIN_AREA_SIZE 0x170 +#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff + +#define DEVICE_SPARE_AREA_SIZE 0x180 +#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff + +#define TWO_ROW_ADDR_CYCLES 0x190 +#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001 + +#define MULTIPLANE_ADDR_RESTRICT 0x1a0 +#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001 + +#define ECC_CORRECTION 0x1b0 +#define ECC_CORRECTION__VALUE 0x001f + +#define READ_MODE 0x1c0 +#define READ_MODE__VALUE 0x000f + +#define WRITE_MODE 0x1d0 +#define WRITE_MODE__VALUE 0x000f + +#define COPYBACK_MODE 0x1e0 +#define COPYBACK_MODE__VALUE 0x000f + +#define RDWR_EN_LO_CNT 0x1f0 +#define RDWR_EN_LO_CNT__VALUE 0x001f + +#define RDWR_EN_HI_CNT 0x200 +#define RDWR_EN_HI_CNT__VALUE 0x001f + +#define MAX_RD_DELAY 0x210 +#define MAX_RD_DELAY__VALUE 0x000f + +#define CS_SETUP_CNT 0x220 +#define CS_SETUP_CNT__VALUE 0x001f + +#define SPARE_AREA_SKIP_BYTES 0x230 +#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f + +#define SPARE_AREA_MARKER 0x240 +#define SPARE_AREA_MARKER__VALUE 0xffff + +#define DEVICES_CONNECTED 0x250 +#define DEVICES_CONNECTED__VALUE 0x0007 + +#define DIE_MASK 0x260 +#define DIE_MASK__VALUE 0x00ff + +#define FIRST_BLOCK_OF_NEXT_PLANE 0x270 +#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff + +#define WRITE_PROTECT 0x280 +#define WRITE_PROTECT__FLAG 0x0001 + +#define RE_2_RE 0x290 +#define RE_2_RE__VALUE 0x003f + +#define MANUFACTURER_ID 0x300 +#define MANUFACTURER_ID__VALUE 0x00ff + +#define DEVICE_ID 0x310 +#define DEVICE_ID__VALUE 0x00ff + +#define DEVICE_PARAM_0 0x320 +#define DEVICE_PARAM_0__VALUE 0x00ff + +#define DEVICE_PARAM_1 
0x330 +#define DEVICE_PARAM_1__VALUE 0x00ff + +#define DEVICE_PARAM_2 0x340 +#define DEVICE_PARAM_2__VALUE 0x00ff + +#define LOGICAL_PAGE_DATA_SIZE 0x350 +#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff + +#define LOGICAL_PAGE_SPARE_SIZE 0x360 +#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff + +#define REVISION 0x370 +#define REVISION__VALUE 0xffff + +#define ONFI_DEVICE_FEATURES 0x380 +#define ONFI_DEVICE_FEATURES__VALUE 0x003f + +#define ONFI_OPTIONAL_COMMANDS 0x390 +#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f + +#define ONFI_TIMING_MODE 0x3a0 +#define ONFI_TIMING_MODE__VALUE 0x003f + +#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0 +#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f + +#define ONFI_DEVICE_NO_OF_LUNS 0x3c0 +#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff +#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100 + +#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0 +#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff + +#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0 +#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff + +#define FEATURES 0x3f0 +#define FEATURES__N_BANKS 0x0003 +#define FEATURES__ECC_MAX_ERR 0x003c +#define FEATURES__DMA 0x0040 +#define FEATURES__CMD_DMA 0x0080 +#define FEATURES__PARTITION 0x0100 +#define FEATURES__XDMA_SIDEBAND 0x0200 +#define FEATURES__GPREG 0x0400 +#define FEATURES__INDEX_ADDR 0x0800 + +#define TRANSFER_MODE 0x400 +#define TRANSFER_MODE__VALUE 0x0003 + +#define INTR_STATUS(__bank) (0x410 + ((__bank) * 0x50)) +#define INTR_EN(__bank) (0x420 + ((__bank) * 0x50)) + +/* + * Some versions of the IP have the ECC fixup handled in hardware. In this + * configuration we only get interrupted when the error is uncorrectable. + * Unfortunately this bit replaces INTR_STATUS__ECC_TRANSACTION_DONE from the + * old IP. 
+ */ +#define INTR_STATUS__ECC_UNCOR_ERR 0x0001 +#define INTR_STATUS__ECC_TRANSACTION_DONE 0x0001 +#define INTR_STATUS__ECC_ERR 0x0002 +#define INTR_STATUS__DMA_CMD_COMP 0x0004 +#define INTR_STATUS__TIME_OUT 0x0008 +#define INTR_STATUS__PROGRAM_FAIL 0x0010 +#define INTR_STATUS__ERASE_FAIL 0x0020 +#define INTR_STATUS__LOAD_COMP 0x0040 +#define INTR_STATUS__PROGRAM_COMP 0x0080 +#define INTR_STATUS__ERASE_COMP 0x0100 +#define INTR_STATUS__PIPE_CPYBCK_CMD_COMP 0x0200 +#define INTR_STATUS__LOCKED_BLK 0x0400 +#define INTR_STATUS__UNSUP_CMD 0x0800 +#define INTR_STATUS__INT_ACT 0x1000 +#define INTR_STATUS__RST_COMP 0x2000 +#define INTR_STATUS__PIPE_CMD_ERR 0x4000 +#define INTR_STATUS__PAGE_XFER_INC 0x8000 + +#define INTR_EN__ECC_TRANSACTION_DONE 0x0001 +#define INTR_EN__ECC_ERR 0x0002 +#define INTR_EN__DMA_CMD_COMP 0x0004 +#define INTR_EN__TIME_OUT 0x0008 +#define INTR_EN__PROGRAM_FAIL 0x0010 +#define INTR_EN__ERASE_FAIL 0x0020 +#define INTR_EN__LOAD_COMP 0x0040 +#define INTR_EN__PROGRAM_COMP 0x0080 +#define INTR_EN__ERASE_COMP 0x0100 +#define INTR_EN__PIPE_CPYBCK_CMD_COMP 0x0200 +#define INTR_EN__LOCKED_BLK 0x0400 +#define INTR_EN__UNSUP_CMD 0x0800 +#define INTR_EN__INT_ACT 0x1000 +#define INTR_EN__RST_COMP 0x2000 +#define INTR_EN__PIPE_CMD_ERR 0x4000 +#define INTR_EN__PAGE_XFER_INC 0x8000 + +#define PAGE_CNT(__bank) (0x430 + ((__bank) * 0x50)) +#define ERR_PAGE_ADDR(__bank) (0x440 + ((__bank) * 0x50)) +#define ERR_BLOCK_ADDR(__bank) (0x450 + ((__bank) * 0x50)) + +#define DATA_INTR 0x550 +#define DATA_INTR__WRITE_SPACE_AV 0x0001 +#define DATA_INTR__READ_DATA_AV 0x0002 + +#define DATA_INTR_EN 0x560 +#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001 +#define DATA_INTR_EN__READ_DATA_AV 0x0002 + +#define GPREG_0 0x570 +#define GPREG_0__VALUE 0xffff + +#define GPREG_1 0x580 +#define GPREG_1__VALUE 0xffff + +#define GPREG_2 0x590 +#define GPREG_2__VALUE 0xffff + +#define GPREG_3 0x5a0 +#define GPREG_3__VALUE 0xffff + +#define ECC_THRESHOLD 0x600 +#define ECC_THRESHOLD__VALUE 0x03ff + +#define ECC_ERROR_BLOCK_ADDRESS 0x610 +#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff + +#define ECC_ERROR_PAGE_ADDRESS 0x620 +#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff +#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000 + +#define ECC_ERROR_ADDRESS 0x630 +#define ECC_ERROR_ADDRESS__OFFSET 0x0fff +#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000 + +#define ERR_CORRECTION_INFO 0x640 +#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff +#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00 +#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000 +#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000 + +#define DMA_ENABLE 0x700 +#define DMA_ENABLE__FLAG 0x0001 + +#define IGNORE_ECC_DONE 0x710 +#define IGNORE_ECC_DONE__FLAG 0x0001 + +#define DMA_INTR 0x720 +#define DMA_INTR__TARGET_ERROR 0x0001 +#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002 +#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004 +#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008 +#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010 +#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020 + +#define DMA_INTR_EN 0x730 +#define DMA_INTR_EN__TARGET_ERROR 0x0001 +#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002 +#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004 +#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008 +#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010 +#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020 + +#define TARGET_ERR_ADDR_LO 0x740 +#define TARGET_ERR_ADDR_LO__VALUE 0xffff + +#define TARGET_ERR_ADDR_HI 0x750 +#define TARGET_ERR_ADDR_HI__VALUE 0xffff + +#define CHNL_ACTIVE 0x760 +#define CHNL_ACTIVE__CHANNEL0 0x0001 +#define CHNL_ACTIVE__CHANNEL1 
0x0002 +#define CHNL_ACTIVE__CHANNEL2 0x0004 +#define CHNL_ACTIVE__CHANNEL3 0x0008 + +#define ACTIVE_SRC_ID 0x800 +#define ACTIVE_SRC_ID__VALUE 0x00ff + +#define PTN_INTR 0x810 +#define PTN_INTR__CONFIG_ERROR 0x0001 +#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002 +#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004 +#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008 +#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010 +#define PTN_INTR__REG_ACCESS_ERROR 0x0020 + +#define PTN_INTR_EN 0x820 +#define PTN_INTR_EN__CONFIG_ERROR 0x0001 +#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002 +#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004 +#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008 +#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010 +#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020 + +#define PERM_SRC_ID(__bank) (0x830 + ((__bank) * 0x40)) +#define PERM_SRC_ID__SRCID 0x00ff +#define PERM_SRC_ID__DIRECT_ACCESS_ACTIVE 0x0800 +#define PERM_SRC_ID__WRITE_ACTIVE 0x2000 +#define PERM_SRC_ID__READ_ACTIVE 0x4000 +#define PERM_SRC_ID__PARTITION_VALID 0x8000 + +#define MIN_BLK_ADDR(__bank) (0x840 + ((__bank) * 0x40)) +#define MIN_BLK_ADDR__VALUE 0xffff + +#define MAX_BLK_ADDR(__bank) (0x850 + ((__bank) * 0x40)) +#define MAX_BLK_ADDR__VALUE 0xffff + +#define MIN_MAX_BANK(__bank) (0x860 + ((__bank) * 0x40)) +#define MIN_MAX_BANK__MIN_VALUE 0x0003 +#define MIN_MAX_BANK__MAX_VALUE 0x000c + +/* lld.h */ +#define GOOD_BLOCK 0 +#define DEFECTIVE_BLOCK 1 +#define READ_ERROR 2 + +#define CLK_X 5 +#define CLK_MULTI 4 + +/* spectraswconfig.h */ +#define CMD_DMA 0 + +#define SPECTRA_PARTITION_ID 0 +/**** Block Table and Reserved Block Parameters *****/ +#define SPECTRA_START_BLOCK 3 +#define NUM_FREE_BLOCKS_GATE 30 + +/* KBV - Updated to LNW scratch register address */ +#define SCRATCH_REG_ADDR CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR +#define SCRATCH_REG_SIZE 64 + +#define GLOB_HWCTL_DEFAULT_BLKS 2048 + +#define CUSTOM_CONF_PARAMS 0 + +#ifndef _LLD_NAND_ +#define _LLD_NAND_ + +#define INDEX_CTRL_REG 0x0 +#define INDEX_DATA_REG 0x10 + +#define MODE_00 0x00000000 +#define MODE_01 0x04000000 +#define MODE_10 0x08000000 +#define MODE_11 0x0C000000 + + +#define DATA_TRANSFER_MODE 0 +#define PROTECTION_PER_BLOCK 1 +#define LOAD_WAIT_COUNT 2 +#define PROGRAM_WAIT_COUNT 3 +#define ERASE_WAIT_COUNT 4 +#define INT_MONITOR_CYCLE_COUNT 5 +#define READ_BUSY_PIN_ENABLED 6 +#define MULTIPLANE_OPERATION_SUPPORT 7 +#define PRE_FETCH_MODE 8 +#define CE_DONT_CARE_SUPPORT 9 +#define COPYBACK_SUPPORT 10 +#define CACHE_WRITE_SUPPORT 11 +#define CACHE_READ_SUPPORT 12 +#define NUM_PAGES_IN_BLOCK 13 +#define ECC_ENABLE_SELECT 14 +#define WRITE_ENABLE_2_READ_ENABLE 15 +#define ADDRESS_2_DATA 16 +#define READ_ENABLE_2_WRITE_ENABLE 17 +#define TWO_ROW_ADDRESS_CYCLES 18 +#define MULTIPLANE_ADDRESS_RESTRICT 19 +#define ACC_CLOCKS 20 +#define READ_WRITE_ENABLE_LOW_COUNT 21 +#define READ_WRITE_ENABLE_HIGH_COUNT 22 + +#define ECC_SECTOR_SIZE 512 + +#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) + +struct nand_buf { + int head; + int tail; + /* seprating dma_buf as buf can be used for status read purpose */ + uint8_t dma_buf[DENALI_BUF_SIZE] __aligned(64); + uint8_t buf[DENALI_BUF_SIZE]; +}; + +#define INTEL_CE4100 1 +#define INTEL_MRST 2 +#define DT 3 + +struct denali_nand_info { + struct mtd_info mtd; + struct nand_chip *nand; + + int flash_bank; /* currently selected chip */ + int status; + int platform; + struct nand_buf buf; + struct device *dev; + int total_used_banks; + uint32_t block; /* stored for future use */ + uint32_t page; + void __iomem *flash_reg; /* Mapped 
io reg base address */ + void __iomem *flash_mem; /* Mapped io reg base address */ + + /* elements used by ISR */ + /*struct completion complete;*/ + + uint32_t irq_status; + int irq_debug_array[32]; + int idx; + int irq; + + uint32_t devnum; /* represent how many nands connected */ + uint32_t fwblks; /* represent how many blocks FW used */ + uint32_t totalblks; + uint32_t blksperchip; + uint32_t bbtskipbytes; + uint32_t max_banks; +}; + +#endif /*_LLD_NAND_*/ diff --git a/drivers/mtd/nand/denali_spl.c b/drivers/mtd/nand/denali_spl.c new file mode 100644 index 0000000..65fdde8 --- /dev/null +++ b/drivers/mtd/nand/denali_spl.c @@ -0,0 +1,231 @@ +/* + * Copyright (C) 2014 Panasonic Corporation + * + * SPDX-License-Identifier: GPL-2.0+ + */ + +#include <common.h> +#include <asm/io.h> +#include <asm/unaligned.h> +#include <linux/mtd/nand.h> +#include "denali.h" + +#define SPARE_ACCESS 0x41 +#define MAIN_ACCESS 0x42 +#define PIPELINE_ACCESS 0x2000 + +#define BANK(x) ((x) << 24) + +static void __iomem *denali_flash_mem = + (void __iomem *)CONFIG_SYS_NAND_DATA_BASE; +static void __iomem *denali_flash_reg = + (void __iomem *)CONFIG_SYS_NAND_REGS_BASE; + +static const int flash_bank; +static uint8_t page_buffer[NAND_MAX_PAGESIZE]; +static int page_size, oob_size, pages_per_block; + +static void index_addr(uint32_t address, uint32_t data) +{ + writel(address, denali_flash_mem + INDEX_CTRL_REG); + writel(data, denali_flash_mem + INDEX_DATA_REG); +} + +static int wait_for_irq(uint32_t irq_mask) +{ + unsigned long timeout = 1000000; + uint32_t intr_status; + + do { + intr_status = readl(denali_flash_reg + INTR_STATUS(flash_bank)); + + if (intr_status & INTR_STATUS__ECC_UNCOR_ERR) { + debug("Uncorrected ECC detected\n"); + return -EIO; + } + + if (intr_status & irq_mask) + break; + + udelay(1); + timeout--; + } while (timeout); + + if (!timeout) { + debug("Timeout with interrupt status %08x\n", intr_status); + return -EIO; + } + + return 0; +} + +static void read_data_from_flash_mem(uint8_t *buf, int len) +{ + int i; + uint32_t *buf32; + + /* transfer the data from the flash */ + buf32 = (uint32_t *)buf; + + /* + * Let's take care of unaligned access although it rarely happens. + * Avoid put_unaligned() for the normal use cases since it leads to + * a bit performance regression. + */ + if ((unsigned long)buf32 % 4) { + for (i = 0; i < len / 4; i++) + put_unaligned(readl(denali_flash_mem + INDEX_DATA_REG), + buf32++); + } else { + for (i = 0; i < len / 4; i++) + *buf32++ = readl(denali_flash_mem + INDEX_DATA_REG); + } + + if (len % 4) { + u32 tmp; + + tmp = cpu_to_le32(readl(denali_flash_mem + INDEX_DATA_REG)); + buf = (uint8_t *)buf32; + for (i = 0; i < len % 4; i++) { + *buf++ = tmp; + tmp >>= 8; + } + } +} + +int denali_send_pipeline_cmd(int page, int ecc_en, int access_type) +{ + uint32_t addr, cmd; + static uint32_t page_count = 1; + + writel(ecc_en, denali_flash_reg + ECC_ENABLE); + + /* clear all bits of intr_status. 
+	writel(0xffff, denali_flash_reg + INTR_STATUS(flash_bank));
+
+	addr = BANK(flash_bank) | page;
+
+	/* setup the access type */
+	cmd = MODE_10 | addr;
+	index_addr(cmd, access_type);
+
+	/* setup the pipeline command */
+	index_addr(cmd, PIPELINE_ACCESS | page_count);
+
+	cmd = MODE_01 | addr;
+	writel(cmd, denali_flash_mem + INDEX_CTRL_REG);
+
+	return wait_for_irq(INTR_STATUS__LOAD_COMP);
+}
+
+static int nand_read_oob(void *buf, int page)
+{
+	int ret;
+
+	ret = denali_send_pipeline_cmd(page, 0, SPARE_ACCESS);
+	if (ret < 0)
+		return ret;
+
+	read_data_from_flash_mem(buf, oob_size);
+
+	return 0;
+}
+
+static int nand_read_page(void *buf, int page)
+{
+	int ret;
+
+	ret = denali_send_pipeline_cmd(page, 1, MAIN_ACCESS);
+	if (ret < 0)
+		return ret;
+
+	read_data_from_flash_mem(buf, page_size);
+
+	return 0;
+}
+
+static int nand_block_isbad(int block)
+{
+	int ret;
+
+	ret = nand_read_oob(page_buffer, block * pages_per_block);
+	if (ret < 0)
+		return ret;
+
+	return page_buffer[CONFIG_SYS_NAND_BAD_BLOCK_POS] != 0xff;
+}
+
+/* nand_init() - initialize data to make nand usable by SPL */
+void nand_init(void)
+{
+	/* access to main area */
+	writel(0, denali_flash_reg + TRANSFER_SPARE_REG);
+
+	/*
+	 * These registers are expected to be already set by the hardware
+	 * or earlier boot code. So we read these values out.
+	 */
+	page_size = readl(denali_flash_reg + DEVICE_MAIN_AREA_SIZE);
+	oob_size = readl(denali_flash_reg + DEVICE_SPARE_AREA_SIZE);
+	pages_per_block = readl(denali_flash_reg + PAGES_PER_BLOCK);
+}
+
+int nand_spl_load_image(uint32_t offs, unsigned int size, void *dst)
+{
+	int block, page, column, readlen;
+	int ret;
+	int force_bad_block_check = 1;
+
+	page = offs / page_size;
+	column = offs % page_size;
+
+	block = page / pages_per_block;
+	page = page % pages_per_block;
+
+	while (size) {
+		if (force_bad_block_check || page == 0) {
+			ret = nand_block_isbad(block);
+			if (ret < 0)
+				return ret;
+
+			if (ret) {
+				block++;
+				continue;
+			}
+		}
+
+		force_bad_block_check = 0;
+
+		if (unlikely(column || size < page_size)) {
+			/* Partial page read */
+			ret = nand_read_page(page_buffer,
+					     block * pages_per_block + page);
+			if (ret < 0)
+				return ret;
+
+			readlen = min(page_size - column, size);
+			memcpy(dst, page_buffer, readlen);
+
+			column = 0;
+		} else {
+			ret = nand_read_page(dst,
+					     block * pages_per_block + page);
+			if (ret < 0)
+				return ret;
+
+			readlen = page_size;
+		}
+
+		size -= readlen;
+		dst += readlen;
+		page++;
+		if (page == pages_per_block) {
+			block++;
+			page = 0;
+		}
+	}
+
+	return 0;
+}
+
+void nand_deselect(void) {}
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 7e1e6ec..3372b64 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -37,7 +37,6 @@
 #define MAX_BANKS 8
 #define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */
-#define FCM_TIMEOUT_MSECS 10 /* Maximum number of mSecs to wait for FCM */
 
 #define LTESR_NAND_MASK (LTESR_FCT | LTESR_PAR | LTESR_CC)
@@ -199,7 +198,8 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
 	struct fsl_elbc_mtd *priv = chip->priv;
 	struct fsl_elbc_ctrl *ctrl = priv->ctrl;
 	fsl_lbc_t *lbc = ctrl->regs;
-	long long end_tick;
+	u32 timeo = (CONFIG_SYS_HZ * 10) / 1000;
+	u32 time_start;
 	u32 ltesr;
 
 	/* Setup the FMR[OP] to execute without write protection */
@@ -218,10 +218,10 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
 	out_be32(&lbc->lsor, priv->bank);
 
 	/* wait for FCM complete flag or timeout */
-	end_tick = usec2ticks(FCM_TIMEOUT_MSECS * 1000) + get_ticks();
+	time_start = get_timer(0);
 
 	ltesr = 0;
-	while (end_tick > get_ticks()) {
+	while (get_timer(time_start) < timeo) {
 		ltesr = in_be32(&lbc->ltesr);
 		if (ltesr & LTESR_CC)
 			break;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 2f04c69..81b5070 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -26,8 +26,6 @@
 #define MAX_BANKS CONFIG_SYS_FSL_IFC_BANK_COUNT
 #define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */
-#define IFC_TIMEOUT_MSECS 10 /* Maximum number of mSecs to wait for IFC
-				 NAND Machine */
 
 struct fsl_ifc_ctrl;
@@ -292,7 +290,8 @@ static int fsl_ifc_run_command(struct mtd_info *mtd)
 	struct fsl_ifc_mtd *priv = chip->priv;
 	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
 	struct fsl_ifc *ifc = ctrl->regs;
-	long long end_tick;
+	u32 timeo = (CONFIG_SYS_HZ * 10) / 1000;
+	u32 time_start;
 	u32 eccstat[4];
 	int i;
@@ -304,9 +303,9 @@ static int fsl_ifc_run_command(struct mtd_info *mtd)
 		  IFC_NAND_SEQ_STRT_FIR_STRT);
 
 	/* wait for NAND Machine complete flag or timeout */
-	end_tick = usec2ticks(IFC_TIMEOUT_MSECS * 1000) + get_ticks();
+	time_start = get_timer(0);
 
-	while (end_tick > get_ticks()) {
+	while (get_timer(time_start) < timeo) {
 		ctrl->status = ifc_in32(&ifc->ifc_nand.nand_evter_stat);
 
 		if (ctrl->status & IFC_NAND_EVTER_STAT_OPC)
@@ -812,15 +811,16 @@ static int fsl_ifc_sram_init(uint32_t ver)
 	struct fsl_ifc *ifc = ifc_ctrl->regs;
 	uint32_t cs = 0, csor = 0, csor_8k = 0, csor_ext = 0;
 	uint32_t ncfgr = 0;
-	long long end_tick;
+	u32 timeo = (CONFIG_SYS_HZ * 10) / 1000;
+	u32 time_start;
 
 	if (ver > FSL_IFC_V1_1_0) {
 		ncfgr = ifc_in32(&ifc->ifc_nand.ncfgr);
 		ifc_out32(&ifc->ifc_nand.ncfgr, ncfgr | IFC_NAND_SRAM_INIT_EN);
 
 		/* wait for SRAM_INIT bit to be clear or timeout */
-		end_tick = usec2ticks(IFC_TIMEOUT_MSECS * 1000) + get_ticks();
-		while (end_tick > get_ticks()) {
+		time_start = get_timer(0);
+		while (get_timer(time_start) < timeo) {
 			ifc_ctrl->status =
 				ifc_in32(&ifc->ifc_nand.nand_evter_stat);
@@ -863,10 +863,9 @@ static int fsl_ifc_sram_init(uint32_t ver)
 	/* start read seq */
 	ifc_out32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
 
-	/* wait for NAND Machine complete flag or timeout */
-	end_tick = usec2ticks(IFC_TIMEOUT_MSECS * 1000) + get_ticks();
+	time_start = get_timer(0);
 
-	while (end_tick > get_ticks()) {
+	while (get_timer(time_start) < timeo) {
 		ifc_ctrl->status = ifc_in32(&ifc->ifc_nand.nand_evter_stat);
 
 		if (ifc_ctrl->status & IFC_NAND_EVTER_STAT_OPC)
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 7153e3c..0b6e7ee 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -308,8 +308,7 @@ static void ioread16_rep(void *addr, void *buf, int len)
 {
 	int i;
 	u16 *p = (u16 *) buf;
-	len >>= 1;
-
+
 	for (i = 0; i < len; i++)
 		p[i] = readw(addr);
 }
@@ -318,7 +317,6 @@ static void iowrite16_rep(void *addr, void *buf, int len)
 {
 	int i;
 	u16 *p = (u16 *) buf;
-	len >>= 1;
 
 	for (i = 0; i < len; i++)
 		writew(p[i], addr);
diff --git a/drivers/mtd/spi/sandbox.c b/drivers/mtd/spi/sandbox.c
index a62ef4c..98e0a34 100644
--- a/drivers/mtd/spi/sandbox.c
+++ b/drivers/mtd/spi/sandbox.c
@@ -51,46 +51,7 @@ static const char *sandbox_sf_state_name(enum sandbox_sf_state state)
 /* Assume all SPI flashes have 3 byte addresses since they do atm */
 #define SF_ADDR_LEN 3
 
-struct sandbox_spi_flash_erase_commands {
-	u8 cmd;
-	u32 size;
-};
-#define IDCODE_LEN 5
-#define MAX_ERASE_CMDS 3
-struct sandbox_spi_flash_data {
-	const char *name;
-	u8 idcode[IDCODE_LEN];
-	u32 size;
-	const struct sandbox_spi_flash_erase_commands
-		erase_cmds[MAX_ERASE_CMDS];
-};
-
-/* Structure describing all the flashes we know how to emulate */
-static const struct sandbox_spi_flash_data sandbox_sf_flashes[] = {
-	{
-		"M25P16", { 0x20, 0x20, 0x15 }, (2 << 20),
-		{ /* erase commands */
-			{ 0xd8, (64 << 10), }, /* sector */
-			{ 0xc7, (2 << 20), }, /* bulk */
-		},
-	},
-	{
-		"W25Q32", { 0xef, 0x40, 0x16 }, (4 << 20),
-		{ /* erase commands */
-			{ 0x20, (4 << 10), }, /* 4KB */
-			{ 0xd8, (64 << 10), }, /* sector */
-			{ 0xc7, (4 << 20), }, /* bulk */
-		},
-	},
-	{
-		"W25Q128", { 0xef, 0x40, 0x18 }, (16 << 20),
-		{ /* erase commands */
-			{ 0x20, (4 << 10), }, /* 4KB */
-			{ 0xd8, (64 << 10), }, /* sector */
-			{ 0xc7, (16 << 20), }, /* bulk */
-		},
-	},
-};
+#define IDCODE_LEN 3
 
 /* Used to quickly bulk erase backing store */
 static u8 sandbox_sf_0xff[0x1000];
@@ -109,7 +70,8 @@ struct sandbox_spi_flash {
 	 */
 	enum sandbox_sf_state state;
 	uint cmd;
-	const void *cmd_data;
+	/* Erase size of current erase command */
+	uint erase_size;
 	/* Current position in the flash; used when reading/writing/etc... */
 	uint off;
 	/* How many address bytes we've consumed */
 	/* The current flash status (see STAT_XXX defines above) */
 	u16 status;
 	/* Data describing the flash we're emulating */
-	const struct sandbox_spi_flash_data *data;
+	const struct spi_flash_params *data;
 	/* The file on disk to serv up data from */
 	int fd;
 };
@@ -127,8 +89,8 @@ static int sandbox_sf_setup(void **priv, const char *spec)
 	/* spec = idcode:file */
 	struct sandbox_spi_flash *sbsf;
 	const char *file;
-	size_t i, len, idname_len;
-	const struct sandbox_spi_flash_data *data;
+	size_t len, idname_len;
+	const struct spi_flash_params *data;
 
 	file = strchr(spec, ':');
 	if (!file) {
@@ -138,15 +100,14 @@ static int sandbox_sf_setup(void **priv, const char *spec)
 	idname_len = file - spec;
 	++file;
 
-	for (i = 0; i < ARRAY_SIZE(sandbox_sf_flashes); ++i) {
-		data = &sandbox_sf_flashes[i];
+	for (data = spi_flash_params_table; data->name; data++) {
 		len = strlen(data->name);
 		if (idname_len != len)
 			continue;
 		if (!memcmp(spec, data->name, len))
 			break;
 	}
-	if (i == ARRAY_SIZE(sandbox_sf_flashes)) {
+	if (!data->name) {
 		printf("sandbox_sf: unknown flash '%*s'\n", (int)idname_len, spec);
 		goto error;
@@ -223,7 +184,6 @@ static int sandbox_sf_process_cmd(struct sandbox_spi_flash *sbsf, const u8 *rx,
 		sbsf->pad_addr_bytes = 1;
 	case CMD_READ_ARRAY_SLOW:
 	case CMD_PAGE_PROGRAM:
- state_addr:
 		sbsf->state = SF_ADDR;
 		break;
 	case CMD_WRITE_DISABLE:
@@ -241,24 +201,25 @@ static int sandbox_sf_process_cmd(struct sandbox_spi_flash *sbsf, const u8 *rx,
 		sbsf->status |= STAT_WEL;
 		break;
 	default: {
-		size_t i;
-
-		/* handle erase commands first */
-		for (i = 0; i < MAX_ERASE_CMDS; ++i) {
-			const struct sandbox_spi_flash_erase_commands *
-				erase_cmd = &sbsf->data->erase_cmds[i];
-
-			if (erase_cmd->cmd == 0x00)
-				continue;
-			if (sbsf->cmd != erase_cmd->cmd)
-				continue;
-
-			sbsf->cmd_data = erase_cmd;
-			goto state_addr;
+		int flags = sbsf->data->flags;
+
+		/* we only support erase here */
+		if (sbsf->cmd == CMD_ERASE_CHIP) {
+			sbsf->erase_size = sbsf->data->sector_size *
+				sbsf->data->nr_sectors;
+		} else if (sbsf->cmd == CMD_ERASE_4K && (flags & SECT_4K)) {
+			sbsf->erase_size = 4 << 10;
+		} else if (sbsf->cmd == CMD_ERASE_32K && (flags & SECT_32K)) {
+			sbsf->erase_size = 32 << 10;
+		} else if (sbsf->cmd == CMD_ERASE_64K &&
+			   !(flags & (SECT_4K | SECT_32K))) {
+			sbsf->erase_size = 64 << 10;
+		} else {
+			debug(" cmd unknown: %#x\n", sbsf->cmd);
+			return 1;
 		}
-
-		debug(" cmd unknown: %#x\n", sbsf->cmd);
-		return 1;
+		sbsf->state = SF_ADDR;
+		break;
 	}
 	}
@@ -309,11 +270,14 @@ static int sandbox_sf_xfer(void *priv, const u8 *rx, u8 *tx,
 			u8 id;
 
 			debug(" id: off:%u tx:", sbsf->off);
-			if (sbsf->off < IDCODE_LEN)
-				id = sbsf->data->idcode[sbsf->off];
-			else
+			if (sbsf->off < IDCODE_LEN) {
+				/* Extract correct byte from ID 0x00aabbcc */
+				id = sbsf->data->jedec >>
+					(8 * (IDCODE_LEN - 1 - sbsf->off));
+			} else {
 				id = 0;
-			debug("%02x\n", id);
+			}
+			debug("%d %02x\n", sbsf->off, id);
 			tx[pos++] = id;
 			++sbsf->off;
 			break;
@@ -406,24 +370,22 @@ static int sandbox_sf_xfer(void *priv, const u8 *rx, u8 *tx,
 			break;
 		case SF_ERASE:
 case_sf_erase: {
-			const struct sandbox_spi_flash_erase_commands *
-				erase_cmd = sbsf->cmd_data;
-
 			if (!(sbsf->status & STAT_WEL)) {
 				puts("sandbox_sf: write enable not set before erase\n");
 				goto done;
 			}
 
 			/* verify address is aligned */
-			if (sbsf->off & (erase_cmd->size - 1)) {
+			if (sbsf->off & (sbsf->erase_size - 1)) {
 				debug(" sector erase: cmd:%#x needs align:%#x, but we got %#x\n",
-				      erase_cmd->cmd, erase_cmd->size,
+				      sbsf->cmd, sbsf->erase_size,
 				      sbsf->off);
 				sbsf->status &= ~STAT_WEL;
 				goto done;
 			}
 
-			debug(" sector erase addr: %u\n", sbsf->off);
+			debug(" sector erase addr: %u, size: %u\n", sbsf->off,
+			      sbsf->erase_size);
 
 			cnt = bytes - pos;
 			sandbox_spi_tristate(&tx[pos], cnt);
@@ -433,7 +395,7 @@ static int sandbox_sf_xfer(void *priv, const u8 *rx, u8 *tx,
 			 * TODO(vapier@gentoo.org): latch WIP in status, and
 			 * delay before clearing it ?
 			 */
-			ret = sandbox_erase_part(sbsf, erase_cmd->size);
+			ret = sandbox_erase_part(sbsf, sbsf->erase_size);
 			sbsf->status &= ~STAT_WEL;
 			if (ret) {
 				debug("sandbox_sf: Erase failed\n");
diff --git a/drivers/mtd/spi/sf_params.c b/drivers/mtd/spi/sf_params.c
index ac886fd..453edf0 100644
--- a/drivers/mtd/spi/sf_params.c
+++ b/drivers/mtd/spi/sf_params.c
@@ -68,9 +68,12 @@ const struct spi_flash_params spi_flash_params_table[] = {
 	{"M25P40", 0x202013, 0x0, 64 * 1024, 8, 0, 0},
 	{"M25P80", 0x202014, 0x0, 64 * 1024, 16, 0, 0},
 	{"M25P16", 0x202015, 0x0, 64 * 1024, 32, 0, 0},
+	{"M25PE16", 0x208015, 0x1000, 64 * 1024, 32, 0, 0},
+	{"M25PX16", 0x207115, 0x1000, 64 * 1024, 32, RD_EXTN, 0},
 	{"M25P32", 0x202016, 0x0, 64 * 1024, 64, 0, 0},
 	{"M25P64", 0x202017, 0x0, 64 * 1024, 128, 0, 0},
 	{"M25P128", 0x202018, 0x0, 256 * 1024, 64, 0, 0},
+	{"M25PX64", 0x207117, 0x0, 64 * 1024, 128, 0, SECT_4K},
 	{"N25Q32", 0x20ba16, 0x0, 64 * 1024, 64, RD_FULL, WR_QPP | SECT_4K},
 	{"N25Q32A", 0x20bb16, 0x0, 64 * 1024, 64, RD_FULL, WR_QPP | SECT_4K},
 	{"N25Q64", 0x20ba17, 0x0, 64 * 1024, 128, RD_FULL, WR_QPP | SECT_4K},
@@ -116,6 +119,7 @@ const struct spi_flash_params spi_flash_params_table[] = {
 	{"W25Q64DW", 0xef6017, 0x0, 64 * 1024, 128, RD_FULL, WR_QPP | SECT_4K},
 	{"W25Q128FW", 0xef6018, 0x0, 64 * 1024, 256, RD_FULL, WR_QPP | SECT_4K},
 #endif
+	{},	/* Empty entry to terminate the list */
 	/*
 	 * Note:
 	 * Below paired flash devices has similar spi_flash params.
diff --git a/drivers/mtd/spi/spi_spl_load.c b/drivers/mtd/spi/spi_spl_load.c
index 1954b7e..59cca0f 100644
--- a/drivers/mtd/spi/spi_spl_load.c
+++ b/drivers/mtd/spi/spi_spl_load.c
@@ -56,8 +56,10 @@ void spl_spi_load_image(void)
 	 * Load U-Boot image from SPI flash into RAM
 	 */
 
-	flash = spi_flash_probe(CONFIG_SPL_SPI_BUS, CONFIG_SPL_SPI_CS,
-				CONFIG_SF_DEFAULT_SPEED, SPI_MODE_3);
+	flash = spi_flash_probe(CONFIG_SF_DEFAULT_BUS,
+				CONFIG_SF_DEFAULT_CS,
+				CONFIG_SF_DEFAULT_SPEED,
+				CONFIG_SF_DEFAULT_MODE);
 	if (!flash) {
 		puts("SPI probe failed.\n");
 		hang();
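For reference, here is a minimal sketch of how SPL code is expected to consume the entry points added by denali_spl.c above (nand_init(), nand_spl_load_image() and nand_deselect()). The CONFIG_SYS_NAND_U_BOOT_* symbols and the spl_nand_load_sketch() wrapper are illustrative assumptions taken from the usual U-Boot SPL NAND setup, not something this patch defines:

/*
 * Sketch only: an SPL caller for the denali_spl.c API, assuming the board
 * config provides the usual CONFIG_SYS_NAND_U_BOOT_* values.
 */
#include <common.h>
#include <nand.h>

void spl_nand_load_sketch(void)
{
	void *dst = (void *)CONFIG_SYS_NAND_U_BOOT_DST;	/* assumed config value */

	nand_init();	/* reads page/OOB/block geometry back from the controller */

	/* copy the payload, skipping any block whose bad block marker is set */
	if (nand_spl_load_image(CONFIG_SYS_NAND_U_BOOT_OFFS,
				CONFIG_SYS_NAND_U_BOOT_SIZE, dst))
		hang();

	nand_deselect();
}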
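The fsl_elbc_nand.c and fsl_ifc_nand.c hunks above all replace the usec2ticks()/get_ticks() deadline with the generic millisecond timer. A stand-alone sketch of that polling pattern follows; wait_ready_sketch() and its is_ready() callback are illustrative stand-ins for the driver-specific status checks, not code from this patch:

/*
 * Sketch of the get_timer()-based busy-wait used above: get_timer(0) samples
 * the millisecond counter, get_timer(start) returns the elapsed time.
 */
#include <common.h>
#include <errno.h>

static int wait_ready_sketch(int (*is_ready)(void))
{
	u32 timeo = (CONFIG_SYS_HZ * 10) / 1000;	/* 10 ms budget, as in the patch */
	u32 time_start = get_timer(0);

	while (get_timer(time_start) < timeo) {
		if (is_ready())		/* hypothetical controller status poll */
			return 0;
	}

	/* illustrative error code; the drivers above re-check a status flag instead */
	return -EIO;
}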