2 * drivers/mtd/nand/pxa3xx_nand.c
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/delay.h>
20 #include <linux/clk.h>
21 #include <linux/mtd/mtd.h>
22 #include <linux/mtd/nand.h>
23 #include <linux/mtd/partitions.h>
25 #include <linux/irq.h>
26 #include <linux/slab.h>
28 #include <linux/of_device.h>
29 #include <linux/of_mtd.h>
31 #if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
39 #include <linux/platform_data/mtd-nand-pxa3xx.h>
41 #define CHIP_DELAY_TIMEOUT (2 * HZ/10)
42 #define NAND_STOP_DELAY (2 * HZ/50)
43 #define PAGE_CHUNK_SIZE (2048)
46 * Define a buffer size for the initial command that detects the flash device:
47 * STATUS, READID and PARAM. The largest of these is the PARAM command,
50 #define INIT_BUFFER_SIZE 256
52 /* registers and bit definitions */
53 #define NDCR (0x00) /* Control register */
54 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
55 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
56 #define NDSR (0x14) /* Status Register */
57 #define NDPCR (0x18) /* Page Count Register */
58 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
59 #define NDBDR1 (0x20) /* Bad Block Register 1 */
60 #define NDECCCTRL (0x28) /* ECC control */
61 #define NDDB (0x40) /* Data Buffer */
62 #define NDCB0 (0x48) /* Command Buffer0 */
63 #define NDCB1 (0x4C) /* Command Buffer1 */
64 #define NDCB2 (0x50) /* Command Buffer2 */
/*
 * NDCR (control register) bit fields. Use an unsigned constant for bit 31:
 * left-shifting 1 into the sign bit of a signed int is undefined behaviour
 * (C11 6.5.7); the register is a 32-bit unsigned value anyway.
 */
66 #define NDCR_SPARE_EN (0x1U << 31)
67 #define NDCR_ECC_EN (0x1 << 30)
68 #define NDCR_DMA_EN (0x1 << 29)
69 #define NDCR_ND_RUN (0x1 << 28)
70 #define NDCR_DWIDTH_C (0x1 << 27)
71 #define NDCR_DWIDTH_M (0x1 << 26)
72 #define NDCR_PAGE_SZ (0x1 << 24)
73 #define NDCR_NCSX (0x1 << 23)
74 #define NDCR_ND_MODE (0x3 << 21)
75 #define NDCR_NAND_MODE (0x0)
76 #define NDCR_CLR_PG_CNT (0x1 << 20)
77 #define NDCR_STOP_ON_UNCOR (0x1 << 19)
78 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
79 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
81 #define NDCR_RA_START (0x1 << 15)
82 #define NDCR_PG_PER_BLK (0x1 << 14)
83 #define NDCR_ND_ARB_EN (0x1 << 12)
84 #define NDCR_INT_MASK (0xFFF)
86 #define NDSR_MASK (0xfff)
87 #define NDSR_ERR_CNT_OFF (16)
88 #define NDSR_ERR_CNT_MASK (0x1f)
/*
 * Parenthesize the macro argument so expressions such as
 * NDSR_ERR_CNT(a | b) expand with the intended precedence.
 */
89 #define NDSR_ERR_CNT(sr) (((sr) >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
90 #define NDSR_RDY (0x1 << 12)
91 #define NDSR_FLASH_RDY (0x1 << 11)
92 #define NDSR_CS0_PAGED (0x1 << 10)
93 #define NDSR_CS1_PAGED (0x1 << 9)
94 #define NDSR_CS0_CMDD (0x1 << 8)
95 #define NDSR_CS1_CMDD (0x1 << 7)
96 #define NDSR_CS0_BBD (0x1 << 6)
97 #define NDSR_CS1_BBD (0x1 << 5)
98 #define NDSR_UNCORERR (0x1 << 4)
99 #define NDSR_CORERR (0x1 << 3)
100 #define NDSR_WRDREQ (0x1 << 2)
101 #define NDSR_RDDREQ (0x1 << 1)
102 #define NDSR_WRCMDREQ (0x1)
104 #define NDCB0_LEN_OVRD (0x1 << 28)
105 #define NDCB0_ST_ROW_EN (0x1 << 26)
106 #define NDCB0_AUTO_RS (0x1 << 25)
107 #define NDCB0_CSEL (0x1 << 24)
108 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
109 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
110 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
111 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
112 #define NDCB0_NC (0x1 << 20)
113 #define NDCB0_DBC (0x1 << 19)
114 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
115 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
116 #define NDCB0_CMD2_MASK (0xff << 8)
117 #define NDCB0_CMD1_MASK (0xff)
118 #define NDCB0_ADDR_CYC_SHIFT (16)
120 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
121 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
122 #define EXT_CMD_TYPE_READ 4 /* Read */
123 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
124 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
125 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
126 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
128 /* macros for registers read/write */
129 #define nand_writel(info, off, val) \
130 writel_relaxed((val), (info)->mmio_base + (off))
132 #define nand_readl(info, off) \
133 readl_relaxed((info)->mmio_base + (off))
135 /* error code and state */
/* Controller flavour: NFCv1 (PXA SoCs) vs NFCv2 (Armada 370/XP). */
158 enum pxa3xx_nand_variant {
159 PXA3XX_NAND_VARIANT_PXA,
160 PXA3XX_NAND_VARIANT_ARMADA370,
/* Per-chip-select state: one attached NAND chip and its derived geometry. */
163 struct pxa3xx_nand_host {
164 struct nand_chip chip;
165 struct mtd_info *mtd;
168 /* page size of attached chip */
172 /* calculated from pxa3xx_nand_flash data */
173 unsigned int col_addr_cycles;
174 unsigned int row_addr_cycles;
175 size_t read_id_bytes;
/* Per-controller state shared by all chip selects. */
179 struct pxa3xx_nand_info {
180 struct nand_hw_control controller;
181 struct platform_device *pdev;
184 void __iomem *mmio_base;
185 unsigned long mmio_phys;
186 struct completion cmd_complete, dev_ready;
188 unsigned int buf_start;
189 unsigned int buf_count;
190 unsigned int buf_size;
191 unsigned int data_buff_pos;
192 unsigned int oob_buff_pos;
194 /* DMA information */
198 unsigned char *data_buff;
199 unsigned char *oob_buff;
200 dma_addr_t data_buff_phys;
202 struct pxa_dma_desc *data_desc;
203 dma_addr_t data_desc_addr;
205 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
209 * This driver supports NFCv1 (as found in PXA SoC)
210 * and NFCv2 (as found in Armada 370/XP SoC).
212 enum pxa3xx_nand_variant variant;
215 int use_ecc; /* use HW ECC ? */
216 int ecc_bch; /* using BCH ECC? */
217 int use_dma; /* use DMA ? */
218 int use_spare; /* use spare ? */
221 unsigned int data_size; /* data to be read from FIFO */
222 unsigned int chunk_size; /* split commands chunk size */
223 unsigned int oob_size;
224 unsigned int spare_size;
225 unsigned int ecc_size;
226 unsigned int ecc_err_cnt;
227 unsigned int max_bitflips;
230 /* cached register value */
235 /* generated NDCBx register values */
/* Module parameter: allow disabling DMA at load time (read-only, 0444). */
242 static bool use_dma = 1;
243 module_param(use_dma, bool, 0444);
244 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
/*
 * Built-in timing sets and flash descriptions, used when the attached chip
 * is identified by its ID code rather than via ONFI/DT.  Entry 0 is the
 * "DEFAULT FLASH" used only for initial probing (see DEFAULT_FLASH_TYPE).
 */
246 static struct pxa3xx_nand_timing timing[] = {
247 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
248 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
249 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
250 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
/* Fields (per pxa3xx_nand_flash): name, id, pages/block, page size,
 * flash width, DFC width, number of blocks, timing set. */
253 static struct pxa3xx_nand_flash builtin_flash_types[] = {
254 { "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] },
255 { "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] },
256 { "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] },
257 { "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] },
258 { "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] },
259 { "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] },
260 { "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] },
261 { "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] },
262 { "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
/*
 * On-flash bad-block-table descriptors.  The mirror pattern is the main
 * pattern reversed ("MVBbt0" vs "0tbBVM"); both tables live in the last
 * 8 blocks of each chip.
 */
265 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
266 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
268 static struct nand_bbt_descr bbt_main_descr = {
269 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
270 | NAND_BBT_2BIT | NAND_BBT_VERSION,
274 .maxblocks = 8, /* Last 8 blocks in each chip */
275 .pattern = bbt_pattern
278 static struct nand_bbt_descr bbt_mirror_descr = {
279 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
280 | NAND_BBT_2BIT | NAND_BBT_VERSION,
284 .maxblocks = 8, /* Last 8 blocks in each chip */
285 .pattern = bbt_mirror_pattern
/* OOB layouts for the BCH ECC configurations (eccpos lists elided here). */
288 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
291 32, 33, 34, 35, 36, 37, 38, 39,
292 40, 41, 42, 43, 44, 45, 46, 47,
293 48, 49, 50, 51, 52, 53, 54, 55,
294 56, 57, 58, 59, 60, 61, 62, 63},
295 .oobfree = { {2, 30} }
298 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
301 32, 33, 34, 35, 36, 37, 38, 39,
302 40, 41, 42, 43, 44, 45, 46, 47,
303 48, 49, 50, 51, 52, 53, 54, 55,
304 56, 57, 58, 59, 60, 61, 62, 63,
305 96, 97, 98, 99, 100, 101, 102, 103,
306 104, 105, 106, 107, 108, 109, 110, 111,
307 112, 113, 114, 115, 116, 117, 118, 119,
308 120, 121, 122, 123, 124, 125, 126, 127},
309 /* Bootrom looks in bytes 0 & 5 for bad blocks */
310 .oobfree = { {6, 26}, { 64, 32} }
313 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
316 32, 33, 34, 35, 36, 37, 38, 39,
317 40, 41, 42, 43, 44, 45, 46, 47,
318 48, 49, 50, 51, 52, 53, 54, 55,
319 56, 57, 58, 59, 60, 61, 62, 63},
323 /* Define a default flash type setting serve as flash detecting only */
324 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
/*
 * Pack timing values (in controller clock cycles) into the NDTR0/NDTR1
 * register fields, clamping each value to its field width.
 */
326 #define NDTR0_tCH(c) (min((c), 7) << 19)
327 #define NDTR0_tCS(c) (min((c), 7) << 16)
328 #define NDTR0_tWH(c) (min((c), 7) << 11)
329 #define NDTR0_tWP(c) (min((c), 7) << 8)
330 #define NDTR0_tRH(c) (min((c), 7) << 3)
331 #define NDTR0_tRP(c) (min((c), 7) << 0)
333 #define NDTR1_tR(c) (min((c), 65535) << 16)
334 #define NDTR1_tWHR(c) (min((c), 15) << 4)
335 #define NDTR1_tAR(c) (min((c), 15) << 0)
337 /* convert nano-seconds to nand flash controller clock cycles */
/* Parenthesize 'clk' so a non-trivial expression argument divides correctly. */
338 #define ns2cycle(ns, clk) (int)((ns) * ((clk) / 1000000) / 1000)
/* Device-tree match table: each compatible string carries its variant enum
 * in .data, decoded by pxa3xx_nand_get_variant(). */
340 static const struct of_device_id pxa3xx_nand_dt_ids[] = {
342 .compatible = "marvell,pxa3xx-nand",
343 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
346 .compatible = "marvell,armada370-nand",
347 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
351 MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
/*
 * Resolve the controller variant from the OF match data, defaulting to the
 * PXA (NFCv1) variant when no device-tree match is found.
 */
353 static enum pxa3xx_nand_variant
354 pxa3xx_nand_get_variant(struct platform_device *pdev)
356 const struct of_device_id *of_id =
357 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
359 return PXA3XX_NAND_VARIANT_PXA;
360 return (enum pxa3xx_nand_variant)of_id->data;
/*
 * Convert the flash timing table (nanoseconds) into NDTR0/NDTR1 register
 * values for the current controller clock rate, cache them in info for
 * later chip-select switches, and program the registers.
 */
363 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
364 const struct pxa3xx_nand_timing *t)
366 struct pxa3xx_nand_info *info = host->info_data;
367 unsigned long nand_clk = clk_get_rate(info->clk);
368 uint32_t ndtr0, ndtr1;
370 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
371 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
372 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
373 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
374 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
375 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
377 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
378 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
379 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
/* Cache so nand_cmdfunc can re-program timings on a chip-select change. */
381 info->ndtr0cs0 = ndtr0;
382 info->ndtr1cs0 = ndtr1;
383 nand_writel(info, NDTR0CS0, ndtr0);
384 nand_writel(info, NDTR1CS0, ndtr1);
388 * Set the data and OOB size, depending on the selected
389 * spare and ECC configuration.
390 * Only applicable to READ0, READOOB and PAGEPROG commands.
392 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
393 struct mtd_info *mtd)
/* Spare area is transferred only when NDCR_SPARE_EN is set in the cached NDCR. */
395 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
397 info->data_size = mtd->writesize;
401 info->oob_size = info->spare_size;
/* ECC bytes ride in the OOB area as well (branch conditions elided here). */
403 info->oob_size += info->ecc_size;
407 * NOTE: it is a must to set ND_RUN firstly, then write
408 * command buffer, otherwise, it does not work.
409 * We enable all the interrupt at the same time, and
410 * let pxa3xx_nand_irq to handle all logic.
412 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
416 ndcr = info->reg_ndcr;
/* ECC/BCH, DMA and spare enables are applied to the NDCR copy per the
 * current info->use_* flags (conditional lines elided in this listing). */
421 nand_writel(info, NDECCCTRL, 0x1);
423 ndcr &= ~NDCR_ECC_EN;
425 nand_writel(info, NDECCCTRL, 0x0);
431 ndcr &= ~NDCR_DMA_EN;
434 ndcr |= NDCR_SPARE_EN;
436 ndcr &= ~NDCR_SPARE_EN;
440 /* clear status bits and run */
441 nand_writel(info, NDCR, 0);
442 nand_writel(info, NDSR, NDSR_MASK);
443 nand_writel(info, NDCR, ndcr)
/*
 * Stop the state machine: poll for ND_RUN to self-clear (bounded by
 * NAND_STOP_DELAY iterations), then force it clear and ack all status bits.
 */
446 static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
449 int timeout = NAND_STOP_DELAY;
451 /* wait RUN bit in NDCR become 0 */
452 ndcr = nand_readl(info, NDCR);
453 while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
454 ndcr = nand_readl(info, NDCR);
459 ndcr &= ~NDCR_ND_RUN;
460 nand_writel(info, NDCR, ndcr);
462 /* clear status bits */
463 nand_writel(info, NDSR, NDSR_MASK);
/* NDCR interrupt bits are *mask* bits: clearing them enables the interrupt. */
466 static void __maybe_unused
467 enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
471 ndcr = nand_readl(info, NDCR);
472 nand_writel(info, NDCR, ndcr & ~int_mask);
/* Counterpart of enable_int(): setting the mask bits disables the interrupt. */
475 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
479 ndcr = nand_readl(info, NDCR);
480 nand_writel(info, NDCR, ndcr | int_mask);
/*
 * Read 'len' 32-bit words from the NDDB FIFO into 'data'.  With BCH enabled
 * the FIFO must be drained in bursts of 8 words with an RDDREQ poll in
 * between, per the datasheet note below.
 */
483 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
489 * According to the datasheet, when reading from NDDB
490 * with BCH enabled, after each 32 bytes reads, we
491 * have to make sure that the NDSR.RDDREQ bit is set.
493 * Drain the FIFO 8 32 bits reads at a time, and skip
494 * the polling on the last read.
497 __raw_readsl(info->mmio_base + NDDB, data, 8);
500 !(nand_readl(info, NDSR) & NDSR_RDDREQ);
503 dev_err(&info->pdev->dev,
504 "Timeout on RDDREQ while draining the FIFO\n");
/* Non-BCH (or final) path: a single burst read of the remaining words. */
516 __raw_readsl(info->mmio_base + NDDB, data, len);
/*
 * PIO data transfer for one chunk: copy up to chunk_size bytes (plus OOB,
 * if any) between the data/oob buffers and the NDDB FIFO, then advance the
 * buffer positions for multi-chunk (splitted) commands.
 */
519 static void handle_data_pio(struct pxa3xx_nand_info *info)
521 unsigned int do_bytes = min(info->data_size, info->chunk_size);
523 switch (info->state) {
524 case STATE_PIO_WRITING:
525 __raw_writesl(info->mmio_base + NDDB,
526 info->data_buff + info->data_buff_pos,
527 DIV_ROUND_UP(do_bytes, 4));
529 if (info->oob_size > 0)
530 __raw_writesl(info->mmio_base + NDDB,
531 info->oob_buff + info->oob_buff_pos,
532 DIV_ROUND_UP(info->oob_size, 4));
534 case STATE_PIO_READING:
536 info->data_buff + info->data_buff_pos,
537 DIV_ROUND_UP(do_bytes, 4));
539 if (info->oob_size > 0)
541 info->oob_buff + info->oob_buff_pos,
542 DIV_ROUND_UP(info->oob_size, 4));
545 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
550 /* Update buffer pointers for multi-page read/write */
551 info->data_buff_pos += do_bytes;
552 info->oob_buff_pos += info->oob_size;
553 info->data_size -= do_bytes;
/*
 * Kick a PXA DMA transfer between data_buff and the NDDB FIFO.  A single
 * stop-terminated descriptor covers data + OOB, rounded up to the 32-byte
 * burst size; direction is chosen from the current state.
 */
557 static void start_data_dma(struct pxa3xx_nand_info *info)
559 struct pxa_dma_desc *desc = info->data_desc;
560 int dma_len = ALIGN(info->data_size + info->oob_size, 32);
562 desc->ddadr = DDADR_STOP;
563 desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
565 switch (info->state) {
566 case STATE_DMA_WRITING:
567 desc->dsadr = info->data_buff_phys;
568 desc->dtadr = info->mmio_phys + NDDB;
569 desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
571 case STATE_DMA_READING:
572 desc->dtadr = info->data_buff_phys;
573 desc->dsadr = info->mmio_phys + NDDB;
574 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
577 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
/* Map the request channel, load the descriptor and start the channel. */
582 DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
583 DDADR(info->data_dma_ch) = info->data_desc_addr;
584 DCSR(info->data_dma_ch) |= DCSR_RUN;
/*
 * DMA completion callback: acknowledge the channel status, record a bus
 * error if one occurred, then re-enable controller interrupts and re-arm
 * the data-request status bits so the main IRQ handler can finish up.
 */
587 static void pxa3xx_nand_data_dma_irq(int channel, void *data)
589 struct pxa3xx_nand_info *info = data;
592 dcsr = DCSR(channel);
593 DCSR(channel) = dcsr;
595 if (dcsr & DCSR_BUSERR) {
596 info->retcode = ERR_DMABUSERR;
599 info->state = STATE_DMA_DONE;
600 enable_int(info, NDCR_INT_MASK);
601 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
/* Stub used when DMA support is compiled out (body elided in this listing). */
604 static void start_data_dma(struct pxa3xx_nand_info *info)
/*
 * Main controller interrupt handler.  Decodes NDSR, records ECC results,
 * dispatches data transfers (DMA or PIO), and feeds the command buffer
 * when the controller requests it.  Completion objects signal the waiting
 * cmdfunc when the command/ready events fire.
 */
608 static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
610 struct pxa3xx_nand_info *info = devid;
611 unsigned int status, is_completed = 0, is_ready = 0;
612 unsigned int ready, cmd_done;
/* Pick the CS0/CS1 flavour of the ready/command-done bits (CS test elided). */
615 ready = NDSR_FLASH_RDY;
616 cmd_done = NDSR_CS0_CMDD;
619 cmd_done = NDSR_CS1_CMDD;
622 status = nand_readl(info, NDSR);
624 if (status & NDSR_UNCORERR)
625 info->retcode = ERR_UNCORERR;
626 if (status & NDSR_CORERR) {
627 info->retcode = ERR_CORERR;
/* NFCv2 reports the corrected-bit count in NDSR; NFCv1 can only say "1". */
628 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
630 info->ecc_err_cnt = NDSR_ERR_CNT(status);
632 info->ecc_err_cnt = 1;
635 * Each chunk composing a page is corrected independently,
636 * and we need to store maximum number of corrected bitflips
637 * to return it to the MTD layer in ecc.read_page().
639 info->max_bitflips = max_t(unsigned int,
643 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
644 /* whether use dma to transfer data */
646 disable_int(info, NDCR_INT_MASK);
647 info->state = (status & NDSR_RDDREQ) ?
648 STATE_DMA_READING : STATE_DMA_WRITING;
649 start_data_dma(info);
650 goto NORMAL_IRQ_EXIT;
652 info->state = (status & NDSR_RDDREQ) ?
653 STATE_PIO_READING : STATE_PIO_WRITING;
654 handle_data_pio(info);
657 if (status & cmd_done) {
658 info->state = STATE_CMD_DONE;
661 if (status & ready) {
662 info->state = STATE_READY;
666 if (status & NDSR_WRCMDREQ) {
667 nand_writel(info, NDSR, NDSR_WRCMDREQ);
668 status &= ~NDSR_WRCMDREQ;
669 info->state = STATE_CMD_HANDLE;
672 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
673 * must be loaded by writing directly either 12 or 16
674 * bytes directly to NDCB0, four bytes at a time.
676 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
677 * but each NDCBx register can be read.
679 nand_writel(info, NDCB0, info->ndcb0);
680 nand_writel(info, NDCB0, info->ndcb1);
681 nand_writel(info, NDCB0, info->ndcb2);
683 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
684 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
685 nand_writel(info, NDCB0, info->ndcb3);
688 /* clear NDSR to let the controller exit the IRQ */
689 nand_writel(info, NDSR, status);
/* Wake the waiters (guards on is_completed/is_ready elided in listing). */
691 complete(&info->cmd_complete);
693 complete(&info->dev_ready);
/* Return whether the buffer is entirely 0xFF (erased); used to skip
 * programming blank pages and to ignore bogus ECC errors on blank reads. */
698 static inline int is_buf_blank(uint8_t *buf, size_t len)
700 for (; len > 0; len--)
/*
 * Encode column + page address into NDCB1/NDCB2.  Small-page chips use a
 * 1-byte column; large-page chips use a 2-byte column with the page number
 * split across NDCB1 (low 16 bits) and NDCB2 (bits 16-23).
 */
706 static void set_command_address(struct pxa3xx_nand_info *info,
707 unsigned int page_size, uint16_t column, int page_addr)
709 /* small page addr setting */
710 if (page_size < PAGE_CHUNK_SIZE) {
711 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
716 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
719 if (page_addr & 0xFF0000)
720 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
/*
 * Per-command bookkeeping done before building the NDCBx words: reset the
 * buffer cursors and result fields, compute transfer sizes for data
 * commands, and pre-fill the bounce buffer with 0xFF for reads/writes.
 */
726 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
728 struct pxa3xx_nand_host *host = info->host[info->cs];
729 struct mtd_info *mtd = host->mtd;
731 /* reset data and oob column point to handle data */
735 info->data_buff_pos = 0;
736 info->oob_buff_pos = 0;
739 info->retcode = ERR_NONE;
740 info->ecc_err_cnt = 0;
746 case NAND_CMD_PAGEPROG:
748 case NAND_CMD_READOOB:
749 pxa3xx_set_datasize(info, mtd);
761 * If we are about to issue a read command, or about to set
762 * the write address, then clean the data buffer.
764 if (command == NAND_CMD_READ0 ||
765 command == NAND_CMD_READOOB ||
766 command == NAND_CMD_SEQIN) {
768 info->buf_count = mtd->writesize + mtd->oobsize;
769 memset(info->data_buff, 0xFF, info->buf_count);
/*
 * Translate a NAND command into NDCB0..NDCB3 register values.  Returns
 * whether the command actually needs to be executed by the controller
 * (exec_cmd).  For chips larger than the controller's 2 KiB FIFO chunk,
 * reads and writes are split using the NFCv2 'extended command type'
 * field together with the NDCB3 length override.
 */
774 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
775 int ext_cmd_type, uint16_t column, int page_addr)
777 int addr_cycle, exec_cmd;
778 struct pxa3xx_nand_host *host;
779 struct mtd_info *mtd;
781 host = info->host[info->cs];
/* CSEL selects chip-select 0/1 (the conditional is elided in this listing). */
787 info->ndcb0 = NDCB0_CSEL;
791 if (command == NAND_CMD_SEQIN)
794 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
795 + host->col_addr_cycles);
798 case NAND_CMD_READOOB:
800 info->buf_start = column;
801 info->ndcb0 |= NDCB0_CMD_TYPE(0)
/* READOOB is emulated as a full-page read offset past the data area. */
805 if (command == NAND_CMD_READOOB)
806 info->buf_start += mtd->writesize;
809 * Multiple page read needs an 'extended command type' field,
810 * which is either naked-read or last-read according to the
813 if (mtd->writesize == PAGE_CHUNK_SIZE) {
814 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
815 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
816 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
818 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
819 info->ndcb3 = info->chunk_size +
823 set_command_address(info, mtd->writesize, column, page_addr);
828 info->buf_start = column;
829 set_command_address(info, mtd->writesize, 0, page_addr);
832 * Multiple page programming needs to execute the initial
833 * SEQIN command that sets the page address.
835 if (mtd->writesize > PAGE_CHUNK_SIZE) {
836 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
837 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
840 /* No data transfer in this case */
846 case NAND_CMD_PAGEPROG:
/* Skip programming entirely when the whole page buffer is still 0xFF. */
847 if (is_buf_blank(info->data_buff,
848 (mtd->writesize + mtd->oobsize))) {
853 /* Second command setting for large pages */
854 if (mtd->writesize > PAGE_CHUNK_SIZE) {
856 * Multiple page write uses the 'extended command'
857 * field. This can be used to issue a command dispatch
858 * or a naked-write depending on the current stage.
860 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
862 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
863 info->ndcb3 = info->chunk_size +
867 * This is the command dispatch that completes a chunked
868 * page program operation.
870 if (info->data_size == 0) {
871 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
872 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
879 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
883 | (NAND_CMD_PAGEPROG << 8)
/* PARAM (and similar init-time) reads use the fixed 256-byte buffer. */
890 info->buf_count = 256;
891 info->ndcb0 |= NDCB0_CMD_TYPE(0)
895 info->ndcb1 = (column & 0xFF);
897 info->data_size = 256;
900 case NAND_CMD_READID:
901 info->buf_count = host->read_id_bytes;
902 info->ndcb0 |= NDCB0_CMD_TYPE(3)
905 info->ndcb1 = (column & 0xFF);
909 case NAND_CMD_STATUS:
911 info->ndcb0 |= NDCB0_CMD_TYPE(4)
918 case NAND_CMD_ERASE1:
919 info->ndcb0 |= NDCB0_CMD_TYPE(2)
923 | (NAND_CMD_ERASE2 << 8)
925 info->ndcb1 = page_addr;
930 info->ndcb0 |= NDCB0_CMD_TYPE(5)
/* ERASE2 is already folded into ERASE1's double-byte command above. */
935 case NAND_CMD_ERASE2:
941 dev_err(&info->pdev->dev, "non-supported command %x\n",
/*
 * mtd cmdfunc for NFCv1 (monolithic commands only): build the NDCBx words,
 * start the state machine, and wait (with timeout) for command completion.
 */
949 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
950 int column, int page_addr)
952 struct pxa3xx_nand_host *host = mtd->priv;
953 struct pxa3xx_nand_info *info = host->info_data;
957 * if this is a x16 device ,then convert the input
958 * "byte" address into a "word" address appropriate
959 * for indexing a word-oriented device
961 if (info->reg_ndcr & NDCR_DWIDTH_M)
965 * There may be different NAND chip hooked to
966 * different chip select, so check whether
967 * chip select has been changed, if yes, reset the timing
969 if (info->cs != host->cs) {
971 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
972 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
975 prepare_start_command(info, command);
977 info->state = STATE_PREPARED;
978 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
981 init_completion(&info->cmd_complete);
982 init_completion(&info->dev_ready);
984 pxa3xx_nand_start(info);
986 ret = wait_for_completion_timeout(&info->cmd_complete,
989 dev_err(&info->pdev->dev, "Wait time out!!!\n");
990 /* Stop State Machine for next command cycle */
991 pxa3xx_nand_stop(info);
994 info->state = STATE_IDLE;
/*
 * mtd cmdfunc for NFCv2 (Armada 370/XP): like nand_cmdfunc(), but large
 * pages are transferred as a sequence of chunked ("naked") sub-commands,
 * looping until the whole page has been read or programmed.  The extended
 * command type is advanced between iterations (naked -> last / dispatch).
 */
997 static void nand_cmdfunc_extended(struct mtd_info *mtd,
998 const unsigned command,
999 int column, int page_addr)
1001 struct pxa3xx_nand_host *host = mtd->priv;
1002 struct pxa3xx_nand_info *info = host->info_data;
1003 int ret, exec_cmd, ext_cmd_type;
1006 * if this is a x16 device then convert the input
1007 * "byte" address into a "word" address appropriate
1008 * for indexing a word-oriented device
1010 if (info->reg_ndcr & NDCR_DWIDTH_M)
1014 * There may be different NAND chip hooked to
1015 * different chip select, so check whether
1016 * chip select has been changed, if yes, reset the timing
1018 if (info->cs != host->cs) {
1019 info->cs = host->cs;
1020 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1021 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1024 /* Select the extended command for the first command */
1026 case NAND_CMD_READ0:
1027 case NAND_CMD_READOOB:
1028 ext_cmd_type = EXT_CMD_TYPE_MONO;
1030 case NAND_CMD_SEQIN:
1031 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1033 case NAND_CMD_PAGEPROG:
1034 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1041 prepare_start_command(info, command);
1044 * Prepare the "is ready" completion before starting a command
1045 * transaction sequence. If the command is not executed the
1046 * completion will be completed, see below.
1048 * We can do that inside the loop because the command variable
1049 * is invariant and thus so is the exec_cmd.
1051 info->need_wait = 1;
1052 init_completion(&info->dev_ready);
1054 info->state = STATE_PREPARED;
1055 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
/* Command not executed: release any waiter immediately. */
1058 info->need_wait = 0;
1059 complete(&info->dev_ready);
1063 init_completion(&info->cmd_complete);
1064 pxa3xx_nand_start(info);
1066 ret = wait_for_completion_timeout(&info->cmd_complete,
1067 CHIP_DELAY_TIMEOUT);
1069 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1070 /* Stop State Machine for next command cycle */
1071 pxa3xx_nand_stop(info);
1075 /* Check if the sequence is complete */
1076 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1080 * After a splitted program command sequence has issued
1081 * the command dispatch, the command sequence is complete.
1083 if (info->data_size == 0 &&
1084 command == NAND_CMD_PAGEPROG &&
1085 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1088 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1089 /* Last read: issue a 'last naked read' */
1090 if (info->data_size == info->chunk_size)
1091 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1093 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1096 * If a splitted program command has no more data to transfer,
1097 * the command dispatch must be issued to complete.
1099 } else if (command == NAND_CMD_PAGEPROG &&
1100 info->data_size == 0) {
1101 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1105 info->state = STATE_IDLE;
/* ecc.write_page: stage the page data and OOB into the driver's bounce
 * buffer; the actual transfer happens on the later PAGEPROG command. */
1108 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1109 struct nand_chip *chip, const uint8_t *buf, int oob_required)
1111 chip->write_buf(mtd, buf, mtd->writesize);
1112 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
/*
 * ecc.read_page: copy data + OOB out of the bounce buffer, then fold the
 * controller's ECC result into mtd->ecc_stats.  Returns the maximum number
 * of corrected bitflips seen in any chunk of this page.
 */
1117 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1118 struct nand_chip *chip, uint8_t *buf, int oob_required,
1121 struct pxa3xx_nand_host *host = mtd->priv;
1122 struct pxa3xx_nand_info *info = host->info_data;
1124 chip->read_buf(mtd, buf, mtd->writesize);
1125 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1127 if (info->retcode == ERR_CORERR && info->use_ecc) {
1128 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1130 } else if (info->retcode == ERR_UNCORERR) {
1132 * for blank page (all 0xff), HW will calculate its ECC as
1133 * 0, which is different from the ECC information within
1134 * OOB, ignore such uncorrectable errors
1136 if (is_buf_blank(buf, mtd->writesize))
1137 info->retcode = ERR_NONE;
1139 mtd->ecc_stats.failed++;
1142 return info->max_bitflips;
/* mtd read_byte: return the next byte from the bounce buffer, or the
 * default (0xFF, set on the elided line) once the buffer is exhausted. */
1145 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1147 struct pxa3xx_nand_host *host = mtd->priv;
1148 struct pxa3xx_nand_info *info = host->info_data;
1151 if (info->buf_start < info->buf_count)
1152 /* Has just send a new command? */
1153 retval = info->data_buff[info->buf_start++];
/* mtd read_word: 16-bit variant of read_byte; only serves aligned reads
 * within the buffer, otherwise returns 0xFFFF. */
1158 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1160 struct pxa3xx_nand_host *host = mtd->priv;
1161 struct pxa3xx_nand_info *info = host->info_data;
1162 u16 retval = 0xFFFF;
1164 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1165 retval = *((u16 *)(info->data_buff+info->buf_start));
1166 info->buf_start += 2;
/* mtd read_buf: copy out of the bounce buffer, clamped to what remains. */
1171 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1173 struct pxa3xx_nand_host *host = mtd->priv;
1174 struct pxa3xx_nand_info *info = host->info_data;
1175 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1177 memcpy(buf, info->data_buff + info->buf_start, real_len);
1178 info->buf_start += real_len;
/* mtd write_buf: copy into the bounce buffer, clamped to what remains. */
1181 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1182 const uint8_t *buf, int len)
1184 struct pxa3xx_nand_host *host = mtd->priv;
1185 struct pxa3xx_nand_info *info = host->info_data;
1186 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1188 memcpy(info->data_buff + info->buf_start, buf, real_len);
1189 info->buf_start += real_len;
/* select_chip: intentionally a no-op — CS handling is done in cmdfunc. */
1192 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
/*
 * mtd waitfunc: block on the dev_ready completion if a command sequence
 * armed it, then report NAND status.  Program/erase failures recorded by
 * the IRQ handler are surfaced as NAND_STATUS_FAIL.
 */
1197 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1199 struct pxa3xx_nand_host *host = mtd->priv;
1200 struct pxa3xx_nand_info *info = host->info_data;
1203 if (info->need_wait) {
1204 ret = wait_for_completion_timeout(&info->dev_ready,
1205 CHIP_DELAY_TIMEOUT);
1206 info->need_wait = 0;
1208 dev_err(&info->pdev->dev, "Ready time out!!!\n");
1209 return NAND_STATUS_FAIL;
1213 /* pxa3xx_nand_send_command has waited for command complete */
1214 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1215 if (info->retcode == ERR_NONE)
1218 return NAND_STATUS_FAIL;
1221 return NAND_STATUS_READY;
/*
 * Derive addressing/geometry parameters from a flash description and build
 * the baseline NDCR value (cached in info->reg_ndcr), then program the
 * timing registers for this chip.  Only 512/2048-byte pages and 8/16-bit
 * widths are supported.
 */
1224 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
1225 const struct pxa3xx_nand_flash *f)
1227 struct platform_device *pdev = info->pdev;
1228 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1229 struct pxa3xx_nand_host *host = info->host[info->cs];
1230 uint32_t ndcr = 0x0; /* enable all interrupts */
1232 if (f->page_size != 2048 && f->page_size != 512) {
1233 dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
1237 if (f->flash_width != 16 && f->flash_width != 8) {
1238 dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
1242 /* calculate flash information */
1243 host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
1245 /* calculate addressing information */
1246 host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
/* 3 row-address cycles are needed once the chip exceeds 64K pages. */
1248 if (f->num_blocks * f->page_per_block > 65536)
1249 host->row_addr_cycles = 3;
1251 host->row_addr_cycles = 2;
1253 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1254 ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1255 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1256 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1257 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1258 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1260 ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
1261 ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1263 info->reg_ndcr = ndcr;
1265 pxa3xx_nand_set_timing(host, f->timing);
/*
 * "keep_config" path: instead of programming the controller, read back the
 * bootloader's NDCR/NDTR settings and derive geometry from them.
 */
1269 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1272 * We set 0 by hard coding here, for we don't support keep_config
1273 * when there is more than one chip attached to the controller
1275 struct pxa3xx_nand_host *host = info->host[0];
1276 uint32_t ndcr = nand_readl(info, NDCR);
1278 if (ndcr & NDCR_PAGE_SZ) {
1279 /* Controller's FIFO size */
1280 info->chunk_size = 2048;
1281 host->read_id_bytes = 4;
1283 info->chunk_size = 512;
1284 host->read_id_bytes = 2;
1287 /* Set an initial chunk size */
/* Cache NDCR with interrupts masked plus the bootloader's timing values. */
1288 info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1289 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1290 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
/*
 * DMA-capable buffer setup: a plain kmalloc buffer for the PIO case, or a
 * coherent DMA buffer whose tail holds the pxa_dma_desc, plus a PXA DMA
 * channel.  On channel-request failure the coherent buffer is released.
 */
1295 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1297 struct platform_device *pdev = info->pdev;
1298 int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
1301 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1302 if (info->data_buff == NULL)
1307 info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
1308 &info->data_buff_phys, GFP_KERNEL);
1309 if (info->data_buff == NULL) {
1310 dev_err(&pdev->dev, "failed to allocate dma buffer\n");
1314 info->data_desc = (void *)info->data_buff + data_desc_offset;
1315 info->data_desc_addr = info->data_buff_phys + data_desc_offset;
1317 info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
1318 pxa3xx_nand_data_dma_irq, info);
1319 if (info->data_dma_ch < 0) {
1320 dev_err(&pdev->dev, "failed to request data dma\n");
1321 dma_free_coherent(&pdev->dev, info->buf_size,
1322 info->data_buff, info->data_buff_phys);
1323 return info->data_dma_ch;
1327 * Now that DMA buffers are allocated we turn on
1328 * DMA proper for I/O operations.
/* Release whatever pxa3xx_nand_init_buff() allocated: DMA channel plus
 * coherent buffer in the DMA case, a plain kmalloc buffer otherwise. */
1334 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1336 struct platform_device *pdev = info->pdev;
1337 if (info->use_dma) {
1338 pxa_free_dma(info->data_dma_ch);
1339 dma_free_coherent(&pdev->dev, info->buf_size,
1340 info->data_buff, info->data_buff_phys);
1342 kfree(info->data_buff);
/*
 * Non-DMA build variant (originally the #else branch of ARCH_HAS_DMA --
 * confirm the preprocessor guards survive in the full file): the I/O
 * buffer is always a plain kernel allocation.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	return 0;
}
/* Non-DMA build variant: just release the kmalloc()'d I/O buffer. */
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	kfree(info->data_buff);
}
/*
 * Probe whether a NAND chip responds on the current chip select by
 * programming conservative default timings and issuing a RESET command.
 *
 * Returns 0 when the chip answered, a negative errno when configuration
 * failed or the chip reported failure status (no chip present).
 */
static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
{
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int ret;

	mtd = info->host[info->cs]->mtd;
	chip = mtd->priv;

	/* use the common timing to make a try */
	ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
	if (ret)
		return ret;

	/* RESET is answered by any present chip regardless of geometry. */
	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
		return -ENODEV;

	return 0;
}
/*
 * Select the controller ECC geometry (chunk size, spare bytes, ECC bytes,
 * layout) for the requested ECC strength / step size / page size.
 *
 * The controller cannot match every requirement exactly; for BCH cases it
 * over-provisions (e.g. 16-bit correction per 2048 bytes to satisfy a
 * 4-bit-per-512 requirement).  Exact field values below are hardware
 * constants -- do not adjust them without the controller datasheet.
 *
 * Returns 0 on success, -ENODEV for an unsupported combination.
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		/* Hamming, 1-bit per 512 bytes, 2 KiB pages. */
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		/* Hamming, 1-bit per 512 bytes, 512-byte pages. */
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		/* Same BCH engine setting, 4 KiB pages (two 2 KiB chunks). */
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 1024;
		info->spare_size = 0;	/* no spare left at this strength */
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
		 ecc->strength, ecc->size);
	return 0;
}
/*
 * Detect and configure the NAND chip behind @mtd.
 *
 * Flow: optionally keep the bootloader's controller configuration
 * (pdata->keep_config), otherwise sense the chip with default timings,
 * read its ID, match it against platform-provided then built-in flash
 * tables, and program timings for the match.  Then run the generic
 * nand_scan_ident()/nand_scan_tail() pair with the ECC engine chosen by
 * pxa_ecc_init(), and size the real data+OOB buffer.
 *
 * Returns 0 on success or a negative errno (no chip, unknown ID,
 * unsupported configuration, allocation failure).
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
	const struct pxa3xx_nand_flash *f = NULL;
	struct nand_chip *chip = mtd->priv;
	uint32_t id = -1;
	uint64_t chipsize;
	int i, ret, num;
	uint16_t ecc_strength, ecc_step;

	/* Trust the configuration left by the bootloader, if asked to. */
	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	ret = pxa3xx_nand_sensing(info);
	if (ret) {
		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
			 info->cs);
		return ret;
	}

	chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
	id = *((uint16_t *)(info->data_buff));
	if (id != 0)
		dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
	else {
		dev_warn(&info->pdev->dev,
			 "Read out ID 0, potential timing set wrong!!\n");
		return -EINVAL;
	}

	/* Search platform flash table first, then the built-in table
	 * (skipping its entry 0, which is the generic sensing default). */
	num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
	for (i = 0; i < num; i++) {
		if (i < pdata->num_flash)
			f = pdata->flash + i;
		else
			f = &builtin_flash_types[i - pdata->num_flash + 1];

		/* find the chip in default list */
		if (f->chip_id == id)
			break;
	}

	if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
		dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
		return -EINVAL;
	}

	ret = pxa3xx_nand_config_flash(info, f);
	if (ret) {
		dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
		return ret;
	}

	/* Build a one-entry ID table so nand_scan_ident() matches exactly
	 * the chip we just detected. */
	memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));

	pxa3xx_flash_ids[0].name = f->name;
	pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
	pxa3xx_flash_ids[0].pagesize = f->page_size;
	chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
	pxa3xx_flash_ids[0].chipsize = chipsize >> 20;	/* bytes -> MiB */
	pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
	if (f->flash_width == 16)
		pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
	pxa3xx_flash_ids[1].name = NULL;	/* table terminator */
	def = pxa3xx_flash_ids;
KEEP_CONFIG:
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, def))
		return -ENODEV;

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka splitted) command handling,
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* Platform data overrides the chip's own (ONFI) requirements. */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	/* Chips with more than 64K pages need a third row address cycle. */
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	return nand_scan_tail(mtd);
}
1600 static int alloc_nand_resource(struct platform_device *pdev)
1602 struct pxa3xx_nand_platform_data *pdata;
1603 struct pxa3xx_nand_info *info;
1604 struct pxa3xx_nand_host *host;
1605 struct nand_chip *chip = NULL;
1606 struct mtd_info *mtd;
1610 pdata = dev_get_platdata(&pdev->dev);
1611 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1612 sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1617 info->variant = pxa3xx_nand_get_variant(pdev);
1618 for (cs = 0; cs < pdata->num_cs; cs++) {
1619 mtd = (struct mtd_info *)((unsigned int)&info[1] +
1620 (sizeof(*mtd) + sizeof(*host)) * cs);
1621 chip = (struct nand_chip *)(&mtd[1]);
1622 host = (struct pxa3xx_nand_host *)chip;
1623 info->host[cs] = host;
1626 host->info_data = info;
1628 mtd->owner = THIS_MODULE;
1630 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1631 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1632 chip->controller = &info->controller;
1633 chip->waitfunc = pxa3xx_nand_waitfunc;
1634 chip->select_chip = pxa3xx_nand_select_chip;
1635 chip->read_word = pxa3xx_nand_read_word;
1636 chip->read_byte = pxa3xx_nand_read_byte;
1637 chip->read_buf = pxa3xx_nand_read_buf;
1638 chip->write_buf = pxa3xx_nand_write_buf;
1639 chip->options |= NAND_NO_SUBPAGE_WRITE;
1640 chip->cmdfunc = nand_cmdfunc;
1643 spin_lock_init(&chip->controller->lock);
1644 init_waitqueue_head(&chip->controller->wq);
1645 info->clk = devm_clk_get(&pdev->dev, NULL);
1646 if (IS_ERR(info->clk)) {
1647 dev_err(&pdev->dev, "failed to get nand clock\n");
1648 return PTR_ERR(info->clk);
1650 ret = clk_prepare_enable(info->clk);
1656 * This is a dirty hack to make this driver work from
1657 * devicetree bindings. It can be removed once we have
1658 * a prober DMA controller framework for DT.
1660 if (pdev->dev.of_node &&
1661 of_machine_is_compatible("marvell,pxa3xx")) {
1662 info->drcmr_dat = 97;
1663 info->drcmr_cmd = 99;
1665 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1668 "no resource defined for data DMA\n");
1670 goto fail_disable_clk;
1672 info->drcmr_dat = r->start;
1674 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1677 "no resource defined for cmd DMA\n");
1679 goto fail_disable_clk;
1681 info->drcmr_cmd = r->start;
1685 irq = platform_get_irq(pdev, 0);
1687 dev_err(&pdev->dev, "no IRQ resource defined\n");
1689 goto fail_disable_clk;
1692 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1693 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1694 if (IS_ERR(info->mmio_base)) {
1695 ret = PTR_ERR(info->mmio_base);
1696 goto fail_disable_clk;
1698 info->mmio_phys = r->start;
1700 /* Allocate a buffer to allow flash detection */
1701 info->buf_size = INIT_BUFFER_SIZE;
1702 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1703 if (info->data_buff == NULL) {
1705 goto fail_disable_clk;
1708 /* initialize all interrupts to be disabled */
1709 disable_int(info, NDSR_MASK);
1711 ret = request_irq(irq, pxa3xx_nand_irq, 0, pdev->name, info);
1713 dev_err(&pdev->dev, "failed to request IRQ\n");
1717 platform_set_drvdata(pdev, info);
1722 free_irq(irq, info);
1723 kfree(info->data_buff);
1725 clk_disable_unprepare(info->clk);
/*
 * Platform-driver remove: tear down in reverse order of probe -- IRQ,
 * I/O buffers, clock, then unregister each per-CS MTD device.
 * devm-managed resources (info, MMIO mapping, clk handle) are released
 * automatically by the driver core.
 */
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;

	if (!info)
		return 0;

	pdata = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	pxa3xx_nand_free_buff(info);

	clk_disable_unprepare(info->clk);

	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(info->host[cs]->mtd);
	return 0;
}
/*
 * Build platform data from devicetree properties.  A no-op (returns 0)
 * when the device did not match the DT id table, so legacy board-file
 * platform data keeps working.  Negative of_get_nand_ecc_* results mean
 * "property absent" and are normalised to 0 (auto-select later).
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);

	if (!of_id)
		return 0;	/* not probed via DT: keep existing pdata */

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
		pdata->enable_arbiter = 1;
	if (of_get_property(np, "marvell,nand-keep-config", NULL))
		pdata->keep_config = 1;
	of_property_read_u32(np, "num-cs", &pdata->num_cs);
	pdata->flash_bbt = of_get_nand_on_flash_bbt(np);

	pdata->ecc_strength = of_get_nand_ecc_strength(np);
	if (pdata->ecc_strength < 0)
		pdata->ecc_strength = 0;

	pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
	if (pdata->ecc_step_size < 0)
		pdata->ecc_step_size = 0;

	pdev->dev.platform_data = pdata;

	return 0;
}
/*
 * Platform-driver probe: resolve platform data (DT or board file),
 * allocate controller resources, then scan and register each chip
 * select.  A per-CS scan failure is non-fatal; the probe only fails if
 * no chip select registered successfully.
 */
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_part_parser_data ppdata = {};
	struct pxa3xx_nand_info *info;
	int ret, cs, probe_success;

#ifndef ARCH_HAS_DMA
	if (use_dma) {
		use_dma = 0;
		dev_warn(&pdev->dev,
			 "This platform can't do DMA on this device\n");
	}
#endif
	ret = pxa3xx_nand_probe_dt(pdev);
	if (ret)
		return ret;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	ret = alloc_nand_resource(pdev);
	if (ret) {
		dev_err(&pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	info = platform_get_drvdata(pdev);
	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = info->host[cs]->mtd;

		/*
		 * The mtd name matches the one used in 'mtdparts' kernel
		 * parameter. This name cannot be changed or otherwise
		 * user's mtd partitions configuration would get broken.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
				 cs);
			continue;	/* try the remaining chip selects */
		}

		ppdata.of_node = pdev->dev.of_node;
		ret = mtd_device_parse_register(mtd, NULL,
						&ppdata, pdata->parts[cs],
						pdata->nr_parts[cs]);
		if (!ret)
			probe_success = 1;
	}

	if (!probe_success) {
		pxa3xx_nand_remove(pdev);
		return -ENODEV;
	}

	return 0;
}
/*
 * Legacy platform-bus suspend hook: refuse to suspend while a command is
 * in flight (info->state non-idle), otherwise suspend every per-CS MTD.
 */
static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_info *mtd;
	int cs;

	pdata = dev_get_platdata(&pdev->dev);
	if (info->state) {
		/* A NAND operation is still running: cannot suspend now. */
		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
		return -EAGAIN;
	}

	for (cs = 0; cs < pdata->num_cs; cs++) {
		mtd = info->host[cs]->mtd;
		mtd_suspend(mtd);
	}

	return 0;
}
/*
 * Legacy platform-bus resume hook: mask controller interrupts, force a
 * timing reprogram on the next command, clear stale status bits, then
 * resume every per-CS MTD.
 */
static int pxa3xx_nand_resume(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_info *mtd;
	int cs;

	pdata = dev_get_platdata(&pdev->dev);
	/* We don't want to handle interrupt without calling mtd routine */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Directly set the chip select to a invalid value,
	 * then the driver would reset the timing according
	 * to current chip select at the beginning of cmdfunc
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, the NDSR would be updated to 0x1800 when
	 * doing the nand_clk disable/enable.
	 * To prevent it damaging state machine of the driver, clear
	 * all status before resume
	 */
	nand_writel(info, NDSR, NDSR_MASK);
	for (cs = 0; cs < pdata->num_cs; cs++) {
		mtd = info->host[cs]->mtd;
		mtd_resume(mtd);
	}

	return 0;
}
1906 #define pxa3xx_nand_suspend NULL
1907 #define pxa3xx_nand_resume NULL
/*
 * Platform driver glue.  .suspend/.resume resolve to NULL when CONFIG_PM
 * is disabled (see the #define stubs above this block in the full file).
 */
static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

module_platform_driver(pxa3xx_nand_driver);
1923 MODULE_LICENSE("GPL");
1924 MODULE_DESCRIPTION("PXA3xx NAND controller driver");