Merge branch 'stable-3.2' into pandora-3.2
[pandora-kernel.git] / drivers / mtd / nand / omap2.c
1 /*
2  * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
3  * Copyright © 2004 Micron Technology Inc.
4  * Copyright © 2004 David Brownell
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10
11 #include <linux/platform_device.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/delay.h>
14 #include <linux/module.h>
15 #include <linux/interrupt.h>
16 #include <linux/jiffies.h>
17 #include <linux/sched.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/mtd/nand.h>
20 #include <linux/mtd/partitions.h>
21 #include <linux/io.h>
22 #include <linux/slab.h>
23
24 #include <plat/dma.h>
25 #include <plat/gpmc.h>
26 #include <plat/nand.h>
27 #include <asm/system.h>
28
#define	DRIVER_NAME	"omap2-nand"
#define	OMAP_NAND_TIMEOUT_MS	5000

/*
 * Parity-bit masks of the hardware ECC word: NAND_Ecc_P<n>[eo] is the
 * parity over the "even" / "odd" half of every <n>-bit group, as laid
 * out by the GPMC ECC engine (consumed by gen_true_ecc()).
 */
#define NAND_Ecc_P1e		(1 << 0)
#define NAND_Ecc_P2e		(1 << 1)
#define NAND_Ecc_P4e		(1 << 2)
#define NAND_Ecc_P8e		(1 << 3)
#define NAND_Ecc_P16e		(1 << 4)
#define NAND_Ecc_P32e		(1 << 5)
#define NAND_Ecc_P64e		(1 << 6)
#define NAND_Ecc_P128e		(1 << 7)
#define NAND_Ecc_P256e		(1 << 8)
#define NAND_Ecc_P512e		(1 << 9)
#define NAND_Ecc_P1024e		(1 << 10)
#define NAND_Ecc_P2048e		(1 << 11)

#define NAND_Ecc_P1o		(1 << 16)
#define NAND_Ecc_P2o		(1 << 17)
#define NAND_Ecc_P4o		(1 << 18)
#define NAND_Ecc_P8o		(1 << 19)
#define NAND_Ecc_P16o		(1 << 20)
#define NAND_Ecc_P32o		(1 << 21)
#define NAND_Ecc_P64o		(1 << 22)
#define NAND_Ecc_P128o		(1 << 23)
#define NAND_Ecc_P256o		(1 << 24)
#define NAND_Ecc_P512o		(1 << 25)
#define NAND_Ecc_P1024o		(1 << 26)
#define NAND_Ecc_P2048o		(1 << 27)

/* 1 if any bit of 'value' is set, else 0. Argument parenthesized so
 * arbitrary expressions expand safely (macro-hygiene fix). */
#define TF(value)	((value) ? 1 : 0)

/*
 * P*(a): extract one parity bit from ECC word 'a' and place it at the
 * bit position the stored ECC byte expects. The three groups of eight
 * macros below map to the three ECC bytes built in gen_true_ecc().
 * All arguments are parenthesized for macro hygiene.
 */
#define P2048e(a)	(TF((a) & NAND_Ecc_P2048e)	<< 0)
#define P2048o(a)	(TF((a) & NAND_Ecc_P2048o)	<< 1)
#define P1e(a)		(TF((a) & NAND_Ecc_P1e)		<< 2)
#define P1o(a)		(TF((a) & NAND_Ecc_P1o)		<< 3)
#define P2e(a)		(TF((a) & NAND_Ecc_P2e)		<< 4)
#define P2o(a)		(TF((a) & NAND_Ecc_P2o)		<< 5)
#define P4e(a)		(TF((a) & NAND_Ecc_P4e)		<< 6)
#define P4o(a)		(TF((a) & NAND_Ecc_P4o)		<< 7)

#define P8e(a)		(TF((a) & NAND_Ecc_P8e)		<< 0)
#define P8o(a)		(TF((a) & NAND_Ecc_P8o)		<< 1)
#define P16e(a)		(TF((a) & NAND_Ecc_P16e)	<< 2)
#define P16o(a)		(TF((a) & NAND_Ecc_P16o)	<< 3)
#define P32e(a)		(TF((a) & NAND_Ecc_P32e)	<< 4)
#define P32o(a)		(TF((a) & NAND_Ecc_P32o)	<< 5)
#define P64e(a)		(TF((a) & NAND_Ecc_P64e)	<< 6)
#define P64o(a)		(TF((a) & NAND_Ecc_P64o)	<< 7)

#define P128e(a)	(TF((a) & NAND_Ecc_P128e)	<< 0)
#define P128o(a)	(TF((a) & NAND_Ecc_P128o)	<< 1)
#define P256e(a)	(TF((a) & NAND_Ecc_P256e)	<< 2)
#define P256o(a)	(TF((a) & NAND_Ecc_P256o)	<< 3)
#define P512e(a)	(TF((a) & NAND_Ecc_P512e)	<< 4)
#define P512o(a)	(TF((a) & NAND_Ecc_P512o)	<< 5)
#define P1024e(a)	(TF((a) & NAND_Ecc_P1024e)	<< 6)
#define P1024o(a)	(TF((a) & NAND_Ecc_P1024o)	<< 7)

/* _s variants: alternate bit placement — presumably for a swapped ECC
 * byte layout used elsewhere in the driver; TODO confirm against the
 * callers outside this chunk. */
#define P8e_s(a)	(TF((a) & NAND_Ecc_P8e)		<< 0)
#define P8o_s(a)	(TF((a) & NAND_Ecc_P8o)		<< 1)
#define P16e_s(a)	(TF((a) & NAND_Ecc_P16e)	<< 2)
#define P16o_s(a)	(TF((a) & NAND_Ecc_P16o)	<< 3)
#define P1e_s(a)	(TF((a) & NAND_Ecc_P1e)		<< 4)
#define P1o_s(a)	(TF((a) & NAND_Ecc_P1o)		<< 5)
#define P2e_s(a)	(TF((a) & NAND_Ecc_P2e)		<< 6)
#define P2o_s(a)	(TF((a) & NAND_Ecc_P2o)		<< 7)

#define P4e_s(a)	(TF((a) & NAND_Ecc_P4e)		<< 0)
#define P4o_s(a)	(TF((a) & NAND_Ecc_P4o)		<< 1)
98
/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
/* Define some generic bad / good block scan pattern which are used
 * while scanning a device for factory marked good / bad blocks
 */
static uint8_t scan_ff_pattern[] = { 0xff };
/* Flash-based BBT descriptor: a block counts as factory-good when the
 * first OOB byte of its pages reads back 0xff (scan_ff_pattern). */
static struct nand_bbt_descr bb_descrip_flashbased = {
	.options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
	.offs = 0,
	.len = 1,
	.pattern = scan_ff_pattern,
};
111
112
/* Per-device driver state, embedding the MTD and NAND-core objects. */
struct omap_nand_info {
	struct nand_hw_control		controller;
	struct omap_nand_platform_data	*pdata;	/* board-supplied config */
	struct mtd_info			mtd;
	struct nand_chip		nand;
	struct platform_device		*pdev;

	int				gpmc_cs;	/* GPMC chip-select this device sits on */
	unsigned long			phys_base;	/* used as the constant DMA port address */
	struct completion		comp;		/* signalled when a DMA/irq transfer ends */
	int				dma_ch;		/* system DMA channel for prefetch transfers */
	int				gpmc_irq;
	/* direction of the in-flight irq-driven transfer */
	enum {
		OMAP_NAND_IO_READ = 0,	/* read */
		OMAP_NAND_IO_WRITE,	/* write */
	} iomode;
	u_char				*buf;		/* cursor into caller buffer (irq mode) */
	int					buf_len;	/* bytes remaining (irq mode) */
};
132
133 /**
134  * omap_hwcontrol - hardware specific access to control-lines
135  * @mtd: MTD device structure
136  * @cmd: command to device
137  * @ctrl:
138  * NAND_NCE: bit 0 -> don't care
139  * NAND_CLE: bit 1 -> Command Latch
140  * NAND_ALE: bit 2 -> Address Latch
141  *
142  * NOTE: boards may use different bits for these!!
143  */
144 static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
145 {
146         struct omap_nand_info *info = container_of(mtd,
147                                         struct omap_nand_info, mtd);
148
149         if (cmd != NAND_CMD_NONE) {
150                 if (ctrl & NAND_CLE)
151                         gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
152
153                 else if (ctrl & NAND_ALE)
154                         gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
155
156                 else /* NAND_NCE */
157                         gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
158         }
159 }
160
161 /**
162  * omap_read_buf8 - read data from NAND controller into buffer
163  * @mtd: MTD device structure
164  * @buf: buffer to store date
165  * @len: number of bytes to read
166  */
167 static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
168 {
169         struct nand_chip *nand = mtd->priv;
170
171         ioread8_rep(nand->IO_ADDR_R, buf, len);
172 }
173
174 /**
175  * omap_write_buf8 - write buffer to NAND controller
176  * @mtd: MTD device structure
177  * @buf: data buffer
178  * @len: number of bytes to write
179  */
180 static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
181 {
182         struct omap_nand_info *info = container_of(mtd,
183                                                 struct omap_nand_info, mtd);
184         u_char *p = (u_char *)buf;
185         u32     status = 0;
186
187         while (len--) {
188                 iowrite8(*p++, info->nand.IO_ADDR_W);
189                 /* wait until buffer is available for write */
190                 do {
191                         status = gpmc_read_status(GPMC_STATUS_BUFFER);
192                 } while (!status);
193         }
194 }
195
196 /**
197  * omap_read_buf16 - read data from NAND controller into buffer
198  * @mtd: MTD device structure
199  * @buf: buffer to store date
200  * @len: number of bytes to read
201  */
202 static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
203 {
204         struct nand_chip *nand = mtd->priv;
205
206         ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
207 }
208
209 /**
210  * omap_write_buf16 - write buffer to NAND controller
211  * @mtd: MTD device structure
212  * @buf: data buffer
213  * @len: number of bytes to write
214  */
215 static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
216 {
217         struct omap_nand_info *info = container_of(mtd,
218                                                 struct omap_nand_info, mtd);
219         u16 *p = (u16 *) buf;
220         u32     status = 0;
221         /* FIXME try bursts of writesw() or DMA ... */
222         len >>= 1;
223
224         while (len--) {
225                 iowrite16(*p++, info->nand.IO_ADDR_W);
226                 /* wait until buffer is available for write */
227                 do {
228                         status = gpmc_read_status(GPMC_STATUS_BUFFER);
229                 } while (!status);
230         }
231 }
232
/**
 * omap_read_buf_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 *
 * Streams data through the GPMC prefetch (PFPW) engine in 32-bit
 * words; falls back to plain CPU copies when the engine is busy.
 */
static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t r_count = 0;
	int ret = 0;
	u32 *p = (u32 *)buf;

	/* take care of subpage reads: copy the unaligned head (len % 4
	 * bytes) with CPU reads so the main loop works in whole words */
	if (len % 4) {
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len % 4);
		else
			omap_read_buf8(mtd, buf, len % 4);
		p = (u32 *) (buf + len % 4);
		len -= len % 4;
	}

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, (u_char *)p, len);
		else
			omap_read_buf8(mtd, (u_char *)p, len);
	} else {
		do {
			/* drain whatever the FIFO currently holds,
			 * rounded down to whole 32-bit words */
			r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
			r_count = r_count >> 2;
			ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
			p += r_count;
			len -= r_count << 2;
		} while (len);
		/* disable and stop the PFPW engine */
		gpmc_prefetch_reset(info->gpmc_cs);
	}
}
278
/**
 * omap_write_buf_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Streams @buf through the GPMC prefetch (write-posting) engine in
 * 16-bit units; falls back to CPU copies when the engine is busy.
 */
static void omap_write_buf_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t w_count = 0;
	int i = 0, ret = 0;
	u16 *p = (u16 *)buf;
	unsigned long tim, limit;

	/* take care of subpage writes: push one leading byte so the
	 * remaining length is 16-bit aligned */
	if (len % 2 != 0) {
		writeb(*buf, info->nand.IO_ADDR_W);
		p = (u16 *)(buf + 1);
		len--;
	}

	/*  configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_write_buf16(mtd, (u_char *)p, len);
		else
			omap_write_buf8(mtd, (u_char *)p, len);
	} else {
		while (len) {
			/* write only as many half-words as the FIFO
			 * currently has room for */
			w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
			w_count = w_count >> 1;
			for (i = 0; (i < w_count) && len; i++, len -= 2)
				iowrite16(*p++, info->nand.IO_ADDR_W);
		}
		/* wait for data to be flushed out before resetting the
		 * prefetch engine; bounded busy-wait of up to
		 * OMAP_NAND_TIMEOUT_MS */
		tim = 0;
		limit = (loops_per_jiffy *
					msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
		while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
			cpu_relax();

		/* disable and stop the PFPW engine */
		gpmc_prefetch_reset(info->gpmc_cs);
	}
}
329
330 /*
331  * omap_nand_dma_cb: callback on the completion of dma transfer
332  * @lch: logical channel
333  * @ch_satuts: channel status
334  * @data: pointer to completion data structure
335  */
336 static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
337 {
338         complete((struct completion *) data);
339 }
340
/*
 * omap_nand_dma_transfer: configure and start dma transfer
 * @mtd: MTD device structure
 * @addr: virtual address in RAM of source/destination
 * @len: number of data bytes to be transferred
 * @is_write: flag for read/write operation
 *
 * Couples the GPMC prefetch engine with a system DMA channel. On any
 * setup failure it degrades to the CPU copy helpers, so it always
 * returns 0.
 */
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
							DMA_FROM_DEVICE;
	dma_addr_t dma_addr;
	int ret;
	unsigned long tim, limit;

	/* The fifo depth is 64 bytes max.
	 * But configure the FIFO-threshold to 32 to get a sync at each frame
	 * and frame length is 32 bytes.
	 */
	int buf_len = len >> 6;

	/* vmalloc'd buffers are not guaranteed physically contiguous:
	 * only DMA them when the whole transfer lies in one page,
	 * otherwise fall back to CPU copy */
	if (addr >= high_memory) {
		struct page *p1;

		if (((size_t)addr & PAGE_MASK) !=
			((size_t)(addr + len - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(addr);
		if (!p1)
			goto out_copy;
		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
	}

	dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
	if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
		dev_err(&info->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n", len);
		goto out_copy;
	}

	/* one end of the transfer is always the fixed NAND data port
	 * (constant address mode), the other walks RAM (post-increment) */
	if (is_write) {
	    omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
						info->phys_base, 0, 0);
	    omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
							dma_addr, 0, 0);
	    omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
					OMAP24XX_DMA_GPMC,
					OMAP_DMA_DST_SYNC_PREFETCH);
	} else {
	    omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
						info->phys_base, 0, 0);
	    omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
							dma_addr, 0, 0);
	    omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
					OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
	}
	/*  configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy_unmap;

	/* this will be short, avoid CPU wakeup latency */
	disable_hlt();
	init_completion(&info->comp);

	omap_start_dma(info->dma_ch);

	/* setup and start DMA using dma_addr; omap_nand_dma_cb()
	 * completes info->comp when the channel finishes */
	wait_for_completion(&info->comp);
	enable_hlt();

	/* bounded busy-wait for the engine to drain before reset */
	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
		cpu_relax();

	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);

	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
	return 0;

out_copy_unmap:
	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
out_copy:
	/* CPU fallback; ?: with void operands is valid C here */
	if (info->nand.options & NAND_BUSWIDTH_16)
		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
			: omap_write_buf16(mtd, (u_char *) addr, len);
	else
		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
			: omap_write_buf8(mtd, (u_char *) addr, len);
	return 0;
}
441
442 /**
443  * omap_read_buf_dma_pref - read data from NAND controller into buffer
444  * @mtd: MTD device structure
445  * @buf: buffer to store date
446  * @len: number of bytes to read
447  */
448 static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
449 {
450         if (len <= mtd->oobsize)
451                 omap_read_buf_pref(mtd, buf, len);
452         else
453                 /* start transfer in DMA mode */
454                 omap_nand_dma_transfer(mtd, buf, len, 0x0);
455 }
456
457 /**
458  * omap_write_buf_dma_pref - write buffer to NAND controller
459  * @mtd: MTD device structure
460  * @buf: data buffer
461  * @len: number of bytes to write
462  */
463 static void omap_write_buf_dma_pref(struct mtd_info *mtd,
464                                         const u_char *buf, int len)
465 {
466         if (len <= mtd->oobsize)
467                 omap_write_buf_pref(mtd, buf, len);
468         else
469                 /* start transfer in DMA mode */
470                 omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
471 }
472
/*
 * omap_nand_irq - GPMC irq handler
 * @this_irq: gpmc irq number
 * @dev: omap_nand_info structure pointer is passed here
 *
 * Services the irq-driven read/write paths: moves whole 32-bit words
 * between info->buf and the NAND data register, and completes
 * info->comp when the terminal event fires.
 */
static irqreturn_t omap_nand_irq(int this_irq, void *dev)
{
	struct omap_nand_info *info = (struct omap_nand_info *) dev;
	u32 bytes;
	u32 irq_stat;

	irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
	bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
	bytes = bytes  & 0xFFFC; /* io in multiple of 4 bytes */
	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
		/* bit 0x2 = transfer-complete — presumably the COUNT
		 * event enabled in omap_write_buf_irq_pref(); confirm
		 * against the GPMC irq-status layout */
		if (irq_stat & 0x2)
			goto done;

		/* clamp so we never read past the caller's buffer */
		if (info->buf_len && (info->buf_len < bytes))
			bytes = info->buf_len;
		else if (!info->buf_len)
			bytes = 0;
		iowrite32_rep(info->nand.IO_ADDR_W,
						(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;
		info->buf_len -= bytes;

	} else {
		/* read path: drain the FIFO, then check for completion */
		ioread32_rep(info->nand.IO_ADDR_R,
						(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;

		if (irq_stat & 0x2)
			goto done;
	}
	/* ack the serviced events; more FIFO irqs will follow */
	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);

	return IRQ_HANDLED;

done:
	complete(&info->comp);
	/* disable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);

	/* clear status */
	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);

	return IRQ_HANDLED;
}
522
/*
 * omap_read_buf_irq_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 *
 * Interrupt-driven variant: arms the prefetch engine, enables the GPMC
 * FIFO/count irqs and sleeps until omap_nand_irq() signals completion.
 * OOB-sized reads take the cheaper polled path; if the engine is busy
 * the transfer falls back to plain CPU copies.
 */
static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;

	if (len <= mtd->oobsize) {
		omap_read_buf_pref(mtd, buf, len);
		return;
	}

	/* publish transfer state consumed by omap_nand_irq() */
	info->iomode = OMAP_NAND_IO_READ;
	info->buf = buf;
	init_completion(&info->comp);

	/*  configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;
	/* enable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
		(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));

	/* waiting for read to complete */
	wait_for_completion(&info->comp);

	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_read_buf16(mtd, buf, len);
	else
		omap_read_buf8(mtd, buf, len);
}
569
/*
 * omap_write_buf_irq_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Interrupt-driven variant: arms the prefetch engine (write direction),
 * enables the GPMC FIFO/count irqs and sleeps until omap_nand_irq()
 * signals completion. OOB-sized writes take the polled path; if the
 * engine is busy the transfer falls back to plain CPU copies.
 */
static void omap_write_buf_irq_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;
	unsigned long tim, limit;

	if (len <= mtd->oobsize) {
		omap_write_buf_pref(mtd, buf, len);
		return;
	}

	/* publish transfer state consumed by omap_nand_irq() */
	info->iomode = OMAP_NAND_IO_WRITE;
	info->buf = (u_char *) buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer : size=24 */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
			(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;
	/* enable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
			(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));

	/* waiting for write to complete */
	wait_for_completion(&info->comp);
	/* wait for data to be flushed out before resetting the prefetch
	 * engine; bounded busy-wait of up to OMAP_NAND_TIMEOUT_MS */
	tim = 0;
	limit = (loops_per_jiffy *  msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
		cpu_relax();

	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_write_buf16(mtd, buf, len);
	else
		omap_write_buf8(mtd, buf, len);
}
623
/**
 * omap_verify_buf - Verify chip data against buffer
 * @mtd: MTD device structure
 * @buf: buffer containing the data to compare
 * @len: number of bytes to compare
 *
 * Returns 0 when the flash contents match @buf, -EFAULT on the first
 * mismatching 16-bit word.
 *
 * NOTE(review): comparison runs in 16-bit units (len >>= 1), so a
 * trailing odd byte is never checked, and readw() assumes a 16-bit
 * bus — confirm this path is only wired up for 16-bit devices.
 */
static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	u16 *p = (u16 *) buf;

	len >>= 1;
	while (len--) {
		if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
			return -EFAULT;
	}

	return 0;
}
644
645 /**
646  * gen_true_ecc - This function will generate true ECC value
647  * @ecc_buf: buffer to store ecc code
648  *
649  * This generated true ECC value can be used when correcting
650  * data read from NAND flash memory core
651  */
652 static void gen_true_ecc(u8 *ecc_buf)
653 {
654         u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
655                 ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
656
657         ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
658                         P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
659         ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
660                         P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
661         ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
662                         P1e(tmp) | P2048o(tmp) | P2048e(tmp));
663 }
664
/**
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1:  ecc code from nand spare area
 * @ecc_data2:  ecc code from hardware register obtained from hardware ecc
 * @page_data:  page data
 *
 * This function compares two ECC's and indicates if there is an error.
 * If the error can be corrected it will be corrected to the buffer.
 * If there is no error, %0 is returned. If there is an error but it
 * was corrected, %1 is returned. Otherwise, %-1 is returned.
 *
 * Note: both @ecc_data1 and @ecc_data2 are consumed destructively
 * (inverted in place, then shifted down to zero bit by bit).
 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			    u8 *ecc_data2,	/* read from register */
			    u8 *page_data)
{
	uint	i;
	u8	tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8	comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8	ecc_bit[24];
	u8	ecc_sum = 0;
	u8	find_bit = 0;
	uint	find_byte = 0;
	int	isEccFF;

	/* all-0xFF spare-area ECC: remembered for the erased-page case
	 * handled in the default branch below */
	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	/* undo the inversion applied by gen_true_ecc() */
	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	/* explode each ECC byte into a per-bit array, LSB first
	 * (destroys the source bytes) */
	for (i = 0; i < 8; i++) {
		tmp0_bit[i]     = *ecc_data1 % 2;
		*ecc_data1      = *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i]      = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i]      = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i]     = *ecc_data2 % 2;
		*ecc_data2       = *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i]     = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i]     = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	/* XOR stored vs computed parity bits: ecc_bit[] is the syndrome */
	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	/* the number of set syndrome bits classifies the error */
	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 *  ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		pr_debug("ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* UN-Correctable error */
		pr_debug("ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error: the odd-numbered syndrome bits
		 * encode the faulty byte offset and bit position */
		find_byte = (ecc_bit[23] << 8) +
			    (ecc_bit[21] << 7) +
			    (ecc_bit[19] << 6) +
			    (ecc_bit[17] << 5) +
			    (ecc_bit[15] << 4) +
			    (ecc_bit[13] << 3) +
			    (ecc_bit[11] << 2) +
			    (ecc_bit[9]  << 1) +
			    ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		pr_debug("Correcting single bit ECC error at offset: "
				"%d, bit: %d\n", find_byte, find_bit);

		/* flip the faulty bit in place */
		page_data[find_byte] ^= (1 << find_bit);

		return 1;
	default:
		/* erased page (spare ECC all 0xFF) with a clean computed
		 * ECC is not an error */
		if (isEccFF) {
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		pr_debug("UNCORRECTED_ERROR default\n");
		return -1;
	}
}
792
793 /**
794  * omap_correct_data - Compares the ECC read with HW generated ECC
795  * @mtd: MTD device structure
796  * @dat: page data
797  * @read_ecc: ecc read from nand flash
798  * @calc_ecc: ecc read from HW ECC registers
799  *
800  * Compares the ecc read from nand spare area with ECC registers values
801  * and if ECC's mismatched, it will call 'omap_compare_ecc' for error
802  * detection and correction. If there are no errors, %0 is returned. If
803  * there were errors and all of the errors were corrected, the number of
804  * corrected errors is returned. If uncorrectable errors exist, %-1 is
805  * returned.
806  */
807 static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
808                                 u_char *read_ecc, u_char *calc_ecc)
809 {
810         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
811                                                         mtd);
812         int blockCnt = 0, i = 0, ret = 0;
813         int stat = 0;
814
815         /* Ex NAND_ECC_HW12_2048 */
816         if ((info->nand.ecc.mode == NAND_ECC_HW) &&
817                         (info->nand.ecc.size  == 2048))
818                 blockCnt = 4;
819         else
820                 blockCnt = 1;
821
822         for (i = 0; i < blockCnt; i++) {
823                 if (memcmp(read_ecc, calc_ecc, 3) != 0) {
824                         ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
825                         if (ret < 0)
826                                 return ret;
827                         /* keep track of the number of corrected errors */
828                         stat += ret;
829                 }
830                 read_ecc += 3;
831                 calc_ecc += 3;
832                 dat      += 512;
833         }
834         return stat;
835 }
836
837 /**
838  * omap_calcuate_ecc - Generate non-inverted ECC bytes.
839  * @mtd: MTD device structure
840  * @dat: The pointer to data on which ecc is computed
841  * @ecc_code: The ecc_code buffer
842  *
843  * Using noninverted ECC can be considered ugly since writing a blank
844  * page ie. padding will clear the ECC bytes. This is no problem as long
845  * nobody is trying to write data on the seemingly unused page. Reading
846  * an erased page will produce an ECC mismatch between generated and read
847  * ECC bytes that has to be dealt with separately.
848  */
849 static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
850                                 u_char *ecc_code)
851 {
852         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
853                                                         mtd);
854         return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
855 }
856
857 /**
858  * omap_enable_hwecc - This function enables the hardware ecc functionality
859  * @mtd: MTD device structure
860  * @mode: Read/Write mode
861  */
862 static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
863 {
864         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
865                                                         mtd);
866         struct nand_chip *chip = mtd->priv;
867         unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
868
869         gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
870 }
871
872 /**
873  * omap_dev_ready - calls the platform specific dev_ready function
874  * @mtd: MTD device structure
875  */
876 static int omap_dev_ready(struct mtd_info *mtd)
877 {
878         return !!gpmc_read_status(GPMC_STATUS_WAIT);
879 }
880
881 static int __devinit omap_nand_probe(struct platform_device *pdev)
882 {
883         struct omap_nand_info           *info;
884         struct omap_nand_platform_data  *pdata;
885         int                             err;
886         int                             i, offset;
887
888         pdata = pdev->dev.platform_data;
889         if (pdata == NULL) {
890                 dev_err(&pdev->dev, "platform data missing\n");
891                 return -ENODEV;
892         }
893
894         info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
895         if (!info)
896                 return -ENOMEM;
897
898         platform_set_drvdata(pdev, info);
899
900         spin_lock_init(&info->controller.lock);
901         init_waitqueue_head(&info->controller.wq);
902
903         info->pdev = pdev;
904
905         info->gpmc_cs           = pdata->cs;
906         info->phys_base         = pdata->phys_base;
907
908         info->mtd.priv          = &info->nand;
909         info->mtd.name          = dev_name(&pdev->dev);
910         info->mtd.owner         = THIS_MODULE;
911
912         info->nand.options      = pdata->devsize;
913         info->nand.options      |= NAND_SKIP_BBTSCAN;
914
915         /* NAND write protect off */
916         gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
917
918         if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
919                                 pdev->dev.driver->name)) {
920                 err = -EBUSY;
921                 goto out_free_info;
922         }
923
924         info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
925         if (!info->nand.IO_ADDR_R) {
926                 err = -ENOMEM;
927                 goto out_release_mem_region;
928         }
929
930         info->nand.controller = &info->controller;
931
932         info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
933         info->nand.cmd_ctrl  = omap_hwcontrol;
934
935         /*
936          * If RDY/BSY line is connected to OMAP then use the omap ready
937          * funcrtion and the generic nand_wait function which reads the status
938          * register after monitoring the RDY/BSY line.Otherwise use a standard
939          * chip delay which is slightly more than tR (AC Timing) of the NAND
940          * device and read status register until you get a failure or success
941          */
942         if (pdata->dev_ready) {
943                 info->nand.dev_ready = omap_dev_ready;
944                 info->nand.chip_delay = 0;
945         } else {
946                 info->nand.chip_delay = 50;
947         }
948
949         switch (pdata->xfer_type) {
950         case NAND_OMAP_PREFETCH_POLLED:
951                 info->nand.read_buf   = omap_read_buf_pref;
952                 info->nand.write_buf  = omap_write_buf_pref;
953                 break;
954
955         case NAND_OMAP_POLLED:
956                 if (info->nand.options & NAND_BUSWIDTH_16) {
957                         info->nand.read_buf   = omap_read_buf16;
958                         info->nand.write_buf  = omap_write_buf16;
959                 } else {
960                         info->nand.read_buf   = omap_read_buf8;
961                         info->nand.write_buf  = omap_write_buf8;
962                 }
963                 break;
964
965         case NAND_OMAP_PREFETCH_DMA:
966                 err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
967                                 omap_nand_dma_cb, &info->comp, &info->dma_ch);
968                 if (err < 0) {
969                         info->dma_ch = -1;
970                         dev_err(&pdev->dev, "DMA request failed!\n");
971                         goto out_release_mem_region;
972                 } else {
973                         omap_set_dma_dest_burst_mode(info->dma_ch,
974                                         OMAP_DMA_DATA_BURST_16);
975                         omap_set_dma_dest_data_pack(info->dma_ch, 1);
976                         omap_set_dma_src_burst_mode(info->dma_ch,
977                                         OMAP_DMA_DATA_BURST_16);
978                         omap_set_dma_src_data_pack(info->dma_ch, 1);
979
980                         info->nand.read_buf   = omap_read_buf_dma_pref;
981                         info->nand.write_buf  = omap_write_buf_dma_pref;
982                 }
983                 break;
984
985         case NAND_OMAP_PREFETCH_IRQ:
986                 err = request_irq(pdata->gpmc_irq,
987                                 omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
988                 if (err) {
989                         dev_err(&pdev->dev, "requesting irq(%d) error:%d",
990                                                         pdata->gpmc_irq, err);
991                         goto out_release_mem_region;
992                 } else {
993                         info->gpmc_irq       = pdata->gpmc_irq;
994                         info->nand.read_buf  = omap_read_buf_irq_pref;
995                         info->nand.write_buf = omap_write_buf_irq_pref;
996                 }
997                 break;
998
999         default:
1000                 dev_err(&pdev->dev,
1001                         "xfer_type(%d) not supported!\n", pdata->xfer_type);
1002                 err = -EINVAL;
1003                 goto out_release_mem_region;
1004         }
1005
1006         info->nand.verify_buf = omap_verify_buf;
1007
1008         /* selsect the ecc type */
1009         if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
1010                 info->nand.ecc.mode = NAND_ECC_SOFT;
1011         else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
1012                 (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
1013                 info->nand.ecc.bytes            = 3;
1014                 info->nand.ecc.size             = 512;
1015                 info->nand.ecc.calculate        = omap_calculate_ecc;
1016                 info->nand.ecc.hwctl            = omap_enable_hwecc;
1017                 info->nand.ecc.correct          = omap_correct_data;
1018                 info->nand.ecc.mode             = NAND_ECC_HW;
1019         }
1020
1021         /* DIP switches on some boards change between 8 and 16 bit
1022          * bus widths for flash.  Try the other width if the first try fails.
1023          */
1024         if (nand_scan_ident(&info->mtd, 1, NULL)) {
1025                 info->nand.options ^= NAND_BUSWIDTH_16;
1026                 if (nand_scan_ident(&info->mtd, 1, NULL)) {
1027                         err = -ENXIO;
1028                         goto out_release_mem_region;
1029                 }
1030         }
1031
1032         /* rom code layout */
1033         if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
1034
1035                 if (info->nand.options & NAND_BUSWIDTH_16)
1036                         offset = 2;
1037                 else {
1038                         offset = 1;
1039                         info->nand.badblock_pattern = &bb_descrip_flashbased;
1040                 }
1041                 omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
1042                 for (i = 0; i < omap_oobinfo.eccbytes; i++)
1043                         omap_oobinfo.eccpos[i] = i+offset;
1044
1045                 omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
1046                 omap_oobinfo.oobfree->length = info->mtd.oobsize -
1047                                         (offset + omap_oobinfo.eccbytes);
1048
1049                 info->nand.ecc.layout = &omap_oobinfo;
1050         }
1051
1052         /* second phase scan */
1053         if (nand_scan_tail(&info->mtd)) {
1054                 err = -ENXIO;
1055                 goto out_release_mem_region;
1056         }
1057
1058         mtd_device_parse_register(&info->mtd, NULL, 0,
1059                         pdata->parts, pdata->nr_parts);
1060
1061         platform_set_drvdata(pdev, &info->mtd);
1062
1063         return 0;
1064
1065 out_release_mem_region:
1066         release_mem_region(info->phys_base, NAND_IO_SIZE);
1067 out_free_info:
1068         kfree(info);
1069
1070         return err;
1071 }
1072
/**
 * omap_nand_remove - tear down the NAND device set up by omap_nand_probe
 * @pdev: the platform device being removed
 *
 * Frees the DMA channel and GPMC IRQ if they were acquired, unregisters
 * the MTD device, and releases the register mapping, memory region and
 * driver state.  Always returns 0.
 */
static int omap_nand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);

	platform_set_drvdata(pdev, NULL);
	/*
	 * NOTE(review): probe() only assigns dma_ch in the PREFETCH_DMA
	 * case; for other xfer types kzalloc() leaves it 0, which is a
	 * valid channel number, so this check may free channel 0 by
	 * mistake — verify probe() initializes dma_ch to -1.
	 */
	if (info->dma_ch != -1)
		omap_free_dma(info->dma_ch);

	/* gpmc_irq is only non-zero when PREFETCH_IRQ mode requested it */
	if (info->gpmc_irq)
		free_irq(info->gpmc_irq, info);

	/* Release NAND device, its internal structures and partitions */
	nand_release(&info->mtd);
	iounmap(info->nand.IO_ADDR_R);
	release_mem_region(info->phys_base, NAND_IO_SIZE);
	kfree(info);
	return 0;
}
1093
/* Platform driver glue: binds to devices named DRIVER_NAME ("omap2-nand"). */
static struct platform_driver omap_nand_driver = {
	.probe		= omap_nand_probe,
	.remove		= omap_nand_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
1102
1103 static int __init omap_nand_init(void)
1104 {
1105         pr_info("%s driver initializing\n", DRIVER_NAME);
1106
1107         return platform_driver_register(&omap_nand_driver);
1108 }
1109
/* Module exit point: unregister the platform driver. */
static void __exit omap_nand_exit(void)
{
	platform_driver_unregister(&omap_nand_driver);
}
1114
/* Standard module hookup and metadata. */
module_init(omap_nand_init);
module_exit(omap_nand_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");