2 * Common Flash Interface support:
3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
7 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
9 * 2_by_8 routines added by Simon Munton
11 * 4_by_16 work by Carolyn J. Smith
13 * XIP support hooks by Vitaly Wool (based on code for Intel flash
16 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
18 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/sched.h>
27 #include <linux/init.h>
29 #include <asm/byteorder.h>
31 #include <linux/errno.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/interrupt.h>
35 #include <linux/reboot.h>
36 #include <linux/mtd/map.h>
37 #include <linux/mtd/mtd.h>
38 #include <linux/mtd/cfi.h>
39 #include <linux/mtd/xip.h>
41 #define AMD_BOOTLOC_BUG
42 #define FORCE_WORD_WRITE 0
44 #define MAX_WORD_RETRIES 3
46 #define SST49LF004B 0x0060
47 #define SST49LF040B 0x0050
48 #define SST49LF008A 0x005a
49 #define AT49BV6416 0x00d6
51 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
52 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
53 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
54 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
55 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
56 static void cfi_amdstd_sync (struct mtd_info *);
57 static int cfi_amdstd_suspend (struct mtd_info *);
58 static void cfi_amdstd_resume (struct mtd_info *);
59 static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
60 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
62 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
63 size_t *retlen, const u_char *buf);
65 static void cfi_amdstd_destroy(struct mtd_info *);
67 struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
68 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
70 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
71 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
74 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
75 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
77 static struct mtd_chip_driver cfi_amdstd_chipdrv = {
78 .probe = NULL, /* Not usable directly */
79 .destroy = cfi_amdstd_destroy,
80 .name = "cfi_cmdset_0002",
85 /* #define DEBUG_CFI_FEATURES */
88 #ifdef DEBUG_CFI_FEATURES
89 static void cfi_tell_features(struct cfi_pri_amdstd *extp)
91 const char* erase_suspend[3] = {
92 "Not supported", "Read only", "Read/write"
94 const char* top_bottom[6] = {
95 "No WP", "8x8KiB sectors at top & bottom, no WP",
96 "Bottom boot", "Top boot",
97 "Uniform, Bottom WP", "Uniform, Top WP"
100 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
101 printk(" Address sensitive unlock: %s\n",
102 (extp->SiliconRevision & 1) ? "Not required" : "Required");
104 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
105 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
107 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
109 if (extp->BlkProt == 0)
110 printk(" Block protection: Not supported\n");
112 printk(" Block protection: %d sectors per group\n", extp->BlkProt);
115 printk(" Temporary block unprotect: %s\n",
116 extp->TmpBlkUnprotect ? "Supported" : "Not supported");
117 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
118 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
119 printk(" Burst mode: %s\n",
120 extp->BurstMode ? "Supported" : "Not supported");
121 if (extp->PageMode == 0)
122 printk(" Page mode: Not supported\n");
124 printk(" Page mode: %d word page\n", extp->PageMode << 2);
126 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
127 extp->VppMin >> 4, extp->VppMin & 0xf);
128 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
129 extp->VppMax >> 4, extp->VppMax & 0xf);
131 if (extp->TopBottom < ARRAY_SIZE(top_bottom))
132 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
134 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
138 #ifdef AMD_BOOTLOC_BUG
139 /* Wheee. Bring me the head of someone at AMD. */
140 static void fixup_amd_bootblock(struct mtd_info *mtd)
142 struct map_info *map = mtd->priv;
143 struct cfi_private *cfi = map->fldrv_priv;
144 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
145 __u8 major = extp->MajorVersion;
146 __u8 minor = extp->MinorVersion;
148 if (((major << 8) | minor) < 0x3131) {
149 /* CFI version 1.0 => don't trust bootloc */
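/* MajorVersion/MinorVersion are ASCII digits, so this compares against
 * ('1' << 8 | '1') == 0x3131, i.e. it matches version 1.0 and anything
 * older. */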
151 pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
152 map->name, cfi->mfr, cfi->id);
154 /* AFAICS all 29LV400 with a bottom boot block have a device ID
155 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
156 * These were badly detected as they have the 0x80 bit set
157 * so treat them as a special case.
159 if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&
161 /* Macronix added CFI to their 2nd generation
162 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
163 * Fujitsu, Spansion, EON, ESI and older Macronix) has CFI.
166 * Therefore also check the manufacturer.
167 * This reduces the risk of false detection due to
168 * the 8-bit device ID.
170 (cfi->mfr == CFI_MFR_MACRONIX)) {
171 pr_debug("%s: Macronix MX29LV400C with bottom boot block"
172 " detected\n", map->name);
173 extp->TopBottom = 2; /* bottom boot */
175 if (cfi->id & 0x80) {
176 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
177 extp->TopBottom = 3; /* top boot */
179 extp->TopBottom = 2; /* bottom boot */
182 pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
183 " deduced %s from Device ID\n", map->name, major, minor,
184 extp->TopBottom == 2 ? "bottom" : "top");
189 static void fixup_use_write_buffers(struct mtd_info *mtd)
191 struct map_info *map = mtd->priv;
192 struct cfi_private *cfi = map->fldrv_priv;
193 if (cfi->cfiq->BufWriteTimeoutTyp) {
194 pr_debug("Using buffer write method\n" );
195 mtd->write = cfi_amdstd_write_buffers;
199 /* Atmel chips don't use the same PRI format as AMD chips */
200 static void fixup_convert_atmel_pri(struct mtd_info *mtd)
202 struct map_info *map = mtd->priv;
203 struct cfi_private *cfi = map->fldrv_priv;
204 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
205 struct cfi_pri_atmel atmel_pri;
207 memcpy(&atmel_pri, extp, sizeof(atmel_pri));
208 memset((char *)extp + 5, 0, sizeof(*extp) - 5);
210 if (atmel_pri.Features & 0x02)
211 extp->EraseSuspend = 2;
213 /* Some chips got it backwards... */
214 if (cfi->id == AT49BV6416) {
215 if (atmel_pri.BottomBoot)
220 if (atmel_pri.BottomBoot)
226 /* burst write mode not supported */
227 cfi->cfiq->BufWriteTimeoutTyp = 0;
228 cfi->cfiq->BufWriteTimeoutMax = 0;
231 static void fixup_use_secsi(struct mtd_info *mtd)
233 /* Setup for chips with a secsi area */
234 mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
235 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
238 static void fixup_use_erase_chip(struct mtd_info *mtd)
240 struct map_info *map = mtd->priv;
241 struct cfi_private *cfi = map->fldrv_priv;
242 if ((cfi->cfiq->NumEraseRegions == 1) &&
243 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
244 mtd->erase = cfi_amdstd_erase_chip;
250 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors locked by default.
253 static void fixup_use_atmel_lock(struct mtd_info *mtd)
255 mtd->lock = cfi_atmel_lock;
256 mtd->unlock = cfi_atmel_unlock;
257 mtd->flags |= MTD_POWERUP_LOCK;
260 static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
262 struct map_info *map = mtd->priv;
263 struct cfi_private *cfi = map->fldrv_priv;
266 * These flashes report two separate eraseblock regions based on the
267 * sector_erase-size and block_erase-size, although they both operate on the
268 * same memory. This is not allowed according to CFI, so we just pick the sector_erase-size.
271 cfi->cfiq->NumEraseRegions = 1;
274 static void fixup_sst39vf(struct mtd_info *mtd)
276 struct map_info *map = mtd->priv;
277 struct cfi_private *cfi = map->fldrv_priv;
279 fixup_old_sst_eraseregion(mtd);
281 cfi->addr_unlock1 = 0x5555;
282 cfi->addr_unlock2 = 0x2AAA;
285 static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
287 struct map_info *map = mtd->priv;
288 struct cfi_private *cfi = map->fldrv_priv;
290 fixup_old_sst_eraseregion(mtd);
292 cfi->addr_unlock1 = 0x555;
293 cfi->addr_unlock2 = 0x2AA;
295 cfi->sector_erase_cmd = CMD(0x50);
298 static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
300 struct map_info *map = mtd->priv;
301 struct cfi_private *cfi = map->fldrv_priv;
303 fixup_sst39vf_rev_b(mtd);
306 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
307 * it should report a size of 8KBytes (0x0020*256).
309 cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
310 pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
313 static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
315 struct map_info *map = mtd->priv;
316 struct cfi_private *cfi = map->fldrv_priv;
318 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
319 cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
320 pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
324 static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
326 struct map_info *map = mtd->priv;
327 struct cfi_private *cfi = map->fldrv_priv;
329 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
330 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
331 pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
335 /* Used to fix CFI-Tables of chips without Extended Query Tables */
336 static struct cfi_fixup cfi_nopri_fixup_table[] = {
337 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
338 { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
339 { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
340 { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
341 { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
342 { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
343 { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
344 { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
348 static struct cfi_fixup cfi_fixup_table[] = {
349 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
350 #ifdef AMD_BOOTLOC_BUG
351 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
352 { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
353 { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
355 { CFI_MFR_AMD, 0x0050, fixup_use_secsi },
356 { CFI_MFR_AMD, 0x0053, fixup_use_secsi },
357 { CFI_MFR_AMD, 0x0055, fixup_use_secsi },
358 { CFI_MFR_AMD, 0x0056, fixup_use_secsi },
359 { CFI_MFR_AMD, 0x005C, fixup_use_secsi },
360 { CFI_MFR_AMD, 0x005F, fixup_use_secsi },
361 { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
362 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
363 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
364 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
365 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
366 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
367 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
368 { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
369 #if !FORCE_WORD_WRITE
370 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
374 static struct cfi_fixup jedec_fixup_table[] = {
375 { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
376 { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
377 { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
381 static struct cfi_fixup fixup_table[] = {
382 /* The CFI vendor IDs and the JEDEC vendor IDs appear
383 * to be common.  It seems likely that the device IDs are as
384 * well.  This table covers all the cases where
385 * we know that is the case.
387 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
388 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
393 static void cfi_fixup_major_minor(struct cfi_private *cfi,
394 struct cfi_pri_amdstd *extp)
396 if (cfi->mfr == CFI_MFR_SAMSUNG) {
397 if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
398 (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
400 * Samsung K8P2815UQB and K8D6x16UxM chips
401 * report major=0 / minor=0.
402 * K8D3x16UxC chips report major=3 / minor=3.
404 printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu"
405 " Extended Query version to 1.%c\n",
407 extp->MajorVersion = '1';
412 * SST 38VF640x chips report major=0xFF / minor=0xFF.
414 if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
415 extp->MajorVersion = '1';
416 extp->MinorVersion = '0';
420 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
422 struct cfi_private *cfi = map->fldrv_priv;
423 struct mtd_info *mtd;
426 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
428 printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
432 mtd->type = MTD_NORFLASH;
434 /* Fill in the default mtd operations */
435 mtd->erase = cfi_amdstd_erase_varsize;
436 mtd->write = cfi_amdstd_write_words;
437 mtd->read = cfi_amdstd_read;
438 mtd->sync = cfi_amdstd_sync;
439 mtd->suspend = cfi_amdstd_suspend;
440 mtd->resume = cfi_amdstd_resume;
441 mtd->flags = MTD_CAP_NORFLASH;
442 mtd->name = map->name;
444 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
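/* MaxBufWriteSize is a log2 value taken from the CFI query; e.g. a
 * single non-interleaved chip reporting MaxBufWriteSize = 5 yields a
 * 1 << 5 = 32 byte write buffer. */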
446 pr_debug("MTD %s(): write buffer size %d\n", __func__,
449 mtd->panic_write = cfi_amdstd_panic_write;
450 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
452 if (cfi->cfi_mode==CFI_MODE_CFI){
453 unsigned char bootloc;
454 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
455 struct cfi_pri_amdstd *extp;
457 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
460 * It's a real CFI chip, not one for which the probe
461 * routine faked a CFI structure.
463 cfi_fixup_major_minor(cfi, extp);
466 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
467 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
468 * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
469 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
470 * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
472 if (extp->MajorVersion != '1' ||
473 (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
474 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
475 "version %c.%c (%#02x/%#02x).\n",
476 extp->MajorVersion, extp->MinorVersion,
477 extp->MajorVersion, extp->MinorVersion);
483 printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n",
484 extp->MajorVersion, extp->MinorVersion);
486 /* Install our own private info structure */
487 cfi->cmdset_priv = extp;
489 /* Apply cfi device specific fixups */
490 cfi_fixup(mtd, cfi_fixup_table);
492 #ifdef DEBUG_CFI_FEATURES
493 /* Tell the user about it in lots of lovely detail */
494 cfi_tell_features(extp);
497 bootloc = extp->TopBottom;
498 if ((bootloc < 2) || (bootloc > 5)) {
499 printk(KERN_WARNING "%s: CFI contains unrecognised boot "
500 "bank location (%d). Assuming bottom.\n",
505 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
506 printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);
508 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
509 int j = (cfi->cfiq->NumEraseRegions-1)-i;
512 swap = cfi->cfiq->EraseRegionInfo[i];
513 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
514 cfi->cfiq->EraseRegionInfo[j] = swap;
517 /* Set the default CFI lock/unlock addresses */
518 cfi->addr_unlock1 = 0x555;
519 cfi->addr_unlock2 = 0x2aa;
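/*
 * These addresses feed the classic three-cycle AMD command sequence used
 * throughout this driver via cfi_send_gen_cmd(); e.g. the program
 * sequence in do_write_oneword() below is, in sketch form:
 *
 *   cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 *   cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
 *   cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 *   map_write(map, datum, adr);
 */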
521 cfi_fixup(mtd, cfi_nopri_fixup_table);
523 if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
529 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
530 /* Apply jedec specific fixups */
531 cfi_fixup(mtd, jedec_fixup_table);
533 /* Apply generic fixups */
534 cfi_fixup(mtd, fixup_table);
536 for (i=0; i< cfi->numchips; i++) {
537 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
538 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
539 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
540 cfi->chips[i].ref_point_counter = 0;
541 init_waitqueue_head(&(cfi->chips[i].wq));
544 map->fldrv = &cfi_amdstd_chipdrv;
546 return cfi_amdstd_setup(mtd);
548 struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
549 struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
550 EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
551 EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
552 EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
554 static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
556 struct map_info *map = mtd->priv;
557 struct cfi_private *cfi = map->fldrv_priv;
558 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
559 unsigned long offset = 0;
562 printk(KERN_NOTICE "number of %s chips: %d\n",
563 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
564 /* Select the correct geometry setup */
565 mtd->size = devsize * cfi->numchips;
567 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
568 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
569 * mtd->numeraseregions, GFP_KERNEL);
570 if (!mtd->eraseregions) {
571 printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
575 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
576 unsigned long ernum, ersize;
577 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
578 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
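/* EraseRegionInfo packs "number of blocks - 1" in the low 16 bits and
 * "block size / 256" in the upper bits; e.g. the 0x002003ff value set by
 * fixup_sst38vf640x_sectorsize() decodes to 0x3ff + 1 = 1024 blocks of
 * 0x20 * 256 = 8 KiB each (before the interleave multiplier). */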
580 if (mtd->erasesize < ersize) {
581 mtd->erasesize = ersize;
583 for (j=0; j<cfi->numchips; j++) {
584 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
585 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
586 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
588 offset += (ersize * ernum);
590 if (offset != devsize) {
592 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
596 __module_get(THIS_MODULE);
597 register_reboot_notifier(&mtd->reboot_notifier);
601 kfree(mtd->eraseregions);
603 kfree(cfi->cmdset_priv);
609 * Return true if the chip is ready.
611 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
612 * non-suspended sector) and is indicated by no toggle bits toggling.
614 * Note that anything more complicated than checking if no bits are toggling
615 * (including checking DQ5 for an error status) is tricky to get working
616 * correctly and is therefore not done (particularly with interleaved chips
617 * as each chip must be checked independently of the others).
619 static int __xipram chip_ready(struct map_info *map, unsigned long addr)
623 d = map_read(map, addr);
624 t = map_read(map, addr);
626 return map_word_equal(map, d, t);
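/*
 * A condensed sketch of how the write and erase paths below use this
 * (see do_write_oneword() and do_erase_oneblock() for the real thing):
 *
 *   for (;;) {
 *           if (chip_ready(map, adr))
 *                   break;
 *           if (time_after(jiffies, timeo))
 *                   break;          (software timeout)
 *           UDELAY(map, chip, adr, 1);
 *   }
 */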
630 * Return true if the chip is ready and has the correct value.
632 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
633 * non-suspended sector) and it is indicated by no bits toggling.
635 * Errors are indicated by toggling bits, or by bits held with the wrong
636 * value.
638 * Note that anything more complicated than checking if no bits are toggling
639 * (including checking DQ5 for an error status) is tricky to get working
640 * correctly and is therefore not done (particularly with interleaved chips
641 * as each chip must be checked independently of the others).
644 static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
648 oldd = map_read(map, addr);
649 curd = map_read(map, addr);
651 return map_word_equal(map, oldd, curd) &&
652 map_word_equal(map, curd, expected);
655 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
657 DECLARE_WAITQUEUE(wait, current);
658 struct cfi_private *cfi = map->fldrv_priv;
660 struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
663 timeo = jiffies + HZ;
665 switch (chip->state) {
669 if (chip_ready(map, adr))
672 if (time_after(jiffies, timeo)) {
673 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
676 mutex_unlock(&chip->mutex);
678 mutex_lock(&chip->mutex);
679 /* Someone else might have been playing with it. */
689 if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
690 !(mode == FL_READY || mode == FL_POINT ||
691 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
694 /* We could check to see if we're trying to access the sector
695 * that is currently being erased. However, no user will try
696 * anything like that so we just wait for the timeout. */
699 /* It's harmless to issue the Erase-Suspend and Erase-Resume
700 * commands when the erase algorithm isn't in progress. */
701 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
702 chip->oldstate = FL_ERASING;
703 chip->state = FL_ERASE_SUSPENDING;
704 chip->erase_suspended = 1;
706 if (chip_ready(map, adr))
709 if (time_after(jiffies, timeo)) {
710 /* Should have suspended the erase by now.
711 * Send an Erase-Resume command as either
712 * there was an error (so leave the erase
713 * routine to recover from it) or we are trying to
714 * use the erase-in-progress sector. */
715 put_chip(map, chip, adr);
716 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
720 mutex_unlock(&chip->mutex);
722 mutex_lock(&chip->mutex);
723 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
724 So we can just loop here. */
726 chip->state = FL_READY;
729 case FL_XIP_WHILE_ERASING:
730 if (mode != FL_READY && mode != FL_POINT &&
731 (!cfip || !(cfip->EraseSuspend&2)))
733 chip->oldstate = chip->state;
734 chip->state = FL_READY;
738 /* The machine is rebooting */
742 /* Only if there's no operation suspended... */
743 if (mode == FL_READY && chip->oldstate == FL_READY)
748 set_current_state(TASK_UNINTERRUPTIBLE);
749 add_wait_queue(&chip->wq, &wait);
750 mutex_unlock(&chip->mutex);
752 remove_wait_queue(&chip->wq, &wait);
753 mutex_lock(&chip->mutex);
759 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
761 struct cfi_private *cfi = map->fldrv_priv;
763 switch(chip->oldstate) {
765 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
766 chip->oldstate = FL_READY;
767 chip->state = FL_ERASING;
770 case FL_XIP_WHILE_ERASING:
771 chip->state = chip->oldstate;
772 chip->oldstate = FL_READY;
777 /* We should really make set_vpp() count, rather than doing this */
781 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
786 #ifdef CONFIG_MTD_XIP
789 * No interrupt whatsoever can be serviced while the flash isn't in array
790 * mode. This is ensured by the xip_disable() and xip_enable() functions
791 * enclosing any code path where the flash is known not to be in array mode.
792 * And within a XIP disabled code path, only functions marked with __xipram
793 * may be called and nothing else (it's a good thing to inspect generated
794 * assembly to make sure inline functions were actually inlined and that gcc
795 * didn't emit calls to its own support functions). Configuring MTD CFI
796 * support to a single buswidth and a single interleave is also recommended.
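/*
 * In sketch form, every operation path below follows this bracketing
 * pattern (see do_write_oneword() and do_erase_oneblock()):
 *
 *   xip_disable(map, chip, adr);
 *   ... issue the command cycles, poll with UDELAY()/xip_udelay() ...
 *   xip_enable(map, chip, adr);
 *
 * so the flash only leaves array mode inside an xip_disable()'d region,
 * and only __xipram-marked code runs while it is out of array mode.
 */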
799 static void xip_disable(struct map_info *map, struct flchip *chip,
802 /* TODO: chips with no XIP use should ignore and return */
803 (void) map_read(map, adr); /* ensure mmu mapping is up to date */
807 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
810 struct cfi_private *cfi = map->fldrv_priv;
812 if (chip->state != FL_POINT && chip->state != FL_READY) {
813 map_write(map, CMD(0xf0), adr);
814 chip->state = FL_READY;
816 (void) map_read(map, adr);
822 * When a delay is required for the flash operation to complete, the
823 * xip_udelay() function polls for both the given timeout and pending
824 * (but still masked) hardware interrupts. Whenever there is an interrupt
825 * pending then the flash erase operation is suspended, array mode restored
826 * and interrupts unmasked. Task scheduling might also happen at that
827 * point. The CPU eventually returns from the interrupt or the call to
828 * schedule() and the suspended flash operation is resumed for the remainder
829 * of the delay period.
831 * Warning: this function _will_ fool interrupt latency tracing tools.
834 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
835 unsigned long adr, int usec)
837 struct cfi_private *cfi = map->fldrv_priv;
838 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
839 map_word status, OK = CMD(0x80);
840 unsigned long suspended, start = xip_currtime();
845 if (xip_irqpending() && extp &&
846 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
847 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
849 * Let's suspend the erase operation when supported.
850 * Note that we currently don't try to suspend
851 * interleaved chips if there is already another
852 * operation suspended (imagine what happens
853 * when one chip was already done with the current
854 * operation while another chip suspended it, then
855 * we resume the whole thing at once). Yes, it can happen!
858 map_write(map, CMD(0xb0), adr);
859 usec -= xip_elapsed_since(start);
860 suspended = xip_currtime();
862 if (xip_elapsed_since(suspended) > 100000) {
864 * The chip doesn't want to suspend
865 * after waiting for 100 msecs.
866 * This is a critical error but there
867 * is not much we can do here.
871 status = map_read(map, adr);
872 } while (!map_word_andequal(map, status, OK, OK));
874 /* Suspend succeeded */
875 oldstate = chip->state;
876 if (!map_word_bitsset(map, status, CMD(0x40)))
878 chip->state = FL_XIP_WHILE_ERASING;
879 chip->erase_suspended = 1;
880 map_write(map, CMD(0xf0), adr);
881 (void) map_read(map, adr);
884 mutex_unlock(&chip->mutex);
889 * We're back. However someone else might have
890 * decided to go write to the chip if we are in
891 * a suspended erase state. If so let's wait until it's done.
894 mutex_lock(&chip->mutex);
895 while (chip->state != FL_XIP_WHILE_ERASING) {
896 DECLARE_WAITQUEUE(wait, current);
897 set_current_state(TASK_UNINTERRUPTIBLE);
898 add_wait_queue(&chip->wq, &wait);
899 mutex_unlock(&chip->mutex);
901 remove_wait_queue(&chip->wq, &wait);
902 mutex_lock(&chip->mutex);
904 /* Disallow XIP again */
907 /* Resume the write or erase operation */
908 map_write(map, cfi->sector_erase_cmd, adr);
909 chip->state = oldstate;
910 start = xip_currtime();
911 } else if (usec >= 1000000/HZ) {
913 * Try to save on CPU power when the waiting delay
914 * is at least a system timer tick period.
915 * No need to be extremely accurate here.
919 status = map_read(map, adr);
920 } while (!map_word_andequal(map, status, OK, OK)
921 && xip_elapsed_since(start) < usec);
924 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
927 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
928 * the flash is actively programming or erasing since we have to poll for
929 * the operation to complete anyway. We can't do that in a generic way with
930 * a XIP setup so do it before the actual flash operation in this case
931 * and stub it out from INVALIDATE_CACHE_UDELAY.
933 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
934 INVALIDATE_CACHED_RANGE(map, from, size)
936 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
937 UDELAY(map, chip, adr, usec)
942 * Activating this XIP support changes the way the code works a bit. For
943 * example the code to suspend the current process when concurrent access
944 * happens is never executed because xip_udelay() will always return with the
945 * same chip state as it was entered with.  This is why no special care is
946 * taken for the presence of add_wait_queue() or schedule() calls from within
947 * a couple of xip_disable()'d areas of code, like in do_erase_oneblock for example.
948 * The queueing and scheduling are always happening within xip_udelay().
950 * Similarly, get_chip() and put_chip() just happen to always be executed
951 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. while the flash
952 * is in array mode, therefore never executing many cases therein and not
953 * causing any problem with XIP.
958 #define xip_disable(map, chip, adr)
959 #define xip_enable(map, chip, adr)
960 #define XIP_INVAL_CACHED_RANGE(x...)
962 #define UDELAY(map, chip, adr, usec) \
964 mutex_unlock(&chip->mutex); \
966 mutex_lock(&chip->mutex); \
969 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
971 mutex_unlock(&chip->mutex); \
972 INVALIDATE_CACHED_RANGE(map, adr, len); \
974 mutex_lock(&chip->mutex); \
979 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
981 unsigned long cmd_addr;
982 struct cfi_private *cfi = map->fldrv_priv;
987 /* Ensure cmd read/writes are aligned. */
988 cmd_addr = adr & ~(map_bankwidth(map)-1);
990 mutex_lock(&chip->mutex);
991 ret = get_chip(map, chip, cmd_addr, FL_READY);
993 mutex_unlock(&chip->mutex);
997 if (chip->state != FL_POINT && chip->state != FL_READY) {
998 map_write(map, CMD(0xf0), cmd_addr);
999 chip->state = FL_READY;
1002 map_copy_from(map, buf, adr, len);
1004 put_chip(map, chip, cmd_addr);
1006 mutex_unlock(&chip->mutex);
1011 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1013 struct map_info *map = mtd->priv;
1014 struct cfi_private *cfi = map->fldrv_priv;
1019 /* ofs: offset within the first chip that the first read should start */
1021 chipnum = (from >> cfi->chipshift);
1022 ofs = from - (chipnum << cfi->chipshift);
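/* For example, assuming 4 MiB chips (cfi->chipshift = 22), a read at
 * from = 0x500000 starts in chip 1 at ofs = 0x100000. */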
1028 unsigned long thislen;
1030 if (chipnum >= cfi->numchips)
1033 if ((len + ofs -1) >> cfi->chipshift)
1034 thislen = (1<<cfi->chipshift) - ofs;
1038 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1053 static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1055 DECLARE_WAITQUEUE(wait, current);
1056 unsigned long timeo = jiffies + HZ;
1057 struct cfi_private *cfi = map->fldrv_priv;
1060 mutex_lock(&chip->mutex);
1062 if (chip->state != FL_READY){
1063 set_current_state(TASK_UNINTERRUPTIBLE);
1064 add_wait_queue(&chip->wq, &wait);
1066 mutex_unlock(&chip->mutex);
1069 remove_wait_queue(&chip->wq, &wait);
1070 timeo = jiffies + HZ;
1077 chip->state = FL_READY;
1079 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1080 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1081 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1083 map_copy_from(map, buf, adr, len);
1085 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1086 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1087 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1088 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1091 mutex_unlock(&chip->mutex);
1096 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1098 struct map_info *map = mtd->priv;
1099 struct cfi_private *cfi = map->fldrv_priv;
1105 /* ofs: offset within the first chip that the first read should start */
1107 /* 8 secsi bytes per chip */
1115 unsigned long thislen;
1117 if (chipnum >= cfi->numchips)
1120 if ((len + ofs -1) >> 3)
1121 thislen = (1<<3) - ofs;
1125 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1140 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
1142 struct cfi_private *cfi = map->fldrv_priv;
1143 unsigned long timeo = jiffies + HZ;
1145 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
1146 * have a max write time of a few hundred usec). However, we should
1147 * use the maximum timeout value given by the chip at probe time
1148 * instead.  Unfortunately, struct flchip does not have a field for the
1149 * maximum timeout, only for the typical one, which can be far too short
1150 * depending on the conditions.  The ' + 1' is to avoid having a
1151 * timeout of 0 jiffies if HZ is smaller than 1000.
1153 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
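/* For example, with HZ = 250 this evaluates to (250 / 1000) + 1 = 1 jiffy
 * (4 ms); with HZ = 1000 it evaluates to 1 + 1 = 2 jiffies (2 ms). */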
1160 mutex_lock(&chip->mutex);
1161 ret = get_chip(map, chip, adr, FL_WRITING);
1163 mutex_unlock(&chip->mutex);
1167 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1168 __func__, adr, datum.x[0] );
1171 * Check for a NOP for the case when the datum to write is already
1172 * present - it saves time and works around buggy chips that corrupt
1173 * data at other locations when 0xff is written to a location that
1174 * already contains 0xff.
1176 oldd = map_read(map, adr);
1177 if (map_word_equal(map, oldd, datum)) {
1178 pr_debug("MTD %s(): NOP\n",
1183 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1185 xip_disable(map, chip, adr);
1187 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1188 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1189 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1190 map_write(map, datum, adr);
1191 chip->state = FL_WRITING;
1193 INVALIDATE_CACHE_UDELAY(map, chip,
1194 adr, map_bankwidth(map),
1195 chip->word_write_time);
1197 /* See comment above for timeout value. */
1198 timeo = jiffies + uWriteTimeout;
1200 if (chip->state != FL_WRITING) {
1201 /* Someone's suspended the write. Sleep */
1202 DECLARE_WAITQUEUE(wait, current);
1204 set_current_state(TASK_UNINTERRUPTIBLE);
1205 add_wait_queue(&chip->wq, &wait);
1206 mutex_unlock(&chip->mutex);
1208 remove_wait_queue(&chip->wq, &wait);
1209 timeo = jiffies + (HZ / 2); /* FIXME */
1210 mutex_lock(&chip->mutex);
1214 if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
1215 xip_enable(map, chip, adr);
1216 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1217 xip_disable(map, chip, adr);
1221 if (chip_ready(map, adr))
1224 /* Latency issues. Drop the lock, wait a while and retry */
1225 UDELAY(map, chip, adr, 1);
1227 /* Did we succeed? */
1228 if (!chip_good(map, adr, datum)) {
1229 /* reset on all failures. */
1230 map_write( map, CMD(0xF0), chip->start );
1231 /* FIXME - should have reset delay before continuing */
1233 if (++retry_cnt <= MAX_WORD_RETRIES)
1238 xip_enable(map, chip, adr);
1240 chip->state = FL_READY;
1241 put_chip(map, chip, adr);
1242 mutex_unlock(&chip->mutex);
1248 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1249 size_t *retlen, const u_char *buf)
1251 struct map_info *map = mtd->priv;
1252 struct cfi_private *cfi = map->fldrv_priv;
1255 unsigned long ofs, chipstart;
1256 DECLARE_WAITQUEUE(wait, current);
1262 chipnum = to >> cfi->chipshift;
1263 ofs = to - (chipnum << cfi->chipshift);
1264 chipstart = cfi->chips[chipnum].start;
1266 /* If it's not bus-aligned, do the first byte write */
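/* For example, on a 4-byte-wide bus a write to ofs = 0x1002 gives
 * bus_ofs = 0x1000 and i = 2: the old word is read back, the first
 * min(len, 2) bytes of 'buf' are merged in with map_word_load_partial(),
 * and the merged word is written with do_write_oneword(). */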
1267 if (ofs & (map_bankwidth(map)-1)) {
1268 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1269 int i = ofs - bus_ofs;
1274 mutex_lock(&cfi->chips[chipnum].mutex);
1276 if (cfi->chips[chipnum].state != FL_READY) {
1277 set_current_state(TASK_UNINTERRUPTIBLE);
1278 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1280 mutex_unlock(&cfi->chips[chipnum].mutex);
1283 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1287 /* Load 'tmp_buf' with old contents of flash */
1288 tmp_buf = map_read(map, bus_ofs+chipstart);
1290 mutex_unlock(&cfi->chips[chipnum].mutex);
1292 /* Number of bytes to copy from buffer */
1293 n = min_t(int, len, map_bankwidth(map)-i);
1295 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1297 ret = do_write_oneword(map, &cfi->chips[chipnum],
1307 if (ofs >> cfi->chipshift) {
1310 if (chipnum == cfi->numchips)
1315 /* We are now aligned, write as much as possible */
1316 while(len >= map_bankwidth(map)) {
1319 datum = map_word_load(map, buf);
1321 ret = do_write_oneword(map, &cfi->chips[chipnum],
1326 ofs += map_bankwidth(map);
1327 buf += map_bankwidth(map);
1328 (*retlen) += map_bankwidth(map);
1329 len -= map_bankwidth(map);
1331 if (ofs >> cfi->chipshift) {
1334 if (chipnum == cfi->numchips)
1336 chipstart = cfi->chips[chipnum].start;
1340 /* Write the trailing bytes if any */
1341 if (len & (map_bankwidth(map)-1)) {
1345 mutex_lock(&cfi->chips[chipnum].mutex);
1347 if (cfi->chips[chipnum].state != FL_READY) {
1348 set_current_state(TASK_UNINTERRUPTIBLE);
1349 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1351 mutex_unlock(&cfi->chips[chipnum].mutex);
1354 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1358 tmp_buf = map_read(map, ofs + chipstart);
1360 mutex_unlock(&cfi->chips[chipnum].mutex);
1362 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1364 ret = do_write_oneword(map, &cfi->chips[chipnum],
1377 * FIXME: interleaved mode not tested, and probably not supported!
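/*
 * In sketch form, the buffered-program sequence issued below is
 * (all commands go through the usual unlock addresses; cmd_adr is the
 * target sector address):
 *
 *   0xAA -> addr_unlock1, 0x55 -> addr_unlock2      (unlock cycles)
 *   0x25 -> cmd_adr                                 (Write Buffer Load)
 *   (word count - 1) -> cmd_adr
 *   data words -> their destination addresses
 *   0x29 -> cmd_adr                                 (Program Buffer to Flash)
 */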
1379 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1380 unsigned long adr, const u_char *buf,
1383 struct cfi_private *cfi = map->fldrv_priv;
1384 unsigned long timeo = jiffies + HZ;
1385 /* see comments in do_write_oneword() regarding uWriteTimeout. */
1386 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1388 unsigned long cmd_adr;
1395 mutex_lock(&chip->mutex);
1396 ret = get_chip(map, chip, adr, FL_WRITING);
1398 mutex_unlock(&chip->mutex);
1402 datum = map_word_load(map, buf);
1404 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1405 __func__, adr, datum.x[0] );
1407 XIP_INVAL_CACHED_RANGE(map, adr, len);
1409 xip_disable(map, chip, cmd_adr);
1411 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1412 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1414 /* Write Buffer Load */
1415 map_write(map, CMD(0x25), cmd_adr);
1417 chip->state = FL_WRITING_TO_BUFFER;
1419 /* Write length of data to come */
1420 words = len / map_bankwidth(map);
1421 map_write(map, CMD(words - 1), cmd_adr);
1424 while(z < words * map_bankwidth(map)) {
1425 datum = map_word_load(map, buf);
1426 map_write(map, datum, adr + z);
1428 z += map_bankwidth(map);
1429 buf += map_bankwidth(map);
1431 z -= map_bankwidth(map);
1435 /* Write Buffer Program Confirm: GO GO GO */
1436 map_write(map, CMD(0x29), cmd_adr);
1437 chip->state = FL_WRITING;
1439 INVALIDATE_CACHE_UDELAY(map, chip,
1440 adr, map_bankwidth(map),
1441 chip->word_write_time);
1443 timeo = jiffies + uWriteTimeout;
1446 if (chip->state != FL_WRITING) {
1447 /* Someone's suspended the write. Sleep */
1448 DECLARE_WAITQUEUE(wait, current);
1450 set_current_state(TASK_UNINTERRUPTIBLE);
1451 add_wait_queue(&chip->wq, &wait);
1452 mutex_unlock(&chip->mutex);
1454 remove_wait_queue(&chip->wq, &wait);
1455 timeo = jiffies + (HZ / 2); /* FIXME */
1456 mutex_lock(&chip->mutex);
1460 if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1463 if (chip_ready(map, adr)) {
1464 xip_enable(map, chip, adr);
1468 /* Latency issues. Drop the lock, wait a while and retry */
1469 UDELAY(map, chip, adr, 1);
1472 /* reset on all failures. */
1473 map_write( map, CMD(0xF0), chip->start );
1474 xip_enable(map, chip, adr);
1475 /* FIXME - should have reset delay before continuing */
1477 printk(KERN_WARNING "MTD %s(): software timeout\n",
1482 chip->state = FL_READY;
1483 put_chip(map, chip, adr);
1484 mutex_unlock(&chip->mutex);
1490 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1491 size_t *retlen, const u_char *buf)
1493 struct map_info *map = mtd->priv;
1494 struct cfi_private *cfi = map->fldrv_priv;
1495 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1504 chipnum = to >> cfi->chipshift;
1505 ofs = to - (chipnum << cfi->chipshift);
1507 /* If it's not bus-aligned, do the first word write */
1508 if (ofs & (map_bankwidth(map)-1)) {
1509 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1510 if (local_len > len)
1512 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1513 local_len, retlen, buf);
1520 if (ofs >> cfi->chipshift) {
1523 if (chipnum == cfi->numchips)
1528 /* Write buffer is worth it only if more than one word to write... */
1529 while (len >= map_bankwidth(map) * 2) {
1530 /* We must not cross write block boundaries */
1531 int size = wbufsize - (ofs & (wbufsize-1));
1535 if (size % map_bankwidth(map))
1536 size -= size % map_bankwidth(map);
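/* For example, with wbufsize = 32 and ofs = 0x101c this gives
 * size = 32 - 0x1c = 4, so the chunk stops exactly at the 32-byte
 * write-buffer boundary. */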
1538 ret = do_write_buffer(map, &cfi->chips[chipnum],
1548 if (ofs >> cfi->chipshift) {
1551 if (chipnum == cfi->numchips)
1557 size_t retlen_dregs = 0;
1559 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1560 len, &retlen_dregs, buf);
1562 *retlen += retlen_dregs;
1570 * Wait for the flash chip to become ready to write data
1572 * This is only called during the panic_write() path. When panic_write()
1573 * is called, the kernel is in the process of a panic, and will soon be
1574 * dead. Therefore we don't take any locks, and attempt to get access
1575 * to the chip as soon as possible.
1577 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
1580 struct cfi_private *cfi = map->fldrv_priv;
1585 * If the driver thinks the chip is idle, and no toggle bits
1586 * are changing, then the chip is actually idle for sure.
1588 if (chip->state == FL_READY && chip_ready(map, adr))
1592 * Try several times to reset the chip and then wait for it
1593 * to become idle. The upper limit of a few milliseconds of
1594 * delay isn't a big problem: the kernel is dying anyway. It
1595 * is more important to save the messages.
1597 while (retries > 0) {
1598 const unsigned long timeo = (HZ / 1000) + 1;
1600 /* send the reset command */
1601 map_write(map, CMD(0xF0), chip->start);
1603 /* wait for the chip to become ready */
1604 for (i = 0; i < jiffies_to_usecs(timeo); i++) {
1605 if (chip_ready(map, adr))
1612 /* the chip never became ready */
1617 * Write out one word of data to a single flash chip during a kernel panic
1619 * This is only called during the panic_write() path. When panic_write()
1620 * is called, the kernel is in the process of a panic, and will soon be
1621 * dead. Therefore we don't take any locks, and attempt to get access
1622 * to the chip as soon as possible.
1624 * The implementation of this routine is intentionally similar to
1625 * do_write_oneword(), in order to ease code maintenance.
1627 static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
1628 unsigned long adr, map_word datum)
1630 const unsigned long uWriteTimeout = (HZ / 1000) + 1;
1631 struct cfi_private *cfi = map->fldrv_priv;
1639 ret = cfi_amdstd_panic_wait(map, chip, adr);
1643 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
1644 __func__, adr, datum.x[0]);
1647 * Check for a NOP for the case when the datum to write is already
1648 * present - it saves time and works around buggy chips that corrupt
1649 * data at other locations when 0xff is written to a location that
1650 * already contains 0xff.
1652 oldd = map_read(map, adr);
1653 if (map_word_equal(map, oldd, datum)) {
1654 pr_debug("MTD %s(): NOP\n", __func__);
1661 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1662 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1663 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1664 map_write(map, datum, adr);
1666 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
1667 if (chip_ready(map, adr))
1673 if (!chip_good(map, adr, datum)) {
1674 /* reset on all failures. */
1675 map_write(map, CMD(0xF0), chip->start);
1676 /* FIXME - should have reset delay before continuing */
1678 if (++retry_cnt <= MAX_WORD_RETRIES)
1690 * Write out some data during a kernel panic
1692 * This is used by the mtdoops driver to save the dying messages from a
1693 * kernel which has panic'd.
1695 * This routine ignores all of the locking used throughout the rest of the
1696 * driver, in order to ensure that the data gets written out no matter what
1697 * state this driver (and the flash chip itself) was in when the kernel crashed.
1699 * The implementation of this routine is intentionally similar to
1700 * cfi_amdstd_write_words(), in order to ease code maintenance.
1702 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1703 size_t *retlen, const u_char *buf)
1705 struct map_info *map = mtd->priv;
1706 struct cfi_private *cfi = map->fldrv_priv;
1707 unsigned long ofs, chipstart;
1715 chipnum = to >> cfi->chipshift;
1716 ofs = to - (chipnum << cfi->chipshift);
1717 chipstart = cfi->chips[chipnum].start;
1719 /* If it's not bus aligned, do the first byte write */
1720 if (ofs & (map_bankwidth(map) - 1)) {
1721 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
1722 int i = ofs - bus_ofs;
1726 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
1730 /* Load 'tmp_buf' with old contents of flash */
1731 tmp_buf = map_read(map, bus_ofs + chipstart);
1733 /* Number of bytes to copy from buffer */
1734 n = min_t(int, len, map_bankwidth(map) - i);
1736 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1738 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1748 if (ofs >> cfi->chipshift) {
1751 if (chipnum == cfi->numchips)
1756 /* We are now aligned, write as much as possible */
1757 while (len >= map_bankwidth(map)) {
1760 datum = map_word_load(map, buf);
1762 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1767 ofs += map_bankwidth(map);
1768 buf += map_bankwidth(map);
1769 (*retlen) += map_bankwidth(map);
1770 len -= map_bankwidth(map);
1772 if (ofs >> cfi->chipshift) {
1775 if (chipnum == cfi->numchips)
1778 chipstart = cfi->chips[chipnum].start;
1782 /* Write the trailing bytes if any */
1783 if (len & (map_bankwidth(map) - 1)) {
1786 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
1790 tmp_buf = map_read(map, ofs + chipstart);
1792 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1794 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1807 * Handle devices with one erase region that only implement
1808 * the chip erase command.
1810 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1812 struct cfi_private *cfi = map->fldrv_priv;
1813 unsigned long timeo = jiffies + HZ;
1814 unsigned long int adr;
1815 DECLARE_WAITQUEUE(wait, current);
1818 adr = cfi->addr_unlock1;
1820 mutex_lock(&chip->mutex);
1821 ret = get_chip(map, chip, adr, FL_WRITING);
1823 mutex_unlock(&chip->mutex);
1827 pr_debug("MTD %s(): ERASE 0x%.8lx\n",
1828 __func__, chip->start );
1830 XIP_INVAL_CACHED_RANGE(map, adr, map->size);
1832 xip_disable(map, chip, adr);
1834 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1835 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1836 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1837 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1838 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1839 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1841 chip->state = FL_ERASING;
1842 chip->erase_suspended = 0;
1843 chip->in_progress_block_addr = adr;
1845 INVALIDATE_CACHE_UDELAY(map, chip,
1847 chip->erase_time*500);
1849 timeo = jiffies + (HZ*20);
1852 if (chip->state != FL_ERASING) {
1853 /* Someone's suspended the erase. Sleep */
1854 set_current_state(TASK_UNINTERRUPTIBLE);
1855 add_wait_queue(&chip->wq, &wait);
1856 mutex_unlock(&chip->mutex);
1858 remove_wait_queue(&chip->wq, &wait);
1859 mutex_lock(&chip->mutex);
1862 if (chip->erase_suspended) {
1863 /* This erase was suspended and resumed.
1864 Adjust the timeout */
1865 timeo = jiffies + (HZ*20); /* FIXME */
1866 chip->erase_suspended = 0;
1869 if (chip_ready(map, adr))
1872 if (time_after(jiffies, timeo)) {
1873 printk(KERN_WARNING "MTD %s(): software timeout\n",
1878 /* Latency issues. Drop the lock, wait a while and retry */
1879 UDELAY(map, chip, adr, 1000000/HZ);
1881 /* Did we succeed? */
1882 if (!chip_good(map, adr, map_word_ff(map))) {
1883 /* reset on all failures. */
1884 map_write( map, CMD(0xF0), chip->start );
1885 /* FIXME - should have reset delay before continuing */
1890 chip->state = FL_READY;
1891 xip_enable(map, chip, adr);
1892 put_chip(map, chip, adr);
1893 mutex_unlock(&chip->mutex);
1899 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
1901 struct cfi_private *cfi = map->fldrv_priv;
1902 unsigned long timeo = jiffies + HZ;
1903 DECLARE_WAITQUEUE(wait, current);
1908 mutex_lock(&chip->mutex);
1909 ret = get_chip(map, chip, adr, FL_ERASING);
1911 mutex_unlock(&chip->mutex);
1915 pr_debug("MTD %s(): ERASE 0x%.8lx\n",
1918 XIP_INVAL_CACHED_RANGE(map, adr, len);
1920 xip_disable(map, chip, adr);
1922 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1923 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1924 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1925 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1926 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1927 map_write(map, cfi->sector_erase_cmd, adr);
1929 chip->state = FL_ERASING;
1930 chip->erase_suspended = 0;
1931 chip->in_progress_block_addr = adr;
1933 INVALIDATE_CACHE_UDELAY(map, chip,
1935 chip->erase_time*500);
1937 timeo = jiffies + (HZ*20);
1940 if (chip->state != FL_ERASING) {
1941 /* Someone's suspended the erase. Sleep */
1942 set_current_state(TASK_UNINTERRUPTIBLE);
1943 add_wait_queue(&chip->wq, &wait);
1944 mutex_unlock(&chip->mutex);
1946 remove_wait_queue(&chip->wq, &wait);
1947 mutex_lock(&chip->mutex);
1950 if (chip->erase_suspended) {
1951 /* This erase was suspended and resumed.
1952 Adjust the timeout */
1953 timeo = jiffies + (HZ*20); /* FIXME */
1954 chip->erase_suspended = 0;
1957 if (chip_ready(map, adr)) {
1958 xip_enable(map, chip, adr);
1962 if (time_after(jiffies, timeo)) {
1963 xip_enable(map, chip, adr);
1964 printk(KERN_WARNING "MTD %s(): software timeout\n",
1969 /* Latency issues. Drop the lock, wait a while and retry */
1970 UDELAY(map, chip, adr, 1000000/HZ);
1972 /* Did we succeed? */
1973 if (!chip_good(map, adr, map_word_ff(map))) {
1974 /* reset on all failures. */
1975 map_write( map, CMD(0xF0), chip->start );
1976 /* FIXME - should have reset delay before continuing */
1981 chip->state = FL_READY;
1982 put_chip(map, chip, adr);
1983 mutex_unlock(&chip->mutex);
1988 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1990 unsigned long ofs, len;
1996 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
2000 instr->state = MTD_ERASE_DONE;
2001 mtd_erase_callback(instr);
2007 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2009 struct map_info *map = mtd->priv;
2010 struct cfi_private *cfi = map->fldrv_priv;
2013 if (instr->addr != 0)
2016 if (instr->len != mtd->size)
2019 ret = do_erase_chip(map, &cfi->chips[0]);
2023 instr->state = MTD_ERASE_DONE;
2024 mtd_erase_callback(instr);
2029 static int do_atmel_lock(struct map_info *map, struct flchip *chip,
2030 unsigned long adr, int len, void *thunk)
2032 struct cfi_private *cfi = map->fldrv_priv;
2035 mutex_lock(&chip->mutex);
2036 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2039 chip->state = FL_LOCKING;
2041 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2043 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2044 cfi->device_type, NULL);
2045 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2046 cfi->device_type, NULL);
2047 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
2048 cfi->device_type, NULL);
2049 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2050 cfi->device_type, NULL);
2051 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2052 cfi->device_type, NULL);
2053 map_write(map, CMD(0x40), chip->start + adr);
2055 chip->state = FL_READY;
2056 put_chip(map, chip, adr + chip->start);
2060 mutex_unlock(&chip->mutex);
2064 static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2065 unsigned long adr, int len, void *thunk)
2067 struct cfi_private *cfi = map->fldrv_priv;
2070 mutex_lock(&chip->mutex);
2071 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2074 chip->state = FL_UNLOCKING;
2076 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2078 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2079 cfi->device_type, NULL);
2080 map_write(map, CMD(0x70), adr);
2082 chip->state = FL_READY;
2083 put_chip(map, chip, adr + chip->start);
2087 mutex_unlock(&chip->mutex);
2091 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2093 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
2096 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2098 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2102 static void cfi_amdstd_sync (struct mtd_info *mtd)
2104 struct map_info *map = mtd->priv;
2105 struct cfi_private *cfi = map->fldrv_priv;
2107 struct flchip *chip;
2109 DECLARE_WAITQUEUE(wait, current);
2111 for (i=0; !ret && i<cfi->numchips; i++) {
2112 chip = &cfi->chips[i];
2115 mutex_lock(&chip->mutex);
2117 switch(chip->state) {
2121 case FL_JEDEC_QUERY:
2122 chip->oldstate = chip->state;
2123 chip->state = FL_SYNCING;
2124 /* No need to wake_up() on this state change -
2125 * as the whole point is that nobody can do anything
2126 * with the chip now anyway.
2129 mutex_unlock(&chip->mutex);
2133 /* Not an idle state */
2134 set_current_state(TASK_UNINTERRUPTIBLE);
2135 add_wait_queue(&chip->wq, &wait);
2137 mutex_unlock(&chip->mutex);
2141 remove_wait_queue(&chip->wq, &wait);
2147 /* Unlock the chips again */
2149 for (i--; i >=0; i--) {
2150 chip = &cfi->chips[i];
2152 mutex_lock(&chip->mutex);
2154 if (chip->state == FL_SYNCING) {
2155 chip->state = chip->oldstate;
2158 mutex_unlock(&chip->mutex);
2163 static int cfi_amdstd_suspend(struct mtd_info *mtd)
2165 struct map_info *map = mtd->priv;
2166 struct cfi_private *cfi = map->fldrv_priv;
2168 struct flchip *chip;
2171 for (i=0; !ret && i<cfi->numchips; i++) {
2172 chip = &cfi->chips[i];
2174 mutex_lock(&chip->mutex);
2176 switch(chip->state) {
2180 case FL_JEDEC_QUERY:
2181 chip->oldstate = chip->state;
2182 chip->state = FL_PM_SUSPENDED;
2183 /* No need to wake_up() on this state change -
2184 * as the whole point is that nobody can do anything
2185 * with the chip now anyway.
2187 case FL_PM_SUSPENDED:
2194 mutex_unlock(&chip->mutex);
2197 /* Unlock the chips again */
2200 for (i--; i >=0; i--) {
2201 chip = &cfi->chips[i];
2203 mutex_lock(&chip->mutex);
2205 if (chip->state == FL_PM_SUSPENDED) {
2206 chip->state = chip->oldstate;
2209 mutex_unlock(&chip->mutex);
2217 static void cfi_amdstd_resume(struct mtd_info *mtd)
2219 struct map_info *map = mtd->priv;
2220 struct cfi_private *cfi = map->fldrv_priv;
2222 struct flchip *chip;
2224 for (i=0; i<cfi->numchips; i++) {
2226 chip = &cfi->chips[i];
2228 mutex_lock(&chip->mutex);
2230 if (chip->state == FL_PM_SUSPENDED) {
2231 chip->state = FL_READY;
2232 map_write(map, CMD(0xF0), chip->start);
2236 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
2238 mutex_unlock(&chip->mutex);
2244 * Ensure that the flash device is put back into read array mode before
2245 * unloading the driver or rebooting. On some systems, rebooting while
2246 * the flash is in query/program/erase mode will prevent the CPU from
2247 * fetching the bootloader code, requiring a hard reset or power cycle.
2249 static int cfi_amdstd_reset(struct mtd_info *mtd)
2251 struct map_info *map = mtd->priv;
2252 struct cfi_private *cfi = map->fldrv_priv;
2254 struct flchip *chip;
2256 for (i = 0; i < cfi->numchips; i++) {
2258 chip = &cfi->chips[i];
2260 mutex_lock(&chip->mutex);
2262 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2264 map_write(map, CMD(0xF0), chip->start);
2265 chip->state = FL_SHUTDOWN;
2266 put_chip(map, chip, chip->start);
2269 mutex_unlock(&chip->mutex);
2276 static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
2279 struct mtd_info *mtd;
2281 mtd = container_of(nb, struct mtd_info, reboot_notifier);
2282 cfi_amdstd_reset(mtd);
2287 static void cfi_amdstd_destroy(struct mtd_info *mtd)
2289 struct map_info *map = mtd->priv;
2290 struct cfi_private *cfi = map->fldrv_priv;
2292 cfi_amdstd_reset(mtd);
2293 unregister_reboot_notifier(&mtd->reboot_notifier);
2294 kfree(cfi->cmdset_priv);
2297 kfree(mtd->eraseregions);
2300 MODULE_LICENSE("GPL");
2301 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
2302 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
2303 MODULE_ALIAS("cfi_cmdset_0006");
2304 MODULE_ALIAS("cfi_cmdset_0701");