/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 * 4_by_16 work by Carolyn J. Smith
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6
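/* The device IDs above are matched against entries in the fixup tables
 * further down in this file. */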

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};
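
/*
 * Illustrative usage (not part of this file): a board map driver normally
 * reaches this command set indirectly through the generic CFI probe, which
 * recognises primary vendor command set 0x0002 and calls cfi_cmdset_0002().
 * A minimal sketch, assuming a populated struct map_info 'board_map':
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &board_map);
 *	if (mtd)
 *		add_mtd_device(mtd);
 */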

/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char *erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char *top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			DEBUG(MTD_DEBUG_LEVEL1,
				"%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n");
		mtd->write = cfi_amdstd_write_buffers;
	}
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}
}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
	}
}
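
/*
 * Note on the CFI erase-region descriptors patched above: each 32-bit
 * EraseRegionInfo word holds the number of sectors minus one in its low
 * 16 bits and the sector size divided by 256 in its high 16 bits, which
 * is why the two fixups adjust only the low half to correct a wrong
 * sector count.
 */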

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
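
/* fixup_use_fwh_lock() is provided by fwh_lock.h and routes lock/unlock
 * through the Firmware Hub block-lock registers that these SST parts use. */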

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear to be
	 * common, and the same seems to hold for the device IDs.
	 * This table picks up all the cases where we know that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};

static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
	    extp->MajorVersion == '0')
		extp->MajorVersion = '1';
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd *)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		cfi_fixup_major_minor(cfi, extp);

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
			       "version %c.%c.\n", extp->MajorVersion,
			       extp->MinorVersion);
			kfree(extp);
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming bottom.\n", map->name);
			bootloc = 2;	/* bottom boot */
		}

		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i = 0; i < cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions - 1) - i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Set the default CFI lock/unlock addresses */
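		/*
		 * 0x555/0x2aa are the unlock cycle addresses of the AMD
		 * command set, expressed in device words; cfi_send_gen_cmd()
		 * scales them for the configured bus width and interleave.
		 */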
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;
	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i = 0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1 << cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1 << cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1 << cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i, j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI) ? "CFI" : "JEDEC", cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j = 0; j < cfi->numchips; j++) {
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].offset = (j * devsize) + offset;
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].erasesize = ersize;
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	/* debug */
	for (i = 0; i < mtd->numeraseregions; i++) {
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i, mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;
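	/*
	 * Read the same location twice: while a program or erase is in
	 * progress the chip drives the DQ6 "toggle bit" (and DQ2) to
	 * alternating values on successive reads, so two identical reads
	 * mean the chip is back in array (read) mode.
	 */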
	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		      (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we're trying
				 * to use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend & 2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}
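
/*
 * put_chip() is the counterpart of get_chip(): it resumes an erase that
 * get_chip() suspended (Erase-Resume, 0x30), restores the pre-XIP state,
 * and wakes up any waiters.
 */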
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch (chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts. Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked. Task scheduling might also happen at that
 * point. The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */
static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once). Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back. However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state. If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway. We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with
 * the same chip state as it was entered with. This is why no special care
 * is taken over the add_wait_queue() or schedule() calls from within a
 * couple of xip_disable()'d areas of code, like in do_erase_oneblock for
 * example. The queueing and scheduling always happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where the flash is
 * in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
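	/* cfi->chipshift is log2 of one chip's address span, so the upper
	 * bits of 'from' select the chip and the remainder is the offset
	 * within it. */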
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1 << cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY) {
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if (signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;
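	/*
	 * The 0xAA/0x55 unlock cycles followed by 0x88 switch the device
	 * into the Secured Silicon (SecSi) sector overlay; the 0x90/0x00
	 * sequence after the copy exits it again.
	 */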
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> 3)
			thislen = (1 << 3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundred usecs). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead. Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions. The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	      __func__, adr, datum.x[0]);

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n", __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
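	/*
	 * Standard AMD/JEDEC one-word program sequence: two unlock cycles
	 * (0xAA @ unlock1, 0x55 @ unlock2), the Program command (0xA0),
	 * then the datum written to the target address; the chip's
	 * embedded algorithm takes over from there.
	 */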
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}

static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
			mutex_unlock(&cfi->chips[chipnum].mutex);
			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map) - i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while (len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
			mutex_unlock(&cfi->chips[chipnum].mutex);
			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}

/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	      __func__, adr, datum.x[0]);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
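	/*
	 * Buffered write sequence: after the unlock cycles above, 0x25
	 * (Write to Buffer) goes to the sector address, followed by the
	 * word count minus one, the data words themselves, and finally
	 * 0x29 (Write Buffer Program Confirm).
	 */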
	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while (z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write(map, CMD(0xF0), chip->start);
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__);

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}

static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
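	/* MaxBufWriteSize is the log2 of one chip's write buffer in bytes;
	 * scaling by the interleave gives the buffer span across the bus. */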
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs) & (map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum << cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum << cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}

/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	      __func__, chip->start);

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
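	/*
	 * Six-cycle JEDEC chip erase sequence: two unlock cycles, 0x80
	 * (erase setup), two more unlock cycles, then 0x10 (chip erase).
	 */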
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	      __func__, adr);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
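	/*
	 * Sector erase uses the same five-cycle prefix as chip erase
	 * (unlock, 0x80, unlock) but finishes with 0x30 written to the
	 * address of the sector to be erased.
	 */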
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}

static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}

static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}

static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		mutex_unlock(&chip->mutex);
	}
}

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");