/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

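/*
 * The two commented-out defines below are compile-time debug knobs:
 * uncommenting CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE or
 * CMDSET0001_DISABLE_WRITE_SUSPEND enables the matching fixup further
 * down, which masks the corresponding capability bit out of the chip's
 * extended query data.
 */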
/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* Debugging: turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
#define AT49BV640D      0x02de

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

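/*
 * Fixup tables: each entry matches on a (manufacturer ID, device ID)
 * pair, with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards, and names the
 * fixup function to run against the probed chip.  cfi_fixup() walks a
 * table until it reaches the all-zero terminating entry.
 */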
static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50FLW080A, fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50FLW080B, fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear to be
         * common.  It is likely that the device IDs are as well.
         * This table picks up all the cases where we know that
         * is true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

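/*
 * Read the Intel/Sharp extended query structure.  Its total size is not
 * known up front (it carries variable-length protection, burst-read and
 * partition region records), so we first read sizeof(*extp) bytes and,
 * whenever parsing runs past the end of the buffer, restart the read
 * with the larger size computed so far, bounded at 4096 bytes.
 */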
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size-1];

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

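/*
 * Command-set entry point: the generic CFI/JEDEC probe code calls this
 * (also via the cfi_cmdset_0003/cfi_cmdset_0200 aliases below) once a
 * chip answering to command set 0x0001 has been identified, and it
 * builds the mtd_info describing that chip.
 */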
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

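/*
 * Second-stage setup: size the device, build the erase region table
 * (one entry per region per chip, each with a lock bitmap), hook up the
 * OTP operations when configured, and apply the partition fixup before
 * registering the reboot notifier.
 */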
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if(mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
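/*
 * chip_ready() decides, based on the flchip state machine, whether the
 * chip can service an operation of the given mode right away.  It
 * returns 0 when the caller may proceed, -EAGAIN when the caller should
 * retry after the chip state has changed, and otherwise sleeps on
 * chip->wq.  It may suspend an in-progress erase to let a read or
 * write through.
 */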
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so nobody can access the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                return -EAGAIN;
        }
}

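/*
 * get_chip() is the entry point used by the operations below: it
 * acquires the (virtual) chip for the requested mode.  For partitioned
 * devices it first arbitrates ownership of the shared write/erase
 * engine through chip->priv before delegating the state handling to
 * chip_ready().
 */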
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
                           || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);

                        if (ret == -EAGAIN) {
                                spin_unlock(contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        spin_lock(&shared->lock);
                        spin_unlock(contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        spin_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

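/*
 * put_chip() is the counterpart of get_chip(): it releases ownership of
 * the shared write/erase engine (handing it back to a suspended owner
 * when there is one), resumes a suspended erase, and wakes up any
 * waiters on chip->wq.
 */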
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time )
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time * 8;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
        xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

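/*
 * Non-XIP variant: invalidate any cached copy of the affected range up
 * front, then poll the status register until the ready bit (0x80) is
 * set or eight times the typical operation time has elapsed.  Waits
 * longer than a timer tick are spent in msleep() rather than
 * busy-waiting, and a suspended operation simply parks us on chip->wq
 * until it is resumed.
 */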
static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                unsigned int chip_op_time)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int chip_state = chip->state;
        unsigned int timeo, sleep_time;

        spin_unlock(chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        spin_lock(chip->mutex);

        /* set our timeout to 8 times the expected delay */
        timeo = chip_op_time * 8;
        if (!timeo)
                timeo = 500000;
        sleep_time = chip_op_time / 2;

        for (;;) {
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                if (!timeo) {
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        return -ETIME;
                }

                /* OK Still waiting. Drop the lock, wait a while and retry. */
                spin_unlock(chip->mutex);
                if (sleep_time >= 1000000/HZ) {
                        /*
                         * Half of the normal delay still remaining
                         * can be performed with a sleeping delay instead
                         * of busy waiting.
                         */
                        msleep(sleep_time/1000);
                        timeo -= sleep_time;
                        sleep_time = 1000000/HZ;
                } else {
                        udelay(1);
                        cond_resched();
                        timeo--;
                }
                spin_lock(chip->mutex);

                while (chip->state != chip_state) {
                        /* Someone's suspended the operation: sleep */
                        DECLARE_WAITQUEUE(wait, current);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                }
        }

        /* Done and happy. */
        chip->state = FL_STATUS;
        return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay) \
        INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);


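/*
 * MTD point support: map the requested range directly into the caller's
 * address space.  Each chip touched is switched to read-array mode and
 * parked in FL_POINT, with ref_point_counter tracking nested points so
 * that cfi_intelext_unpoint() can tell when the chip may leave that
 * state.
 */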
1217 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1218 {
1219         unsigned long cmd_addr;
1220         struct cfi_private *cfi = map->fldrv_priv;
1221         int ret = 0;
1222
1223         adr += chip->start;
1224
1225         /* Ensure cmd read/writes are aligned. */
1226         cmd_addr = adr & ~(map_bankwidth(map)-1);
1227
1228         spin_lock(chip->mutex);
1229
1230         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1231
1232         if (!ret) {
1233                 if (chip->state != FL_POINT && chip->state != FL_READY)
1234                         map_write(map, CMD(0xff), cmd_addr);
1235
1236                 chip->state = FL_POINT;
1237                 chip->ref_point_counter++;
1238         }
1239         spin_unlock(chip->mutex);
1240
1241         return ret;
1242 }
1243
1244 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1245                 size_t *retlen, void **virt, resource_size_t *phys)
1246 {
1247         struct map_info *map = mtd->priv;
1248         struct cfi_private *cfi = map->fldrv_priv;
1249         unsigned long ofs, last_end = 0;
1250         int chipnum;
1251         int ret = 0;
1252
1253         if (!map->virt || (from + len > mtd->size))
1254                 return -EINVAL;
1255
1256         /* Now lock the chip(s) to POINT state */
1257
1258         /* ofs: offset within the first chip that the first read should start */
1259         chipnum = (from >> cfi->chipshift);
1260         ofs = from - (chipnum << cfi->chipshift);
1261
1262         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1263         *retlen = 0;
1264         if (phys)
1265                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1266
1267         while (len) {
1268                 unsigned long thislen;
1269
1270                 if (chipnum >= cfi->numchips)
1271                         break;
1272
1273                 /* We cannot point across chips that are virtually disjoint */
1274                 if (!last_end)
1275                         last_end = cfi->chips[chipnum].start;
1276                 else if (cfi->chips[chipnum].start != last_end)
1277                         break;
1278
1279                 if ((len + ofs - 1) >> cfi->chipshift)
1280                         thislen = (1<<cfi->chipshift) - ofs;
1281                 else
1282                         thislen = len;
1283
1284                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1285                 if (ret)
1286                         break;
1287
1288                 *retlen += thislen;
1289                 len -= thislen;
1290
1291                 ofs = 0;
1292                 last_end += 1 << cfi->chipshift;
1293                 chipnum++;
1294         }
1295         return 0;
1296 }
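
/*
 * Illustrative caller sketch (not part of this driver): users normally
 * reach point/unpoint through the mtd_info method pointers, e.g.:
 *
 *	void *virt;
 *	size_t retlen;
 *
 *	if (!mtd->point(mtd, from, len, &retlen, &virt, NULL)) {
 *		... access flash contents directly through virt ...
 *		mtd->unpoint(mtd, from, retlen);
 *	}
 *
 * from/len are assumed caller values; retlen may come back smaller than
 * len when the request crosses virtually disjoint chips, as handled above.
 */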
1297
1298 static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1299 {
1300         struct map_info *map = mtd->priv;
1301         struct cfi_private *cfi = map->fldrv_priv;
1302         unsigned long ofs;
1303         int chipnum;
1304
1305         /* Now unlock the chip(s) POINT state */
1306
1307         /* ofs: offset within the first chip that the first read should start */
1308         chipnum = (from >> cfi->chipshift);
1309         ofs = from - (chipnum <<  cfi->chipshift);
1310
1311         while (len) {
1312                 unsigned long thislen;
1313                 struct flchip *chip;
1314
1315                 if (chipnum >= cfi->numchips)
1316                         break;
1317                 chip = &cfi->chips[chipnum];
1318
1319                 if ((len + ofs - 1) >> cfi->chipshift)
1320                         thislen = (1<<cfi->chipshift) - ofs;
1321                 else
1322                         thislen = len;
1323
1324                 spin_lock(chip->mutex);
1325                 if (chip->state == FL_POINT) {
1326                         chip->ref_point_counter--;
1327                         if (chip->ref_point_counter == 0)
1328                                 chip->state = FL_READY;
1329                 } else
1330                         printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */
1331
1332                 put_chip(map, chip, chip->start);
1333                 spin_unlock(chip->mutex);
1334
1335                 len -= thislen;
1336                 ofs = 0;
1337                 chipnum++;
1338         }
1339 }
1340
1341 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1342 {
1343         unsigned long cmd_addr;
1344         struct cfi_private *cfi = map->fldrv_priv;
1345         int ret;
1346
1347         adr += chip->start;
1348
1349         /* Ensure cmd read/writes are aligned. */
1350         cmd_addr = adr & ~(map_bankwidth(map)-1);
1351
1352         spin_lock(chip->mutex);
1353         ret = get_chip(map, chip, cmd_addr, FL_READY);
1354         if (ret) {
1355                 spin_unlock(chip->mutex);
1356                 return ret;
1357         }
1358
1359         if (chip->state != FL_POINT && chip->state != FL_READY) {
1360                 map_write(map, CMD(0xff), cmd_addr);
1361
1362                 chip->state = FL_READY;
1363         }
1364
1365         map_copy_from(map, buf, adr, len);
1366
1367         put_chip(map, chip, cmd_addr);
1368
1369         spin_unlock(chip->mutex);
1370         return 0;
1371 }
1372
1373 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1374 {
1375         struct map_info *map = mtd->priv;
1376         struct cfi_private *cfi = map->fldrv_priv;
1377         unsigned long ofs;
1378         int chipnum;
1379         int ret = 0;
1380
1381         /* ofs: offset within the first chip that the first read should start */
1382         chipnum = (from >> cfi->chipshift);
1383         ofs = from - (chipnum <<  cfi->chipshift);
1384
1385         *retlen = 0;
1386
1387         while (len) {
1388                 unsigned long thislen;
1389
1390                 if (chipnum >= cfi->numchips)
1391                         break;
1392
1393                 if ((len + ofs - 1) >> cfi->chipshift)
1394                         thislen = (1<<cfi->chipshift) - ofs;
1395                 else
1396                         thislen = len;
1397
1398                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1399                 if (ret)
1400                         break;
1401
1402                 *retlen += thislen;
1403                 len -= thislen;
1404                 buf += thislen;
1405
1406                 ofs = 0;
1407                 chipnum++;
1408         }
1409         return ret;
1410 }
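
/*
 * Worked example of the chip-selection arithmetic (assumed geometry,
 * for illustration only): with two 16MiB chips cfi->chipshift is 24,
 * so a read of len 0x200000 at from = 0x1200000 gives chipnum = 1 and
 * ofs = 0x200000; (len + ofs - 1) >> 24 is then 0, the request fits in
 * that chip and thislen = len.  A request straddling the 16MiB boundary
 * is split instead, the loop moving to the next chip with ofs reset to 0.
 */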
1411
1412 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1413                                      unsigned long adr, map_word datum, int mode)
1414 {
1415         struct cfi_private *cfi = map->fldrv_priv;
1416         map_word status, write_cmd;
1417         int ret=0;
1418
1419         adr += chip->start;
1420
1421         switch (mode) {
1422         case FL_WRITING:
1423                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1424                 break;
1425         case FL_OTP_WRITE:
1426                 write_cmd = CMD(0xc0);
1427                 break;
1428         default:
1429                 return -EINVAL;
1430         }
1431
1432         spin_lock(chip->mutex);
1433         ret = get_chip(map, chip, adr, mode);
1434         if (ret) {
1435                 spin_unlock(chip->mutex);
1436                 return ret;
1437         }
1438
1439         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1440         ENABLE_VPP(map);
1441         xip_disable(map, chip, adr);
1442         map_write(map, write_cmd, adr);
1443         map_write(map, datum, adr);
1444         chip->state = mode;
1445
1446         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1447                                    adr, map_bankwidth(map),
1448                                    chip->word_write_time);
1449         if (ret) {
1450                 xip_enable(map, chip, adr);
1451                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1452                 goto out;
1453         }
1454
1455         /* check for errors */
1456         status = map_read(map, adr);
1457         if (map_word_bitsset(map, status, CMD(0x1a))) {
1458                 unsigned long chipstatus = MERGESTATUS(status);
1459
1460                 /* reset status */
1461                 map_write(map, CMD(0x50), adr);
1462                 map_write(map, CMD(0x70), adr);
1463                 xip_enable(map, chip, adr);
1464
1465                 if (chipstatus & 0x02) {
1466                         ret = -EROFS;
1467                 } else if (chipstatus & 0x08) {
1468                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1469                         ret = -EIO;
1470                 } else {
1471                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1472                         ret = -EINVAL;
1473                 }
1474
1475                 goto out;
1476         }
1477
1478         xip_enable(map, chip, adr);
1479  out:   put_chip(map, chip, adr);
1480         spin_unlock(chip->mutex);
1481         return ret;
1482 }
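
/*
 * Recap of the word-programming exchange above, shown only for
 * readability (status bit names follow the Intel SR convention):
 *
 *	0x40 (0x41 on 0x0200-family parts)	word program setup
 *	<datum>					the data itself
 *	poll until SR.7 (0x80) is set		chip ready again
 *
 * The 0x1a error mask then covers SR.1 (locked block, -EROFS), SR.3
 * (VPP fault, -EIO) and SR.4 (program failure, -EINVAL here).
 */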
1483
1484
1485 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1486 {
1487         struct map_info *map = mtd->priv;
1488         struct cfi_private *cfi = map->fldrv_priv;
1489         int ret = 0;
1490         int chipnum;
1491         unsigned long ofs;
1492
1493         *retlen = 0;
1494         if (!len)
1495                 return 0;
1496
1497         chipnum = to >> cfi->chipshift;
1498         ofs = to  - (chipnum << cfi->chipshift);
1499
1500         /* If it's not bus-aligned, do the first byte write */
1501         if (ofs & (map_bankwidth(map)-1)) {
1502                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1503                 int gap = ofs - bus_ofs;
1504                 int n;
1505                 map_word datum;
1506
1507                 n = min_t(int, len, map_bankwidth(map)-gap);
1508                 datum = map_word_ff(map);
1509                 datum = map_word_load_partial(map, datum, buf, gap, n);
1510
1511                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1512                                                bus_ofs, datum, FL_WRITING);
1513                 if (ret)
1514                         return ret;
1515
1516                 len -= n;
1517                 ofs += n;
1518                 buf += n;
1519                 (*retlen) += n;
1520
1521                 if (ofs >> cfi->chipshift) {
1522                         chipnum++;
1523                         ofs = 0;
1524                         if (chipnum == cfi->numchips)
1525                                 return 0;
1526                 }
1527         }
1528
1529         while (len >= map_bankwidth(map)) {
1530                 map_word datum = map_word_load(map, buf);
1531
1532                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1533                                        ofs, datum, FL_WRITING);
1534                 if (ret)
1535                         return ret;
1536
1537                 ofs += map_bankwidth(map);
1538                 buf += map_bankwidth(map);
1539                 (*retlen) += map_bankwidth(map);
1540                 len -= map_bankwidth(map);
1541
1542                 if (ofs >> cfi->chipshift) {
1543                         chipnum++;
1544                         ofs = 0;
1545                         if (chipnum == cfi->numchips)
1546                                 return 0;
1547                 }
1548         }
1549
1550         if (len & (map_bankwidth(map)-1)) {
1551                 map_word datum;
1552
1553                 datum = map_word_ff(map);
1554                 datum = map_word_load_partial(map, datum, buf, 0, len);
1555
1556                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1557                                        ofs, datum, FL_WRITING);
1558                 if (ret)
1559                         return ret;
1560
1561                 (*retlen) += len;
1562         }
1563
1564         return 0;
1565 }
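
/*
 * Alignment example for the partial-word paths above (assumed 16-bit
 * bus width, illustration only): writing 3 bytes at ofs 0x1001 gives
 * bus_ofs = 0x1000, gap = 1, n = 1, so the first map_word is all-0xFF
 * except for the one loaded byte; since NOR programming can only clear
 * bits, the 0xFF filler leaves the neighbouring byte untouched.  The
 * remaining two bytes then go out as one aligned word, and a trailing
 * partial word would be padded the same way.
 */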
1566
1567
1568 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1569                                     unsigned long adr, const struct kvec **pvec,
1570                                     unsigned long *pvec_seek, int len)
1571 {
1572         struct cfi_private *cfi = map->fldrv_priv;
1573         map_word status, write_cmd, datum;
1574         unsigned long cmd_adr;
1575         int ret, wbufsize, word_gap, words;
1576         const struct kvec *vec;
1577         unsigned long vec_seek;
1578         unsigned long initial_adr;
1579         int initial_len = len;
1580
1581         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1582         adr += chip->start;
1583         initial_adr = adr;
1584         cmd_adr = adr & ~(wbufsize-1);
1585
1586         /* Let's determine this according to the interleave only once */
1587         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1588
1589         spin_lock(chip->mutex);
1590         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1591         if (ret) {
1592                 spin_unlock(chip->mutex);
1593                 return ret;
1594         }
1595
1596         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1597         ENABLE_VPP(map);
1598         xip_disable(map, chip, cmd_adr);
1599
1600         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1601            [...], the device will not accept any more Write to Buffer commands".
1602            So we must check here and reset those bits if they're set. Otherwise
1603            we're just pissing in the wind */
1604         if (chip->state != FL_STATUS) {
1605                 map_write(map, CMD(0x70), cmd_adr);
1606                 chip->state = FL_STATUS;
1607         }
1608         status = map_read(map, cmd_adr);
1609         if (map_word_bitsset(map, status, CMD(0x30))) {
1610                 xip_enable(map, chip, cmd_adr);
1611                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1612                 xip_disable(map, chip, cmd_adr);
1613                 map_write(map, CMD(0x50), cmd_adr);
1614                 map_write(map, CMD(0x70), cmd_adr);
1615         }
1616
1617         chip->state = FL_WRITING_TO_BUFFER;
1618         map_write(map, write_cmd, cmd_adr);
1619         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
1620         if (ret) {
1621                 /* Argh. Not ready for write to buffer */
1622                 map_word Xstatus = map_read(map, cmd_adr);
1623                 map_write(map, CMD(0x70), cmd_adr);
1624                 chip->state = FL_STATUS;
1625                 status = map_read(map, cmd_adr);
1626                 map_write(map, CMD(0x50), cmd_adr);
1627                 map_write(map, CMD(0x70), cmd_adr);
1628                 xip_enable(map, chip, cmd_adr);
1629                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1630                                 map->name, Xstatus.x[0], status.x[0]);
1631                 goto out;
1632         }
1633
1634         /* Figure out the number of words to write */
1635         word_gap = (-adr & (map_bankwidth(map)-1));
1636         words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1637         if (!word_gap) {
1638                 words--;
1639         } else {
1640                 word_gap = map_bankwidth(map) - word_gap;
1641                 adr -= word_gap;
1642                 datum = map_word_ff(map);
1643         }
1644
1645         /* Write length of data to come */
1646         map_write(map, CMD(words), cmd_adr);
1647
1648         /* Write data */
1649         vec = *pvec;
1650         vec_seek = *pvec_seek;
1651         do {
1652                 int n = map_bankwidth(map) - word_gap;
1653                 if (n > vec->iov_len - vec_seek)
1654                         n = vec->iov_len - vec_seek;
1655                 if (n > len)
1656                         n = len;
1657
1658                 if (!word_gap && len < map_bankwidth(map))
1659                         datum = map_word_ff(map);
1660
1661                 datum = map_word_load_partial(map, datum,
1662                                               vec->iov_base + vec_seek,
1663                                               word_gap, n);
1664
1665                 len -= n;
1666                 word_gap += n;
1667                 if (!len || word_gap == map_bankwidth(map)) {
1668                         map_write(map, datum, adr);
1669                         adr += map_bankwidth(map);
1670                         word_gap = 0;
1671                 }
1672
1673                 vec_seek += n;
1674                 if (vec_seek == vec->iov_len) {
1675                         vec++;
1676                         vec_seek = 0;
1677                 }
1678         } while (len);
1679         *pvec = vec;
1680         *pvec_seek = vec_seek;
1681
1682         /* GO GO GO */
1683         map_write(map, CMD(0xd0), cmd_adr);
1684         chip->state = FL_WRITING;
1685
1686         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1687                                    initial_adr, initial_len,
1688                                    chip->buffer_write_time);
1689         if (ret) {
1690                 map_write(map, CMD(0x70), cmd_adr);
1691                 chip->state = FL_STATUS;
1692                 xip_enable(map, chip, cmd_adr);
1693                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1694                 goto out;
1695         }
1696
1697         /* check for errors */
1698         status = map_read(map, cmd_adr);
1699         if (map_word_bitsset(map, status, CMD(0x1a))) {
1700                 unsigned long chipstatus = MERGESTATUS(status);
1701
1702                 /* reset status */
1703                 map_write(map, CMD(0x50), cmd_adr);
1704                 map_write(map, CMD(0x70), cmd_adr);
1705                 xip_enable(map, chip, cmd_adr);
1706
1707                 if (chipstatus & 0x02) {
1708                         ret = -EROFS;
1709                 } else if (chipstatus & 0x08) {
1710                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1711                         ret = -EIO;
1712                 } else {
1713                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1714                         ret = -EINVAL;
1715                 }
1716
1717                 goto out;
1718         }
1719
1720         xip_enable(map, chip, cmd_adr);
1721  out:   put_chip(map, chip, cmd_adr);
1722         spin_unlock(chip->mutex);
1723         return ret;
1724 }
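
/*
 * Buffered-write sequence recap, mirroring what the code above issues:
 *
 *	0xE8 (0xE9 on 0x0200-family)	write-to-buffer setup at the
 *					buffer-aligned cmd_adr, polled
 *					until the chip is ready for data
 *	CMD(words)			word count minus one, per CFI
 *	<data words>			at their real addresses, padded
 *					with 0xFF to bus-width alignment
 *	0xD0				confirm; programming starts
 *
 * wbufsize is cfi_interleave(cfi) << MaxBufWriteSize, e.g. 32 bytes on
 * a single x16 part with a 32-byte buffer (an assumed example value).
 */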
1725
1726 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1727                                 unsigned long count, loff_t to, size_t *retlen)
1728 {
1729         struct map_info *map = mtd->priv;
1730         struct cfi_private *cfi = map->fldrv_priv;
1731         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1732         int ret = 0;
1733         int chipnum;
1734         unsigned long ofs, vec_seek, i;
1735         size_t len = 0;
1736
1737         for (i = 0; i < count; i++)
1738                 len += vecs[i].iov_len;
1739
1740         *retlen = 0;
1741         if (!len)
1742                 return 0;
1743
1744         chipnum = to >> cfi->chipshift;
1745         ofs = to - (chipnum << cfi->chipshift);
1746         vec_seek = 0;
1747
1748         do {
1749                 /* We must not cross write block boundaries */
1750                 int size = wbufsize - (ofs & (wbufsize-1));
1751
1752                 if (size > len)
1753                         size = len;
1754                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1755                                       ofs, &vecs, &vec_seek, size);
1756                 if (ret)
1757                         return ret;
1758
1759                 ofs += size;
1760                 (*retlen) += size;
1761                 len -= size;
1762
1763                 if (ofs >> cfi->chipshift) {
1764                         chipnum++;
1765                         ofs = 0;
1766                         if (chipnum == cfi->numchips)
1767                                 return 0;
1768                 }
1769
1770                 /* Be nice and reschedule with the chip in a usable state for other
1771                    processes. */
1772                 cond_resched();
1773
1774         } while (len);
1775
1776         return 0;
1777 }
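
/*
 * Chunking example (assumed wbufsize of 64 bytes, illustration only):
 * a 100-byte writev starting at ofs 0x30 is sent as 16 bytes up to the
 * write-block boundary, then 64 bytes, then the remaining 20, so no
 * single do_write_buffer() call crosses a write-block boundary.
 */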
1778
1779 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1780                                        size_t len, size_t *retlen, const u_char *buf)
1781 {
1782         struct kvec vec;
1783
1784         vec.iov_base = (void *) buf;
1785         vec.iov_len = len;
1786
1787         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1788 }
1789
1790 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1791                                       unsigned long adr, int len, void *thunk)
1792 {
1793         struct cfi_private *cfi = map->fldrv_priv;
1794         map_word status;
1795         int retries = 3;
1796         int ret;
1797
1798         adr += chip->start;
1799
1800  retry:
1801         spin_lock(chip->mutex);
1802         ret = get_chip(map, chip, adr, FL_ERASING);
1803         if (ret) {
1804                 spin_unlock(chip->mutex);
1805                 return ret;
1806         }
1807
1808         XIP_INVAL_CACHED_RANGE(map, adr, len);
1809         ENABLE_VPP(map);
1810         xip_disable(map, chip, adr);
1811
1812         /* Clear the status register first */
1813         map_write(map, CMD(0x50), adr);
1814
1815         /* Now erase */
1816         map_write(map, CMD(0x20), adr);
1817         map_write(map, CMD(0xD0), adr);
1818         chip->state = FL_ERASING;
1819         chip->erase_suspended = 0;
1820
1821         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1822                                    adr, len,
1823                                    chip->erase_time);
1824         if (ret) {
1825                 map_write(map, CMD(0x70), adr);
1826                 chip->state = FL_STATUS;
1827                 xip_enable(map, chip, adr);
1828                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1829                 goto out;
1830         }
1831
1832         /* We've broken this before. It doesn't hurt to be safe */
1833         map_write(map, CMD(0x70), adr);
1834         chip->state = FL_STATUS;
1835         status = map_read(map, adr);
1836
1837         /* check for errors */
1838         if (map_word_bitsset(map, status, CMD(0x3a))) {
1839                 unsigned long chipstatus = MERGESTATUS(status);
1840
1841                 /* Reset the error bits */
1842                 map_write(map, CMD(0x50), adr);
1843                 map_write(map, CMD(0x70), adr);
1844                 xip_enable(map, chip, adr);
1845
1846                 if ((chipstatus & 0x30) == 0x30) {
1847                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1848                         ret = -EINVAL;
1849                 } else if (chipstatus & 0x02) {
1850                         /* Protection bit set */
1851                         ret = -EROFS;
1852                 } else if (chipstatus & 0x8) {
1853                         /* Voltage */
1854                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1855                         ret = -EIO;
1856                 } else if (chipstatus & 0x20 && retries--) {
1857                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1858                         put_chip(map, chip, adr);
1859                         spin_unlock(chip->mutex);
1860                         goto retry;
1861                 } else {
1862                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1863                         ret = -EIO;
1864                 }
1865
1866                 goto out;
1867         }
1868
1869         xip_enable(map, chip, adr);
1870  out:   put_chip(map, chip, adr);
1871         spin_unlock(chip->mutex);
1872         return ret;
1873 }
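
/*
 * Erase status decoding sketch (raw status values are illustrative):
 *
 *	0xb0: SR.4 and SR.5 both set	bad command sequence, -EINVAL
 *	0xa0: SR.5 only			erase failure, retried 3 times
 *	0x82: SR.1 set			block locked, -EROFS
 *	0x88: SR.3 set			VPP fault, -EIO
 *
 * which is exactly the 0x3a mask walked by the code above.
 */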
1874
1875 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1876 {
1877         unsigned long ofs, len;
1878         int ret;
1879
1880         ofs = instr->addr;
1881         len = instr->len;
1882
1883         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1884         if (ret)
1885                 return ret;
1886
1887         instr->state = MTD_ERASE_DONE;
1888         mtd_erase_callback(instr);
1889
1890         return 0;
1891 }
1892
1893 static void cfi_intelext_sync (struct mtd_info *mtd)
1894 {
1895         struct map_info *map = mtd->priv;
1896         struct cfi_private *cfi = map->fldrv_priv;
1897         int i;
1898         struct flchip *chip;
1899         int ret = 0;
1900
1901         for (i=0; !ret && i<cfi->numchips; i++) {
1902                 chip = &cfi->chips[i];
1903
1904                 spin_lock(chip->mutex);
1905                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1906
1907                 if (!ret) {
1908                         chip->oldstate = chip->state;
1909                         chip->state = FL_SYNCING;
1910                         /* No need to wake_up() on this state change -
1911                          * as the whole point is that nobody can do anything
1912                          * with the chip now anyway.
1913                          */
1914                 }
1915                 spin_unlock(chip->mutex);
1916         }
1917
1918         /* Unlock the chips again */
1919
1920         for (i--; i >= 0; i--) {
1921                 chip = &cfi->chips[i];
1922
1923                 spin_lock(chip->mutex);
1924
1925                 if (chip->state == FL_SYNCING) {
1926                         chip->state = chip->oldstate;
1927                         chip->oldstate = FL_READY;
1928                         wake_up(&chip->wq);
1929                 }
1930                 spin_unlock(chip->mutex);
1931         }
1932 }
1933
1934 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
1935                                                 struct flchip *chip,
1936                                                 unsigned long adr,
1937                                                 int len, void *thunk)
1938 {
1939         struct cfi_private *cfi = map->fldrv_priv;
1940         int status, ofs_factor = cfi->interleave * cfi->device_type;
1941
1942         adr += chip->start;
1943         xip_disable(map, chip, adr+(2*ofs_factor));
1944         map_write(map, CMD(0x90), adr+(2*ofs_factor));
1945         chip->state = FL_JEDEC_QUERY;
1946         status = cfi_read_query(map, adr+(2*ofs_factor));
1947         xip_enable(map, chip, 0);
1948         return status;
1949 }
1950
1951 #ifdef DEBUG_LOCK_BITS
1952 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1953                                                 struct flchip *chip,
1954                                                 unsigned long adr,
1955                                                 int len, void *thunk)
1956 {
1957         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1958                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
1959         return 0;
1960 }
1961 #endif
1962
1963 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1964 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1965
1966 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1967                                        unsigned long adr, int len, void *thunk)
1968 {
1969         struct cfi_private *cfi = map->fldrv_priv;
1970         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1971         int udelay;
1972         int ret;
1973
1974         adr += chip->start;
1975
1976         spin_lock(chip->mutex);
1977         ret = get_chip(map, chip, adr, FL_LOCKING);
1978         if (ret) {
1979                 spin_unlock(chip->mutex);
1980                 return ret;
1981         }
1982
1983         ENABLE_VPP(map);
1984         xip_disable(map, chip, adr);
1985
1986         map_write(map, CMD(0x60), adr);
1987         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1988                 map_write(map, CMD(0x01), adr);
1989                 chip->state = FL_LOCKING;
1990         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1991                 map_write(map, CMD(0xD0), adr);
1992                 chip->state = FL_UNLOCKING;
1993         } else
1994                 BUG();
1995
1996         /*
1997          * If Instant Individual Block Locking supported then no need
1998          * to delay.
1999          */
2000         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
2001
2002         ret = WAIT_TIMEOUT(map, chip, adr, udelay);
2003         if (ret) {
2004                 map_write(map, CMD(0x70), adr);
2005                 chip->state = FL_STATUS;
2006                 xip_enable(map, chip, adr);
2007                 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2008                 goto out;
2009         }
2010
2011         xip_enable(map, chip, adr);
2012 out:    put_chip(map, chip, adr);
2013         spin_unlock(chip->mutex);
2014         return ret;
2015 }
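
/*
 * Lock-bit command recap: 0x60 is the lock-bit setup, after which 0x01
 * sets and 0xD0 clears the block's lock bit.  On parts that do not
 * advertise Instant Individual Block Locking (feature bit 5) the
 * operation takes real time, so one tick's worth (1000000/HZ us) is
 * passed to WAIT_TIMEOUT() above; with the feature present it is zero.
 */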
2016
2017 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
2018 {
2019         int ret;
2020
2021 #ifdef DEBUG_LOCK_BITS
2022         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2023                __func__, (unsigned long long)ofs, len);
2024         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2025                 ofs, len, NULL);
2026 #endif
2027
2028         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2029                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2030
2031 #ifdef DEBUG_LOCK_BITS
2032         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2033                __func__, ret);
2034         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2035                 ofs, len, NULL);
2036 #endif
2037
2038         return ret;
2039 }
2040
2041 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2042 {
2043         int ret;
2044
2045 #ifdef DEBUG_LOCK_BITS
2046         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2047                __func__, (unsigned long long)ofs, len);
2048         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2049                 ofs, len, NULL);
2050 #endif
2051
2052         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2053                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2054
2055 #ifdef DEBUG_LOCK_BITS
2056         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2057                __func__, ret);
2058         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2059                 ofs, len, NULL);
2060 #endif
2061
2062         return ret;
2063 }
2064
2065 #ifdef CONFIG_MTD_OTP
2066
2067 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2068                         u_long data_offset, u_char *buf, u_int size,
2069                         u_long prot_offset, u_int groupno, u_int groupsize);
2070
2071 static int __xipram
2072 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2073             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2074 {
2075         struct cfi_private *cfi = map->fldrv_priv;
2076         int ret;
2077
2078         spin_lock(chip->mutex);
2079         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2080         if (ret) {
2081                 spin_unlock(chip->mutex);
2082                 return ret;
2083         }
2084
2085         /* let's ensure we're not reading back cached data from array mode */
2086         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2087
2088         xip_disable(map, chip, chip->start);
2089         if (chip->state != FL_JEDEC_QUERY) {
2090                 map_write(map, CMD(0x90), chip->start);
2091                 chip->state = FL_JEDEC_QUERY;
2092         }
2093         map_copy_from(map, buf, chip->start + offset, size);
2094         xip_enable(map, chip, chip->start);
2095
2096         /* then ensure we don't keep OTP data in the cache */
2097         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2098
2099         put_chip(map, chip, chip->start);
2100         spin_unlock(chip->mutex);
2101         return 0;
2102 }
2103
2104 static int
2105 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2106              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2107 {
2108         int ret;
2109
2110         while (size) {
2111                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2112                 int gap = offset - bus_ofs;
2113                 int n = min_t(int, size, map_bankwidth(map)-gap);
2114                 map_word datum = map_word_ff(map);
2115
2116                 datum = map_word_load_partial(map, datum, buf, gap, n);
2117                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2118                 if (ret)
2119                         return ret;
2120
2121                 offset += n;
2122                 buf += n;
2123                 size -= n;
2124         }
2125
2126         return 0;
2127 }
2128
2129 static int
2130 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2131             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2132 {
2133         struct cfi_private *cfi = map->fldrv_priv;
2134         map_word datum;
2135
2136         /* make sure area matches group boundaries */
2137         if (size != grpsz)
2138                 return -EXDEV;
2139
2140         datum = map_word_ff(map);
2141         datum = map_word_clr(map, datum, CMD(1 << grpno));
2142         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2143 }
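
/*
 * Lock-word example (assumed x16 geometry, illustration only): locking
 * protection group 2 builds an all-ones word with bit 2 cleared,
 * 0xfffb, and programs it at the prot location.  Like data bits, OTP
 * lock bits can only be cleared, so a group lock is irreversible.
 */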
2144
2145 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2146                                  size_t *retlen, u_char *buf,
2147                                  otp_op_t action, int user_regs)
2148 {
2149         struct map_info *map = mtd->priv;
2150         struct cfi_private *cfi = map->fldrv_priv;
2151         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2152         struct flchip *chip;
2153         struct cfi_intelext_otpinfo *otp;
2154         u_long devsize, reg_prot_offset, data_offset;
2155         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2156         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2157         int ret;
2158
2159         *retlen = 0;
2160
2161         /* Check that we actually have some OTP registers */
2162         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2163                 return -ENODATA;
2164
2165         /* we need real chips here not virtual ones */
2166         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2167         chip_step = devsize >> cfi->chipshift;
2168         chip_num = 0;
2169
2170         /* Some chips have OTP located in the _top_ partition only.
2171            For example: Intel 28F256L18T (T means top-parameter device) */
2172         if (cfi->mfr == MANUFACTURER_INTEL) {
2173                 switch (cfi->id) {
2174                 case 0x880b:
2175                 case 0x880c:
2176                 case 0x880d:
2177                         chip_num = chip_step - 1;
2178                 }
2179         }
2180
2181         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2182                 chip = &cfi->chips[chip_num];
2183                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2184
2185                 /* first OTP region */
2186                 field = 0;
2187                 reg_prot_offset = extp->ProtRegAddr;
2188                 reg_fact_groups = 1;
2189                 reg_fact_size = 1 << extp->FactProtRegSize;
2190                 reg_user_groups = 1;
2191                 reg_user_size = 1 << extp->UserProtRegSize;
2192
2193                 while (len > 0) {
2194                         /* flash geometry fixup */
2195                         data_offset = reg_prot_offset + 1;
2196                         data_offset *= cfi->interleave * cfi->device_type;
2197                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2198                         reg_fact_size *= cfi->interleave;
2199                         reg_user_size *= cfi->interleave;
2200
2201                         if (user_regs) {
2202                                 groups = reg_user_groups;
2203                                 groupsize = reg_user_size;
2204                                 /* skip over factory reg area */
2205                                 groupno = reg_fact_groups;
2206                                 data_offset += reg_fact_groups * reg_fact_size;
2207                         } else {
2208                                 groups = reg_fact_groups;
2209                                 groupsize = reg_fact_size;
2210                                 groupno = 0;
2211                         }
2212
2213                         while (len > 0 && groups > 0) {
2214                                 if (!action) {
2215                                         /*
2216                                          * Special case: if action is NULL
2217                                          * we fill buf with otp_info records.
2218                                          */
2219                                         struct otp_info *otpinfo;
2220                                         map_word lockword;
2221                                         if (len <= sizeof(struct otp_info))	/* unsigned: check before subtracting */
2222                                                 return -ENOSPC;
2223                                         len -= sizeof(struct otp_info);
2224                                         ret = do_otp_read(map, chip,
2225                                                           reg_prot_offset,
2226                                                           (u_char *)&lockword,
2227                                                           map_bankwidth(map),
2228                                                           0, 0,  0);
2229                                         if (ret)
2230                                                 return ret;
2231                                         otpinfo = (struct otp_info *)buf;
2232                                         otpinfo->start = from;
2233                                         otpinfo->length = groupsize;
2234                                         otpinfo->locked =
2235                                            !map_word_bitsset(map, lockword,
2236                                                              CMD(1 << groupno));
2237                                         from += groupsize;
2238                                         buf += sizeof(*otpinfo);
2239                                         *retlen += sizeof(*otpinfo);
2240                                 } else if (from >= groupsize) {
2241                                         from -= groupsize;
2242                                         data_offset += groupsize;
2243                                 } else {
2244                                         int size = groupsize;
2245                                         data_offset += from;
2246                                         size -= from;
2247                                         from = 0;
2248                                         if (size > len)
2249                                                 size = len;
2250                                         ret = action(map, chip, data_offset,
2251                                                      buf, size, reg_prot_offset,
2252                                                      groupno, groupsize);
2253                                         if (ret < 0)
2254                                                 return ret;
2255                                         buf += size;
2256                                         len -= size;
2257                                         *retlen += size;
2258                                         data_offset += size;
2259                                 }
2260                                 groupno++;
2261                                 groups--;
2262                         }
2263
2264                         /* next OTP region */
2265                         if (++field == extp->NumProtectionFields)
2266                                 break;
2267                         reg_prot_offset = otp->ProtRegAddr;
2268                         reg_fact_groups = otp->FactGroups;
2269                         reg_fact_size = 1 << otp->FactProtRegSize;
2270                         reg_user_groups = otp->UserGroups;
2271                         reg_user_size = 1 << otp->UserProtRegSize;
2272                         otp++;
2273                 }
2274         }
2275
2276         return 0;
2277 }
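
/*
 * Geometry fixup example (assumed values, illustration only): on a
 * single x16 chip (interleave 1, device_type 2) a ProtRegAddr of 0x80
 * puts the lock word at 0x80 * 2 = 0x100 bytes from chip start and the
 * register data at (0x80 + 1) * 2 = 0x102; with FactProtRegSize = 2
 * the factory region spans reg_fact_size = 1 << 2 = 4 units, scaled by
 * the interleave.
 */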
2278
2279 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2280                                            size_t len, size_t *retlen,
2281                                             u_char *buf)
2282 {
2283         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2284                                      buf, do_otp_read, 0);
2285 }
2286
2287 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2288                                            size_t len, size_t *retlen,
2289                                             u_char *buf)
2290 {
2291         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2292                                      buf, do_otp_read, 1);
2293 }
2294
2295 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2296                                             size_t len, size_t *retlen,
2297                                              u_char *buf)
2298 {
2299         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2300                                      buf, do_otp_write, 1);
2301 }
2302
2303 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2304                                            loff_t from, size_t len)
2305 {
2306         size_t retlen;
2307         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2308                                      NULL, do_otp_lock, 1);
2309 }
2310
2311 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2312                                            struct otp_info *buf, size_t len)
2313 {
2314         size_t retlen;
2315         int ret;
2316
2317         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2318         return ret ? : retlen;
2319 }
2320
2321 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2322                                            struct otp_info *buf, size_t len)
2323 {
2324         size_t retlen;
2325         int ret;
2326
2327         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2328         return ret ? : retlen;
2329 }
2330
2331 #endif
2332
2333 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2334 {
2335         struct mtd_erase_region_info *region;
2336         int block, status, i;
2337         unsigned long adr;
2338         size_t len;
2339
2340         for (i = 0; i < mtd->numeraseregions; i++) {
2341                 region = &mtd->eraseregions[i];
2342                 if (!region->lockmap)
2343                         continue;
2344
2345                 for (block = 0; block < region->numblocks; block++) {
2346                         len = region->erasesize;
2347                         adr = region->offset + block * len;
2348
2349                         status = cfi_varsize_frob(mtd,
2350                                         do_getlockstatus_oneblock, adr, len, NULL);
2351                         if (status)
2352                                 set_bit(block, region->lockmap);
2353                         else
2354                                 clear_bit(block, region->lockmap);
2355                 }
2356         }
2357 }
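
/*
 * The lockmap allocated per erase region holds one bit per block;
 * save_locks() snapshots the hardware lock state into it before
 * suspend, and cfi_intelext_restore_locks() below replays only the
 * unlocked blocks after resume, since power-up-locking parts come back
 * with every block locked.
 */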
2358
2359 static int cfi_intelext_suspend(struct mtd_info *mtd)
2360 {
2361         struct map_info *map = mtd->priv;
2362         struct cfi_private *cfi = map->fldrv_priv;
2363         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2364         int i;
2365         struct flchip *chip;
2366         int ret = 0;
2367
2368         if ((mtd->flags & MTD_POWERUP_LOCK)
2369             && extp && (extp->FeatureSupport & (1 << 5)))
2370                 cfi_intelext_save_locks(mtd);
2371
2372         for (i=0; !ret && i<cfi->numchips; i++) {
2373                 chip = &cfi->chips[i];
2374
2375                 spin_lock(chip->mutex);
2376
2377                 switch (chip->state) {
2378                 case FL_READY:
2379                 case FL_STATUS:
2380                 case FL_CFI_QUERY:
2381                 case FL_JEDEC_QUERY:
2382                         if (chip->oldstate == FL_READY) {
2383                                 /* place the chip in a known state before suspend */
2384                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2385                                 chip->oldstate = chip->state;
2386                                 chip->state = FL_PM_SUSPENDED;
2387                                 /* No need to wake_up() on this state change -
2388                                  * as the whole point is that nobody can do anything
2389                                  * with the chip now anyway.
2390                                  */
2391                         } else {
2392                                 /* There seems to be an operation pending. We must wait for it. */
2393                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2394                                 ret = -EAGAIN;
2395                         }
2396                         break;
2397                 default:
2398                         /* Should we actually wait? Once upon a time these routines weren't
2399                            allowed to. Or should we return -EAGAIN, because the upper layers
2400                            ought to have already shut down anything which was using the device
2401                            anyway? The latter for now. */
2402                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
2403                         ret = -EAGAIN;
2404                 case FL_PM_SUSPENDED:
2405                         break;
2406                 }
2407                 spin_unlock(chip->mutex);
2408         }
2409
2410         /* Unlock the chips again */
2411
2412         if (ret) {
2413                 for (i--; i >= 0; i--) {
2414                         chip = &cfi->chips[i];
2415
2416                         spin_lock(chip->mutex);
2417
2418                         if (chip->state == FL_PM_SUSPENDED) {
2419                                 /* No need to force it into a known state here,
2420                                    because we're returning failure, and it didn't
2421                                    get power cycled */
2422                                 chip->state = chip->oldstate;
2423                                 chip->oldstate = FL_READY;
2424                                 wake_up(&chip->wq);
2425                         }
2426                         spin_unlock(chip->mutex);
2427                 }
2428         }
2429
2430         return ret;
2431 }
2432
2433 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2434 {
2435         struct mtd_erase_region_info *region;
2436         int block, i;
2437         unsigned long adr;
2438         size_t len;
2439
2440         for (i = 0; i < mtd->numeraseregions; i++) {
2441                 region = &mtd->eraseregions[i];
2442                 if (!region->lockmap)
2443                         continue;
2444
2445                 for (block = 0; block < region->numblocks; block++) {
2446                         len = region->erasesize;
2447                         adr = region->offset + block * len;
2448
2449                         if (!test_bit(block, region->lockmap))
2450                                 cfi_intelext_unlock(mtd, adr, len);
2451                 }
2452         }
2453 }
2454
2455 static void cfi_intelext_resume(struct mtd_info *mtd)
2456 {
2457         struct map_info *map = mtd->priv;
2458         struct cfi_private *cfi = map->fldrv_priv;
2459         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2460         int i;
2461         struct flchip *chip;
2462
2463         for (i=0; i<cfi->numchips; i++) {
2464
2465                 chip = &cfi->chips[i];
2466
2467                 spin_lock(chip->mutex);
2468
2469                 /* Go to known state. Chip may have been power cycled */
2470                 if (chip->state == FL_PM_SUSPENDED) {
2471                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2472                         chip->oldstate = chip->state = FL_READY;
2473                         wake_up(&chip->wq);
2474                 }
2475
2476                 spin_unlock(chip->mutex);
2477         }
2478
2479         if ((mtd->flags & MTD_POWERUP_LOCK)
2480             && extp && (extp->FeatureSupport & (1 << 5)))
2481                 cfi_intelext_restore_locks(mtd);
2482 }
2483
2484 static int cfi_intelext_reset(struct mtd_info *mtd)
2485 {
2486         struct map_info *map = mtd->priv;
2487         struct cfi_private *cfi = map->fldrv_priv;
2488         int i, ret;
2489
2490         for (i=0; i < cfi->numchips; i++) {
2491                 struct flchip *chip = &cfi->chips[i];
2492
2493                 /* force the completion of any ongoing operation
2494                    and switch to array mode so any bootloader in
2495                    flash is accessible for soft reboot. */
2496                 spin_lock(chip->mutex);
2497                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2498                 if (!ret) {
2499                         map_write(map, CMD(0xff), chip->start);
2500                         chip->state = FL_SHUTDOWN;
2501                 }
2502                 spin_unlock(chip->mutex);
2503         }
2504
2505         return 0;
2506 }
2507
2508 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2509                                void *v)
2510 {
2511         struct mtd_info *mtd;
2512
2513         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2514         cfi_intelext_reset(mtd);
2515         return NOTIFY_DONE;
2516 }
2517
2518 static void cfi_intelext_destroy(struct mtd_info *mtd)
2519 {
2520         struct map_info *map = mtd->priv;
2521         struct cfi_private *cfi = map->fldrv_priv;
2522         struct mtd_erase_region_info *region;
2523         int i;
2524         cfi_intelext_reset(mtd);
2525         unregister_reboot_notifier(&mtd->reboot_notifier);
2526         kfree(cfi->cmdset_priv);
2527         kfree(cfi->cfiq);
2528         kfree(cfi->chips[0].priv);
2529         kfree(cfi);
2530         for (i = 0; i < mtd->numeraseregions; i++) {
2531                 region = &mtd->eraseregions[i];
2532                 kfree(region->lockmap);
2534         }
2535         kfree(mtd->eraseregions);
2536 }
2537
2538 MODULE_LICENSE("GPL");
2539 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2540 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2541 MODULE_ALIAS("cfi_cmdset_0003");
2542 MODULE_ALIAS("cfi_cmdset_0200");