/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0
/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
                                  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
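/*
 * fwh_lock.h supplies the firmware-hub lock/unlock helpers (such as
 * fixup_use_fwh_lock) used by the parts in jedec_fixup_table below; it is
 * included here, after the get_chip()/put_chip() prototypes, because its
 * helpers call them.
 */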
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<4; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_INFO "Atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n");
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
        { 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It is likely the device IDs are as
         * well.  This table picks all the cases where we
         * know that to be true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
        { 0, 0, NULL }
};
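
/*
 * A minimal sketch of how these tables are consumed (cfi_fixup() itself
 * lives in cfi_util.c, not here): each entry is matched against the
 * probed chip's manufacturer and device ID, with CFI_MFR_ANY and
 * CFI_ID_ANY acting as wildcards, and every matching fixup is invoked.
 * A new quirk would be added as one more row, e.g.
 *
 *      { CFI_MFR_INTEL, 0x1234, fixup_my_quirk },   (hypothetical ID/handler)
 *
 * placed before the { 0, 0, NULL } terminator.
 */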

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->is_locked = cfi_intelext_is_locked;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
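        /* The CFI query reports the write buffer size as a power of two (2^n bytes) */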
        mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i < cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
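
/* Command set IDs 0x0003 and 0x0200 are handled by the same code as 0x0001 */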
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                        if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
                                goto setup_err;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i,(unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk(KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        mutex_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                mutex_init(&chip->mutex);
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}
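
/*
 * Worked example (illustrative numbers, not from any particular chip):
 * a 32 MiB device (chipshift = 25) advertising 4 identical hardware
 * partitions gives partshift = 25 - __ffs(4) = 23, i.e. four virtual
 * "chips" of 8 MiB each, every one with its own flchip state but all
 * pointing at the same flchip_shared arbitration structure.
 */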

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can get the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own chip if it is already
                         * in FL_SYNCING state. Put contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}
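
/*
 * Illustrative caller pattern (a sketch mirroring the do_write/do_erase
 * helpers elsewhere in this driver, not a new API): every flash operation
 * brackets its work with get_chip()/put_chip() while holding chip->mutex:
 *
 *      mutex_lock(&chip->mutex);
 *      ret = get_chip(map, chip, adr, FL_WRITING);
 *      if (ret) {
 *              mutex_unlock(&chip->mutex);
 *              return ret;
 *      }
 *      ... issue the command and poll for completion ...
 *      put_chip(map, chip, adr);
 *      mutex_unlock(&chip->mutex);
 */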

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */
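
/*
 * A minimal usage sketch (assumed shape, mirroring the program/erase
 * paths later in this driver): disable XIP around any command sequence
 * that takes the flash out of array mode, e.g. a word program (0x40):
 *
 *      xip_disable(map, chip, adr);
 *      map_write(map, CMD(0x40), adr);
 *      ... poll status here, executing only __xipram code ...
 *      xip_enable(map, chip, adr);
 */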

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete,
 * xip_wait_for_operation() polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending, the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time_max;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        mutex_unlock(&chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        mutex_lock(&chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                mutex_unlock(&chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                mutex_lock(&chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
        xip_wait_for_operation(map, chip, cmd_adr, usec_max)
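
/*
 * Hedged usage sketch (modelled on the buffered-write path of this
 * driver): in the XIP case callers invalidate the cache before issuing
 * the command, and the wait macro then only polls:
 *
 *      XIP_INVAL_CACHED_RANGE(map, adr, len);
 *      ... issue the program/erase command ...
 *      ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, adr, len,
 *                                 chip->buffer_write_time,
 *                                 chip->buffer_write_time_max);
 */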
1203
1204 #else
1205
1206 #define xip_disable(map, chip, adr)
1207 #define xip_enable(map, chip, adr)
1208 #define XIP_INVAL_CACHED_RANGE(x...)
1209 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1210
1211 static int inval_cache_and_wait_for_operation(
1212                 struct map_info *map, struct flchip *chip,
1213                 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1214                 unsigned int chip_op_time, unsigned int chip_op_time_max)
1215 {
1216         struct cfi_private *cfi = map->fldrv_priv;
1217         map_word status, status_OK = CMD(0x80);
1218         int chip_state = chip->state;
1219         unsigned int timeo, sleep_time, reset_timeo;
1220
1221         mutex_unlock(&chip->mutex);
1222         if (inval_len)
1223                 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1224         mutex_lock(&chip->mutex);
1225
1226         timeo = chip_op_time_max;
1227         if (!timeo)
1228                 timeo = 500000;
1229         reset_timeo = timeo;
1230         sleep_time = chip_op_time / 2;
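             /* Sleeping half the typical operation time per round notices an
              * early completion reasonably quickly without busy-waiting for
              * the whole expected duration. */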
1231
1232         for (;;) {
1233                 status = map_read(map, cmd_adr);
1234                 if (map_word_andequal(map, status, status_OK, status_OK))
1235                         break;
1236
1237                 if (!timeo) {
1238                         map_write(map, CMD(0x70), cmd_adr);
1239                         chip->state = FL_STATUS;
1240                         return -ETIME;
1241                 }
1242
1243                 /* OK Still waiting. Drop the lock, wait a while and retry. */
1244                 mutex_unlock(&chip->mutex);
1245                 if (sleep_time >= 1000000/HZ) {
1246                         /*
1247                          * Half of the normal delay still remaining
1248                          * can be performed with a sleeping delay instead
1249                          * of busy waiting.
1250                          */
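                             /* e.g. at HZ == 100 a tick is 10000 us: a 20000 us
                              * sleep_time sleeps 20 ms once, then 10 ms per round;
                              * anything under one tick takes the 1 us busy-wait
                              * branch below instead. */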
1251                         msleep(sleep_time/1000);
1252                         timeo -= sleep_time;
1253                         sleep_time = 1000000/HZ;
1254                 } else {
1255                         udelay(1);
1256                         cond_resched();
1257                         timeo--;
1258                 }
1259                 mutex_lock(&chip->mutex);
1260
1261                 while (chip->state != chip_state) {
1262                         /* Someone's suspended the operation: sleep */
1263                         DECLARE_WAITQUEUE(wait, current);
1264                         set_current_state(TASK_UNINTERRUPTIBLE);
1265                         add_wait_queue(&chip->wq, &wait);
1266                         mutex_unlock(&chip->mutex);
1267                         schedule();
1268                         remove_wait_queue(&chip->wq, &wait);
1269                         mutex_lock(&chip->mutex);
1270                 }
1271                 if (chip->erase_suspended && chip_state == FL_ERASING)  {
1272                         /* Erase suspend occurred while sleeping: reset timeout */
1273                         timeo = reset_timeo;
1274                         chip->erase_suspended = 0;
1275                 }
1276                 if (chip->write_suspended && chip_state == FL_WRITING)  {
1277                         /* Write suspend occurred while sleeping: reset timeout */
1278                         timeo = reset_timeo;
1279                         chip->write_suspended = 0;
1280                 }
1281         }
1282
1283         /* Done and happy. */
1284         chip->state = FL_STATUS;
1285         return 0;
1286 }
1287
1288 #endif
1289
1290 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1291         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max)
1292
1293
1294 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1295 {
1296         unsigned long cmd_addr;
1297         struct cfi_private *cfi = map->fldrv_priv;
1298         int ret = 0;
1299
1300         adr += chip->start;
1301
1302         /* Ensure cmd read/writes are aligned. */
1303         cmd_addr = adr & ~(map_bankwidth(map)-1);
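             /* e.g. with a 4-byte bankwidth, adr 0x20003 yields cmd_addr 0x20000. */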
1304
1305         mutex_lock(&chip->mutex);
1306
1307         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1308
1309         if (!ret) {
1310                 if (chip->state != FL_POINT && chip->state != FL_READY)
1311                         map_write(map, CMD(0xff), cmd_addr);
1312
1313                 chip->state = FL_POINT;
1314                 chip->ref_point_counter++;
1315         }
1316         mutex_unlock(&chip->mutex);
1317
1318         return ret;
1319 }
1320
1321 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1322                 size_t *retlen, void **virt, resource_size_t *phys)
1323 {
1324         struct map_info *map = mtd->priv;
1325         struct cfi_private *cfi = map->fldrv_priv;
1326         unsigned long ofs, last_end = 0;
1327         int chipnum;
1328         int ret = 0;
1329
1330         if (!map->virt || (from + len > mtd->size))
1331                 return -EINVAL;
1332
1333         /* Now lock the chip(s) to POINT state */
1334
1335         /* ofs: offset within the first chip that the first read should start */
1336         chipnum = (from >> cfi->chipshift);
1337         ofs = from - (chipnum << cfi->chipshift);
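             /* e.g. with chipshift == 23 (8MiB chips), from == 0xA00000 gives
              * chipnum 1 and ofs 0x200000. */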
1338
1339         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1340         *retlen = 0;
1341         if (phys)
1342                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1343
1344         while (len) {
1345                 unsigned long thislen;
1346
1347                 if (chipnum >= cfi->numchips)
1348                         break;
1349
1350                 /* We cannot point across chips that are virtually disjoint */
1351                 if (!last_end)
1352                         last_end = cfi->chips[chipnum].start;
1353                 else if (cfi->chips[chipnum].start != last_end)
1354                         break;
1355
1356                 if ((len + ofs - 1) >> cfi->chipshift)
1357                         thislen = (1<<cfi->chipshift) - ofs;
1358                 else
1359                         thislen = len;
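                     /* (len + ofs - 1) >> chipshift is non-zero exactly when the
                      * request runs past the end of this chip, so thislen gets
                      * clamped to what remains of the chip. */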
1360
1361                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1362                 if (ret)
1363                         break;
1364
1365                 *retlen += thislen;
1366                 len -= thislen;
1367
1368                 ofs = 0;
1369                 last_end += 1 << cfi->chipshift;
1370                 chipnum++;
1371         }
1372         return 0;
1373 }
1374
1375 static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1376 {
1377         struct map_info *map = mtd->priv;
1378         struct cfi_private *cfi = map->fldrv_priv;
1379         unsigned long ofs;
1380         int chipnum;
1381
1382         /* Now unlock the chip(s) POINT state */
1383
1384         /* ofs: offset within the first chip that the first read should start */
1385         chipnum = (from >> cfi->chipshift);
1386         ofs = from - (chipnum << cfi->chipshift);
1387
1388         while (len) {
1389                 unsigned long thislen;
1390                 struct flchip *chip;
1391
1392                 if (chipnum >= cfi->numchips)
1393                         break;
1394                 chip = &cfi->chips[chipnum];
1395
1396                 if ((len + ofs - 1) >> cfi->chipshift)
1397                         thislen = (1<<cfi->chipshift) - ofs;
1398                 else
1399                         thislen = len;
1400
1401                 mutex_lock(&chip->mutex);
1402                 if (chip->state == FL_POINT) {
1403                         chip->ref_point_counter--;
1404                         if (chip->ref_point_counter == 0)
1405                                 chip->state = FL_READY;
1406                 } else
1407                         printk(KERN_ERR "%s: Warning: unpoint called on a region that was not pointed\n", map->name); /* Should this give an error? */
1408
1409                 put_chip(map, chip, chip->start);
1410                 mutex_unlock(&chip->mutex);
1411
1412                 len -= thislen;
1413                 ofs = 0;
1414                 chipnum++;
1415         }
1416 }
1417
1418 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1419 {
1420         unsigned long cmd_addr;
1421         struct cfi_private *cfi = map->fldrv_priv;
1422         int ret;
1423
1424         adr += chip->start;
1425
1426         /* Ensure cmd read/writes are aligned. */
1427         cmd_addr = adr & ~(map_bankwidth(map)-1);
1428
1429         mutex_lock(&chip->mutex);
1430         ret = get_chip(map, chip, cmd_addr, FL_READY);
1431         if (ret) {
1432                 mutex_unlock(&chip->mutex);
1433                 return ret;
1434         }
1435
1436         if (chip->state != FL_POINT && chip->state != FL_READY) {
1437                 map_write(map, CMD(0xff), cmd_addr);
1438
1439                 chip->state = FL_READY;
1440         }
1441
1442         map_copy_from(map, buf, adr, len);
1443
1444         put_chip(map, chip, cmd_addr);
1445
1446         mutex_unlock(&chip->mutex);
1447         return 0;
1448 }
1449
1450 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1451 {
1452         struct map_info *map = mtd->priv;
1453         struct cfi_private *cfi = map->fldrv_priv;
1454         unsigned long ofs;
1455         int chipnum;
1456         int ret = 0;
1457
1458         /* ofs: offset within the first chip that the first read should start */
1459         chipnum = (from >> cfi->chipshift);
1460         ofs = from - (chipnum << cfi->chipshift);
1461
1462         *retlen = 0;
1463
1464         while (len) {
1465                 unsigned long thislen;
1466
1467                 if (chipnum >= cfi->numchips)
1468                         break;
1469
1470                 if ((len + ofs - 1) >> cfi->chipshift)
1471                         thislen = (1<<cfi->chipshift) - ofs;
1472                 else
1473                         thislen = len;
1474
1475                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1476                 if (ret)
1477                         break;
1478
1479                 *retlen += thislen;
1480                 len -= thislen;
1481                 buf += thislen;
1482
1483                 ofs = 0;
1484                 chipnum++;
1485         }
1486         return ret;
1487 }
1488
1489 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1490                                      unsigned long adr, map_word datum, int mode)
1491 {
1492         struct cfi_private *cfi = map->fldrv_priv;
1493         map_word status, write_cmd;
1494         int ret=0;
1495
1496         adr += chip->start;
1497
1498         switch (mode) {
1499         case FL_WRITING:
1500                 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1501                 break;
1502         case FL_OTP_WRITE:
1503                 write_cmd = CMD(0xc0);
1504                 break;
1505         default:
1506                 return -EINVAL;
1507         }
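             /* Command codes used above: 0x40 is the standard word program
              * (0x41 on Performance-family parts), while 0xc0 programs the
              * OTP protection register instead of the main array. */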
1508
1509         mutex_lock(&chip->mutex);
1510         ret = get_chip(map, chip, adr, mode);
1511         if (ret) {
1512                 mutex_unlock(&chip->mutex);
1513                 return ret;
1514         }
1515
1516         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1517         ENABLE_VPP(map);
1518         xip_disable(map, chip, adr);
1519         map_write(map, write_cmd, adr);
1520         map_write(map, datum, adr);
1521         chip->state = mode;
1522
1523         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1524                                    adr, map_bankwidth(map),
1525                                    chip->word_write_time,
1526                                    chip->word_write_time_max);
1527         if (ret) {
1528                 xip_enable(map, chip, adr);
1529                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1530                 goto out;
1531         }
1532
1533         /* check for errors */
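             /* CMD(0x1a) tests SR.1 (0x02, block locked), SR.3 (0x08, VPP low)
              * and SR.4 (0x10, program failure) in a single read. */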
1534         status = map_read(map, adr);
1535         if (map_word_bitsset(map, status, CMD(0x1a))) {
1536                 unsigned long chipstatus = MERGESTATUS(status);
1537
1538                 /* reset status */
1539                 map_write(map, CMD(0x50), adr);
1540                 map_write(map, CMD(0x70), adr);
1541                 xip_enable(map, chip, adr);
1542
1543                 if (chipstatus & 0x02) {
1544                         ret = -EROFS;
1545                 } else if (chipstatus & 0x08) {
1546                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1547                         ret = -EIO;
1548                 } else {
1549                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1550                         ret = -EINVAL;
1551                 }
1552
1553                 goto out;
1554         }
1555
1556         xip_enable(map, chip, adr);
1557  out:   put_chip(map, chip, adr);
1558         mutex_unlock(&chip->mutex);
1559         return ret;
1560 }
1561
1562
1563 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1564 {
1565         struct map_info *map = mtd->priv;
1566         struct cfi_private *cfi = map->fldrv_priv;
1567         int ret = 0;
1568         int chipnum;
1569         unsigned long ofs;
1570
1571         *retlen = 0;
1572         if (!len)
1573                 return 0;
1574
1575         chipnum = to >> cfi->chipshift;
1576         ofs = to - (chipnum << cfi->chipshift);
1577
1578         /* If it's not bus-aligned, do the first byte write */
1579         if (ofs & (map_bankwidth(map)-1)) {
1580                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1581                 int gap = ofs - bus_ofs;
1582                 int n;
1583                 map_word datum;
1584
1585                 n = min_t(int, len, map_bankwidth(map)-gap);
1586                 datum = map_word_ff(map);
1587                 datum = map_word_load_partial(map, datum, buf, gap, n);
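                     /* Padding with 0xff is safe: programming can only clear bits,
                      * so byte lanes outside [gap, gap+n) keep their contents. */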
1588
1589                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1590                                                bus_ofs, datum, FL_WRITING);
1591                 if (ret)
1592                         return ret;
1593
1594                 len -= n;
1595                 ofs += n;
1596                 buf += n;
1597                 (*retlen) += n;
1598
1599                 if (ofs >> cfi->chipshift) {
1600                         chipnum++;
1601                         ofs = 0;
1602                         if (chipnum == cfi->numchips)
1603                                 return 0;
1604                 }
1605         }
1606
1607         while(len >= map_bankwidth(map)) {
1608                 map_word datum = map_word_load(map, buf);
1609
1610                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1611                                        ofs, datum, FL_WRITING);
1612                 if (ret)
1613                         return ret;
1614
1615                 ofs += map_bankwidth(map);
1616                 buf += map_bankwidth(map);
1617                 (*retlen) += map_bankwidth(map);
1618                 len -= map_bankwidth(map);
1619
1620                 if (ofs >> cfi->chipshift) {
1621                         chipnum++;
1622                         ofs = 0;
1623                         if (chipnum == cfi->numchips)
1624                                 return 0;
1625                 }
1626         }
1627
1628         if (len & (map_bankwidth(map)-1)) {
1629                 map_word datum;
1630
1631                 datum = map_word_ff(map);
1632                 datum = map_word_load_partial(map, datum, buf, 0, len);
1633
1634                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1635                                        ofs, datum, FL_WRITING);
1636                 if (ret)
1637                         return ret;
1638
1639                 (*retlen) += len;
1640         }
1641
1642         return 0;
1643 }
1644
1645
1646 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1647                                     unsigned long adr, const struct kvec **pvec,
1648                                     unsigned long *pvec_seek, int len)
1649 {
1650         struct cfi_private *cfi = map->fldrv_priv;
1651         map_word status, write_cmd, datum;
1652         unsigned long cmd_adr;
1653         int ret, wbufsize, word_gap, words;
1654         const struct kvec *vec;
1655         unsigned long vec_seek;
1656         unsigned long initial_adr;
1657         int initial_len = len;
1658
1659         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
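             /* e.g. MaxBufWriteSize == 5 (a 32-byte buffer per chip) with 2-way
              * interleave gives a 64-byte write block. */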
1660         adr += chip->start;
1661         initial_adr = adr;
1662         cmd_adr = adr & ~(wbufsize-1);
1663
1664         /* Let's determine this according to the interleave only once */
1665         write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1666
1667         mutex_lock(&chip->mutex);
1668         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1669         if (ret) {
1670                 mutex_unlock(&chip->mutex);
1671                 return ret;
1672         }
1673
1674         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1675         ENABLE_VPP(map);
1676         xip_disable(map, chip, cmd_adr);
1677
1678         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1679            [...], the device will not accept any more Write to Buffer commands".
1680            So we must check here and reset those bits if they're set. Otherwise
1681            we're just pissing in the wind */
1682         if (chip->state != FL_STATUS) {
1683                 map_write(map, CMD(0x70), cmd_adr);
1684                 chip->state = FL_STATUS;
1685         }
1686         status = map_read(map, cmd_adr);
1687         if (map_word_bitsset(map, status, CMD(0x30))) {
1688                 xip_enable(map, chip, cmd_adr);
1689                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1690                 xip_disable(map, chip, cmd_adr);
1691                 map_write(map, CMD(0x50), cmd_adr);
1692                 map_write(map, CMD(0x70), cmd_adr);
1693         }
1694
1695         chip->state = FL_WRITING_TO_BUFFER;
1696         map_write(map, write_cmd, cmd_adr);
1697         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1698         if (ret) {
1699                 /* Argh. Not ready for write to buffer */
1700                 map_word Xstatus = map_read(map, cmd_adr);
1701                 map_write(map, CMD(0x70), cmd_adr);
1702                 chip->state = FL_STATUS;
1703                 status = map_read(map, cmd_adr);
1704                 map_write(map, CMD(0x50), cmd_adr);
1705                 map_write(map, CMD(0x70), cmd_adr);
1706                 xip_enable(map, chip, cmd_adr);
1707                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1708                                 map->name, Xstatus.x[0], status.x[0]);
1709                 goto out;
1710         }
1711
1712         /* Figure out the number of words to write */
1713         word_gap = (-adr & (map_bankwidth(map)-1));
1714         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1715         if (!word_gap) {
1716                 words--;
1717         } else {
1718                 word_gap = map_bankwidth(map) - word_gap;
1719                 adr -= word_gap;
1720                 datum = map_word_ff(map);
1721         }
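             /* The write-to-buffer command takes a zero-based word count: for an
              * aligned write "words" is decremented to N-1; with a leading gap the
              * extra partial word at adr already makes "words" equal total - 1. */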
1722
1723         /* Write length of data to come */
1724         map_write(map, CMD(words), cmd_adr);
1725
1726         /* Write data */
1727         vec = *pvec;
1728         vec_seek = *pvec_seek;
1729         do {
1730                 int n = map_bankwidth(map) - word_gap;
1731                 if (n > vec->iov_len - vec_seek)
1732                         n = vec->iov_len - vec_seek;
1733                 if (n > len)
1734                         n = len;
1735
1736                 if (!word_gap && len < map_bankwidth(map))
1737                         datum = map_word_ff(map);
1738
1739                 datum = map_word_load_partial(map, datum,
1740                                               vec->iov_base + vec_seek,
1741                                               word_gap, n);
1742
1743                 len -= n;
1744                 word_gap += n;
1745                 if (!len || word_gap == map_bankwidth(map)) {
1746                         map_write(map, datum, adr);
1747                         adr += map_bankwidth(map);
1748                         word_gap = 0;
1749                 }
1750
1751                 vec_seek += n;
1752                 if (vec_seek == vec->iov_len) {
1753                         vec++;
1754                         vec_seek = 0;
1755                 }
1756         } while (len);
1757         *pvec = vec;
1758         *pvec_seek = vec_seek;
1759
1760         /* GO GO GO */
1761         map_write(map, CMD(0xd0), cmd_adr);
1762         chip->state = FL_WRITING;
1763
1764         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1765                                    initial_adr, initial_len,
1766                                    chip->buffer_write_time,
1767                                    chip->buffer_write_time_max);
1768         if (ret) {
1769                 map_write(map, CMD(0x70), cmd_adr);
1770                 chip->state = FL_STATUS;
1771                 xip_enable(map, chip, cmd_adr);
1772                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1773                 goto out;
1774         }
1775
1776         /* check for errors */
1777         status = map_read(map, cmd_adr);
1778         if (map_word_bitsset(map, status, CMD(0x1a))) {
1779                 unsigned long chipstatus = MERGESTATUS(status);
1780
1781                 /* reset status */
1782                 map_write(map, CMD(0x50), cmd_adr);
1783                 map_write(map, CMD(0x70), cmd_adr);
1784                 xip_enable(map, chip, cmd_adr);
1785
1786                 if (chipstatus & 0x02) {
1787                         ret = -EROFS;
1788                 } else if (chipstatus & 0x08) {
1789                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1790                         ret = -EIO;
1791                 } else {
1792                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1793                         ret = -EINVAL;
1794                 }
1795
1796                 goto out;
1797         }
1798
1799         xip_enable(map, chip, cmd_adr);
1800  out:   put_chip(map, chip, cmd_adr);
1801         mutex_unlock(&chip->mutex);
1802         return ret;
1803 }
1804
1805 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1806                                 unsigned long count, loff_t to, size_t *retlen)
1807 {
1808         struct map_info *map = mtd->priv;
1809         struct cfi_private *cfi = map->fldrv_priv;
1810         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1811         int ret = 0;
1812         int chipnum;
1813         unsigned long ofs, vec_seek, i;
1814         size_t len = 0;
1815
1816         for (i = 0; i < count; i++)
1817                 len += vecs[i].iov_len;
1818
1819         *retlen = 0;
1820         if (!len)
1821                 return 0;
1822
1823         chipnum = to >> cfi->chipshift;
1824         ofs = to - (chipnum << cfi->chipshift);
1825         vec_seek = 0;
1826
1827         do {
1828                 /* We must not cross write block boundaries */
1829                 int size = wbufsize - (ofs & (wbufsize-1));
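                     /* e.g. with a 32-byte write buffer and ofs == 0x1c, size is
                      * clamped to 4 so each do_write_buffer() call ends exactly
                      * on a buffer boundary. */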
1830
1831                 if (size > len)
1832                         size = len;
1833                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1834                                       ofs, &vecs, &vec_seek, size);
1835                 if (ret)
1836                         return ret;
1837
1838                 ofs += size;
1839                 (*retlen) += size;
1840                 len -= size;
1841
1842                 if (ofs >> cfi->chipshift) {
1843                         chipnum++;
1844                         ofs = 0;
1845                         if (chipnum == cfi->numchips)
1846                                 return 0;
1847                 }
1848
1849                 /* Be nice and reschedule with the chip in a usable state for other
1850                    processes. */
1851                 cond_resched();
1852
1853         } while (len);
1854
1855         return 0;
1856 }
1857
1858 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1859                                        size_t len, size_t *retlen, const u_char *buf)
1860 {
1861         struct kvec vec;
1862
1863         vec.iov_base = (void *) buf;
1864         vec.iov_len = len;
1865
1866         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1867 }
1868
1869 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1870                                       unsigned long adr, int len, void *thunk)
1871 {
1872         struct cfi_private *cfi = map->fldrv_priv;
1873         map_word status;
1874         int retries = 3;
1875         int ret;
1876
1877         adr += chip->start;
1878
1879  retry:
1880         mutex_lock(&chip->mutex);
1881         ret = get_chip(map, chip, adr, FL_ERASING);
1882         if (ret) {
1883                 mutex_unlock(&chip->mutex);
1884                 return ret;
1885         }
1886
1887         XIP_INVAL_CACHED_RANGE(map, adr, len);
1888         ENABLE_VPP(map);
1889         xip_disable(map, chip, adr);
1890
1891         /* Clear the status register first */
1892         map_write(map, CMD(0x50), adr);
1893
1894         /* Now erase */
1895         map_write(map, CMD(0x20), adr);
1896         map_write(map, CMD(0xD0), adr);
1897         chip->state = FL_ERASING;
1898         chip->erase_suspended = 0;
1899
1900         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1901                                    adr, len,
1902                                    chip->erase_time,
1903                                    chip->erase_time_max);
1904         if (ret) {
1905                 map_write(map, CMD(0x70), adr);
1906                 chip->state = FL_STATUS;
1907                 xip_enable(map, chip, adr);
1908                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1909                 goto out;
1910         }
1911
1912         /* We've broken this before. It doesn't hurt to be safe */
1913         map_write(map, CMD(0x70), adr);
1914         chip->state = FL_STATUS;
1915         status = map_read(map, adr);
1916
1917         /* check for errors */
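             /* CMD(0x3a) adds SR.5 (0x20, erase failure) to the lock/VPP/program
              * bits; SR.4 and SR.5 set together (0x30) indicate a bad command
              * sequence, handled first below. */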
1918         if (map_word_bitsset(map, status, CMD(0x3a))) {
1919                 unsigned long chipstatus = MERGESTATUS(status);
1920
1921                 /* Reset the error bits */
1922                 map_write(map, CMD(0x50), adr);
1923                 map_write(map, CMD(0x70), adr);
1924                 xip_enable(map, chip, adr);
1925
1926                 if ((chipstatus & 0x30) == 0x30) {
1927                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1928                         ret = -EINVAL;
1929                 } else if (chipstatus & 0x02) {
1930                         /* Protection bit set */
1931                         ret = -EROFS;
1932                 } else if (chipstatus & 0x08) {
1933                         /* Voltage */
1934                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1935                         ret = -EIO;
1936                 } else if (chipstatus & 0x20 && retries--) {
1937                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1938                         put_chip(map, chip, adr);
1939                         mutex_unlock(&chip->mutex);
1940                         goto retry;
1941                 } else {
1942                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1943                         ret = -EIO;
1944                 }
1945
1946                 goto out;
1947         }
1948
1949         xip_enable(map, chip, adr);
1950  out:   put_chip(map, chip, adr);
1951         mutex_unlock(&chip->mutex);
1952         return ret;
1953 }
1954
1955 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1956 {
1957         unsigned long ofs, len;
1958         int ret;
1959
1960         ofs = instr->addr;
1961         len = instr->len;
1962
1963         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1964         if (ret)
1965                 return ret;
1966
1967         instr->state = MTD_ERASE_DONE;
1968         mtd_erase_callback(instr);
1969
1970         return 0;
1971 }
1972
1973 static void cfi_intelext_sync (struct mtd_info *mtd)
1974 {
1975         struct map_info *map = mtd->priv;
1976         struct cfi_private *cfi = map->fldrv_priv;
1977         int i;
1978         struct flchip *chip;
1979         int ret = 0;
1980
1981         for (i=0; !ret && i<cfi->numchips; i++) {
1982                 chip = &cfi->chips[i];
1983
1984                 mutex_lock(&chip->mutex);
1985                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1986
1987                 if (!ret) {
1988                         chip->oldstate = chip->state;
1989                         chip->state = FL_SYNCING;
1990                         /* No need to wake_up() on this state change -
1991                          * as the whole point is that nobody can do anything
1992                          * with the chip now anyway.
1993                          */
1994                 }
1995                 mutex_unlock(&chip->mutex);
1996         }
1997
1998         /* Unlock the chips again */
1999
2000         for (i--; i >= 0; i--) {
2001                 chip = &cfi->chips[i];
2002
2003                 mutex_lock(&chip->mutex);
2004
2005                 if (chip->state == FL_SYNCING) {
2006                         chip->state = chip->oldstate;
2007                         chip->oldstate = FL_READY;
2008                         wake_up(&chip->wq);
2009                 }
2010                 mutex_unlock(&chip->mutex);
2011         }
2012 }
2013
2014 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2015                                                 struct flchip *chip,
2016                                                 unsigned long adr,
2017                                                 int len, void *thunk)
2018 {
2019         struct cfi_private *cfi = map->fldrv_priv;
2020         int status, ofs_factor = cfi->interleave * cfi->device_type;
2021
2022         adr += chip->start;
2023         xip_disable(map, chip, adr+(2*ofs_factor));
2024         map_write(map, CMD(0x90), adr+(2*ofs_factor));
2025         chip->state = FL_JEDEC_QUERY;
2026         status = cfi_read_query(map, adr+(2*ofs_factor));
2027         xip_enable(map, chip, 0);
2028         return status;
2029 }
2030
2031 #ifdef DEBUG_LOCK_BITS
2032 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2033                                                 struct flchip *chip,
2034                                                 unsigned long adr,
2035                                                 int len, void *thunk)
2036 {
2037         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2038                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2039         return 0;
2040 }
2041 #endif
2042
2043 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2044 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2045
2046 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2047                                        unsigned long adr, int len, void *thunk)
2048 {
2049         struct cfi_private *cfi = map->fldrv_priv;
2050         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2051         int udelay;
2052         int ret;
2053
2054         adr += chip->start;
2055
2056         mutex_lock(&chip->mutex);
2057         ret = get_chip(map, chip, adr, FL_LOCKING);
2058         if (ret) {
2059                 mutex_unlock(&chip->mutex);
2060                 return ret;
2061         }
2062
2063         ENABLE_VPP(map);
2064         xip_disable(map, chip, adr);
2065
2066         map_write(map, CMD(0x60), adr);
2067         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2068                 map_write(map, CMD(0x01), adr);
2069                 chip->state = FL_LOCKING;
2070         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2071                 map_write(map, CMD(0xD0), adr);
2072                 chip->state = FL_UNLOCKING;
2073         } else
2074                 BUG();
2075
2076         /*
2077          * If Instant Individual Block Locking supported then no need
2078          * to delay.
2079          */
2080         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
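             /* udelay is the expected completion time fed to WAIT_TIMEOUT;
              * udelay * 100 caps the total poll before -ETIME (a 500000 us
              * default applies when instant locking makes udelay 0). */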
2081
2082         ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
2083         if (ret) {
2084                 map_write(map, CMD(0x70), adr);
2085                 chip->state = FL_STATUS;
2086                 xip_enable(map, chip, adr);
2087                 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2088                 goto out;
2089         }
2090
2091         xip_enable(map, chip, adr);
2092 out:    put_chip(map, chip, adr);
2093         mutex_unlock(&chip->mutex);
2094         return ret;
2095 }
2096
2097 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2098 {
2099         int ret;
2100
2101 #ifdef DEBUG_LOCK_BITS
2102         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2103                __func__, (unsigned long long)ofs, (unsigned long long)len);
2104         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2105                 ofs, len, NULL);
2106 #endif
2107
2108         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2109                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2110
2111 #ifdef DEBUG_LOCK_BITS
2112         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2113                __func__, ret);
2114         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2115                 ofs, len, NULL);
2116 #endif
2117
2118         return ret;
2119 }
2120
2121 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2122 {
2123         int ret;
2124
2125 #ifdef DEBUG_LOCK_BITS
2126         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2127                __func__, (unsigned long long)ofs, (unsigned long long)len);
2128         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2129                 ofs, len, NULL);
2130 #endif
2131
2132         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2133                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2134
2135 #ifdef DEBUG_LOCK_BITS
2136         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2137                __func__, ret);
2138         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2139                 ofs, len, NULL);
2140 #endif
2141
2142         return ret;
2143 }
2144
2145 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2146                                   uint64_t len)
2147 {
2148         return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2149                                 ofs, len, NULL) ? 1 : 0;
2150 }
2151
2152 #ifdef CONFIG_MTD_OTP
2153
2154 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2155                         u_long data_offset, u_char *buf, u_int size,
2156                         u_long prot_offset, u_int groupno, u_int groupsize);
2157
2158 static int __xipram
2159 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2160             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2161 {
2162         struct cfi_private *cfi = map->fldrv_priv;
2163         int ret;
2164
2165         mutex_lock(&chip->mutex);
2166         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2167         if (ret) {
2168                 mutex_unlock(&chip->mutex);
2169                 return ret;
2170         }
2171
2172         /* let's ensure we're not reading back cached data from array mode */
2173         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2174
2175         xip_disable(map, chip, chip->start);
2176         if (chip->state != FL_JEDEC_QUERY) {
2177                 map_write(map, CMD(0x90), chip->start);
2178                 chip->state = FL_JEDEC_QUERY;
2179         }
2180         map_copy_from(map, buf, chip->start + offset, size);
2181         xip_enable(map, chip, chip->start);
2182
2183         /* then ensure we don't keep OTP data in the cache */
2184         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2185
2186         put_chip(map, chip, chip->start);
2187         mutex_unlock(&chip->mutex);
2188         return 0;
2189 }
2190
2191 static int
2192 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2193              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2194 {
2195         int ret;
2196
2197         while (size) {
2198                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2199                 int gap = offset - bus_ofs;
2200                 int n = min_t(int, size, map_bankwidth(map)-gap);
2201                 map_word datum = map_word_ff(map);
2202
2203                 datum = map_word_load_partial(map, datum, buf, gap, n);
2204                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2205                 if (ret)
2206                         return ret;
2207
2208                 offset += n;
2209                 buf += n;
2210                 size -= n;
2211         }
2212
2213         return 0;
2214 }
2215
2216 static int
2217 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2218             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2219 {
2220         struct cfi_private *cfi = map->fldrv_priv;
2221         map_word datum;
2222
2223         /* make sure area matches group boundaries */
2224         if (size != grpsz)
2225                 return -EXDEV;
2226
2227         datum = map_word_ff(map);
2228         datum = map_word_clr(map, datum, CMD(1 << grpno));
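             /* Clearing a group's bit in the protection lock register is itself
              * an OTP program operation, so locking here is permanent. */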
2229         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2230 }
2231
2232 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2233                                  size_t *retlen, u_char *buf,
2234                                  otp_op_t action, int user_regs)
2235 {
2236         struct map_info *map = mtd->priv;
2237         struct cfi_private *cfi = map->fldrv_priv;
2238         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2239         struct flchip *chip;
2240         struct cfi_intelext_otpinfo *otp;
2241         u_long devsize, reg_prot_offset, data_offset;
2242         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2243         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2244         int ret;
2245
2246         *retlen = 0;
2247
2248         /* Check that we actually have some OTP registers */
2249         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2250                 return -ENODATA;
2251
2252         /* we need real chips here not virtual ones */
2253         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2254         chip_step = devsize >> cfi->chipshift;
2255         chip_num = 0;
2256
2257         /* Some chips have OTP located in the _top_ partition only.
2258            For example: Intel 28F256L18T (T means top-parameter device) */
2259         if (cfi->mfr == CFI_MFR_INTEL) {
2260                 switch (cfi->id) {
2261                 case 0x880b:
2262                 case 0x880c:
2263                 case 0x880d:
2264                         chip_num = chip_step - 1;
2265                 }
2266         }
2267
2268         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2269                 chip = &cfi->chips[chip_num];
2270                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2271
2272                 /* first OTP region */
2273                 field = 0;
2274                 reg_prot_offset = extp->ProtRegAddr;
2275                 reg_fact_groups = 1;
2276                 reg_fact_size = 1 << extp->FactProtRegSize;
2277                 reg_user_groups = 1;
2278                 reg_user_size = 1 << extp->UserProtRegSize;
2279
2280                 while (len > 0) {
2281                         /* flash geometry fixup */
2282                         data_offset = reg_prot_offset + 1;
2283                         data_offset *= cfi->interleave * cfi->device_type;
2284                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2285                         reg_fact_size *= cfi->interleave;
2286                         reg_user_size *= cfi->interleave;
2287
2288                         if (user_regs) {
2289                                 groups = reg_user_groups;
2290                                 groupsize = reg_user_size;
2291                                 /* skip over factory reg area */
2292                                 groupno = reg_fact_groups;
2293                                 data_offset += reg_fact_groups * reg_fact_size;
2294                         } else {
2295                                 groups = reg_fact_groups;
2296                                 groupsize = reg_fact_size;
2297                                 groupno = 0;
2298                         }
2299
2300                         while (len > 0 && groups > 0) {
2301                                 if (!action) {
2302                                         /*
2303                                          * Special case: if action is NULL
2304                                          * we fill buf with otp_info records.
2305                                          */
2306                                         struct otp_info *otpinfo;
2307                                         map_word lockword;
2308                                         len -= sizeof(struct otp_info);
2309                                         if (len <= 0)
2310                                                 return -ENOSPC;
2311                                         ret = do_otp_read(map, chip,
2312                                                           reg_prot_offset,
2313                                                           (u_char *)&lockword,
2314                                                           map_bankwidth(map),
2315                                                           0, 0,  0);
2316                                         if (ret)
2317                                                 return ret;
2318                                         otpinfo = (struct otp_info *)buf;
2319                                         otpinfo->start = from;
2320                                         otpinfo->length = groupsize;
2321                                         otpinfo->locked =
2322                                            !map_word_bitsset(map, lockword,
2323                                                              CMD(1 << groupno));
2324                                         from += groupsize;
2325                                         buf += sizeof(*otpinfo);
2326                                         *retlen += sizeof(*otpinfo);
2327                                 } else if (from >= groupsize) {
2328                                         from -= groupsize;
2329                                         data_offset += groupsize;
2330                                 } else {
2331                                         int size = groupsize;
2332                                         data_offset += from;
2333                                         size -= from;
2334                                         from = 0;
2335                                         if (size > len)
2336                                                 size = len;
2337                                         ret = action(map, chip, data_offset,
2338                                                      buf, size, reg_prot_offset,
2339                                                      groupno, groupsize);
2340                                         if (ret < 0)
2341                                                 return ret;
2342                                         buf += size;
2343                                         len -= size;
2344                                         *retlen += size;
2345                                         data_offset += size;
2346                                 }
2347                                 groupno++;
2348                                 groups--;
2349                         }
2350
2351                         /* next OTP region */
2352                         if (++field == extp->NumProtectionFields)
2353                                 break;
2354                         reg_prot_offset = otp->ProtRegAddr;
2355                         reg_fact_groups = otp->FactGroups;
2356                         reg_fact_size = 1 << otp->FactProtRegSize;
2357                         reg_user_groups = otp->UserGroups;
2358                         reg_user_size = 1 << otp->UserProtRegSize;
2359                         otp++;
2360                 }
2361         }
2362
2363         return 0;
2364 }
2365
2366 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2367                                            size_t len, size_t *retlen,
2368                                             u_char *buf)
2369 {
2370         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2371                                      buf, do_otp_read, 0);
2372 }
2373
2374 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2375                                            size_t len, size_t *retlen,
2376                                             u_char *buf)
2377 {
2378         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2379                                      buf, do_otp_read, 1);
2380 }
2381
2382 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2383                                             size_t len, size_t *retlen,
2384                                              u_char *buf)
2385 {
2386         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2387                                      buf, do_otp_write, 1);
2388 }
2389
2390 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2391                                            loff_t from, size_t len)
2392 {
2393         size_t retlen;
2394         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2395                                      NULL, do_otp_lock, 1);
2396 }
2397
2398 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2399                                            struct otp_info *buf, size_t len)
2400 {
2401         size_t retlen;
2402         int ret;
2403
2404         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2405         return ret ? : retlen;
2406 }
2407
2408 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2409                                            struct otp_info *buf, size_t len)
2410 {
2411         size_t retlen;
2412         int ret;
2413
2414         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2415         return ret ? : retlen;
2416 }
2417
2418 #endif
2419
2420 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2421 {
2422         struct mtd_erase_region_info *region;
2423         int block, status, i;
2424         unsigned long adr;
2425         size_t len;
2426
2427         for (i = 0; i < mtd->numeraseregions; i++) {
2428                 region = &mtd->eraseregions[i];
2429                 if (!region->lockmap)
2430                         continue;
2431
2432                 for (block = 0; block < region->numblocks; block++){
2433                         len = region->erasesize;
2434                         adr = region->offset + block * len;
2435
2436                         status = cfi_varsize_frob(mtd,
2437                                         do_getlockstatus_oneblock, adr, len, NULL);
2438                         if (status)
2439                                 set_bit(block, region->lockmap);
2440                         else
2441                                 clear_bit(block, region->lockmap);
2442                 }
2443         }
2444 }
2445
2446 static int cfi_intelext_suspend(struct mtd_info *mtd)
2447 {
2448         struct map_info *map = mtd->priv;
2449         struct cfi_private *cfi = map->fldrv_priv;
2450         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2451         int i;
2452         struct flchip *chip;
2453         int ret = 0;
2454
2455         if ((mtd->flags & MTD_POWERUP_LOCK)
2456             && extp && (extp->FeatureSupport & (1 << 5)))
2457                 cfi_intelext_save_locks(mtd);
2458
2459         for (i=0; !ret && i<cfi->numchips; i++) {
2460                 chip = &cfi->chips[i];
2461
2462                 mutex_lock(&chip->mutex);
2463
2464                 switch (chip->state) {
2465                 case FL_READY:
2466                 case FL_STATUS:
2467                 case FL_CFI_QUERY:
2468                 case FL_JEDEC_QUERY:
2469                         if (chip->oldstate == FL_READY) {
2470                                 /* place the chip in a known state before suspend */
2471                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2472                                 chip->oldstate = chip->state;
2473                                 chip->state = FL_PM_SUSPENDED;
2474                                 /* No need to wake_up() on this state change -
2475                                  * as the whole point is that nobody can do anything
2476                                  * with the chip now anyway.
2477                                  */
2478                         } else {
2479                                 /* There seems to be an operation pending. We must wait for it. */
2480                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2481                                 ret = -EAGAIN;
2482                         }
2483                         break;
2484                 default:
2485                         /* Should we actually wait? Once upon a time these routines weren't
2486                            allowed to. Or should we return -EAGAIN, because the upper layers
2487                            ought to have already shut down anything which was using the device
2488                            anyway? The latter for now. */
2489                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2490                         ret = -EAGAIN; /* fall through to FL_PM_SUSPENDED */
2491                 case FL_PM_SUSPENDED:
2492                         break;
2493                 }
2494                 mutex_unlock(&chip->mutex);
2495         }
2496
2497         /* Unlock the chips again */
2498
2499         if (ret) {
2500                 for (i--; i >= 0; i--) {
2501                         chip = &cfi->chips[i];
2502
2503                         mutex_lock(&chip->mutex);
2504
2505                         if (chip->state == FL_PM_SUSPENDED) {
2506                                 /* No need to force it into a known state here,
2507                                    because we're returning failure, and it didn't
2508                                    get power cycled */
2509                                 chip->state = chip->oldstate;
2510                                 chip->oldstate = FL_READY;
2511                                 wake_up(&chip->wq);
2512                         }
2513                         mutex_unlock(&chip->mutex);
2514                 }
2515         }
2516
2517         return ret;
2518 }
2519
2520 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2521 {
2522         struct mtd_erase_region_info *region;
2523         int block, i;
2524         unsigned long adr;
2525         size_t len;
2526
2527         for (i = 0; i < mtd->numeraseregions; i++) {
2528                 region = &mtd->eraseregions[i];
2529                 if (!region->lockmap)
2530                         continue;
2531
2532                 for (block = 0; block < region->numblocks; block++) {
2533                         len = region->erasesize;
2534                         adr = region->offset + block * len;
2535
2536                         if (!test_bit(block, region->lockmap))
2537                                 cfi_intelext_unlock(mtd, adr, len);
2538                 }
2539         }
2540 }
2541
2542 static void cfi_intelext_resume(struct mtd_info *mtd)
2543 {
2544         struct map_info *map = mtd->priv;
2545         struct cfi_private *cfi = map->fldrv_priv;
2546         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2547         int i;
2548         struct flchip *chip;
2549
2550         for (i=0; i<cfi->numchips; i++) {
2551
2552                 chip = &cfi->chips[i];
2553
2554                 mutex_lock(&chip->mutex);
2555
2556                 /* Go to known state. Chip may have been power cycled */
2557                 if (chip->state == FL_PM_SUSPENDED) {
2558                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2559                         chip->oldstate = chip->state = FL_READY;
2560                         wake_up(&chip->wq);
2561                 }
2562
2563                 mutex_unlock(&chip->mutex);
2564         }
2565
2566         if ((mtd->flags & MTD_POWERUP_LOCK)
2567             && extp && (extp->FeatureSupport & (1 << 5)))
2568                 cfi_intelext_restore_locks(mtd);
2569 }
2570
2571 static int cfi_intelext_reset(struct mtd_info *mtd)
2572 {
2573         struct map_info *map = mtd->priv;
2574         struct cfi_private *cfi = map->fldrv_priv;
2575         int i, ret;
2576
2577         for (i=0; i < cfi->numchips; i++) {
2578                 struct flchip *chip = &cfi->chips[i];
2579
2580                 /* force the completion of any ongoing operation
2581                    and switch to array mode so any bootloader in
2582                    flash is accessible for soft reboot. */
2583                 mutex_lock(&chip->mutex);
2584                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2585                 if (!ret) {
2586                         map_write(map, CMD(0xff), chip->start);
2587                         chip->state = FL_SHUTDOWN;
2588                         put_chip(map, chip, chip->start);
2589                 }
2590                 mutex_unlock(&chip->mutex);
2591         }
2592
2593         return 0;
2594 }
2595
2596 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2597                                void *v)
2598 {
2599         struct mtd_info *mtd;
2600
2601         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2602         cfi_intelext_reset(mtd);
2603         return NOTIFY_DONE;
2604 }
2605
2606 static void cfi_intelext_destroy(struct mtd_info *mtd)
2607 {
2608         struct map_info *map = mtd->priv;
2609         struct cfi_private *cfi = map->fldrv_priv;
2610         struct mtd_erase_region_info *region;
2611         int i;
2612         cfi_intelext_reset(mtd);
2613         unregister_reboot_notifier(&mtd->reboot_notifier);
2614         kfree(cfi->cmdset_priv);
2615         kfree(cfi->cfiq);
2616         kfree(cfi->chips[0].priv);
2617         kfree(cfi);
2618         for (i = 0; i < mtd->numeraseregions; i++) {
2619                 region = &mtd->eraseregions[i];
2620                 kfree(region->lockmap); /* kfree(NULL) is a no-op */
2622         }
2623         kfree(mtd->eraseregions);
2624 }
2625
2626 MODULE_LICENSE("GPL");
2627 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2628 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2629 MODULE_ALIAS("cfi_cmdset_0003");
2630 MODULE_ALIAS("cfi_cmdset_0200");