/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0
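/*
 * Note: with FORCE_WORD_WRITE set to 1, the "#if !FORCE_WORD_WRITE"
 * in cfi_fixup_table below compiles out fixup_use_write_buffers(), so
 * the driver never switches to the buffered write method and programs
 * one word at a time.
 */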

/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
                                  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"
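/* fwh_lock.h provides fixup_use_fwh_lock() and the firmware-hub
   specific lock/unlock handlers used by jedec_fixup_table below for
   the FWH parts (I82802AB/AC, M50LPW080, M50FLW080A/B). */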



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

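/* Uncommenting DEBUG_CFI_FEATURES above enables cfi_tell_features()
   below, which dumps the chip's extended query table at probe time. */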
#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n");
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
        { 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It looks like the device IDs are as
         * well.  This table picks up all the cases where we
         * know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
        { 0, 0, NULL }
};
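
/*
 * These tables are consumed by cfi_fixup() (see drivers/mtd/chips/
 * cfi_util.c), which walks the entries up to the NULL terminator and
 * calls every fixup whose manufacturer/device ID matches the probed
 * chip, with CFI_MFR_ANY and CFI_ID_ANY acting as wildcards.
 * Roughly (a sketch, not the exact cfi_util.c code):
 *
 *      for (f = fixups; f->fixup; f++)
 *              if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *                  (f->id == CFI_ID_ANY || f->id == cfi->id))
 *                      f->fixup(mtd);
 */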

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

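/*
 * Read and sanity-check the Intel/Sharp extended query table.  The
 * amount of version-dependent data following the fixed header isn't
 * known until the header itself has been parsed, so this starts with
 * sizeof(*extp), accumulates the required extra[] size as it goes,
 * and re-reads the whole table with a bigger buffer ("goto again")
 * whenever it runs short, giving up beyond 4096 bytes.
 */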
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->is_locked = cfi_intelext_is_locked;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i = 0; i < cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
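
/*
 * Typical entry path (a sketch): a map driver calls
 *
 *      mtd = do_map_probe("cfi_probe", map);
 *
 * and the generic CFI probe code dispatches on the primary vendor
 * command set ID found in the query data, ending up in
 * cfi_cmdset_0001() for ID 0x0001 and, via the aliases above, for
 * 0x0003 and 0x0200 as well.
 */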

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i, j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j = 0; j < cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i = 0; i < mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i, (unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk(KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        mutex_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                mutex_init(&chip->mutex);
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}
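
/*
 * Worked example with hypothetical numbers: a 64 MiB chip
 * (cfi->chipshift = 26) reporting 16 identical hardware partitions
 * gives partshift = 26 - __ffs(16) = 22, i.e. sixteen 4 MiB virtual
 * chips per physical chip, each with its own flchip structure sharing
 * one flchip_shared arbiter.
 */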

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
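
/*
 * Command and status codes used below (Intel/Sharp command set):
 * 0x70 = Read Status Register, 0xff = Read Array, 0xb0 = Program/Erase
 * Suspend, 0xd0 = Resume (also serves as the erase/buffer confirm).
 * In the status register, bit 7 (0x80) means the write state machine
 * is ready; bit 0 (0x01, status_PWS below) appears to act as a
 * partition-status bit on hardware-partitioned chips.
 */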
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.
                                 * Make sure we're in 'read status' mode if it had finished */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can get the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own chip if it is already
                         * in FL_SYNCING state. Put contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch (chip->oldstate) {
        case FL_ERASING:
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time_max;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        mutex_unlock(&chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        mutex_lock(&chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                mutex_unlock(&chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                mutex_lock(&chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}
1186
1187 /*
1188  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1189  * the flash is actively programming or erasing since we have to poll for
1190  * the operation to complete anyway.  We can't do that in a generic way with
1191  * a XIP setup so do it before the actual flash operation in this case
1192  * and stub it out from INVAL_CACHE_AND_WAIT.
1193  */
1194 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1195         INVALIDATE_CACHED_RANGE(map, from, size)
1196
1197 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1198         xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1199
1200 #else
1201
1202 #define xip_disable(map, chip, adr)
1203 #define xip_enable(map, chip, adr)
1204 #define XIP_INVAL_CACHED_RANGE(x...)
1205 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1206
1207 static int inval_cache_and_wait_for_operation(
1208                 struct map_info *map, struct flchip *chip,
1209                 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1210                 unsigned int chip_op_time, unsigned int chip_op_time_max)
1211 {
1212         struct cfi_private *cfi = map->fldrv_priv;
1213         map_word status, status_OK = CMD(0x80);
1214         int chip_state = chip->state;
1215         unsigned int timeo, sleep_time, reset_timeo;
1216
1217         mutex_unlock(&chip->mutex);
1218         if (inval_len)
1219                 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1220         mutex_lock(&chip->mutex);
1221
1222         timeo = chip_op_time_max;
1223         if (!timeo)
1224                 timeo = 500000;
1225         reset_timeo = timeo;
1226         sleep_time = chip_op_time / 2;
1227
1228         for (;;) {
1229                 if (chip->state != chip_state) {
1230                         /* Someone's suspended the operation: sleep */
1231                         DECLARE_WAITQUEUE(wait, current);
1232                         set_current_state(TASK_UNINTERRUPTIBLE);
1233                         add_wait_queue(&chip->wq, &wait);
1234                         mutex_unlock(&chip->mutex);
1235                         schedule();
1236                         remove_wait_queue(&chip->wq, &wait);
1237                         mutex_lock(&chip->mutex);
1238                         continue;
1239                 }
1240
1241                 status = map_read(map, cmd_adr);
1242                 if (map_word_andequal(map, status, status_OK, status_OK))
1243                         break;
1244
1245                 if (chip->erase_suspended && chip_state == FL_ERASING)  {
1246                         /* Erase suspend occurred while we slept: reset timeout */
1247                         timeo = reset_timeo;
1248                         chip->erase_suspended = 0;
1249                 }
1250                 if (chip->write_suspended && chip_state == FL_WRITING)  {
1251                         /* Write suspend occurred while we slept: reset timeout */
1252                         timeo = reset_timeo;
1253                         chip->write_suspended = 0;
1254                 }
1255                 if (!timeo) {
1256                         map_write(map, CMD(0x70), cmd_adr);
1257                         chip->state = FL_STATUS;
1258                         return -ETIME;
1259                 }
1260
1261                 /* OK, still waiting. Drop the lock, wait a while and retry. */
1262                 mutex_unlock(&chip->mutex);
1263                 if (sleep_time >= 1000000/HZ) {
1264                         /*
1265                          * Half of the normal delay still remaining
1266                          * can be performed with a sleeping delay instead
1267                          * of busy waiting.
1268                          */
1269                         msleep(sleep_time/1000);
1270                         timeo -= sleep_time;
1271                         sleep_time = 1000000/HZ;
1272                 } else {
1273                         udelay(1);
1274                         cond_resched();
1275                         timeo--;
1276                 }
1277                 mutex_lock(&chip->mutex);
1278         }
1279
1280         /* Done and happy. */
1281         chip->state = FL_STATUS;
1282         return 0;
1283 }
1284
1285 #endif
1286
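/*
 * Wait for the chip to become ready again without invalidating
 * anything: used for steps such as buffer availability or lock bit
 * updates that do not modify array data.
 */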
1287 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1288         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max)
1289
1290
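/*
 * MTD point support: switch the chip to array (read) mode so callers
 * can access the mapped flash directly.  ref_point_counter counts
 * nested point calls; the chip leaves FL_POINT only when the count
 * drops back to zero in cfi_intelext_unpoint().
 */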
1291 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1292 {
1293         unsigned long cmd_addr;
1294         struct cfi_private *cfi = map->fldrv_priv;
1295         int ret = 0;
1296
1297         adr += chip->start;
1298
1299         /* Ensure cmd read/writes are aligned. */
1300         cmd_addr = adr & ~(map_bankwidth(map)-1);
1301
1302         mutex_lock(&chip->mutex);
1303
1304         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1305
1306         if (!ret) {
1307                 if (chip->state != FL_POINT && chip->state != FL_READY)
1308                         map_write(map, CMD(0xff), cmd_addr);
1309
1310                 chip->state = FL_POINT;
1311                 chip->ref_point_counter++;
1312         }
1313         mutex_unlock(&chip->mutex);
1314
1315         return ret;
1316 }
1317
1318 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1319                 size_t *retlen, void **virt, resource_size_t *phys)
1320 {
1321         struct map_info *map = mtd->priv;
1322         struct cfi_private *cfi = map->fldrv_priv;
1323         unsigned long ofs, last_end = 0;
1324         int chipnum;
1325         int ret = 0;
1326
1327         if (!map->virt || (from + len > mtd->size))
1328                 return -EINVAL;
1329
1330         /* Now lock the chip(s) to POINT state */
1331
1332         /* ofs: offset within the first chip that the first read should start */
1333         chipnum = (from >> cfi->chipshift);
1334         ofs = from - (chipnum << cfi->chipshift);
1335
1336         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1337         *retlen = 0;
1338         if (phys)
1339                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1340
1341         while (len) {
1342                 unsigned long thislen;
1343
1344                 if (chipnum >= cfi->numchips)
1345                         break;
1346
1347                 /* We cannot point across chips that are virtually disjoint */
1348                 if (!last_end)
1349                         last_end = cfi->chips[chipnum].start;
1350                 else if (cfi->chips[chipnum].start != last_end)
1351                         break;
1352
1353                 if ((len + ofs - 1) >> cfi->chipshift)
1354                         thislen = (1 << cfi->chipshift) - ofs;
1355                 else
1356                         thislen = len;
1357
1358                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1359                 if (ret)
1360                         break;
1361
1362                 *retlen += thislen;
1363                 len -= thislen;
1364
1365                 ofs = 0;
1366                 last_end += 1 << cfi->chipshift;
1367                 chipnum++;
1368         }
1369         return 0;
1370 }
1371
1372 static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1373 {
1374         struct map_info *map = mtd->priv;
1375         struct cfi_private *cfi = map->fldrv_priv;
1376         unsigned long ofs;
1377         int chipnum;
1378
1379         /* Now unlock the chip(s) POINT state */
1380
1381         /* ofs: offset within the first chip that the first read should start */
1382         chipnum = (from >> cfi->chipshift);
1383         ofs = from - (chipnum << cfi->chipshift);
1384
1385         while (len) {
1386                 unsigned long thislen;
1387                 struct flchip *chip;
1388
1389                 if (chipnum >= cfi->numchips)
1390                         break;
1391                 chip = &cfi->chips[chipnum];
1392
1393                 if ((len + ofs - 1) >> cfi->chipshift)
1394                         thislen = (1 << cfi->chipshift) - ofs;
1395                 else
1396                         thislen = len;
1397
1398                 mutex_lock(&chip->mutex);
1399                 if (chip->state == FL_POINT) {
1400                         chip->ref_point_counter--;
1401                         if (chip->ref_point_counter == 0)
1402                                 chip->state = FL_READY;
1403                 } else
1404                         printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */
1405
1406                 put_chip(map, chip, chip->start);
1407                 mutex_unlock(&chip->mutex);
1408
1409                 len -= thislen;
1410                 ofs = 0;
1411                 chipnum++;
1412         }
1413 }
1414
1415 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1416 {
1417         unsigned long cmd_addr;
1418         struct cfi_private *cfi = map->fldrv_priv;
1419         int ret;
1420
1421         adr += chip->start;
1422
1423         /* Ensure cmd read/writes are aligned. */
1424         cmd_addr = adr & ~(map_bankwidth(map)-1);
1425
1426         mutex_lock(&chip->mutex);
1427         ret = get_chip(map, chip, cmd_addr, FL_READY);
1428         if (ret) {
1429                 mutex_unlock(&chip->mutex);
1430                 return ret;
1431         }
1432
1433         if (chip->state != FL_POINT && chip->state != FL_READY) {
1434                 map_write(map, CMD(0xff), cmd_addr);
1435
1436                 chip->state = FL_READY;
1437         }
1438
1439         map_copy_from(map, buf, adr, len);
1440
1441         put_chip(map, chip, cmd_addr);
1442
1443         mutex_unlock(&chip->mutex);
1444         return 0;
1445 }
1446
1447 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1448 {
1449         struct map_info *map = mtd->priv;
1450         struct cfi_private *cfi = map->fldrv_priv;
1451         unsigned long ofs;
1452         int chipnum;
1453         int ret = 0;
1454
1455         /* ofs: offset within the first chip that the first read should start */
1456         chipnum = (from >> cfi->chipshift);
1457         ofs = from - (chipnum << cfi->chipshift);
1458
1459         *retlen = 0;
1460
1461         while (len) {
1462                 unsigned long thislen;
1463
1464                 if (chipnum >= cfi->numchips)
1465                         break;
1466
1467                 if ((len + ofs - 1) >> cfi->chipshift)
1468                         thislen = (1 << cfi->chipshift) - ofs;
1469                 else
1470                         thislen = len;
1471
1472                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1473                 if (ret)
1474                         break;
1475
1476                 *retlen += thislen;
1477                 len -= thislen;
1478                 buf += thislen;
1479
1480                 ofs = 0;
1481                 chipnum++;
1482         }
1483         return ret;
1484 }
1485
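/*
 * Program a single aligned word.  The setup command is 0x40 (0x41 on
 * Performance parts) for an array write and 0xC0 for an OTP register
 * write, followed by the data itself.  Failures are decoded from the
 * status register; per the Intel SR layout, SR.1 means block locked,
 * SR.3 bad VPP and SR.4 program failure, hence the 0x1a error mask
 * tested below.
 */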
1486 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1487                                      unsigned long adr, map_word datum, int mode)
1488 {
1489         struct cfi_private *cfi = map->fldrv_priv;
1490         map_word status, write_cmd;
1491         int ret = 0;
1492
1493         adr += chip->start;
1494
1495         switch (mode) {
1496         case FL_WRITING:
1497                 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1498                 break;
1499         case FL_OTP_WRITE:
1500                 write_cmd = CMD(0xc0);
1501                 break;
1502         default:
1503                 return -EINVAL;
1504         }
1505
1506         mutex_lock(&chip->mutex);
1507         ret = get_chip(map, chip, adr, mode);
1508         if (ret) {
1509                 mutex_unlock(&chip->mutex);
1510                 return ret;
1511         }
1512
1513         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1514         ENABLE_VPP(map);
1515         xip_disable(map, chip, adr);
1516         map_write(map, write_cmd, adr);
1517         map_write(map, datum, adr);
1518         chip->state = mode;
1519
1520         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1521                                    adr, map_bankwidth(map),
1522                                    chip->word_write_time,
1523                                    chip->word_write_time_max);
1524         if (ret) {
1525                 xip_enable(map, chip, adr);
1526                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1527                 goto out;
1528         }
1529
1530         /* check for errors */
1531         status = map_read(map, adr);
1532         if (map_word_bitsset(map, status, CMD(0x1a))) {
1533                 unsigned long chipstatus = MERGESTATUS(status);
1534
1535                 /* reset status */
1536                 map_write(map, CMD(0x50), adr);
1537                 map_write(map, CMD(0x70), adr);
1538                 xip_enable(map, chip, adr);
1539
1540                 if (chipstatus & 0x02) {
1541                         ret = -EROFS;
1542                 } else if (chipstatus & 0x08) {
1543                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1544                         ret = -EIO;
1545                 } else {
1546                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1547                         ret = -EINVAL;
1548                 }
1549
1550                 goto out;
1551         }
1552
1553         xip_enable(map, chip, adr);
1554  out:   put_chip(map, chip, adr);
1555         mutex_unlock(&chip->mutex);
1556         return ret;
1557 }
1558
1559
1560 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1561 {
1562         struct map_info *map = mtd->priv;
1563         struct cfi_private *cfi = map->fldrv_priv;
1564         int ret = 0;
1565         int chipnum;
1566         unsigned long ofs;
1567
1568         *retlen = 0;
1569         if (!len)
1570                 return 0;
1571
1572         chipnum = to >> cfi->chipshift;
1573         ofs = to - (chipnum << cfi->chipshift);
1574
1575         /* If it's not bus-aligned, do the first byte write */
1576         if (ofs & (map_bankwidth(map)-1)) {
1577                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1578                 int gap = ofs - bus_ofs;
1579                 int n;
1580                 map_word datum;
1581
1582                 n = min_t(int, len, map_bankwidth(map)-gap);
1583                 datum = map_word_ff(map);
1584                 datum = map_word_load_partial(map, datum, buf, gap, n);
1585
1586                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1587                                                bus_ofs, datum, FL_WRITING);
1588                 if (ret)
1589                         return ret;
1590
1591                 len -= n;
1592                 ofs += n;
1593                 buf += n;
1594                 (*retlen) += n;
1595
1596                 if (ofs >> cfi->chipshift) {
1597                         chipnum++;
1598                         ofs = 0;
1599                         if (chipnum == cfi->numchips)
1600                                 return 0;
1601                 }
1602         }
1603
1604         while (len >= map_bankwidth(map)) {
1605                 map_word datum = map_word_load(map, buf);
1606
1607                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1608                                        ofs, datum, FL_WRITING);
1609                 if (ret)
1610                         return ret;
1611
1612                 ofs += map_bankwidth(map);
1613                 buf += map_bankwidth(map);
1614                 (*retlen) += map_bankwidth(map);
1615                 len -= map_bankwidth(map);
1616
1617                 if (ofs >> cfi->chipshift) {
1618                         chipnum++;
1619                         ofs = 0;
1620                         if (chipnum == cfi->numchips)
1621                                 return 0;
1622                 }
1623         }
1624
1625         if (len & (map_bankwidth(map)-1)) {
1626                 map_word datum;
1627
1628                 datum = map_word_ff(map);
1629                 datum = map_word_load_partial(map, datum, buf, 0, len);
1630
1631                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1632                                        ofs, datum, FL_WRITING);
1633                 if (ret)
1634                         return ret;
1635
1636                 (*retlen) += len;
1637         }
1638
1639         return 0;
1640 }
1641
1642
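/*
 * Buffered write: issue 0xE8 (0xE9 on Performance parts), wait for
 * the write buffer to become available, program the word count, the
 * data words, then the 0xD0 confirm.  The count written via
 * CMD(words) is N-1, where N is the number of bus words transferred;
 * unaligned leading/trailing bytes are padded with 0xFF, which leaves
 * the array contents untouched.  For example, a 2 x 16-bit interleave
 * with MaxBufWriteSize == 5 gives wbufsize = 2 << 5 = 64 bytes.
 */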
1643 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1644                                     unsigned long adr, const struct kvec **pvec,
1645                                     unsigned long *pvec_seek, int len)
1646 {
1647         struct cfi_private *cfi = map->fldrv_priv;
1648         map_word status, write_cmd, datum;
1649         unsigned long cmd_adr;
1650         int ret, wbufsize, word_gap, words;
1651         const struct kvec *vec;
1652         unsigned long vec_seek;
1653         unsigned long initial_adr;
1654         int initial_len = len;
1655
1656         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1657         adr += chip->start;
1658         initial_adr = adr;
1659         cmd_adr = adr & ~(wbufsize-1);
1660
1661         /* Let's determine this according to the interleave only once */
1662         write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1663
1664         mutex_lock(&chip->mutex);
1665         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1666         if (ret) {
1667                 mutex_unlock(&chip->mutex);
1668                 return ret;
1669         }
1670
1671         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1672         ENABLE_VPP(map);
1673         xip_disable(map, chip, cmd_adr);
1674
1675         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1676            [...], the device will not accept any more Write to Buffer commands".
1677            So we must check here and reset those bits if they're set. Otherwise
1678            we're just pissing in the wind */
1679         if (chip->state != FL_STATUS) {
1680                 map_write(map, CMD(0x70), cmd_adr);
1681                 chip->state = FL_STATUS;
1682         }
1683         status = map_read(map, cmd_adr);
1684         if (map_word_bitsset(map, status, CMD(0x30))) {
1685                 xip_enable(map, chip, cmd_adr);
1686                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1687                 xip_disable(map, chip, cmd_adr);
1688                 map_write(map, CMD(0x50), cmd_adr);
1689                 map_write(map, CMD(0x70), cmd_adr);
1690         }
1691
1692         chip->state = FL_WRITING_TO_BUFFER;
1693         map_write(map, write_cmd, cmd_adr);
1694         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1695         if (ret) {
1696                 /* Argh. Not ready for write to buffer */
1697                 map_word Xstatus = map_read(map, cmd_adr);
1698                 map_write(map, CMD(0x70), cmd_adr);
1699                 chip->state = FL_STATUS;
1700                 status = map_read(map, cmd_adr);
1701                 map_write(map, CMD(0x50), cmd_adr);
1702                 map_write(map, CMD(0x70), cmd_adr);
1703                 xip_enable(map, chip, cmd_adr);
1704                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1705                                 map->name, Xstatus.x[0], status.x[0]);
1706                 goto out;
1707         }
1708
1709         /* Figure out the number of words to write */
1710         word_gap = (-adr & (map_bankwidth(map)-1));
1711         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1712         if (!word_gap) {
1713                 words--;
1714         } else {
1715                 word_gap = map_bankwidth(map) - word_gap;
1716                 adr -= word_gap;
1717                 datum = map_word_ff(map);
1718         }
1719
1720         /* Write length of data to come */
1721         map_write(map, CMD(words), cmd_adr);
1722
1723         /* Write data */
1724         vec = *pvec;
1725         vec_seek = *pvec_seek;
1726         do {
1727                 int n = map_bankwidth(map) - word_gap;
1728                 if (n > vec->iov_len - vec_seek)
1729                         n = vec->iov_len - vec_seek;
1730                 if (n > len)
1731                         n = len;
1732
1733                 if (!word_gap && len < map_bankwidth(map))
1734                         datum = map_word_ff(map);
1735
1736                 datum = map_word_load_partial(map, datum,
1737                                               vec->iov_base + vec_seek,
1738                                               word_gap, n);
1739
1740                 len -= n;
1741                 word_gap += n;
1742                 if (!len || word_gap == map_bankwidth(map)) {
1743                         map_write(map, datum, adr);
1744                         adr += map_bankwidth(map);
1745                         word_gap = 0;
1746                 }
1747
1748                 vec_seek += n;
1749                 if (vec_seek == vec->iov_len) {
1750                         vec++;
1751                         vec_seek = 0;
1752                 }
1753         } while (len);
1754         *pvec = vec;
1755         *pvec_seek = vec_seek;
1756
1757         /* GO GO GO */
1758         map_write(map, CMD(0xd0), cmd_adr);
1759         chip->state = FL_WRITING;
1760
1761         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1762                                    initial_adr, initial_len,
1763                                    chip->buffer_write_time,
1764                                    chip->buffer_write_time_max);
1765         if (ret) {
1766                 map_write(map, CMD(0x70), cmd_adr);
1767                 chip->state = FL_STATUS;
1768                 xip_enable(map, chip, cmd_adr);
1769                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1770                 goto out;
1771         }
1772
1773         /* check for errors */
1774         status = map_read(map, cmd_adr);
1775         if (map_word_bitsset(map, status, CMD(0x1a))) {
1776                 unsigned long chipstatus = MERGESTATUS(status);
1777
1778                 /* reset status */
1779                 map_write(map, CMD(0x50), cmd_adr);
1780                 map_write(map, CMD(0x70), cmd_adr);
1781                 xip_enable(map, chip, cmd_adr);
1782
1783                 if (chipstatus & 0x02) {
1784                         ret = -EROFS;
1785                 } else if (chipstatus & 0x08) {
1786                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1787                         ret = -EIO;
1788                 } else {
1789                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1790                         ret = -EINVAL;
1791                 }
1792
1793                 goto out;
1794         }
1795
1796         xip_enable(map, chip, cmd_adr);
1797  out:   put_chip(map, chip, cmd_adr);
1798         mutex_unlock(&chip->mutex);
1799         return ret;
1800 }
1801
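/*
 * Split an iovec write into chunks that never cross a write-block
 * boundary, handing each chunk to do_write_buffer().  The expression
 * wbufsize - (ofs & (wbufsize-1)) is the room left in the current
 * write block.
 */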
1802 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1803                                 unsigned long count, loff_t to, size_t *retlen)
1804 {
1805         struct map_info *map = mtd->priv;
1806         struct cfi_private *cfi = map->fldrv_priv;
1807         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1808         int ret = 0;
1809         int chipnum;
1810         unsigned long ofs, vec_seek, i;
1811         size_t len = 0;
1812
1813         for (i = 0; i < count; i++)
1814                 len += vecs[i].iov_len;
1815
1816         *retlen = 0;
1817         if (!len)
1818                 return 0;
1819
1820         chipnum = to >> cfi->chipshift;
1821         ofs = to - (chipnum << cfi->chipshift);
1822         vec_seek = 0;
1823
1824         do {
1825                 /* We must not cross write block boundaries */
1826                 int size = wbufsize - (ofs & (wbufsize-1));
1827
1828                 if (size > len)
1829                         size = len;
1830                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1831                                       ofs, &vecs, &vec_seek, size);
1832                 if (ret)
1833                         return ret;
1834
1835                 ofs += size;
1836                 (*retlen) += size;
1837                 len -= size;
1838
1839                 if (ofs >> cfi->chipshift) {
1840                         chipnum++;
1841                         ofs = 0;
1842                         if (chipnum == cfi->numchips)
1843                                 return 0;
1844                 }
1845
1846                 /* Be nice and reschedule with the chip in a usable state for other
1847                    processes. */
1848                 cond_resched();
1849
1850         } while (len);
1851
1852         return 0;
1853 }
1854
1855 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1856                                        size_t len, size_t *retlen, const u_char *buf)
1857 {
1858         struct kvec vec;
1859
1860         vec.iov_base = (void *) buf;
1861         vec.iov_len = len;
1862
1863         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1864 }
1865
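/*
 * Erase one block: clear the status register, then issue the 0x20/0xD0
 * erase sequence and wait.  SR.4 and SR.5 set together indicate a bad
 * command sequence, SR.1 a locked block, SR.3 a VPP fault; SR.5 alone
 * can be a transient erase failure, so that case is retried up to
 * three times.
 */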
1866 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1867                                       unsigned long adr, int len, void *thunk)
1868 {
1869         struct cfi_private *cfi = map->fldrv_priv;
1870         map_word status;
1871         int retries = 3;
1872         int ret;
1873
1874         adr += chip->start;
1875
1876  retry:
1877         mutex_lock(&chip->mutex);
1878         ret = get_chip(map, chip, adr, FL_ERASING);
1879         if (ret) {
1880                 mutex_unlock(&chip->mutex);
1881                 return ret;
1882         }
1883
1884         XIP_INVAL_CACHED_RANGE(map, adr, len);
1885         ENABLE_VPP(map);
1886         xip_disable(map, chip, adr);
1887
1888         /* Clear the status register first */
1889         map_write(map, CMD(0x50), adr);
1890
1891         /* Now erase */
1892         map_write(map, CMD(0x20), adr);
1893         map_write(map, CMD(0xD0), adr);
1894         chip->state = FL_ERASING;
1895         chip->erase_suspended = 0;
1896
1897         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1898                                    adr, len,
1899                                    chip->erase_time,
1900                                    chip->erase_time_max);
1901         if (ret) {
1902                 map_write(map, CMD(0x70), adr);
1903                 chip->state = FL_STATUS;
1904                 xip_enable(map, chip, adr);
1905                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1906                 goto out;
1907         }
1908
1909         /* We've broken this before. It doesn't hurt to be safe */
1910         map_write(map, CMD(0x70), adr);
1911         chip->state = FL_STATUS;
1912         status = map_read(map, adr);
1913
1914         /* check for errors */
1915         if (map_word_bitsset(map, status, CMD(0x3a))) {
1916                 unsigned long chipstatus = MERGESTATUS(status);
1917
1918                 /* Reset the error bits */
1919                 map_write(map, CMD(0x50), adr);
1920                 map_write(map, CMD(0x70), adr);
1921                 xip_enable(map, chip, adr);
1922
1923                 if ((chipstatus & 0x30) == 0x30) {
1924                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1925                         ret = -EINVAL;
1926                 } else if (chipstatus & 0x02) {
1927                         /* Protection bit set */
1928                         ret = -EROFS;
1929                 } else if (chipstatus & 0x8) {
1930                         /* Voltage */
1931                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1932                         ret = -EIO;
1933                 } else if (chipstatus & 0x20 && retries--) {
1934                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1935                         put_chip(map, chip, adr);
1936                         mutex_unlock(&chip->mutex);
1937                         goto retry;
1938                 } else {
1939                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1940                         ret = -EIO;
1941                 }
1942
1943                 goto out;
1944         }
1945
1946         xip_enable(map, chip, adr);
1947  out:   put_chip(map, chip, adr);
1948         mutex_unlock(&chip->mutex);
1949         return ret;
1950 }
1951
1952 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1953 {
1954         unsigned long ofs, len;
1955         int ret;
1956
1957         ofs = instr->addr;
1958         len = instr->len;
1959
1960         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1961         if (ret)
1962                 return ret;
1963
1964         instr->state = MTD_ERASE_DONE;
1965         mtd_erase_callback(instr);
1966
1967         return 0;
1968 }
1969
1970 static void cfi_intelext_sync (struct mtd_info *mtd)
1971 {
1972         struct map_info *map = mtd->priv;
1973         struct cfi_private *cfi = map->fldrv_priv;
1974         int i;
1975         struct flchip *chip;
1976         int ret = 0;
1977
1978         for (i=0; !ret && i<cfi->numchips; i++) {
1979                 chip = &cfi->chips[i];
1980
1981                 mutex_lock(&chip->mutex);
1982                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1983
1984                 if (!ret) {
1985                         chip->oldstate = chip->state;
1986                         chip->state = FL_SYNCING;
1987                         /* No need to wake_up() on this state change -
1988                          * as the whole point is that nobody can do anything
1989                          * with the chip now anyway.
1990                          */
1991                 }
1992                 mutex_unlock(&chip->mutex);
1993         }
1994
1995         /* Unlock the chips again */
1996
1997         for (i--; i >= 0; i--) {
1998                 chip = &cfi->chips[i];
1999
2000                 mutex_lock(&chip->mutex);
2001
2002                 if (chip->state == FL_SYNCING) {
2003                         chip->state = chip->oldstate;
2004                         chip->oldstate = FL_READY;
2005                         wake_up(&chip->wq);
2006                 }
2007                 mutex_unlock(&chip->mutex);
2008         }
2009 }
2010
2011 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2012                                                 struct flchip *chip,
2013                                                 unsigned long adr,
2014                                                 int len, void *thunk)
2015 {
2016         struct cfi_private *cfi = map->fldrv_priv;
2017         int status, ofs_factor = cfi->interleave * cfi->device_type;
2018
2019         adr += chip->start;
2020         xip_disable(map, chip, adr+(2*ofs_factor));
2021         map_write(map, CMD(0x90), adr+(2*ofs_factor));
2022         chip->state = FL_JEDEC_QUERY;
2023         status = cfi_read_query(map, adr+(2*ofs_factor));
2024         xip_enable(map, chip, 0);
2025         return status;
2026 }
2027
2028 #ifdef DEBUG_LOCK_BITS
2029 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2030                                                 struct flchip *chip,
2031                                                 unsigned long adr,
2032                                                 int len, void *thunk)
2033 {
2034         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2035                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2036         return 0;
2037 }
2038 #endif
2039
2040 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2041 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2042
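/*
 * Lock or unlock one block: 0x60 is the lock setup command, followed
 * by 0x01 to set or 0xD0 to clear the block lock bit.  Chips that
 * advertise Instant Individual Block Locking (FeatureSupport bit 5)
 * complete the update immediately, so no polling delay is needed.
 */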
2043 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2044                                        unsigned long adr, int len, void *thunk)
2045 {
2046         struct cfi_private *cfi = map->fldrv_priv;
2047         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2048         int udelay;
2049         int ret;
2050
2051         adr += chip->start;
2052
2053         mutex_lock(&chip->mutex);
2054         ret = get_chip(map, chip, adr, FL_LOCKING);
2055         if (ret) {
2056                 mutex_unlock(&chip->mutex);
2057                 return ret;
2058         }
2059
2060         ENABLE_VPP(map);
2061         xip_disable(map, chip, adr);
2062
2063         map_write(map, CMD(0x60), adr);
2064         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2065                 map_write(map, CMD(0x01), adr);
2066                 chip->state = FL_LOCKING;
2067         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2068                 map_write(map, CMD(0xD0), adr);
2069                 chip->state = FL_UNLOCKING;
2070         } else
2071                 BUG();
2072
2073         /*
2074          * If Instant Individual Block Locking supported then no need
2075          * to delay.
2076          */
2077         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
2078
2079         ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
2080         if (ret) {
2081                 map_write(map, CMD(0x70), adr);
2082                 chip->state = FL_STATUS;
2083                 xip_enable(map, chip, adr);
2084                 printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
2085                 goto out;
2086         }
2087
2088         xip_enable(map, chip, adr);
2089 out:    put_chip(map, chip, adr);
2090         mutex_unlock(&chip->mutex);
2091         return ret;
2092 }
2093
2094 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2095 {
2096         int ret;
2097
2098 #ifdef DEBUG_LOCK_BITS
2099         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2100                __func__, (unsigned long long)ofs, (unsigned long long)len);
2101         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2102                 ofs, len, NULL);
2103 #endif
2104
2105         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2106                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2107
2108 #ifdef DEBUG_LOCK_BITS
2109         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2110                __func__, ret);
2111         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2112                 ofs, len, NULL);
2113 #endif
2114
2115         return ret;
2116 }
2117
2118 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2119 {
2120         int ret;
2121
2122 #ifdef DEBUG_LOCK_BITS
2123         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2124                __func__, (unsigned long long)ofs, (unsigned long long)len);
2125         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2126                 ofs, len, NULL);
2127 #endif
2128
2129         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2130                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2131
2132 #ifdef DEBUG_LOCK_BITS
2133         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2134                __func__, ret);
2135         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2136                 ofs, len, NULL);
2137 #endif
2138
2139         return ret;
2140 }
2141
2142 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2143                                   uint64_t len)
2144 {
2145         return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2146                                 ofs, len, NULL) ? 1 : 0;
2147 }
2148
2149 #ifdef CONFIG_MTD_OTP
2150
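/*
 * All OTP helpers share this signature so that cfi_intelext_otp_walk()
 * can drive read, write and lock operations uniformly.
 */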
2151 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2152                         u_long data_offset, u_char *buf, u_int size,
2153                         u_long prot_offset, u_int groupno, u_int groupsize);
2154
2155 static int __xipram
2156 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2157             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2158 {
2159         struct cfi_private *cfi = map->fldrv_priv;
2160         int ret;
2161
2162         mutex_lock(&chip->mutex);
2163         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2164         if (ret) {
2165                 mutex_unlock(&chip->mutex);
2166                 return ret;
2167         }
2168
2169         /* let's ensure we're not reading back cached data from array mode */
2170         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2171
2172         xip_disable(map, chip, chip->start);
2173         if (chip->state != FL_JEDEC_QUERY) {
2174                 map_write(map, CMD(0x90), chip->start);
2175                 chip->state = FL_JEDEC_QUERY;
2176         }
2177         map_copy_from(map, buf, chip->start + offset, size);
2178         xip_enable(map, chip, chip->start);
2179
2180         /* then ensure we don't keep OTP data in the cache */
2181         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2182
2183         put_chip(map, chip, chip->start);
2184         mutex_unlock(&chip->mutex);
2185         return 0;
2186 }
2187
2188 static int
2189 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2190              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2191 {
2192         int ret;
2193
2194         while (size) {
2195                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2196                 int gap = offset - bus_ofs;
2197                 int n = min_t(int, size, map_bankwidth(map)-gap);
2198                 map_word datum = map_word_ff(map);
2199
2200                 datum = map_word_load_partial(map, datum, buf, gap, n);
2201                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2202                 if (ret)
2203                         return ret;
2204
2205                 offset += n;
2206                 buf += n;
2207                 size -= n;
2208         }
2209
2210         return 0;
2211 }
2212
2213 static int
2214 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2215             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2216 {
2217         struct cfi_private *cfi = map->fldrv_priv;
2218         map_word datum;
2219
2220         /* make sure area matches group boundaries */
2221         if (size != grpsz)
2222                 return -EXDEV;
2223
2224         datum = map_word_ff(map);
2225         datum = map_word_clr(map, datum, CMD(1 << grpno));
2226         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2227 }
2228
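/*
 * Walk the OTP protection registers, applying 'action' to each group
 * in turn.  A NULL action instead fills 'buf' with otp_info records
 * describing every group and its lock state.  Offsets taken from the
 * extended query table are in device words and are scaled by the
 * interleave and device type before use.
 */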
2229 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2230                                  size_t *retlen, u_char *buf,
2231                                  otp_op_t action, int user_regs)
2232 {
2233         struct map_info *map = mtd->priv;
2234         struct cfi_private *cfi = map->fldrv_priv;
2235         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2236         struct flchip *chip;
2237         struct cfi_intelext_otpinfo *otp;
2238         u_long devsize, reg_prot_offset, data_offset;
2239         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2240         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2241         int ret;
2242
2243         *retlen = 0;
2244
2245         /* Check that we actually have some OTP registers */
2246         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2247                 return -ENODATA;
2248
2249         /* we need real chips here not virtual ones */
2250         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2251         chip_step = devsize >> cfi->chipshift;
2252         chip_num = 0;
2253
2254         /* Some chips have OTP located in the _top_ partition only.
2255            For example: Intel 28F256L18T (T means top-parameter device) */
2256         if (cfi->mfr == CFI_MFR_INTEL) {
2257                 switch (cfi->id) {
2258                 case 0x880b:
2259                 case 0x880c:
2260                 case 0x880d:
2261                         chip_num = chip_step - 1;
2262                 }
2263         }
2264
2265         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2266                 chip = &cfi->chips[chip_num];
2267                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2268
2269                 /* first OTP region */
2270                 field = 0;
2271                 reg_prot_offset = extp->ProtRegAddr;
2272                 reg_fact_groups = 1;
2273                 reg_fact_size = 1 << extp->FactProtRegSize;
2274                 reg_user_groups = 1;
2275                 reg_user_size = 1 << extp->UserProtRegSize;
2276
2277                 while (len > 0) {
2278                         /* flash geometry fixup */
2279                         data_offset = reg_prot_offset + 1;
2280                         data_offset *= cfi->interleave * cfi->device_type;
2281                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2282                         reg_fact_size *= cfi->interleave;
2283                         reg_user_size *= cfi->interleave;
2284
2285                         if (user_regs) {
2286                                 groups = reg_user_groups;
2287                                 groupsize = reg_user_size;
2288                                 /* skip over factory reg area */
2289                                 groupno = reg_fact_groups;
2290                                 data_offset += reg_fact_groups * reg_fact_size;
2291                         } else {
2292                                 groups = reg_fact_groups;
2293                                 groupsize = reg_fact_size;
2294                                 groupno = 0;
2295                         }
2296
2297                         while (len > 0 && groups > 0) {
2298                                 if (!action) {
2299                                         /*
2300                                          * Special case: if action is NULL
2301                                          * we fill buf with otp_info records.
2302                                          */
2303                                         struct otp_info *otpinfo;
2304                                         map_word lockword;
2305                                         if (len < sizeof(struct otp_info))
2306                                                 return -ENOSPC;
2307                                         len -= sizeof(struct otp_info);
2308                                         ret = do_otp_read(map, chip,
2309                                                           reg_prot_offset,
2310                                                           (u_char *)&lockword,
2311                                                           map_bankwidth(map),
2312                                                           0, 0, 0);
2313                                         if (ret)
2314                                                 return ret;
2315                                         otpinfo = (struct otp_info *)buf;
2316                                         otpinfo->start = from;
2317                                         otpinfo->length = groupsize;
2318                                         otpinfo->locked =
2319                                            !map_word_bitsset(map, lockword,
2320                                                              CMD(1 << groupno));
2321                                         from += groupsize;
2322                                         buf += sizeof(*otpinfo);
2323                                         *retlen += sizeof(*otpinfo);
2324                                 } else if (from >= groupsize) {
2325                                         from -= groupsize;
2326                                         data_offset += groupsize;
2327                                 } else {
2328                                         int size = groupsize;
2329                                         data_offset += from;
2330                                         size -= from;
2331                                         from = 0;
2332                                         if (size > len)
2333                                                 size = len;
2334                                         ret = action(map, chip, data_offset,
2335                                                      buf, size, reg_prot_offset,
2336                                                      groupno, groupsize);
2337                                         if (ret < 0)
2338                                                 return ret;
2339                                         buf += size;
2340                                         len -= size;
2341                                         *retlen += size;
2342                                         data_offset += size;
2343                                 }
2344                                 groupno++;
2345                                 groups--;
2346                         }
2347
2348                         /* next OTP region */
2349                         if (++field == extp->NumProtectionFields)
2350                                 break;
2351                         reg_prot_offset = otp->ProtRegAddr;
2352                         reg_fact_groups = otp->FactGroups;
2353                         reg_fact_size = 1 << otp->FactProtRegSize;
2354                         reg_user_groups = otp->UserGroups;
2355                         reg_user_size = 1 << otp->UserProtRegSize;
2356                         otp++;
2357                 }
2358         }
2359
2360         return 0;
2361 }
2362
2363 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2364                                            size_t len, size_t *retlen,
2365                                             u_char *buf)
2366 {
2367         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2368                                      buf, do_otp_read, 0);
2369 }
2370
2371 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2372                                            size_t len, size_t *retlen,
2373                                             u_char *buf)
2374 {
2375         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2376                                      buf, do_otp_read, 1);
2377 }
2378
2379 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2380                                             size_t len, size_t *retlen,
2381                                              u_char *buf)
2382 {
2383         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2384                                      buf, do_otp_write, 1);
2385 }
2386
2387 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2388                                            loff_t from, size_t len)
2389 {
2390         size_t retlen;
2391         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2392                                      NULL, do_otp_lock, 1);
2393 }
2394
2395 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2396                                            struct otp_info *buf, size_t len)
2397 {
2398         size_t retlen;
2399         int ret;
2400
2401         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2402         return ret ? : retlen;
2403 }
2404
2405 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2406                                            struct otp_info *buf, size_t len)
2407 {
2408         size_t retlen;
2409         int ret;
2410
2411         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2412         return ret ? : retlen;
2413 }
2414
2415 #endif
2416
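/*
 * For parts that lock all blocks at power-up (MTD_POWERUP_LOCK),
 * snapshot each block's lock bit into the per-region lockmap before
 * suspending, so cfi_intelext_restore_locks() can re-unlock the
 * blocks that were unlocked before the power cycle.
 */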
2417 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2418 {
2419         struct mtd_erase_region_info *region;
2420         int block, status, i;
2421         unsigned long adr;
2422         size_t len;
2423
2424         for (i = 0; i < mtd->numeraseregions; i++) {
2425                 region = &mtd->eraseregions[i];
2426                 if (!region->lockmap)
2427                         continue;
2428
2429                 for (block = 0; block < region->numblocks; block++) {
2430                         len = region->erasesize;
2431                         adr = region->offset + block * len;
2432
2433                         status = cfi_varsize_frob(mtd,
2434                                         do_getlockstatus_oneblock, adr, len, NULL);
2435                         if (status)
2436                                 set_bit(block, region->lockmap);
2437                         else
2438                                 clear_bit(block, region->lockmap);
2439                 }
2440         }
2441 }
2442
2443 static int cfi_intelext_suspend(struct mtd_info *mtd)
2444 {
2445         struct map_info *map = mtd->priv;
2446         struct cfi_private *cfi = map->fldrv_priv;
2447         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2448         int i;
2449         struct flchip *chip;
2450         int ret = 0;
2451
2452         if ((mtd->flags & MTD_POWERUP_LOCK)
2453             && extp && (extp->FeatureSupport & (1 << 5)))
2454                 cfi_intelext_save_locks(mtd);
2455
2456         for (i=0; !ret && i<cfi->numchips; i++) {
2457                 chip = &cfi->chips[i];
2458
2459                 mutex_lock(&chip->mutex);
2460
2461                 switch (chip->state) {
2462                 case FL_READY:
2463                 case FL_STATUS:
2464                 case FL_CFI_QUERY:
2465                 case FL_JEDEC_QUERY:
2466                         if (chip->oldstate == FL_READY) {
2467                                 /* place the chip in a known state before suspend */
2468                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2469                                 chip->oldstate = chip->state;
2470                                 chip->state = FL_PM_SUSPENDED;
2471                                 /* No need to wake_up() on this state change -
2472                                  * as the whole point is that nobody can do anything
2473                                  * with the chip now anyway.
2474                                  */
2475                         } else {
2476                                 /* There seems to be an operation pending. We must wait for it. */
2477                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2478                                 ret = -EAGAIN;
2479                         }
2480                         break;
2481                 default:
2482                         /* Should we actually wait? Once upon a time these routines weren't
2483                            allowed to. Or should we return -EAGAIN, because the upper layers
2484                            ought to have already shut down anything which was using the device
2485                            anyway? The latter for now. */
2486                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2487                         ret = -EAGAIN;  /* fall through to FL_PM_SUSPENDED */
2488                 case FL_PM_SUSPENDED:
2489                         break;
2490                 }
2491                 mutex_unlock(&chip->mutex);
2492         }
2493
2494         /* Unlock the chips again */
2495
2496         if (ret) {
2497                 for (i--; i >= 0; i--) {
2498                         chip = &cfi->chips[i];
2499
2500                         mutex_lock(&chip->mutex);
2501
2502                         if (chip->state == FL_PM_SUSPENDED) {
2503                                 /* No need to force it into a known state here,
2504                                    because we're returning failure, and it didn't
2505                                    get power cycled */
2506                                 chip->state = chip->oldstate;
2507                                 chip->oldstate = FL_READY;
2508                                 wake_up(&chip->wq);
2509                         }
2510                         mutex_unlock(&chip->mutex);
2511                 }
2512         }
2513
2514         return ret;
2515 }
2516
2517 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2518 {
2519         struct mtd_erase_region_info *region;
2520         int block, i;
2521         unsigned long adr;
2522         size_t len;
2523
2524         for (i = 0; i < mtd->numeraseregions; i++) {
2525                 region = &mtd->eraseregions[i];
2526                 if (!region->lockmap)
2527                         continue;
2528
2529                 for (block = 0; block < region->numblocks; block++) {
2530                         len = region->erasesize;
2531                         adr = region->offset + block * len;
2532
2533                         if (!test_bit(block, region->lockmap))
2534                                 cfi_intelext_unlock(mtd, adr, len);
2535                 }
2536         }
2537 }
2538
2539 static void cfi_intelext_resume(struct mtd_info *mtd)
2540 {
2541         struct map_info *map = mtd->priv;
2542         struct cfi_private *cfi = map->fldrv_priv;
2543         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2544         int i;
2545         struct flchip *chip;
2546
2547         for (i=0; i<cfi->numchips; i++) {
2548
2549                 chip = &cfi->chips[i];
2550
2551                 mutex_lock(&chip->mutex);
2552
2553                 /* Go to known state. Chip may have been power cycled */
2554                 if (chip->state == FL_PM_SUSPENDED) {
2555                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2556                         chip->oldstate = chip->state = FL_READY;
2557                         wake_up(&chip->wq);
2558                 }
2559
2560                 mutex_unlock(&chip->mutex);
2561         }
2562
2563         if ((mtd->flags & MTD_POWERUP_LOCK)
2564             && extp && (extp->FeatureSupport & (1 << 5)))
2565                 cfi_intelext_restore_locks(mtd);
2566 }
2567
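/*
 * Force every chip back into array mode so that a bootloader stored
 * in flash stays readable across a soft reboot; hooked up via the
 * reboot notifier below.
 */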
2568 static int cfi_intelext_reset(struct mtd_info *mtd)
2569 {
2570         struct map_info *map = mtd->priv;
2571         struct cfi_private *cfi = map->fldrv_priv;
2572         int i, ret;
2573
2574         for (i=0; i < cfi->numchips; i++) {
2575                 struct flchip *chip = &cfi->chips[i];
2576
2577                 /* force the completion of any ongoing operation
2578                    and switch to array mode so any bootloader in
2579                    flash is accessible for soft reboot. */
2580                 mutex_lock(&chip->mutex);
2581                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2582                 if (!ret) {
2583                         map_write(map, CMD(0xff), chip->start);
2584                         chip->state = FL_SHUTDOWN;
2585                         put_chip(map, chip, chip->start);
2586                 }
2587                 mutex_unlock(&chip->mutex);
2588         }
2589
2590         return 0;
2591 }
2592
2593 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2594                                void *v)
2595 {
2596         struct mtd_info *mtd;
2597
2598         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2599         cfi_intelext_reset(mtd);
2600         return NOTIFY_DONE;
2601 }
2602
2603 static void cfi_intelext_destroy(struct mtd_info *mtd)
2604 {
2605         struct map_info *map = mtd->priv;
2606         struct cfi_private *cfi = map->fldrv_priv;
2607         struct mtd_erase_region_info *region;
2608         int i;
2609         cfi_intelext_reset(mtd);
2610         unregister_reboot_notifier(&mtd->reboot_notifier);
2611         kfree(cfi->cmdset_priv);
2612         kfree(cfi->cfiq);
2613         kfree(cfi->chips[0].priv);
2614         kfree(cfi);
2615         for (i = 0; i < mtd->numeraseregions; i++) {
2616                 region = &mtd->eraseregions[i];
2617                 /* kfree() ignores NULL, so no check is needed */
2618                 kfree(region->lockmap);
2619         }
2620         kfree(mtd->eraseregions);
2621 }
2622
2623 MODULE_LICENSE("GPL");
2624 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2625 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2626 MODULE_ALIAS("cfi_cmdset_0003");
2627 MODULE_ALIAS("cfi_cmdset_0200");