/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0
/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
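/*
 * fwh_lock.h provides fixup_use_fwh_lock(), which switches the lock and
 * unlock methods over to the firmware-hub register scheme needed by the
 * LPC/FWH parts listed in jedec_fixup_table below.
 */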
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

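/*
 * This driver is never probed directly: a map driver asks the generic
 * CFI probe to identify the chip, and that probe dispatches here when
 * the primary vendor command set ID is 0x0001 (or one of the aliased
 * IDs exported further down).  A minimal sketch of the usual path,
 * with "my_map" standing in for a real map_info:
 *
 *      struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *      if (mtd)
 *              add_mtd_device(mtd);
 */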
/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

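/*
 * The AT49BV640D(T) powers up with all blocks locked: advertise instant
 * individual block locking (FeatureSupport bit 5) and set
 * MTD_POWERUP_LOCK so the blocks get unlocked automatically on
 * boot/resume.
 */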
static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

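/*
 * cfi_fixup() walks these tables and applies an entry whenever its
 * manufacturer and device IDs match the probed chip; CFI_MFR_ANY and
 * CFI_ID_ANY act as wildcards, and a NULL fixup function terminates
 * each table.
 */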
static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock, NULL },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock, NULL, },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It is likely the device IDs are as
         * well.  This table is to pick up all cases where
         * we know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

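/*
 * The extended query table is variable-sized: protection, burst-read
 * and partition records follow the fixed header depending on the minor
 * version.  So we read the fixed part first, walk it to work out how
 * much extra data is attached, and if necessary free the buffer and
 * re-read the whole table at full size (the "again" loop below).
 */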
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

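        /*
         * CFI encodes timings as powers of two: typical word/buffer
         * write times are 2^Typ microseconds and the typical block
         * erase time is 2^Typ milliseconds, with the worst case being
         * typical << Max.  The defaults below (50ms word write, 2s
         * erase, and 8x those for the worst case) only apply when the
         * chip reports no timing data.
         */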
        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
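/*
 * The Intel Standard (0x0003) and Intel Performance Code (0x0200)
 * command sets are close enough to the extended set that the same
 * implementation serves all three, hence the aliases.
 */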
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

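        /*
         * Each 32-bit EraseRegionInfo entry packs the region geometry:
         * the low 16 bits hold (number of erase blocks - 1) and the
         * high 16 bits hold the block size in units of 256 bytes,
         * which is what the shift and mask below decode.
         */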
        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i,(unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                mutex_init(&chip->mutex);
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
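/*
 * These functions arbitrate access to the chip (or virtual partition)
 * state machine.  Status polling reads the Intel status register:
 * bit 7 (0x80, status_OK below) is the "write state machine ready"
 * bit, and bit 0 (0x01, status_PWS) is used on multi-partition chips
 * to recognise that the busy state belongs to another partition.
 */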
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can get the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        spin_lock(&shared->lock);

                        /* We should not own the chip if it is already
                         * in FL_SYNCING state.  Put the contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check whether we already have a suspended erase
                 * on this chip; if so, sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        spin_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                spin_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time_max;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        mutex_unlock(&chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        mutex_lock(&chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                mutex_unlock(&chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                mutex_lock(&chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * an XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
        xip_wait_for_operation(map, chip, cmd_adr, usec_max)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

1208 static int inval_cache_and_wait_for_operation(
1209                 struct map_info *map, struct flchip *chip,
1210                 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1211                 unsigned int chip_op_time, unsigned int chip_op_time_max)
1212 {
1213         struct cfi_private *cfi = map->fldrv_priv;
1214         map_word status, status_OK = CMD(0x80);
1215         int chip_state = chip->state;
1216         unsigned int timeo, sleep_time, reset_timeo;
1217
1218         mutex_unlock(&chip->mutex);
1219         if (inval_len)
1220                 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1221         mutex_lock(&chip->mutex);
1222
1223         timeo = chip_op_time_max;
1224         if (!timeo)
1225                 timeo = 500000;
1226         reset_timeo = timeo;
1227         sleep_time = chip_op_time / 2;
1228
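        /*
         * Poll the status register until SR.7 (WSM ready) is set or the
         * timeout budget (in usecs) runs out.  While at least one timer
         * tick of delay remains we msleep() with the chip lock dropped;
         * after that we fall back to udelay(1) polling.
         */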
1229         for (;;) {
1230                 status = map_read(map, cmd_adr);
1231                 if (map_word_andequal(map, status, status_OK, status_OK))
1232                         break;
1233
1234                 if (!timeo) {
1235                         map_write(map, CMD(0x70), cmd_adr);
1236                         chip->state = FL_STATUS;
1237                         return -ETIME;
1238                 }
1239
1240                 /* OK.  Still waiting.  Drop the lock, wait a while and retry. */
1241                 mutex_unlock(&chip->mutex);
1242                 if (sleep_time >= 1000000/HZ) {
1243                         /*
1244                          * At least one timer tick of delay remains, so
1245                          * it can be spent in a sleeping delay instead
1246                          * of busy waiting.
1247                          */
1248                         msleep(sleep_time/1000);
1249                         timeo -= sleep_time;
1250                         sleep_time = 1000000/HZ;
1251                 } else {
1252                         udelay(1);
1253                         cond_resched();
1254                         timeo--;
1255                 }
1256                 mutex_lock(&chip->mutex);
1257
1258                 while (chip->state != chip_state) {
1259                         /* Someone's suspended the operation: sleep */
1260                         DECLARE_WAITQUEUE(wait, current);
1261                         set_current_state(TASK_UNINTERRUPTIBLE);
1262                         add_wait_queue(&chip->wq, &wait);
1263                         mutex_unlock(&chip->mutex);
1264                         schedule();
1265                         remove_wait_queue(&chip->wq, &wait);
1266                         mutex_lock(&chip->mutex);
1267                 }
1268                 if (chip->erase_suspended && chip_state == FL_ERASING) {
1269                         /* Erase suspend occurred while we slept: reset timeout */
1270                         timeo = reset_timeo;
1271                         chip->erase_suspended = 0;
1272                 }
1273                 if (chip->write_suspended && chip_state == FL_WRITING) {
1274                         /* Write suspend occurred while we slept: reset timeout */
1275                         timeo = reset_timeo;
1276                         chip->write_suspended = 0;
1277                 }
1278         }
1279
1280         /* Done and happy. */
1281         chip->state = FL_STATUS;
1282         return 0;
1283 }
1284
1285 #endif
1286
1287 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1288         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max)
1289
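/*
 * Quick reference for the raw command opcodes and status register (SR)
 * bits used below.  This summary follows the Intel 28FxxxJ3 family
 * datasheets; consult the datasheet for the actual part for the
 * authoritative definitions.
 *
 *   0xff  Read Array                  0x70  Read Status Register
 *   0x50  Clear Status Register       0x90  Read Identifier
 *   0x40  Word Program setup          0x41  Word Program (P_ID 0x0200)
 *   0xe8  Write to Buffer             0xe9  Write to Buffer (P_ID 0x0200)
 *   0x20  Block Erase setup           0xd0  Confirm / Resume / Unlock
 *   0xb0  Program/Erase Suspend       0xc0  OTP Program setup
 *   0x60  Lock setup                  0x01  Set Block Lock confirm
 *
 *   SR.7 (0x80)  WSM ready            SR.5 (0x20)  erase error
 *   SR.4 (0x10)  program error        SR.3 (0x08)  VPP low
 *   SR.1 (0x02)  block locked         (hence the 0x1a and 0x3a masks)
 */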
1290
1291 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1292 {
1293         unsigned long cmd_addr;
1294         struct cfi_private *cfi = map->fldrv_priv;
1295         int ret = 0;
1296
1297         adr += chip->start;
1298
1299         /* Ensure cmd read/writes are aligned. */
1300         cmd_addr = adr & ~(map_bankwidth(map)-1);
1301
1302         mutex_lock(&chip->mutex);
1303
1304         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1305
1306         if (!ret) {
1307                 if (chip->state != FL_POINT && chip->state != FL_READY)
1308                         map_write(map, CMD(0xff), cmd_addr);
1309
1310                 chip->state = FL_POINT;
1311                 chip->ref_point_counter++;
1312         }
1313         mutex_unlock(&chip->mutex);
1314
1315         return ret;
1316 }
1317
1318 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1319                 size_t *retlen, void **virt, resource_size_t *phys)
1320 {
1321         struct map_info *map = mtd->priv;
1322         struct cfi_private *cfi = map->fldrv_priv;
1323         unsigned long ofs, last_end = 0;
1324         int chipnum;
1325         int ret = 0;
1326
1327         if (!map->virt || (from + len > mtd->size))
1328                 return -EINVAL;
1329
1330         /* Now lock the chip(s) to POINT state */
1331
1332         /* ofs: offset within the first chip at which the first read should start */
1333         chipnum = (from >> cfi->chipshift);
1334         ofs = from - (chipnum << cfi->chipshift);
1335
1336         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1337         *retlen = 0;
1338         if (phys)
1339                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1340
1341         while (len) {
1342                 unsigned long thislen;
1343
1344                 if (chipnum >= cfi->numchips)
1345                         break;
1346
1347                 /* We cannot point across chips that are virtually disjoint */
1348                 if (!last_end)
1349                         last_end = cfi->chips[chipnum].start;
1350                 else if (cfi->chips[chipnum].start != last_end)
1351                         break;
1352
1353                 if ((len + ofs - 1) >> cfi->chipshift)
1354                         thislen = (1<<cfi->chipshift) - ofs;
1355                 else
1356                         thislen = len;
1357
1358                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1359                 if (ret)
1360                         break;
1361
1362                 *retlen += thislen;
1363                 len -= thislen;
1364
1365                 ofs = 0;
1366                 last_end += 1 << cfi->chipshift;
1367                 chipnum++;
1368         }
1369         return 0;
1370 }
1371
1372 static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1373 {
1374         struct map_info *map = mtd->priv;
1375         struct cfi_private *cfi = map->fldrv_priv;
1376         unsigned long ofs;
1377         int chipnum;
1378
1379         /* Now release the chip(s) from POINT state */
1380
1381         /* ofs: offset within the first chip at which the first read should start */
1382         chipnum = (from >> cfi->chipshift);
1383         ofs = from - (chipnum << cfi->chipshift);
1384
1385         while (len) {
1386                 unsigned long thislen;
1387                 struct flchip *chip;
1388
1389                 if (chipnum >= cfi->numchips)
1390                         break;
1391                 chip = &cfi->chips[chipnum];
1392
1393                 if ((len + ofs - 1) >> cfi->chipshift)
1394                         thislen = (1<<cfi->chipshift) - ofs;
1395                 else
1396                         thislen = len;
1397
1398                 mutex_lock(&chip->mutex);
1399                 if (chip->state == FL_POINT) {
1400                         chip->ref_point_counter--;
1401                         if (chip->ref_point_counter == 0)
1402                                 chip->state = FL_READY;
1403                 } else
1404                         printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */
1405
1406                 put_chip(map, chip, chip->start);
1407                 mutex_unlock(&chip->mutex);
1408
1409                 len -= thislen;
1410                 ofs = 0;
1411                 chipnum++;
1412         }
1413 }
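/*
 * Usage sketch (hypothetical caller, not part of this driver): the MTD
 * core reaches the handlers above through mtd->point()/mtd->unpoint().
 * A short point is not an error, so callers must honour retlen, e.g.:
 *
 *      void *virt;
 *      size_t retlen;
 *
 *      if (!mtd->point(mtd, from, len, &retlen, &virt, NULL)) {
 *              memcpy(dest, virt, retlen);
 *              mtd->unpoint(mtd, from, retlen);
 *      }
 *
 * where dest, from and len are the caller's buffer, offset and length.
 */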
1414
1415 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1416 {
1417         unsigned long cmd_addr;
1418         struct cfi_private *cfi = map->fldrv_priv;
1419         int ret;
1420
1421         adr += chip->start;
1422
1423         /* Ensure cmd read/writes are aligned. */
1424         cmd_addr = adr & ~(map_bankwidth(map)-1);
1425
1426         mutex_lock(&chip->mutex);
1427         ret = get_chip(map, chip, cmd_addr, FL_READY);
1428         if (ret) {
1429                 mutex_unlock(&chip->mutex);
1430                 return ret;
1431         }
1432
1433         if (chip->state != FL_POINT && chip->state != FL_READY) {
1434                 map_write(map, CMD(0xff), cmd_addr);
1435
1436                 chip->state = FL_READY;
1437         }
1438
1439         map_copy_from(map, buf, adr, len);
1440
1441         put_chip(map, chip, cmd_addr);
1442
1443         mutex_unlock(&chip->mutex);
1444         return 0;
1445 }
1446
1447 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1448 {
1449         struct map_info *map = mtd->priv;
1450         struct cfi_private *cfi = map->fldrv_priv;
1451         unsigned long ofs;
1452         int chipnum;
1453         int ret = 0;
1454
1455         /* ofs: offset within the first chip at which the first read should start */
1456         chipnum = (from >> cfi->chipshift);
1457         ofs = from - (chipnum << cfi->chipshift);
1458
1459         *retlen = 0;
1460
1461         while (len) {
1462                 unsigned long thislen;
1463
1464                 if (chipnum >= cfi->numchips)
1465                         break;
1466
1467                 if ((len + ofs - 1) >> cfi->chipshift)
1468                         thislen = (1<<cfi->chipshift) - ofs;
1469                 else
1470                         thislen = len;
1471
1472                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1473                 if (ret)
1474                         break;
1475
1476                 *retlen += thislen;
1477                 len -= thislen;
1478                 buf += thislen;
1479
1480                 ofs = 0;
1481                 chipnum++;
1482         }
1483         return ret;
1484 }
1485
1486 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1487                                      unsigned long adr, map_word datum, int mode)
1488 {
1489         struct cfi_private *cfi = map->fldrv_priv;
1490         map_word status, write_cmd;
1491         int ret = 0;
1492
1493         adr += chip->start;
1494
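        /*
         * Select the program-setup opcode: 0x40 is the standard word
         * program command, 0x41 is its equivalent on P_ID 0x0200 command
         * sets, and 0xc0 programs the OTP/protection registers instead
         * of the main array.
         */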
1495         switch (mode) {
1496         case FL_WRITING:
1497                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1498                 break;
1499         case FL_OTP_WRITE:
1500                 write_cmd = CMD(0xc0);
1501                 break;
1502         default:
1503                 return -EINVAL;
1504         }
1505
1506         mutex_lock(&chip->mutex);
1507         ret = get_chip(map, chip, adr, mode);
1508         if (ret) {
1509                 mutex_unlock(&chip->mutex);
1510                 return ret;
1511         }
1512
1513         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1514         ENABLE_VPP(map);
1515         xip_disable(map, chip, adr);
1516         map_write(map, write_cmd, adr);
1517         map_write(map, datum, adr);
1518         chip->state = mode;
1519
1520         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1521                                    adr, map_bankwidth(map),
1522                                    chip->word_write_time,
1523                                    chip->word_write_time_max);
1524         if (ret) {
1525                 xip_enable(map, chip, adr);
1526                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1527                 goto out;
1528         }
1529
1530         /* check for errors */
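        /* 0x1a = SR.4 (program error) | SR.3 (VPP low) | SR.1 (block locked) */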
1531         status = map_read(map, adr);
1532         if (map_word_bitsset(map, status, CMD(0x1a))) {
1533                 unsigned long chipstatus = MERGESTATUS(status);
1534
1535                 /* reset status */
1536                 map_write(map, CMD(0x50), adr);
1537                 map_write(map, CMD(0x70), adr);
1538                 xip_enable(map, chip, adr);
1539
1540                 if (chipstatus & 0x02) {
1541                         ret = -EROFS;
1542                 } else if (chipstatus & 0x08) {
1543                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1544                         ret = -EIO;
1545                 } else {
1546                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1547                         ret = -EINVAL;
1548                 }
1549
1550                 goto out;
1551         }
1552
1553         xip_enable(map, chip, adr);
1554  out:   put_chip(map, chip, adr);
1555         mutex_unlock(&chip->mutex);
1556         return ret;
1557 }
1558
1559
1560 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1561 {
1562         struct map_info *map = mtd->priv;
1563         struct cfi_private *cfi = map->fldrv_priv;
1564         int ret = 0;
1565         int chipnum;
1566         unsigned long ofs;
1567
1568         *retlen = 0;
1569         if (!len)
1570                 return 0;
1571
1572         chipnum = to >> cfi->chipshift;
1573         ofs = to  - (chipnum << cfi->chipshift);
1574
1575         /* If it's not bus-aligned, do the first partial-word write */
1576         if (ofs & (map_bankwidth(map)-1)) {
1577                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1578                 int gap = ofs - bus_ofs;
1579                 int n;
1580                 map_word datum;
1581
1582                 n = min_t(int, len, map_bankwidth(map)-gap);
1583                 datum = map_word_ff(map);
1584                 datum = map_word_load_partial(map, datum, buf, gap, n);
1585
1586                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1587                                                bus_ofs, datum, FL_WRITING);
1588                 if (ret)
1589                         return ret;
1590
1591                 len -= n;
1592                 ofs += n;
1593                 buf += n;
1594                 (*retlen) += n;
1595
1596                 if (ofs >> cfi->chipshift) {
1597                         chipnum++;
1598                         ofs = 0;
1599                         if (chipnum == cfi->numchips)
1600                                 return 0;
1601                 }
1602         }
1603
1604         while (len >= map_bankwidth(map)) {
1605                 map_word datum = map_word_load(map, buf);
1606
1607                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1608                                        ofs, datum, FL_WRITING);
1609                 if (ret)
1610                         return ret;
1611
1612                 ofs += map_bankwidth(map);
1613                 buf += map_bankwidth(map);
1614                 (*retlen) += map_bankwidth(map);
1615                 len -= map_bankwidth(map);
1616
1617                 if (ofs >> cfi->chipshift) {
1618                         chipnum++;
1619                         ofs = 0;
1620                         if (chipnum == cfi->numchips)
1621                                 return 0;
1622                 }
1623         }
1624
1625         if (len & (map_bankwidth(map)-1)) {
1626                 map_word datum;
1627
1628                 datum = map_word_ff(map);
1629                 datum = map_word_load_partial(map, datum, buf, 0, len);
1630
1631                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1632                                        ofs, datum, FL_WRITING);
1633                 if (ret)
1634                         return ret;
1635
1636                 (*retlen) += len;
1637         }
1638
1639         return 0;
1640 }
1641
1642
1643 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1644                                     unsigned long adr, const struct kvec **pvec,
1645                                     unsigned long *pvec_seek, int len)
1646 {
1647         struct cfi_private *cfi = map->fldrv_priv;
1648         map_word status, write_cmd, datum;
1649         unsigned long cmd_adr;
1650         int ret, wbufsize, word_gap, words;
1651         const struct kvec *vec;
1652         unsigned long vec_seek;
1653         unsigned long initial_adr;
1654         int initial_len = len;
1655
1656         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1657         adr += chip->start;
1658         initial_adr = adr;
1659         cmd_adr = adr & ~(wbufsize-1);
1660
1661         /* Let's determine this according to the interleave only once */
1662         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1663
1664         mutex_lock(&chip->mutex);
1665         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1666         if (ret) {
1667                 mutex_unlock(&chip->mutex);
1668                 return ret;
1669         }
1670
1671         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1672         ENABLE_VPP(map);
1673         xip_disable(map, chip, cmd_adr);
1674
1675         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1676            [...], the device will not accept any more Write to Buffer commands".
1677            So we must check here and reset those bits if they're set. Otherwise
1678            we're just pissing in the wind */
1679         if (chip->state != FL_STATUS) {
1680                 map_write(map, CMD(0x70), cmd_adr);
1681                 chip->state = FL_STATUS;
1682         }
1683         status = map_read(map, cmd_adr);
1684         if (map_word_bitsset(map, status, CMD(0x30))) {
1685                 xip_enable(map, chip, cmd_adr);
1686                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1687                 xip_disable(map, chip, cmd_adr);
1688                 map_write(map, CMD(0x50), cmd_adr);
1689                 map_write(map, CMD(0x70), cmd_adr);
1690         }
1691
1692         chip->state = FL_WRITING_TO_BUFFER;
1693         map_write(map, write_cmd, cmd_adr);
1694         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1695         if (ret) {
1696                 /* Argh. Not ready for write to buffer */
1697                 map_word Xstatus = map_read(map, cmd_adr);
1698                 map_write(map, CMD(0x70), cmd_adr);
1699                 chip->state = FL_STATUS;
1700                 status = map_read(map, cmd_adr);
1701                 map_write(map, CMD(0x50), cmd_adr);
1702                 map_write(map, CMD(0x70), cmd_adr);
1703                 xip_enable(map, chip, cmd_adr);
1704                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1705                                 map->name, Xstatus.x[0], status.x[0]);
1706                 goto out;
1707         }
1708
1709         /* Figure out the number of words to write */
1710         word_gap = (-adr & (map_bankwidth(map)-1));
1711         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1712         if (!word_gap) {
1713                 words--;
1714         } else {
1715                 word_gap = map_bankwidth(map) - word_gap;
1716                 adr -= word_gap;
1717                 datum = map_word_ff(map);
1718         }
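        /*
         * If adr is not bus-aligned, pull it back to the aligned address
         * and pad the leading bytes with 0xff: programming can only clear
         * bits, so 0xff leaves the existing flash contents intact.
         */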
1719
1720         /* Write length of data to come */
1721         map_write(map, CMD(words), cmd_adr);
1722
1723         /* Write data */
1724         vec = *pvec;
1725         vec_seek = *pvec_seek;
1726         do {
1727                 int n = map_bankwidth(map) - word_gap;
1728                 if (n > vec->iov_len - vec_seek)
1729                         n = vec->iov_len - vec_seek;
1730                 if (n > len)
1731                         n = len;
1732
1733                 if (!word_gap && len < map_bankwidth(map))
1734                         datum = map_word_ff(map);
1735
1736                 datum = map_word_load_partial(map, datum,
1737                                               vec->iov_base + vec_seek,
1738                                               word_gap, n);
1739
1740                 len -= n;
1741                 word_gap += n;
1742                 if (!len || word_gap == map_bankwidth(map)) {
1743                         map_write(map, datum, adr);
1744                         adr += map_bankwidth(map);
1745                         word_gap = 0;
1746                 }
1747
1748                 vec_seek += n;
1749                 if (vec_seek == vec->iov_len) {
1750                         vec++;
1751                         vec_seek = 0;
1752                 }
1753         } while (len);
1754         *pvec = vec;
1755         *pvec_seek = vec_seek;
1756
1757         /* GO GO GO */
1758         map_write(map, CMD(0xd0), cmd_adr);
1759         chip->state = FL_WRITING;
1760
1761         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1762                                    initial_adr, initial_len,
1763                                    chip->buffer_write_time,
1764                                    chip->buffer_write_time_max);
1765         if (ret) {
1766                 map_write(map, CMD(0x70), cmd_adr);
1767                 chip->state = FL_STATUS;
1768                 xip_enable(map, chip, cmd_adr);
1769                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1770                 goto out;
1771         }
1772
1773         /* check for errors */
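        /* 0x1a = SR.4 (program error) | SR.3 (VPP low) | SR.1 (block locked) */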
1774         status = map_read(map, cmd_adr);
1775         if (map_word_bitsset(map, status, CMD(0x1a))) {
1776                 unsigned long chipstatus = MERGESTATUS(status);
1777
1778                 /* reset status */
1779                 map_write(map, CMD(0x50), cmd_adr);
1780                 map_write(map, CMD(0x70), cmd_adr);
1781                 xip_enable(map, chip, cmd_adr);
1782
1783                 if (chipstatus & 0x02) {
1784                         ret = -EROFS;
1785                 } else if (chipstatus & 0x08) {
1786                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1787                         ret = -EIO;
1788                 } else {
1789                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1790                         ret = -EINVAL;
1791                 }
1792
1793                 goto out;
1794         }
1795
1796         xip_enable(map, chip, cmd_adr);
1797  out:   put_chip(map, chip, cmd_adr);
1798         mutex_unlock(&chip->mutex);
1799         return ret;
1800 }
1801
1802 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1803                                 unsigned long count, loff_t to, size_t *retlen)
1804 {
1805         struct map_info *map = mtd->priv;
1806         struct cfi_private *cfi = map->fldrv_priv;
1807         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1808         int ret = 0;
1809         int chipnum;
1810         unsigned long ofs, vec_seek, i;
1811         size_t len = 0;
1812
1813         for (i = 0; i < count; i++)
1814                 len += vecs[i].iov_len;
1815
1816         *retlen = 0;
1817         if (!len)
1818                 return 0;
1819
1820         chipnum = to >> cfi->chipshift;
1821         ofs = to - (chipnum << cfi->chipshift);
1822         vec_seek = 0;
1823
1824         do {
1825                 /* We must not cross write block boundaries */
1826                 int size = wbufsize - (ofs & (wbufsize-1));
1827
1828                 if (size > len)
1829                         size = len;
1830                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1831                                       ofs, &vecs, &vec_seek, size);
1832                 if (ret)
1833                         return ret;
1834
1835                 ofs += size;
1836                 (*retlen) += size;
1837                 len -= size;
1838
1839                 if (ofs >> cfi->chipshift) {
1840                         chipnum++;
1841                         ofs = 0;
1842                         if (chipnum == cfi->numchips)
1843                                 return 0;
1844                 }
1845
1846                 /* Be nice and reschedule with the chip in a usable state for other
1847                    processes. */
1848                 cond_resched();
1849
1850         } while (len);
1851
1852         return 0;
1853 }
1854
1855 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1856                                        size_t len, size_t *retlen, const u_char *buf)
1857 {
1858         struct kvec vec;
1859
1860         vec.iov_base = (void *) buf;
1861         vec.iov_len = len;
1862
1863         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1864 }
1865
1866 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1867                                       unsigned long adr, int len, void *thunk)
1868 {
1869         struct cfi_private *cfi = map->fldrv_priv;
1870         map_word status;
1871         int retries = 3;
1872         int ret;
1873
1874         adr += chip->start;
1875
1876  retry:
1877         mutex_lock(&chip->mutex);
1878         ret = get_chip(map, chip, adr, FL_ERASING);
1879         if (ret) {
1880                 mutex_unlock(&chip->mutex);
1881                 return ret;
1882         }
1883
1884         XIP_INVAL_CACHED_RANGE(map, adr, len);
1885         ENABLE_VPP(map);
1886         xip_disable(map, chip, adr);
1887
1888         /* Clear the status register first */
1889         map_write(map, CMD(0x50), adr);
1890
1891         /* Now erase */
1892         map_write(map, CMD(0x20), adr);
1893         map_write(map, CMD(0xD0), adr);
1894         chip->state = FL_ERASING;
1895         chip->erase_suspended = 0;
1896
1897         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1898                                    adr, len,
1899                                    chip->erase_time,
1900                                    chip->erase_time_max);
1901         if (ret) {
1902                 map_write(map, CMD(0x70), adr);
1903                 chip->state = FL_STATUS;
1904                 xip_enable(map, chip, adr);
1905                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1906                 goto out;
1907         }
1908
1909         /* We've broken this before. It doesn't hurt to be safe */
1910         map_write(map, CMD(0x70), adr);
1911         chip->state = FL_STATUS;
1912         status = map_read(map, adr);
1913
1914         /* check for errors */
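        /* 0x3a = the 0x1a error mask plus SR.5 (erase error) */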
1915         if (map_word_bitsset(map, status, CMD(0x3a))) {
1916                 unsigned long chipstatus = MERGESTATUS(status);
1917
1918                 /* Reset the error bits */
1919                 map_write(map, CMD(0x50), adr);
1920                 map_write(map, CMD(0x70), adr);
1921                 xip_enable(map, chip, adr);
1922
1923                 if ((chipstatus & 0x30) == 0x30) {
1924                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1925                         ret = -EINVAL;
1926                 } else if (chipstatus & 0x02) {
1927                         /* Protection bit set */
1928                         ret = -EROFS;
1929                 } else if (chipstatus & 0x8) {
1930                         /* Voltage */
1931                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1932                         ret = -EIO;
1933                 } else if (chipstatus & 0x20 && retries--) {
1934                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1935                         put_chip(map, chip, adr);
1936                         mutex_unlock(&chip->mutex);
1937                         goto retry;
1938                 } else {
1939                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1940                         ret = -EIO;
1941                 }
1942
1943                 goto out;
1944         }
1945
1946         xip_enable(map, chip, adr);
1947  out:   put_chip(map, chip, adr);
1948         mutex_unlock(&chip->mutex);
1949         return ret;
1950 }
1951
1952 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1953 {
1954         unsigned long ofs, len;
1955         int ret;
1956
1957         ofs = instr->addr;
1958         len = instr->len;
1959
1960         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1961         if (ret)
1962                 return ret;
1963
1964         instr->state = MTD_ERASE_DONE;
1965         mtd_erase_callback(instr);
1966
1967         return 0;
1968 }
1969
1970 static void cfi_intelext_sync (struct mtd_info *mtd)
1971 {
1972         struct map_info *map = mtd->priv;
1973         struct cfi_private *cfi = map->fldrv_priv;
1974         int i;
1975         struct flchip *chip;
1976         int ret = 0;
1977
1978         for (i=0; !ret && i<cfi->numchips; i++) {
1979                 chip = &cfi->chips[i];
1980
1981                 mutex_lock(&chip->mutex);
1982                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1983
1984                 if (!ret) {
1985                         chip->oldstate = chip->state;
1986                         chip->state = FL_SYNCING;
1987                         /* No need to wake_up() on this state change -
1988                          * as the whole point is that nobody can do anything
1989                          * with the chip now anyway.
1990                          */
1991                 }
1992                 mutex_unlock(&chip->mutex);
1993         }
1994
1995         /* Unlock the chips again */
1996
1997         for (i--; i >= 0; i--) {
1998                 chip = &cfi->chips[i];
1999
2000                 mutex_lock(&chip->mutex);
2001
2002                 if (chip->state == FL_SYNCING) {
2003                         chip->state = chip->oldstate;
2004                         chip->oldstate = FL_READY;
2005                         wake_up(&chip->wq);
2006                 }
2007                 mutex_unlock(&chip->mutex);
2008         }
2009 }
2010
2011 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2012                                                 struct flchip *chip,
2013                                                 unsigned long adr,
2014                                                 int len, void *thunk)
2015 {
2016         struct cfi_private *cfi = map->fldrv_priv;
2017         int status, ofs_factor = cfi->interleave * cfi->device_type;
2018
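        /*
         * In read-identifier mode the block lock status is at word offset
         * 2 from the block base; ofs_factor scales that logical offset by
         * the interleave and device width into a map offset.
         */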
2019         adr += chip->start;
2020         xip_disable(map, chip, adr+(2*ofs_factor));
2021         map_write(map, CMD(0x90), adr+(2*ofs_factor));
2022         chip->state = FL_JEDEC_QUERY;
2023         status = cfi_read_query(map, adr+(2*ofs_factor));
2024         xip_enable(map, chip, 0);
2025         return status;
2026 }
2027
2028 #ifdef DEBUG_LOCK_BITS
2029 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2030                                                 struct flchip *chip,
2031                                                 unsigned long adr,
2032                                                 int len, void *thunk)
2033 {
2034         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2035                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2036         return 0;
2037 }
2038 #endif
2039
2040 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2041 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2042
2043 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2044                                        unsigned long adr, int len, void *thunk)
2045 {
2046         struct cfi_private *cfi = map->fldrv_priv;
2047         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2048         int udelay;
2049         int ret;
2050
2051         adr += chip->start;
2052
2053         mutex_lock(&chip->mutex);
2054         ret = get_chip(map, chip, adr, FL_LOCKING);
2055         if (ret) {
2056                 mutex_unlock(&chip->mutex);
2057                 return ret;
2058         }
2059
2060         ENABLE_VPP(map);
2061         xip_disable(map, chip, adr);
2062
2063         map_write(map, CMD(0x60), adr);
2064         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2065                 map_write(map, CMD(0x01), adr);
2066                 chip->state = FL_LOCKING;
2067         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2068                 map_write(map, CMD(0xD0), adr);
2069                 chip->state = FL_UNLOCKING;
2070         } else
2071                 BUG();
2072
2073         /*
2074          * If Instant Individual Block Locking is supported,
2075          * there is no need to delay.
2076          */
2077         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
2078
2079         ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
2080         if (ret) {
2081                 map_write(map, CMD(0x70), adr);
2082                 chip->state = FL_STATUS;
2083                 xip_enable(map, chip, adr);
2084                 printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
2085                 goto out;
2086         }
2087
2088         xip_enable(map, chip, adr);
2089 out:    put_chip(map, chip, adr);
2090         mutex_unlock(&chip->mutex);
2091         return ret;
2092 }
2093
2094 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2095 {
2096         int ret;
2097
2098 #ifdef DEBUG_LOCK_BITS
2099         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llX\n",
2100                __func__, (unsigned long long)ofs, (unsigned long long)len);
2101         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2102                 ofs, len, NULL);
2103 #endif
2104
2105         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2106                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2107
2108 #ifdef DEBUG_LOCK_BITS
2109         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2110                __func__, ret);
2111         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2112                 ofs, len, NULL);
2113 #endif
2114
2115         return ret;
2116 }
2117
2118 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2119 {
2120         int ret;
2121
2122 #ifdef DEBUG_LOCK_BITS
2123         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llX\n",
2124                __func__, (unsigned long long)ofs, (unsigned long long)len);
2125         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2126                 ofs, len, NULL);
2127 #endif
2128
2129         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2130                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2131
2132 #ifdef DEBUG_LOCK_BITS
2133         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2134                __func__, ret);
2135         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2136                 ofs, len, NULL);
2137 #endif
2138
2139         return ret;
2140 }
2141
2142 #ifdef CONFIG_MTD_OTP
2143
2144 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2145                         u_long data_offset, u_char *buf, u_int size,
2146                         u_long prot_offset, u_int groupno, u_int groupsize);
2147
2148 static int __xipram
2149 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2150             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2151 {
2152         struct cfi_private *cfi = map->fldrv_priv;
2153         int ret;
2154
2155         mutex_lock(&chip->mutex);
2156         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2157         if (ret) {
2158                 mutex_unlock(&chip->mutex);
2159                 return ret;
2160         }
2161
2162         /* let's ensure we're not reading back cached data from array mode */
2163         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2164
2165         xip_disable(map, chip, chip->start);
2166         if (chip->state != FL_JEDEC_QUERY) {
2167                 map_write(map, CMD(0x90), chip->start);
2168                 chip->state = FL_JEDEC_QUERY;
2169         }
2170         map_copy_from(map, buf, chip->start + offset, size);
2171         xip_enable(map, chip, chip->start);
2172
2173         /* then ensure we don't keep OTP data in the cache */
2174         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2175
2176         put_chip(map, chip, chip->start);
2177         mutex_unlock(&chip->mutex);
2178         return 0;
2179 }
2180
2181 static int
2182 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2183              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2184 {
2185         int ret;
2186
2187         while (size) {
2188                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2189                 int gap = offset - bus_ofs;
2190                 int n = min_t(int, size, map_bankwidth(map)-gap);
2191                 map_word datum = map_word_ff(map);
2192
2193                 datum = map_word_load_partial(map, datum, buf, gap, n);
2194                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2195                 if (ret)
2196                         return ret;
2197
2198                 offset += n;
2199                 buf += n;
2200                 size -= n;
2201         }
2202
2203         return 0;
2204 }
2205
2206 static int
2207 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2208             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2209 {
2210         struct cfi_private *cfi = map->fldrv_priv;
2211         map_word datum;
2212
2213         /* make sure area matches group boundaries */
2214         if (size != grpsz)
2215                 return -EXDEV;
2216
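        /*
         * Lock bits are active-low: clearing the group's bit in the
         * protection-lock word programs it from 1 to 0, and since
         * programming can only clear bits, the lock is permanent.
         */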
2217         datum = map_word_ff(map);
2218         datum = map_word_clr(map, datum, CMD(1 << grpno));
2219         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2220 }
2221
2222 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2223                                  size_t *retlen, u_char *buf,
2224                                  otp_op_t action, int user_regs)
2225 {
2226         struct map_info *map = mtd->priv;
2227         struct cfi_private *cfi = map->fldrv_priv;
2228         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2229         struct flchip *chip;
2230         struct cfi_intelext_otpinfo *otp;
2231         u_long devsize, reg_prot_offset, data_offset;
2232         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2233         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2234         int ret;
2235
2236         *retlen = 0;
2237
2238         /* Check that we actually have some OTP registers */
2239         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2240                 return -ENODATA;
2241
2242         /* we need real chips here, not virtual ones */
2243         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2244         chip_step = devsize >> cfi->chipshift;
2245         chip_num = 0;
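        /*
         * With interleaving, cfi->numchips counts virtual chips of
         * 1 << chipshift bytes each, but the OTP registers exist once per
         * physical device, so chip_step advances the walk one physical
         * device (set of interleaved chips) at a time.
         */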
2246
2247         /* Some chips have OTP located in the _top_ partition only.
2248            For example: Intel 28F256L18T (T means top-parameter device) */
2249         if (cfi->mfr == CFI_MFR_INTEL) {
2250                 switch (cfi->id) {
2251                 case 0x880b:
2252                 case 0x880c:
2253                 case 0x880d:
2254                         chip_num = chip_step - 1;
2255                 }
2256         }
2257
2258         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2259                 chip = &cfi->chips[chip_num];
2260                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2261
2262                 /* first OTP region */
2263                 field = 0;
2264                 reg_prot_offset = extp->ProtRegAddr;
2265                 reg_fact_groups = 1;
2266                 reg_fact_size = 1 << extp->FactProtRegSize;
2267                 reg_user_groups = 1;
2268                 reg_user_size = 1 << extp->UserProtRegSize;
2269
2270                 while (len > 0) {
2271                         /* flash geometry fixup */
2272                         data_offset = reg_prot_offset + 1;
2273                         data_offset *= cfi->interleave * cfi->device_type;
2274                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2275                         reg_fact_size *= cfi->interleave;
2276                         reg_user_size *= cfi->interleave;
2277
2278                         if (user_regs) {
2279                                 groups = reg_user_groups;
2280                                 groupsize = reg_user_size;
2281                                 /* skip over factory reg area */
2282                                 groupno = reg_fact_groups;
2283                                 data_offset += reg_fact_groups * reg_fact_size;
2284                         } else {
2285                                 groups = reg_fact_groups;
2286                                 groupsize = reg_fact_size;
2287                                 groupno = 0;
2288                         }
2289
2290                         while (len > 0 && groups > 0) {
2291                                 if (!action) {
2292                                         /*
2293                                          * Special case: if action is NULL
2294                                          * we fill buf with otp_info records.
2295                                          */
2296                                         struct otp_info *otpinfo;
2297                                         map_word lockword;
2298                                         if (len <= sizeof(struct otp_info)) /* avoid size_t underflow */
2299                                                 return -ENOSPC;
2300                                         len -= sizeof(struct otp_info);
2301                                         ret = do_otp_read(map, chip,
2302                                                           reg_prot_offset,
2303                                                           (u_char *)&lockword,
2304                                                           map_bankwidth(map),
2305                                                           0, 0,  0);
2306                                         if (ret)
2307                                                 return ret;
2308                                         otpinfo = (struct otp_info *)buf;
2309                                         otpinfo->start = from;
2310                                         otpinfo->length = groupsize;
2311                                         otpinfo->locked =
2312                                            !map_word_bitsset(map, lockword,
2313                                                              CMD(1 << groupno));
2314                                         from += groupsize;
2315                                         buf += sizeof(*otpinfo);
2316                                         *retlen += sizeof(*otpinfo);
2317                                 } else if (from >= groupsize) {
2318                                         from -= groupsize;
2319                                         data_offset += groupsize;
2320                                 } else {
2321                                         int size = groupsize;
2322                                         data_offset += from;
2323                                         size -= from;
2324                                         from = 0;
2325                                         if (size > len)
2326                                                 size = len;
2327                                         ret = action(map, chip, data_offset,
2328                                                      buf, size, reg_prot_offset,
2329                                                      groupno, groupsize);
2330                                         if (ret < 0)
2331                                                 return ret;
2332                                         buf += size;
2333                                         len -= size;
2334                                         *retlen += size;
2335                                         data_offset += size;
2336                                 }
2337                                 groupno++;
2338                                 groups--;
2339                         }
2340
2341                         /* next OTP region */
2342                         if (++field == extp->NumProtectionFields)
2343                                 break;
2344                         reg_prot_offset = otp->ProtRegAddr;
2345                         reg_fact_groups = otp->FactGroups;
2346                         reg_fact_size = 1 << otp->FactProtRegSize;
2347                         reg_user_groups = otp->UserGroups;
2348                         reg_user_size = 1 << otp->UserProtRegSize;
2349                         otp++;
2350                 }
2351         }
2352
2353         return 0;
2354 }
2355
2356 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2357                                            size_t len, size_t *retlen,
2358                                             u_char *buf)
2359 {
2360         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2361                                      buf, do_otp_read, 0);
2362 }
2363
2364 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2365                                            size_t len, size_t *retlen,
2366                                             u_char *buf)
2367 {
2368         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2369                                      buf, do_otp_read, 1);
2370 }
2371
2372 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2373                                             size_t len, size_t *retlen,
2374                                              u_char *buf)
2375 {
2376         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2377                                      buf, do_otp_write, 1);
2378 }
2379
2380 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2381                                            loff_t from, size_t len)
2382 {
2383         size_t retlen;
2384         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2385                                      NULL, do_otp_lock, 1);
2386 }
2387
2388 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2389                                            struct otp_info *buf, size_t len)
2390 {
2391         size_t retlen;
2392         int ret;
2393
2394         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2395         return ret ? : retlen;
2396 }
2397
2398 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2399                                            struct otp_info *buf, size_t len)
2400 {
2401         size_t retlen;
2402         int ret;
2403
2404         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2405         return ret ? : retlen;
2406 }
2407
2408 #endif
2409
2410 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2411 {
2412         struct mtd_erase_region_info *region;
2413         int block, status, i;
2414         unsigned long adr;
2415         size_t len;
2416
2417         for (i = 0; i < mtd->numeraseregions; i++) {
2418                 region = &mtd->eraseregions[i];
2419                 if (!region->lockmap)
2420                         continue;
2421
2422                 for (block = 0; block < region->numblocks; block++){
2423                         len = region->erasesize;
2424                         adr = region->offset + block * len;
2425
2426                         status = cfi_varsize_frob(mtd,
2427                                         do_getlockstatus_oneblock, adr, len, NULL);
2428                         if (status)
2429                                 set_bit(block, region->lockmap);
2430                         else
2431                                 clear_bit(block, region->lockmap);
2432                 }
2433         }
2434 }
2435
2436 static int cfi_intelext_suspend(struct mtd_info *mtd)
2437 {
2438         struct map_info *map = mtd->priv;
2439         struct cfi_private *cfi = map->fldrv_priv;
2440         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2441         int i;
2442         struct flchip *chip;
2443         int ret = 0;
2444
2445         if ((mtd->flags & MTD_POWERUP_LOCK)
2446             && extp && (extp->FeatureSupport & (1 << 5)))
2447                 cfi_intelext_save_locks(mtd);
2448
2449         for (i=0; !ret && i<cfi->numchips; i++) {
2450                 chip = &cfi->chips[i];
2451
2452                 mutex_lock(&chip->mutex);
2453
2454                 switch (chip->state) {
2455                 case FL_READY:
2456                 case FL_STATUS:
2457                 case FL_CFI_QUERY:
2458                 case FL_JEDEC_QUERY:
2459                         if (chip->oldstate == FL_READY) {
2460                                 /* place the chip in a known state before suspend */
2461                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2462                                 chip->oldstate = chip->state;
2463                                 chip->state = FL_PM_SUSPENDED;
2464                                 /* No need to wake_up() on this state change -
2465                                  * as the whole point is that nobody can do anything
2466                                  * with the chip now anyway.
2467                                  */
2468                         } else {
2469                                 /* There seems to be an operation pending. We must wait for it. */
2470                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2471                                 ret = -EAGAIN;
2472                         }
2473                         break;
2474                 default:
2475                         /* Should we actually wait? Once upon a time these routines weren't
2476                            allowed to. Or should we return -EAGAIN, because the upper layers
2477                            ought to have already shut down anything which was using the device
2478                            anyway? The latter for now. */
2479                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2480                         ret = -EAGAIN;
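                        /* deliberate fall through to the FL_PM_SUSPENDED break */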
2481                 case FL_PM_SUSPENDED:
2482                         break;
2483                 }
2484                 mutex_unlock(&chip->mutex);
2485         }
2486
2487         /* Unlock the chips again */
2488
2489         if (ret) {
2490                 for (i--; i >= 0; i--) {
2491                         chip = &cfi->chips[i];
2492
2493                         mutex_lock(&chip->mutex);
2494
2495                         if (chip->state == FL_PM_SUSPENDED) {
2496                                 /* No need to force it into a known state here,
2497                                    because we're returning failure, and it didn't
2498                                    get power cycled */
2499                                 chip->state = chip->oldstate;
2500                                 chip->oldstate = FL_READY;
2501                                 wake_up(&chip->wq);
2502                         }
2503                         mutex_unlock(&chip->mutex);
2504                 }
2505         }
2506
2507         return ret;
2508 }
2509
2510 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2511 {
2512         struct mtd_erase_region_info *region;
2513         int block, i;
2514         unsigned long adr;
2515         size_t len;
2516
2517         for (i = 0; i < mtd->numeraseregions; i++) {
2518                 region = &mtd->eraseregions[i];
2519                 if (!region->lockmap)
2520                         continue;
2521
2522                 for (block = 0; block < region->numblocks; block++) {
2523                         len = region->erasesize;
2524                         adr = region->offset + block * len;
2525
2526                         if (!test_bit(block, region->lockmap))
2527                                 cfi_intelext_unlock(mtd, adr, len);
2528                 }
2529         }
2530 }
2531
2532 static void cfi_intelext_resume(struct mtd_info *mtd)
2533 {
2534         struct map_info *map = mtd->priv;
2535         struct cfi_private *cfi = map->fldrv_priv;
2536         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2537         int i;
2538         struct flchip *chip;
2539
2540         for (i=0; i<cfi->numchips; i++) {
2541
2542                 chip = &cfi->chips[i];
2543
2544                 mutex_lock(&chip->mutex);
2545
2546                 /* Go to known state. Chip may have been power cycled */
2547                 if (chip->state == FL_PM_SUSPENDED) {
2548                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2549                         chip->oldstate = chip->state = FL_READY;
2550                         wake_up(&chip->wq);
2551                 }
2552
2553                 mutex_unlock(&chip->mutex);
2554         }
2555
2556         if ((mtd->flags & MTD_POWERUP_LOCK)
2557             && extp && (extp->FeatureSupport & (1 << 5)))
2558                 cfi_intelext_restore_locks(mtd);
2559 }
2560
2561 static int cfi_intelext_reset(struct mtd_info *mtd)
2562 {
2563         struct map_info *map = mtd->priv;
2564         struct cfi_private *cfi = map->fldrv_priv;
2565         int i, ret;
2566
2567         for (i=0; i < cfi->numchips; i++) {
2568                 struct flchip *chip = &cfi->chips[i];
2569
2570                 /* force the completion of any ongoing operation
2571                    and switch to array mode so any bootloader in
2572                    flash is accessible for soft reboot. */
2573                 mutex_lock(&chip->mutex);
2574                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2575                 if (!ret) {
2576                         map_write(map, CMD(0xff), chip->start);
2577                         chip->state = FL_SHUTDOWN;
2578                         put_chip(map, chip, chip->start);
2579                 }
2580                 mutex_unlock(&chip->mutex);
2581         }
2582
2583         return 0;
2584 }
2585
2586 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2587                                void *v)
2588 {
2589         struct mtd_info *mtd;
2590
2591         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2592         cfi_intelext_reset(mtd);
2593         return NOTIFY_DONE;
2594 }
2595
2596 static void cfi_intelext_destroy(struct mtd_info *mtd)
2597 {
2598         struct map_info *map = mtd->priv;
2599         struct cfi_private *cfi = map->fldrv_priv;
2600         struct mtd_erase_region_info *region;
2601         int i;
2602         cfi_intelext_reset(mtd);
2603         unregister_reboot_notifier(&mtd->reboot_notifier);
2604         kfree(cfi->cmdset_priv);
2605         kfree(cfi->cfiq);
2606         kfree(cfi->chips[0].priv);
2607         kfree(cfi);
2608         for (i = 0; i < mtd->numeraseregions; i++) {
2609                 region = &mtd->eraseregions[i];
2610                 kfree(region->lockmap);
2612         }
2613         kfree(mtd->eraseregions);
2614 }
2615
2616 MODULE_LICENSE("GPL");
2617 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2618 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2619 MODULE_ALIAS("cfi_cmdset_0003");
2620 MODULE_ALIAS("cfi_cmdset_0200");