/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
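        /*
         * The low 16 bits of an EraseRegionInfo word hold (number of
         * blocks - 1), so the 0x3e patched in above makes region 1
         * advertise 63 blocks.
         */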
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor ids and the JEDEC vendor IDs appear
         * to be common.  It is likely the device IDs are as
         * well.  This table picks all cases where we know
         * that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size-1];

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}
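
/*
 * A rough sketch of the variable-length 'extra' region parsed above
 * (extended query version 1.3+), as implied by the parsing code:
 *
 *   - (NumProtectionFields - 1) x struct cfi_intelext_otpinfo
 *   - burst read info: 2 bytes, the second being a byte count for a
 *     further variable-length blob
 *   - number of partition regions (1 byte)
 *   - partition region size field (2 bytes, CFI 1.4 only)
 *   - per region: one struct cfi_intelext_regioninfo followed by
 *     (NumBlockTypes - 1) x struct cfi_intelext_blockinfo
 *   - one struct cfi_intelext_programming_regioninfo (CFI 1.4 only)
 */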

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

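        /*
         * Per the CFI spec the typical word and buffer write timeouts
         * are encoded as log2 of a time in microseconds, while the
         * typical block erase timeout is log2 of a time in milliseconds;
         * hence 1 << n below for the write times but 1000 << n (in
         * usecs) for the erase time.  E.g. WordWriteTimeoutTyp == 4
         * means a typical word write takes about 16 us.
         */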
        for (i=0; i< cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

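        /*
         * Per the CFI spec each EraseRegionInfo word encodes the region
         * geometry: bits 31-16 give the block size in units of 256 bytes
         * and bits 15-0 give (number of blocks - 1).  For example,
         * 0x0100003f describes 64 blocks of 64 KiB each, before the
         * interleave scaling applied below.
         */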
        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if(mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
                        MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               MTD_PROGREGION_CTRLMODE_VALID(mtd),
                               MTD_PROGREGION_CTRLMODE_INVALID(mtd));
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
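                /*
                 * For example, a 16 MiB chip (chipshift == 24) reporting
                 * four hardware partitions gives partshift == 24 - __ffs(4)
                 * == 22, i.e. one 4 MiB virtual chip per partition.
                 */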
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
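        /* SR.7 (0x80) set means the WSM is ready; 0x01 is presumably the
           partition write status bit on multi-partitioned parts, hence
           the name status_PWS. */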
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
                /*
                 * OK. There is a possibility of contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                        spin_unlock(contender->mutex);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
                                       map->name, status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, int *chip_op_time )
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
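        /* Allow roughly 8x the typical operation time before giving up;
           a zero (unknown) typical time falls back to 500 ms. */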
        usec = *chip_op_time * 8;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, p_usec) \
        xip_wait_for_operation(map, chip, cmd_adr, p_usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                int *chip_op_time )
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int z, chip_state = chip->state;
        unsigned long timeo;

        spin_unlock(chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        if (*chip_op_time)
                cfi_udelay(*chip_op_time);
        spin_lock(chip->mutex);

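        /*
         * Convert the remaining budget to jiffies: up to 8x the typical
         * operation time (in usecs), but never less than half a second.
         */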
        timeo = *chip_op_time * 8 * HZ / 1000000;
        if (timeo < HZ/2)
                timeo = HZ/2;
        timeo += jiffies;

        z = 0;
        for (;;) {
                if (chip->state != chip_state) {
                        /* Someone's suspended the operation: sleep */
                        DECLARE_WAITQUEUE(wait, current);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        spin_lock(chip->mutex);
                        continue;
                }

                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        return -ETIME;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                z++;
                spin_unlock(chip->mutex);
                cfi_udelay(1);
                spin_lock(chip->mutex);
        }

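        /*
         * Adapt the stored typical delay: if the initial udelay was
         * already sufficient (no polling rounds), shave a microsecond
         * off it; if more than one polling round was needed, bump it up.
         */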
        if (!z) {
                if (!--(*chip_op_time))
                        *chip_op_time = 1;
        } else if (z > 1)
                ++(*chip_op_time);

        /* Done and happy. */
        chip->state = FL_STATUS;
        return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay) \
        ({ int __udelay = (udelay); \
           INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, &__udelay); })


static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;

        *mtdbuf = (void *)map->virt + from;
        *retlen = 0;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);
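        /*
         * For example, with 8 MiB chips (chipshift == 23) a request at
         * from == 0x900000 starts at offset 0x100000 within chip 1.
         */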

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                chipnum++;
        }
        return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        /* Now unlock the chip(s) POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                if (chipnum >= cfi->numchips)
                        break;
                chip = &cfi->chips[chipnum];

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if(chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
                        printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */
1218
1219                 put_chip(map, chip, chip->start);
1220                 spin_unlock(chip->mutex);
1221
1222                 len -= thislen;
1223                 ofs = 0;
1224                 chipnum++;
1225         }
1226 }
1227
1228 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1229 {
1230         unsigned long cmd_addr;
1231         struct cfi_private *cfi = map->fldrv_priv;
1232         int ret;
1233
1234         adr += chip->start;
1235
1236         /* Ensure cmd read/writes are aligned. */
1237         cmd_addr = adr & ~(map_bankwidth(map)-1);
1238
1239         spin_lock(chip->mutex);
1240         ret = get_chip(map, chip, cmd_addr, FL_READY);
1241         if (ret) {
1242                 spin_unlock(chip->mutex);
1243                 return ret;
1244         }
1245
1246         if (chip->state != FL_POINT && chip->state != FL_READY) {
1247                 map_write(map, CMD(0xff), cmd_addr);
1248
1249                 chip->state = FL_READY;
1250         }
1251
1252         map_copy_from(map, buf, adr, len);
1253
1254         put_chip(map, chip, cmd_addr);
1255
1256         spin_unlock(chip->mutex);
1257         return 0;
1258 }
1259
1260 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1261 {
1262         struct map_info *map = mtd->priv;
1263         struct cfi_private *cfi = map->fldrv_priv;
1264         unsigned long ofs;
1265         int chipnum;
1266         int ret = 0;
1267
1268         /* ofs: offset within the first chip at which the first read should start */
1269         chipnum = (from >> cfi->chipshift);
1270         ofs = from - (chipnum <<  cfi->chipshift);
1271
1272         *retlen = 0;
1273
1274         while (len) {
1275                 unsigned long thislen;
1276
1277                 if (chipnum >= cfi->numchips)
1278                         break;
1279
1280                 if ((len + ofs -1) >> cfi->chipshift)
1281                         thislen = (1<<cfi->chipshift) - ofs;
1282                 else
1283                         thislen = len;
1284
1285                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1286                 if (ret)
1287                         break;
1288
1289                 *retlen += thislen;
1290                 len -= thislen;
1291                 buf += thislen;
1292
1293                 ofs = 0;
1294                 chipnum++;
1295         }
1296         return ret;
1297 }
1298
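/*
 * Program a single bus word.  0x40 is the Intel word-program setup
 * command (0x41 on 0x0200-family chips, 0xc0 for OTP protection
 * registers); the data word follows, and the status register is then
 * polled and decoded for lock (SR.1), VPP (SR.3) and program (SR.4)
 * errors.
 */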
1299 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1300                                      unsigned long adr, map_word datum, int mode)
1301 {
1302         struct cfi_private *cfi = map->fldrv_priv;
1303         map_word status, write_cmd;
1304         int ret=0;
1305
1306         adr += chip->start;
1307
1308         switch (mode) {
1309         case FL_WRITING:
1310                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1311                 break;
1312         case FL_OTP_WRITE:
1313                 write_cmd = CMD(0xc0);
1314                 break;
1315         default:
1316                 return -EINVAL;
1317         }
1318
1319         spin_lock(chip->mutex);
1320         ret = get_chip(map, chip, adr, mode);
1321         if (ret) {
1322                 spin_unlock(chip->mutex);
1323                 return ret;
1324         }
1325
1326         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1327         ENABLE_VPP(map);
1328         xip_disable(map, chip, adr);
1329         map_write(map, write_cmd, adr);
1330         map_write(map, datum, adr);
1331         chip->state = mode;
1332
1333         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1334                                    adr, map_bankwidth(map),
1335                                    &chip->word_write_time);
1336         if (ret) {
1337                 xip_enable(map, chip, adr);
1338                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1339                 goto out;
1340         }
1341
1342         /* check for errors */
1343         status = map_read(map, adr);
1344         if (map_word_bitsset(map, status, CMD(0x1a))) {
1345                 unsigned long chipstatus = MERGESTATUS(status);
1346
1347                 /* reset status */
1348                 map_write(map, CMD(0x50), adr);
1349                 map_write(map, CMD(0x70), adr);
1350                 xip_enable(map, chip, adr);
1351
1352                 if (chipstatus & 0x02) {
1353                         ret = -EROFS;
1354                 } else if (chipstatus & 0x08) {
1355                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1356                         ret = -EIO;
1357                 } else {
1358                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1359                         ret = -EINVAL;
1360                 }
1361
1362                 goto out;
1363         }
1364
1365         xip_enable(map, chip, adr);
1366  out:   put_chip(map, chip, adr);
1367         spin_unlock(chip->mutex);
1368         return ret;
1369 }
1370
1371
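/*
 * Word-write an arbitrary range: unaligned head and tail bytes are
 * padded out to full bus words with 0xff (programming a 1 bit leaves
 * the cell unchanged), and the aligned middle is written word by word.
 */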
1372 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1373 {
1374         struct map_info *map = mtd->priv;
1375         struct cfi_private *cfi = map->fldrv_priv;
1376         int ret = 0;
1377         int chipnum;
1378         unsigned long ofs;
1379
1380         *retlen = 0;
1381         if (!len)
1382                 return 0;
1383
1384         chipnum = to >> cfi->chipshift;
1385         ofs = to  - (chipnum << cfi->chipshift);
1386
1387         /* If it's not bus-aligned, do a partial word write first */
1388         if (ofs & (map_bankwidth(map)-1)) {
1389                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1390                 int gap = ofs - bus_ofs;
1391                 int n;
1392                 map_word datum;
1393
1394                 n = min_t(int, len, map_bankwidth(map)-gap);
1395                 datum = map_word_ff(map);
1396                 datum = map_word_load_partial(map, datum, buf, gap, n);
1397
1398                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1399                                                bus_ofs, datum, FL_WRITING);
1400                 if (ret)
1401                         return ret;
1402
1403                 len -= n;
1404                 ofs += n;
1405                 buf += n;
1406                 (*retlen) += n;
1407
1408                 if (ofs >> cfi->chipshift) {
1409                         chipnum ++;
1410                         ofs = 0;
1411                         if (chipnum == cfi->numchips)
1412                                 return 0;
1413                 }
1414         }
1415
1416         while(len >= map_bankwidth(map)) {
1417                 map_word datum = map_word_load(map, buf);
1418
1419                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1420                                        ofs, datum, FL_WRITING);
1421                 if (ret)
1422                         return ret;
1423
1424                 ofs += map_bankwidth(map);
1425                 buf += map_bankwidth(map);
1426                 (*retlen) += map_bankwidth(map);
1427                 len -= map_bankwidth(map);
1428
1429                 if (ofs >> cfi->chipshift) {
1430                         chipnum ++;
1431                         ofs = 0;
1432                         if (chipnum == cfi->numchips)
1433                                 return 0;
1434                 }
1435         }
1436
1437         if (len & (map_bankwidth(map)-1)) {
1438                 map_word datum;
1439
1440                 datum = map_word_ff(map);
1441                 datum = map_word_load_partial(map, datum, buf, 0, len);
1442
1443                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1444                                        ofs, datum, FL_WRITING);
1445                 if (ret)
1446                         return ret;
1447
1448                 (*retlen) += len;
1449         }
1450
1451         return 0;
1452 }
1453
1454
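/*
 * Write up to one buffer's worth of data: 0xe8 (0xe9 on 0x0200-family
 * chips) requests the write buffer, the count programmed next is one
 * less than the number of bus words to follow, the data words are
 * streamed in from the kvecs, and 0xd0 confirms and starts the actual
 * programming.  Unaligned edges are padded with 0xff as in the
 * word-write path.
 */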
1455 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1456                                     unsigned long adr, const struct kvec **pvec,
1457                                     unsigned long *pvec_seek, int len)
1458 {
1459         struct cfi_private *cfi = map->fldrv_priv;
1460         map_word status, write_cmd, datum;
1461         unsigned long cmd_adr, initial_adr;
1462         int ret, wbufsize, word_gap, words, initial_len = len;
1463         const struct kvec *vec;
1464         unsigned long vec_seek;
1465
1466         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1467         adr += chip->start;
             initial_adr = adr;
1468         cmd_adr = adr & ~(wbufsize-1);
1469
1470         /* Let's determine this according to the interleave only once */
1471         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1472
1473         spin_lock(chip->mutex);
1474         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1475         if (ret) {
1476                 spin_unlock(chip->mutex);
1477                 return ret;
1478         }
1479
1480         XIP_INVAL_CACHED_RANGE(map, adr, len);
1481         ENABLE_VPP(map);
1482         xip_disable(map, chip, cmd_adr);
1483
1484         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1485            [...], the device will not accept any more Write to Buffer commands".
1486            So we must check here and reset those bits if they're set. Otherwise
1487            we're just pissing in the wind */
1488         if (chip->state != FL_STATUS) {
1489                 map_write(map, CMD(0x70), cmd_adr);
1490                 chip->state = FL_STATUS;
1491         }
1492         status = map_read(map, cmd_adr);
1493         if (map_word_bitsset(map, status, CMD(0x30))) {
1494                 xip_enable(map, chip, cmd_adr);
1495                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1496                 xip_disable(map, chip, cmd_adr);
1497                 map_write(map, CMD(0x50), cmd_adr);
1498                 map_write(map, CMD(0x70), cmd_adr);
1499         }
1500
1501         chip->state = FL_WRITING_TO_BUFFER;
1502         map_write(map, write_cmd, cmd_adr);
1503         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
1504         if (ret) {
1505                 /* Argh. Not ready for write to buffer */
1506                 map_word Xstatus = map_read(map, cmd_adr);
1507                 map_write(map, CMD(0x70), cmd_adr);
1508                 chip->state = FL_STATUS;
1509                 status = map_read(map, cmd_adr);
1510                 map_write(map, CMD(0x50), cmd_adr);
1511                 map_write(map, CMD(0x70), cmd_adr);
1512                 xip_enable(map, chip, cmd_adr);
1513                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1514                                 map->name, Xstatus.x[0], status.x[0]);
1515                 goto out;
1516         }
1517
1518         /* Figure out the number of words to write */
1519         word_gap = (-adr & (map_bankwidth(map)-1));
1520         words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1521         if (!word_gap) {
1522                 words--;
1523         } else {
1524                 word_gap = map_bankwidth(map) - word_gap;
1525                 adr -= word_gap;
1526                 datum = map_word_ff(map);
1527         }
1528
1529         /* Write length of data to come */
1530         map_write(map, CMD(words), cmd_adr);
1531
1532         /* Write data */
1533         vec = *pvec;
1534         vec_seek = *pvec_seek;
1535         do {
1536                 int n = map_bankwidth(map) - word_gap;
1537                 if (n > vec->iov_len - vec_seek)
1538                         n = vec->iov_len - vec_seek;
1539                 if (n > len)
1540                         n = len;
1541
1542                 if (!word_gap && len < map_bankwidth(map))
1543                         datum = map_word_ff(map);
1544
1545                 datum = map_word_load_partial(map, datum,
1546                                               vec->iov_base + vec_seek,
1547                                               word_gap, n);
1548
1549                 len -= n;
1550                 word_gap += n;
1551                 if (!len || word_gap == map_bankwidth(map)) {
1552                         map_write(map, datum, adr);
1553                         adr += map_bankwidth(map);
1554                         word_gap = 0;
1555                 }
1556
1557                 vec_seek += n;
1558                 if (vec_seek == vec->iov_len) {
1559                         vec++;
1560                         vec_seek = 0;
1561                 }
1562         } while (len);
1563         *pvec = vec;
1564         *pvec_seek = vec_seek;
1565
1566         /* GO GO GO */
1567         map_write(map, CMD(0xd0), cmd_adr);
1568         chip->state = FL_WRITING;
1569
1570         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1571                                    initial_adr, initial_len,
1572                                    &chip->buffer_write_time);
1573         if (ret) {
1574                 map_write(map, CMD(0x70), cmd_adr);
1575                 chip->state = FL_STATUS;
1576                 xip_enable(map, chip, cmd_adr);
1577                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1578                 goto out;
1579         }
1580
1581         /* check for errors */
1582         status = map_read(map, cmd_adr);
1583         if (map_word_bitsset(map, status, CMD(0x1a))) {
1584                 unsigned long chipstatus = MERGESTATUS(status);
1585
1586                 /* reset status */
1587                 map_write(map, CMD(0x50), cmd_adr);
1588                 map_write(map, CMD(0x70), cmd_adr);
1589                 xip_enable(map, chip, cmd_adr);
1590
1591                 if (chipstatus & 0x02) {
1592                         ret = -EROFS;
1593                 } else if (chipstatus & 0x08) {
1594                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1595                         ret = -EIO;
1596                 } else {
1597                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1598                         ret = -EINVAL;
1599                 }
1600
1601                 goto out;
1602         }
1603
1604         xip_enable(map, chip, cmd_adr);
1605  out:   put_chip(map, chip, cmd_adr);
1606         spin_unlock(chip->mutex);
1607         return ret;
1608 }
1609
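/*
 * Write a kvec array, splitting the stream so that no single
 * do_write_buffer() call crosses a write-buffer boundary.
 */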
1610 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1611                                 unsigned long count, loff_t to, size_t *retlen)
1612 {
1613         struct map_info *map = mtd->priv;
1614         struct cfi_private *cfi = map->fldrv_priv;
1615         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1616         int ret = 0;
1617         int chipnum;
1618         unsigned long ofs, vec_seek, i;
1619         size_t len = 0;
1620
1621         for (i = 0; i < count; i++)
1622                 len += vecs[i].iov_len;
1623
1624         *retlen = 0;
1625         if (!len)
1626                 return 0;
1627
1628         chipnum = to >> cfi->chipshift;
1629         ofs = to - (chipnum << cfi->chipshift);
1630         vec_seek = 0;
1631
1632         do {
1633                 /* We must not cross write block boundaries */
1634                 int size = wbufsize - (ofs & (wbufsize-1));
1635
1636                 if (size > len)
1637                         size = len;
1638                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1639                                       ofs, &vecs, &vec_seek, size);
1640                 if (ret)
1641                         return ret;
1642
1643                 ofs += size;
1644                 (*retlen) += size;
1645                 len -= size;
1646
1647                 if (ofs >> cfi->chipshift) {
1648                         chipnum ++;
1649                         ofs = 0;
1650                         if (chipnum == cfi->numchips)
1651                                 return 0;
1652                 }
1653
1654                 /* Be nice and reschedule with the chip in a usable state for other
1655                    processes. */
1656                 cond_resched();
1657
1658         } while (len);
1659
1660         return 0;
1661 }
1662
1663 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1664                                        size_t len, size_t *retlen, const u_char *buf)
1665 {
1666         struct kvec vec;
1667
1668         vec.iov_base = (void *) buf;
1669         vec.iov_len = len;
1670
1671         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1672 }
1673
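/*
 * Erase one block: clear the status register (0x50), issue the erase
 * setup/confirm pair (0x20, 0xd0), then wait.  An erase failure flagged
 * in the status register is retried up to three times.
 */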
1674 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1675                                       unsigned long adr, int len, void *thunk)
1676 {
1677         struct cfi_private *cfi = map->fldrv_priv;
1678         map_word status;
1679         int retries = 3;
1680         int ret;
1681
1682         adr += chip->start;
1683
1684  retry:
1685         spin_lock(chip->mutex);
1686         ret = get_chip(map, chip, adr, FL_ERASING);
1687         if (ret) {
1688                 spin_unlock(chip->mutex);
1689                 return ret;
1690         }
1691
1692         XIP_INVAL_CACHED_RANGE(map, adr, len);
1693         ENABLE_VPP(map);
1694         xip_disable(map, chip, adr);
1695
1696         /* Clear the status register first */
1697         map_write(map, CMD(0x50), adr);
1698
1699         /* Now erase */
1700         map_write(map, CMD(0x20), adr);
1701         map_write(map, CMD(0xD0), adr);
1702         chip->state = FL_ERASING;
1703         chip->erase_suspended = 0;
1704
1705         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1706                                    adr, len,
1707                                    &chip->erase_time);
1708         if (ret) {
1709                 map_write(map, CMD(0x70), adr);
1710                 chip->state = FL_STATUS;
1711                 xip_enable(map, chip, adr);
1712                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1713                 goto out;
1714         }
1715
1716         /* We've broken this before. It doesn't hurt to be safe */
1717         map_write(map, CMD(0x70), adr);
1718         chip->state = FL_STATUS;
1719         status = map_read(map, adr);
1720
1721         /* check for errors */
1722         if (map_word_bitsset(map, status, CMD(0x3a))) {
1723                 unsigned long chipstatus = MERGESTATUS(status);
1724
1725                 /* Reset the error bits */
1726                 map_write(map, CMD(0x50), adr);
1727                 map_write(map, CMD(0x70), adr);
1728                 xip_enable(map, chip, adr);
1729
1730                 if ((chipstatus & 0x30) == 0x30) {
1731                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1732                         ret = -EINVAL;
1733                 } else if (chipstatus & 0x02) {
1734                         /* Protection bit set */
1735                         ret = -EROFS;
1736                 } else if (chipstatus & 0x8) {
1737                         /* Voltage */
1738                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1739                         ret = -EIO;
1740                 } else if ((chipstatus & 0x20) && retries--) {
1741                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1742                         put_chip(map, chip, adr);
1743                         spin_unlock(chip->mutex);
1744                         goto retry;
1745                 } else {
1746                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1747                         ret = -EIO;
1748                 }
1749
1750                 goto out;
1751         }
1752
1753         xip_enable(map, chip, adr);
1754  out:   put_chip(map, chip, adr);
1755         spin_unlock(chip->mutex);
1756         return ret;
1757 }
1758
1759 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1760 {
1761         unsigned long ofs, len;
1762         int ret;
1763
1764         ofs = instr->addr;
1765         len = instr->len;
1766
1767         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1768         if (ret)
1769                 return ret;
1770
1771         instr->state = MTD_ERASE_DONE;
1772         mtd_erase_callback(instr);
1773
1774         return 0;
1775 }
1776
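/*
 * Wait for every chip to become idle and park it in FL_SYNCING so no
 * new operation can start, then wake them all up again.
 */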
1777 static void cfi_intelext_sync (struct mtd_info *mtd)
1778 {
1779         struct map_info *map = mtd->priv;
1780         struct cfi_private *cfi = map->fldrv_priv;
1781         int i;
1782         struct flchip *chip;
1783         int ret = 0;
1784
1785         for (i=0; !ret && i<cfi->numchips; i++) {
1786                 chip = &cfi->chips[i];
1787
1788                 spin_lock(chip->mutex);
1789                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1790
1791                 if (!ret) {
1792                         chip->oldstate = chip->state;
1793                         chip->state = FL_SYNCING;
1794                         /* No need to wake_up() on this state change -
1795                          * as the whole point is that nobody can do anything
1796                          * with the chip now anyway.
1797                          */
1798                 }
1799                 spin_unlock(chip->mutex);
1800         }
1801
1802         /* Unlock the chips again */
1803
1804         for (i--; i >=0; i--) {
1805                 chip = &cfi->chips[i];
1806
1807                 spin_lock(chip->mutex);
1808
1809                 if (chip->state == FL_SYNCING) {
1810                         chip->state = chip->oldstate;
1811                         chip->oldstate = FL_READY;
1812                         wake_up(&chip->wq);
1813                 }
1814                 spin_unlock(chip->mutex);
1815         }
1816 }
1817
1818 #ifdef DEBUG_LOCK_BITS
1819 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1820                                                 struct flchip *chip,
1821                                                 unsigned long adr,
1822                                                 int len, void *thunk)
1823 {
1824         struct cfi_private *cfi = map->fldrv_priv;
1825         int status, ofs_factor = cfi->interleave * cfi->device_type;
1826
1827         adr += chip->start;
1828         xip_disable(map, chip, adr+(2*ofs_factor));
1829         map_write(map, CMD(0x90), adr+(2*ofs_factor));
1830         chip->state = FL_JEDEC_QUERY;
1831         status = cfi_read_query(map, adr+(2*ofs_factor));
1832         xip_enable(map, chip, 0);
1833         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1834                adr, status);
1835         return 0;
1836 }
1837 #endif
1838
1839 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1840 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1841
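/*
 * Lock or unlock one block: 0x60 is the lock-setup command, followed by
 * 0x01 to set or 0xd0 to clear the block's lock bit.  Chips advertising
 * Instant Individual Block Locking (feature bit 5) complete at once;
 * the code below allows the others roughly one jiffy.
 */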
1842 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1843                                        unsigned long adr, int len, void *thunk)
1844 {
1845         struct cfi_private *cfi = map->fldrv_priv;
1846         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1847         int udelay;
1848         int ret;
1849
1850         adr += chip->start;
1851
1852         spin_lock(chip->mutex);
1853         ret = get_chip(map, chip, adr, FL_LOCKING);
1854         if (ret) {
1855                 spin_unlock(chip->mutex);
1856                 return ret;
1857         }
1858
1859         ENABLE_VPP(map);
1860         xip_disable(map, chip, adr);
1861
1862         map_write(map, CMD(0x60), adr);
1863         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1864                 map_write(map, CMD(0x01), adr);
1865                 chip->state = FL_LOCKING;
1866         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1867                 map_write(map, CMD(0xD0), adr);
1868                 chip->state = FL_UNLOCKING;
1869         } else
1870                 BUG();
1871
1872         /*
1873          * If Instant Individual Block Locking supported then no need
1874          * to delay.
1875          */
1876         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
1877
1878         ret = WAIT_TIMEOUT(map, chip, adr, udelay);
1879         if (ret) {
1880                 map_write(map, CMD(0x70), adr);
1881                 chip->state = FL_STATUS;
1882                 xip_enable(map, chip, adr);
1883                 printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
1884                 goto out;
1885         }
1886
1887         xip_enable(map, chip, adr);
1888 out:    put_chip(map, chip, adr);
1889         spin_unlock(chip->mutex);
1890         return ret;
1891 }
1892
1893 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1894 {
1895         int ret;
1896
1897 #ifdef DEBUG_LOCK_BITS
1898         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
1899                __FUNCTION__, ofs, len);
1900         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1901                 ofs, len, 0);
1902 #endif
1903
1904         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1905                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1906
1907 #ifdef DEBUG_LOCK_BITS
1908         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1909                __FUNCTION__, ret);
1910         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1911                 ofs, len, 0);
1912 #endif
1913
1914         return ret;
1915 }
1916
1917 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1918 {
1919         int ret;
1920
1921 #ifdef DEBUG_LOCK_BITS
1922         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
1923                __FUNCTION__, ofs, len);
1924         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1925                 ofs, len, 0);
1926 #endif
1927
1928         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1929                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1930
1931 #ifdef DEBUG_LOCK_BITS
1932         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1933                __FUNCTION__, ret);
1934         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1935                 ofs, len, 0);
1936 #endif
1937
1938         return ret;
1939 }
1940
1941 #ifdef CONFIG_MTD_OTP
1942
1943 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1944                         u_long data_offset, u_char *buf, u_int size,
1945                         u_long prot_offset, u_int groupno, u_int groupsize);
1946
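/*
 * OTP registers are visible only in JEDEC query mode (0x90); the cached
 * range is invalidated around the copy so neither stale array data nor
 * query-mode data is left behind.
 */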
1947 static int __xipram
1948 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
1949             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1950 {
1951         struct cfi_private *cfi = map->fldrv_priv;
1952         int ret;
1953
1954         spin_lock(chip->mutex);
1955         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
1956         if (ret) {
1957                 spin_unlock(chip->mutex);
1958                 return ret;
1959         }
1960
1961         /* let's ensure we're not reading back cached data from array mode */
1962         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1963
1964         xip_disable(map, chip, chip->start);
1965         if (chip->state != FL_JEDEC_QUERY) {
1966                 map_write(map, CMD(0x90), chip->start);
1967                 chip->state = FL_JEDEC_QUERY;
1968         }
1969         map_copy_from(map, buf, chip->start + offset, size);
1970         xip_enable(map, chip, chip->start);
1971
1972         /* then ensure we don't keep OTP data in the cache */
1973         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1974
1975         put_chip(map, chip, chip->start);
1976         spin_unlock(chip->mutex);
1977         return 0;
1978 }
1979
1980 static int
1981 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
1982              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1983 {
1984         int ret;
1985
1986         while (size) {
1987                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
1988                 int gap = offset - bus_ofs;
1989                 int n = min_t(int, size, map_bankwidth(map)-gap);
1990                 map_word datum = map_word_ff(map);
1991
1992                 datum = map_word_load_partial(map, datum, buf, gap, n);
1993                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
1994                 if (ret)
1995                         return ret;
1996
1997                 offset += n;
1998                 buf += n;
1999                 size -= n;
2000         }
2001
2002         return 0;
2003 }
2004
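/*
 * Locking an OTP group clears that group's bit in the protection lock
 * word.  Since programming can only clear bits, the lock is permanent.
 */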
2005 static int
2006 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2007             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2008 {
2009         struct cfi_private *cfi = map->fldrv_priv;
2010         map_word datum;
2011
2012         /* make sure area matches group boundaries */
2013         if (size != grpsz)
2014                 return -EXDEV;
2015
2016         datum = map_word_ff(map);
2017         datum = map_word_clr(map, datum, CMD(1 << grpno));
2018         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2019 }
2020
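/*
 * Walk the factory or user protection register groups described by the
 * Intel extended query table, applying `action' to every group that
 * intersects the requested range.  A NULL action instead fills `buf'
 * with otp_info records describing the groups.
 */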
2021 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2022                                  size_t *retlen, u_char *buf,
2023                                  otp_op_t action, int user_regs)
2024 {
2025         struct map_info *map = mtd->priv;
2026         struct cfi_private *cfi = map->fldrv_priv;
2027         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2028         struct flchip *chip;
2029         struct cfi_intelext_otpinfo *otp;
2030         u_long devsize, reg_prot_offset, data_offset;
2031         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2032         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2033         int ret;
2034
2035         *retlen = 0;
2036
2037         /* Check that we actually have some OTP registers */
2038         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2039                 return -ENODATA;
2040
2041         /* we need real chips here, not virtual ones */
2042         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2043         chip_step = devsize >> cfi->chipshift;
2044         chip_num = 0;
2045
2046         /* Some chips have OTP located in the _top_ partition only.
2047            For example: Intel 28F256L18T (T means top-parameter device) */
2048         if (cfi->mfr == MANUFACTURER_INTEL) {
2049                 switch (cfi->id) {
2050                 case 0x880b:
2051                 case 0x880c:
2052                 case 0x880d:
2053                         chip_num = chip_step - 1;
2054                 }
2055         }
2056
2057         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2058                 chip = &cfi->chips[chip_num];
2059                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2060
2061                 /* first OTP region */
2062                 field = 0;
2063                 reg_prot_offset = extp->ProtRegAddr;
2064                 reg_fact_groups = 1;
2065                 reg_fact_size = 1 << extp->FactProtRegSize;
2066                 reg_user_groups = 1;
2067                 reg_user_size = 1 << extp->UserProtRegSize;
2068
2069                 while (len > 0) {
2070                         /* flash geometry fixup */
2071                         data_offset = reg_prot_offset + 1;
2072                         data_offset *= cfi->interleave * cfi->device_type;
2073                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2074                         reg_fact_size *= cfi->interleave;
2075                         reg_user_size *= cfi->interleave;
2076
2077                         if (user_regs) {
2078                                 groups = reg_user_groups;
2079                                 groupsize = reg_user_size;
2080                                 /* skip over factory reg area */
2081                                 groupno = reg_fact_groups;
2082                                 data_offset += reg_fact_groups * reg_fact_size;
2083                         } else {
2084                                 groups = reg_fact_groups;
2085                                 groupsize = reg_fact_size;
2086                                 groupno = 0;
2087                         }
2088
2089                         while (len > 0 && groups > 0) {
2090                                 if (!action) {
2091                                         /*
2092                                          * Special case: if action is NULL
2093                                          * we fill buf with otp_info records.
2094                                          */
2095                                         struct otp_info *otpinfo;
2096                                         map_word lockword;
2097                                         if (len <= sizeof(struct otp_info))
2098                                                 return -ENOSPC;
2099                                         len -= sizeof(struct otp_info);
2100                                         ret = do_otp_read(map, chip,
2101                                                           reg_prot_offset,
2102                                                           (u_char *)&lockword,
2103                                                           map_bankwidth(map),
2104                                                           0, 0,  0);
2105                                         if (ret)
2106                                                 return ret;
2107                                         otpinfo = (struct otp_info *)buf;
2108                                         otpinfo->start = from;
2109                                         otpinfo->length = groupsize;
2110                                         otpinfo->locked =
2111                                            !map_word_bitsset(map, lockword,
2112                                                              CMD(1 << groupno));
2113                                         from += groupsize;
2114                                         buf += sizeof(*otpinfo);
2115                                         *retlen += sizeof(*otpinfo);
2116                                 } else if (from >= groupsize) {
2117                                         from -= groupsize;
2118                                         data_offset += groupsize;
2119                                 } else {
2120                                         int size = groupsize;
2121                                         data_offset += from;
2122                                         size -= from;
2123                                         from = 0;
2124                                         if (size > len)
2125                                                 size = len;
2126                                         ret = action(map, chip, data_offset,
2127                                                      buf, size, reg_prot_offset,
2128                                                      groupno, groupsize);
2129                                         if (ret < 0)
2130                                                 return ret;
2131                                         buf += size;
2132                                         len -= size;
2133                                         *retlen += size;
2134                                         data_offset += size;
2135                                 }
2136                                 groupno++;
2137                                 groups--;
2138                         }
2139
2140                         /* next OTP region */
2141                         if (++field == extp->NumProtectionFields)
2142                                 break;
2143                         reg_prot_offset = otp->ProtRegAddr;
2144                         reg_fact_groups = otp->FactGroups;
2145                         reg_fact_size = 1 << otp->FactProtRegSize;
2146                         reg_user_groups = otp->UserGroups;
2147                         reg_user_size = 1 << otp->UserProtRegSize;
2148                         otp++;
2149                 }
2150         }
2151
2152         return 0;
2153 }
2154
2155 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2156                                            size_t len, size_t *retlen,
2157                                             u_char *buf)
2158 {
2159         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2160                                      buf, do_otp_read, 0);
2161 }
2162
2163 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2164                                            size_t len, size_t *retlen,
2165                                             u_char *buf)
2166 {
2167         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2168                                      buf, do_otp_read, 1);
2169 }
2170
2171 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2172                                             size_t len, size_t *retlen,
2173                                              u_char *buf)
2174 {
2175         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2176                                      buf, do_otp_write, 1);
2177 }
2178
2179 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2180                                            loff_t from, size_t len)
2181 {
2182         size_t retlen;
2183         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2184                                      NULL, do_otp_lock, 1);
2185 }
2186
2187 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2188                                            struct otp_info *buf, size_t len)
2189 {
2190         size_t retlen;
2191         int ret;
2192
2193         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2194         return ret ? : retlen;
2195 }
2196
2197 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2198                                            struct otp_info *buf, size_t len)
2199 {
2200         size_t retlen;
2201         int ret;
2202
2203         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2204         return ret ? : retlen;
2205 }
2206
2207 #endif
2208
2209 static int cfi_intelext_suspend(struct mtd_info *mtd)
2210 {
2211         struct map_info *map = mtd->priv;
2212         struct cfi_private *cfi = map->fldrv_priv;
2213         int i;
2214         struct flchip *chip;
2215         int ret = 0;
2216
2217         for (i=0; !ret && i<cfi->numchips; i++) {
2218                 chip = &cfi->chips[i];
2219
2220                 spin_lock(chip->mutex);
2221
2222                 switch (chip->state) {
2223                 case FL_READY:
2224                 case FL_STATUS:
2225                 case FL_CFI_QUERY:
2226                 case FL_JEDEC_QUERY:
2227                         if (chip->oldstate == FL_READY) {
2228                                 chip->oldstate = chip->state;
2229                                 chip->state = FL_PM_SUSPENDED;
2230                                 /* No need to wake_up() on this state change -
2231                                  * as the whole point is that nobody can do anything
2232                                  * with the chip now anyway.
2233                                  */
2234                         } else {
2235                                 /* There seems to be an operation pending. We must wait for it. */
2236                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2237                                 ret = -EAGAIN;
2238                         }
2239                         break;
2240                 default:
2241                         /* Should we actually wait? Once upon a time these routines weren't
2242                            allowed to. Or should we return -EAGAIN, because the upper layers
2243                            ought to have already shut down anything which was using the device
2244                            anyway? The latter for now. */
2245                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2246                         ret = -EAGAIN;
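                        /* intentional fall through */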
2247                 case FL_PM_SUSPENDED:
2248                         break;
2249                 }
2250                 spin_unlock(chip->mutex);
2251         }
2252
2253         /* Unlock the chips again */
2254
2255         if (ret) {
2256                 for (i--; i >=0; i--) {
2257                         chip = &cfi->chips[i];
2258
2259                         spin_lock(chip->mutex);
2260
2261                         if (chip->state == FL_PM_SUSPENDED) {
2262                                 /* No need to force it into a known state here,
2263                                    because we're returning failure, and it didn't
2264                                    get power cycled */
2265                                 chip->state = chip->oldstate;
2266                                 chip->oldstate = FL_READY;
2267                                 wake_up(&chip->wq);
2268                         }
2269                         spin_unlock(chip->mutex);
2270                 }
2271         }
2272
2273         return ret;
2274 }
2275
2276 static void cfi_intelext_resume(struct mtd_info *mtd)
2277 {
2278         struct map_info *map = mtd->priv;
2279         struct cfi_private *cfi = map->fldrv_priv;
2280         int i;
2281         struct flchip *chip;
2282
2283         for (i=0; i<cfi->numchips; i++) {
2284
2285                 chip = &cfi->chips[i];
2286
2287                 spin_lock(chip->mutex);
2288
2289                 /* Go to known state. Chip may have been power cycled */
2290                 if (chip->state == FL_PM_SUSPENDED) {
2291                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2292                         chip->oldstate = chip->state = FL_READY;
2293                         wake_up(&chip->wq);
2294                 }
2295
2296                 spin_unlock(chip->mutex);
2297         }
2298 }
2299
2300 static int cfi_intelext_reset(struct mtd_info *mtd)
2301 {
2302         struct map_info *map = mtd->priv;
2303         struct cfi_private *cfi = map->fldrv_priv;
2304         int i, ret;
2305
2306         for (i=0; i < cfi->numchips; i++) {
2307                 struct flchip *chip = &cfi->chips[i];
2308
2309                 /* force the completion of any ongoing operation
2310                    and switch to array mode so any bootloader in
2311                    flash is accessible for soft reboot. */
2312                 spin_lock(chip->mutex);
2313                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2314                 if (!ret) {
2315                         map_write(map, CMD(0xff), chip->start);
2316                         chip->state = FL_READY;
2317                 }
2318                 spin_unlock(chip->mutex);
2319         }
2320
2321         return 0;
2322 }
2323
2324 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2325                                void *v)
2326 {
2327         struct mtd_info *mtd;
2328
2329         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2330         cfi_intelext_reset(mtd);
2331         return NOTIFY_DONE;
2332 }
2333
2334 static void cfi_intelext_destroy(struct mtd_info *mtd)
2335 {
2336         struct map_info *map = mtd->priv;
2337         struct cfi_private *cfi = map->fldrv_priv;
2338         cfi_intelext_reset(mtd);
2339         unregister_reboot_notifier(&mtd->reboot_notifier);
2340         kfree(cfi->cmdset_priv);
2341         kfree(cfi->cfiq);
2342         kfree(cfi->chips[0].priv);
2343         kfree(cfi);
2344         kfree(mtd->eraseregions);
2345 }
2346
2347 MODULE_LICENSE("GPL");
2348 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2349 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2350 MODULE_ALIAS("cfi_cmdset_0003");
2351 MODULE_ALIAS("cfi_cmdset_0200");