CHIPS: Fix potential starvation in cfi_cmdset_0001
pandora-kernel.git: drivers/mtd/chips/cfi_cmdset_0001.c
1 /*
2  * Common Flash Interface support:
3  *   Intel Extended Vendor Command Set (ID 0x0001)
4  *
5  * (C) 2000 Red Hat. GPL'd
6  *
7  * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
8  *
9  *
10  * 10/10/2000   Nicolas Pitre <nico@cam.org>
11  *      - completely revamped method functions so they are aware and
12  *        independent of the flash geometry (buswidth, interleave, etc.)
13  *      - scalability vs code size is completely set at compile-time
14  *        (see include/linux/mtd/cfi.h for selection)
15  *      - optimized write buffer method
16  * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17  *      - reworked lock/unlock/erase support for var size flash
18  */
19
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
25 #include <asm/io.h>
26 #include <asm/byteorder.h>
27
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/reboot.h>
33 #include <linux/mtd/xip.h>
34 #include <linux/mtd/map.h>
35 #include <linux/mtd/mtd.h>
36 #include <linux/mtd/compatmac.h>
37 #include <linux/mtd/cfi.h>
38
39 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
40 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
41
42 /* debugging, turns off buffer write mode if set to 1 */
43 #define FORCE_WORD_WRITE 0
44
45 #define MANUFACTURER_INTEL      0x0089
46 #define I82802AB        0x00ad
47 #define I82802AC        0x00ac
48 #define MANUFACTURER_ST         0x0020
49 #define M50LPW080       0x002F
50
51 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
52 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
53 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
54 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
55 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
56 static void cfi_intelext_sync (struct mtd_info *);
57 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
58 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
59 #ifdef CONFIG_MTD_OTP
60 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
61 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
62 static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
63 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
64 static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
65                                             struct otp_info *, size_t);
66 static int cfi_intelext_get_user_prot_info (struct mtd_info *,
67                                             struct otp_info *, size_t);
68 #endif
69 static int cfi_intelext_suspend (struct mtd_info *);
70 static void cfi_intelext_resume (struct mtd_info *);
71 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
72
73 static void cfi_intelext_destroy(struct mtd_info *);
74
75 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
76
77 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
78 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
79
80 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
81                      size_t *retlen, u_char **mtdbuf);
82 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
83                         size_t len);
84
85 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
86 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
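/*
 * fwh_lock.h supplies fixup_use_fwh_lock() for the firmware-hub style
 * parts listed in jedec_fixup_table below; it is included here, after
 * the get_chip()/put_chip() prototypes, because its inline helpers
 * call them.
 */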
87 #include "fwh_lock.h"
88
89
90
91 /*
92  *  *********** SETUP AND PROBE BITS  ***********
93  */
94
95 static struct mtd_chip_driver cfi_intelext_chipdrv = {
96         .probe          = NULL, /* Not usable directly */
97         .destroy        = cfi_intelext_destroy,
98         .name           = "cfi_cmdset_0001",
99         .module         = THIS_MODULE
100 };
101
102 /* #define DEBUG_LOCK_BITS */
103 /* #define DEBUG_CFI_FEATURES */
104
105 #ifdef DEBUG_CFI_FEATURES
106 static void cfi_tell_features(struct cfi_pri_intelext *extp)
107 {
108         int i;
109         printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
110         printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
111         printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
112         printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
113         printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
114         printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
115         printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
116         printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
117         printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
118         printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
119         printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
120         printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
121         printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
122         for (i=11; i<32; i++) {
123                 if (extp->FeatureSupport & (1<<i))
124                         printk("     - Unknown Bit %X:      supported\n", i);
125         }
126
127         printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
128         printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
129         for (i=1; i<8; i++) {
130                 if (extp->SuspendCmdSupport & (1<<i))
131                         printk("     - Unknown Bit %X:               supported\n", i);
132         }
133
134         printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
135         printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
136         printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
137         for (i=2; i<3; i++) {
138                 if (extp->BlkStatusRegMask & (1<<i))
139                         printk("     - Unknown Bit %X Active: yes\n",i);
140         }
141         printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
142         printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
143         for (i=6; i<16; i++) {
144                 if (extp->BlkStatusRegMask & (1<<i))
145                         printk("     - Unknown Bit %X Active: yes\n",i);
146         }
147
148         printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
149                extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
150         if (extp->VppOptimal)
151                 printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
152                        extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
153 }
154 #endif
155
156 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
157 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
158 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
159 {
160         struct map_info *map = mtd->priv;
161         struct cfi_private *cfi = map->fldrv_priv;
162         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
163
164         printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
165                             "erase on write disabled.\n");
166         extp->SuspendCmdSupport &= ~1;
167 }
168 #endif
169
170 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
171 static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
172 {
173         struct map_info *map = mtd->priv;
174         struct cfi_private *cfi = map->fldrv_priv;
175         struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
176
177         if (cfip && (cfip->FeatureSupport&4)) {
178                 cfip->FeatureSupport &= ~4;
179                 printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
180         }
181 }
182 #endif
183
184 static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
185 {
186         struct map_info *map = mtd->priv;
187         struct cfi_private *cfi = map->fldrv_priv;
188
189         cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
190         cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
191 }
192
193 static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
194 {
195         struct map_info *map = mtd->priv;
196         struct cfi_private *cfi = map->fldrv_priv;
197
198         /* Note this is done after the region info is endian swapped */
199         cfi->cfiq->EraseRegionInfo[1] =
200                 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
201 }
202
203 static void fixup_use_point(struct mtd_info *mtd, void *param)
204 {
205         struct map_info *map = mtd->priv;
206         if (!mtd->point && map_is_linear(map)) {
207                 mtd->point   = cfi_intelext_point;
208                 mtd->unpoint = cfi_intelext_unpoint;
209         }
210 }
211
212 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
213 {
214         struct map_info *map = mtd->priv;
215         struct cfi_private *cfi = map->fldrv_priv;
216         if (cfi->cfiq->BufWriteTimeoutTyp) {
217                 printk(KERN_INFO "Using buffer write method\n");
218                 mtd->write = cfi_intelext_write_buffers;
219                 mtd->writev = cfi_intelext_writev;
220         }
221 }
222
223 static struct cfi_fixup cfi_fixup_table[] = {
224 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
225         { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
226 #endif
227 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
228         { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
229 #endif
230 #if !FORCE_WORD_WRITE
231         { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
232 #endif
233         { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
234         { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
235         { 0, 0, NULL, NULL }
236 };
237
238 static struct cfi_fixup jedec_fixup_table[] = {
239         { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
240         { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
241         { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
242         { 0, 0, NULL, NULL }
243 };
244 static struct cfi_fixup fixup_table[] = {
245         /* The CFI vendor IDs and the JEDEC vendor IDs appear
246          * to be common.  It looks like the device IDs are as
247          * well.  This table picks all the cases where we know
248          * that is the case.
249          */
250         { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
251         { 0, 0, NULL, NULL }
252 };
253
254 static inline struct cfi_pri_intelext *
255 read_pri_intelext(struct map_info *map, __u16 adr)
256 {
257         struct cfi_pri_intelext *extp;
258         unsigned int extp_size = sizeof(*extp);
259
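        /*
         * The extended query table contains variable-length fields, so
         * it is read in passes: parse as far as the current buffer
         * allows, and if a field would run past the end, enlarge the
         * buffer and start over (bounded by the 4096-byte sanity check
         * below).
         */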
260  again:
261         extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
262         if (!extp)
263                 return NULL;
264
265         if (extp->MajorVersion != '1' ||
266             (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
267                 printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
268                        "version %c.%c.\n",  extp->MajorVersion,
269                        extp->MinorVersion);
270                 kfree(extp);
271                 return NULL;
272         }
273
274         /* Do some byteswapping if necessary */
275         extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
276         extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
277         extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
278
279         if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
280                 unsigned int extra_size = 0;
281                 int nb_parts, i;
282
283                 /* Protection Register info */
284                 extra_size += (extp->NumProtectionFields - 1) *
285                               sizeof(struct cfi_intelext_otpinfo);
286
287                 /* Burst Read info */
288                 extra_size += 2;
289                 if (extp_size < sizeof(*extp) + extra_size)
290                         goto need_more;
291                 extra_size += extp->extra[extra_size-1];
292
293                 /* Number of hardware-partitions */
294                 extra_size += 1;
295                 if (extp_size < sizeof(*extp) + extra_size)
296                         goto need_more;
297                 nb_parts = extp->extra[extra_size - 1];
298
299                 /* skip the sizeof(partregion) field in CFI 1.4 */
300                 if (extp->MinorVersion >= '4')
301                         extra_size += 2;
302
303                 for (i = 0; i < nb_parts; i++) {
304                         struct cfi_intelext_regioninfo *rinfo;
305                         rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
306                         extra_size += sizeof(*rinfo);
307                         if (extp_size < sizeof(*extp) + extra_size)
308                                 goto need_more;
309                         rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
310                         extra_size += (rinfo->NumBlockTypes - 1)
311                                       * sizeof(struct cfi_intelext_blockinfo);
312                 }
313
314                 if (extp->MinorVersion >= '4')
315                         extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
316
317                 if (extp_size < sizeof(*extp) + extra_size) {
318                         need_more:
319                         extp_size = sizeof(*extp) + extra_size;
320                         kfree(extp);
321                         if (extp_size > 4096) {
322                                 printk(KERN_ERR
323                                         "%s: cfi_pri_intelext is too fat\n",
324                                         __FUNCTION__);
325                                 return NULL;
326                         }
327                         goto again;
328                 }
329         }
330
331         return extp;
332 }
333
334 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
335 {
336         struct cfi_private *cfi = map->fldrv_priv;
337         struct mtd_info *mtd;
338         int i;
339
340         mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
341         if (!mtd) {
342                 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
343                 return NULL;
344         }
345         memset(mtd, 0, sizeof(*mtd));
346         mtd->priv = map;
347         mtd->type = MTD_NORFLASH;
348
349         /* Fill in the default mtd operations */
350         mtd->erase   = cfi_intelext_erase_varsize;
351         mtd->read    = cfi_intelext_read;
352         mtd->write   = cfi_intelext_write_words;
353         mtd->sync    = cfi_intelext_sync;
354         mtd->lock    = cfi_intelext_lock;
355         mtd->unlock  = cfi_intelext_unlock;
356         mtd->suspend = cfi_intelext_suspend;
357         mtd->resume  = cfi_intelext_resume;
358         mtd->flags   = MTD_CAP_NORFLASH;
359         mtd->name    = map->name;
360
361         mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
362
363         if (cfi->cfi_mode == CFI_MODE_CFI) {
364                 /*
365                  * It's a real CFI chip, not one for which the probe
366                  * routine faked a CFI structure. So we read the feature
367                  * table from it.
368                  */
369                 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
370                 struct cfi_pri_intelext *extp;
371
372                 extp = read_pri_intelext(map, adr);
373                 if (!extp) {
374                         kfree(mtd);
375                         return NULL;
376                 }
377
378                 /* Install our own private info structure */
379                 cfi->cmdset_priv = extp;
380
381                 cfi_fixup(mtd, cfi_fixup_table);
382
383 #ifdef DEBUG_CFI_FEATURES
384                 /* Tell the user about it in lots of lovely detail */
385                 cfi_tell_features(extp);
386 #endif
387
388                 if(extp->SuspendCmdSupport & 1) {
389                         printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
390                 }
391         }
392         else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
393                 /* Apply jedec specific fixups */
394                 cfi_fixup(mtd, jedec_fixup_table);
395         }
396         /* Apply generic fixups */
397         cfi_fixup(mtd, fixup_table);
398
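        /*
         * CFI reports typical program/erase times as log2 values
         * (microseconds for word and buffer writes, milliseconds for
         * block erase); store them in linear form for the timeout
         * heuristics used by the write and erase paths.
         */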
399         for (i=0; i< cfi->numchips; i++) {
400                 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
401                 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
402                 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
403                 cfi->chips[i].ref_point_counter = 0;
404                 init_waitqueue_head(&(cfi->chips[i].wq));
405         }
406
407         map->fldrv = &cfi_intelext_chipdrv;
408
409         return cfi_intelext_setup(mtd);
410 }
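/*
 * Command sets 0x0003 and 0x0200 are variants of the Intel command set
 * and are close enough to be handled by the same code.
 */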
411 struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
412 struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
413 EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
414 EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
415 EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
416
417 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
418 {
419         struct map_info *map = mtd->priv;
420         struct cfi_private *cfi = map->fldrv_priv;
421         unsigned long offset = 0;
422         int i,j;
423         unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
424
425         //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
426
427         mtd->size = devsize * cfi->numchips;
428
429         mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
430         mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
431                         * mtd->numeraseregions, GFP_KERNEL);
432         if (!mtd->eraseregions) {
433                 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
434                 goto setup_err;
435         }
436
437         for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
438                 unsigned long ernum, ersize;
439                 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
440                 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
441
442                 if (mtd->erasesize < ersize) {
443                         mtd->erasesize = ersize;
444                 }
445                 for (j=0; j<cfi->numchips; j++) {
446                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
447                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
448                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
449                 }
450                 offset += (ersize * ernum);
451         }
452
453         if (offset != devsize) {
454                 /* Argh */
455                 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
456                 goto setup_err;
457         }
458
459         for (i=0; i<mtd->numeraseregions;i++){
460                 printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
461                        i,mtd->eraseregions[i].offset,
462                        mtd->eraseregions[i].erasesize,
463                        mtd->eraseregions[i].numblocks);
464         }
465
466 #ifdef CONFIG_MTD_OTP
467         mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
468         mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
469         mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
470         mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
471         mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
472         mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
473 #endif
474
475         /* This function has the potential to distort reality a bit
476            and therefore should be called last. */
477         if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
478                 goto setup_err;
479
480         __module_get(THIS_MODULE);
481         register_reboot_notifier(&mtd->reboot_notifier);
482         return mtd;
483
484  setup_err:
485         if(mtd) {
486                 kfree(mtd->eraseregions);
487                 kfree(mtd);
488         }
489         kfree(cfi->cmdset_priv);
490         return NULL;
491 }
492
493 static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
494                                         struct cfi_private **pcfi)
495 {
496         struct map_info *map = mtd->priv;
497         struct cfi_private *cfi = *pcfi;
498         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
499
500         /*
501          * Probing of multi-partition flash chips.
502          *
503          * To support multiple partitions when available, we simply arrange
504          * for each of them to have their own flchip structure even if they
505          * are on the same physical chip.  This means completely recreating
506          * a new cfi_private structure right here which is a blatant code
507          * layering violation, but this is still the least intrusive
508          * arrangement at this point. This can be rearranged in the future
509          * if someone feels motivated enough.  --nico
510          */
511         if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
512             && extp->FeatureSupport & (1 << 9)) {
513                 struct cfi_private *newcfi;
514                 struct flchip *chip;
515                 struct flchip_shared *shared;
516                 int offs, numregions, numparts, partshift, numvirtchips, i, j;
517
518                 /* Protection Register info */
519                 offs = (extp->NumProtectionFields - 1) *
520                        sizeof(struct cfi_intelext_otpinfo);
521
522                 /* Burst Read info */
523                 offs += extp->extra[offs+1]+2;
524
525                 /* Number of partition regions */
526                 numregions = extp->extra[offs];
527                 offs += 1;
528
529                 /* skip the sizeof(partregion) field in CFI 1.4 */
530                 if (extp->MinorVersion >= '4')
531                         offs += 2;
532
533                 /* Number of hardware partitions */
534                 numparts = 0;
535                 for (i = 0; i < numregions; i++) {
536                         struct cfi_intelext_regioninfo *rinfo;
537                         rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
538                         numparts += rinfo->NumIdentPartitions;
539                         offs += sizeof(*rinfo)
540                                 + (rinfo->NumBlockTypes - 1) *
541                                   sizeof(struct cfi_intelext_blockinfo);
542                 }
543
544                 /* Programming Region info */
545                 if (extp->MinorVersion >= '4') {
546                         struct cfi_intelext_programming_regioninfo *prinfo;
547                         prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
548                         MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
549                         MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
550                         MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
551                         mtd->flags |= MTD_PROGRAM_REGIONS;
552                         printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
553                                map->name, MTD_PROGREGION_SIZE(mtd),
554                                MTD_PROGREGION_CTRLMODE_VALID(mtd),
555                                MTD_PROGREGION_CTRLMODE_INVALID(mtd));
556                 }
557
558                 /*
559                  * All functions below currently rely on all chips having
560                  * the same geometry so we'll just assume that all hardware
561                  * partitions are of the same size too.
562                  */
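                /*
                 * __ffs(numparts) is log2(numparts) on the assumption
                 * that the partition count is a power of two; the
                 * erasesize check below rejects unreasonable results.
                 */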
563                 partshift = cfi->chipshift - __ffs(numparts);
564
565                 if ((1 << partshift) < mtd->erasesize) {
566                         printk( KERN_ERR
567                                 "%s: bad number of hw partitions (%d)\n",
568                                 __FUNCTION__, numparts);
569                         return -EINVAL;
570                 }
571
572                 numvirtchips = cfi->numchips * numparts;
573                 newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
574                 if (!newcfi)
575                         return -ENOMEM;
576                 shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
577                 if (!shared) {
578                         kfree(newcfi);
579                         return -ENOMEM;
580                 }
581                 memcpy(newcfi, cfi, sizeof(struct cfi_private));
582                 newcfi->numchips = numvirtchips;
583                 newcfi->chipshift = partshift;
584
585                 chip = &newcfi->chips[0];
586                 for (i = 0; i < cfi->numchips; i++) {
587                         shared[i].writing = shared[i].erasing = NULL;
588                         spin_lock_init(&shared[i].lock);
589                         for (j = 0; j < numparts; j++) {
590                                 *chip = cfi->chips[i];
591                                 chip->start += j << partshift;
592                                 chip->priv = &shared[i];
593                                 /* those should be reset too since
594                                    they create memory references. */
595                                 init_waitqueue_head(&chip->wq);
596                                 spin_lock_init(&chip->_spinlock);
597                                 chip->mutex = &chip->_spinlock;
598                                 chip++;
599                         }
600                 }
601
602                 printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
603                                   "--> %d partitions of %d KiB\n",
604                                   map->name, cfi->numchips, cfi->interleave,
605                                   newcfi->numchips, 1<<(newcfi->chipshift-10));
606
607                 map->fldrv_priv = newcfi;
608                 *pcfi = newcfi;
609                 kfree(cfi);
610         }
611
612         return 0;
613 }
614
615 /*
616  *  *********** CHIP ACCESS FUNCTIONS ***********
617  */
618
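/*
 * Wait for a chip (or hardware partition) to become available for the
 * requested operation.  Called with chip->mutex held; may drop and
 * re-take it while sleeping or while arbitrating with another
 * partition, and may suspend an in-progress erase when the mode and
 * the chip's feature bits allow it.
 */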
619 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
620 {
621         DECLARE_WAITQUEUE(wait, current);
622         struct cfi_private *cfi = map->fldrv_priv;
623         map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
624         unsigned long timeo;
625         struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
626
627  resettime:
628         timeo = jiffies + HZ;
629  retry:
630         if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
631                 /*
632          * OK. We have the possibility of contention on the write/erase
633                  * operations which are global to the real chip and not per
634                  * partition.  So let's fight it over in the partition which
635                  * currently has authority on the operation.
636                  *
637                  * The rules are as follows:
638                  *
639                  * - any write operation must own shared->writing.
640                  *
641                  * - any erase operation must own _both_ shared->writing and
642                  *   shared->erasing.
643                  *
644          * - contention arbitration is handled in the owner's context.
645                  *
646                  * The 'shared' struct can be read and/or written only when
647                  * its lock is taken.
648                  */
649                 struct flchip_shared *shared = chip->priv;
650                 struct flchip *contender;
651                 spin_lock(&shared->lock);
652                 contender = shared->writing;
653                 if (contender && contender != chip) {
654                         /*
655                          * The engine to perform desired operation on this
656                          * partition is already in use by someone else.
657                          * Let's fight over it in the context of the chip
658                          * currently using it.  If it is possible to suspend,
659                          * that other partition will do just that, otherwise
660                          * it'll happily send us to sleep.  In any case, when
661                          * get_chip returns success we're clear to go ahead.
662                          */
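                        /*
                         * Use a trylock to avoid a lock-order
                         * inversion: whoever owns contender->mutex may
                         * itself be waiting for shared->lock or for
                         * our chip->mutex.  On failure, drop the
                         * shared lock and retry from scratch.
                         */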
663                         int ret = spin_trylock(contender->mutex);
664                         spin_unlock(&shared->lock);
665                         if (!ret)
666                                 goto retry;
667                         spin_unlock(chip->mutex);
668                         ret = get_chip(map, contender, contender->start, mode);
669                         spin_lock(chip->mutex);
670                         if (ret) {
671                                 spin_unlock(contender->mutex);
672                                 return ret;
673                         }
674                         timeo = jiffies + HZ;
675                         spin_lock(&shared->lock);
676                         spin_unlock(contender->mutex);
677                 }
678
679                 /* We now own it */
680                 shared->writing = chip;
681                 if (mode == FL_ERASING)
682                         shared->erasing = chip;
683                 spin_unlock(&shared->lock);
684         }
685
686         switch (chip->state) {
687
688         case FL_STATUS:
689                 for (;;) {
690                         status = map_read(map, adr);
691                         if (map_word_andequal(map, status, status_OK, status_OK))
692                                 break;
693
694                         /* At this point we're fine with write operations
695                            in other partitions as they don't conflict. */
696                         if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
697                                 break;
698
699                         if (time_after(jiffies, timeo)) {
700                                 printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
701                                        map->name, status.x[0]);
702                                 return -EIO;
703                         }
704                         spin_unlock(chip->mutex);
705                         cfi_udelay(1);
706                         spin_lock(chip->mutex);
707                         /* Someone else might have been playing with it. */
708                         goto retry;
709                 }
710
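        /* Fall through: a ready (or non-conflicting) status means the
           chip can be treated as FL_READY. */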
711         case FL_READY:
712         case FL_CFI_QUERY:
713         case FL_JEDEC_QUERY:
714                 return 0;
715
716         case FL_ERASING:
717                 if (!cfip ||
718                     !(cfip->FeatureSupport & 2) ||
719                     !(mode == FL_READY || mode == FL_POINT ||
720                      (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
721                         goto sleep;
722
723
724                 /* Erase suspend */
725                 map_write(map, CMD(0xB0), adr);
726
727                 /* If the flash has finished erasing, then 'erase suspend'
728                  * appears to make some (28F320) flash devices switch to
729                  * 'read' mode.  Make sure that we switch to 'read status'
730                  * mode so we get the right data. --rmk
731                  */
732                 map_write(map, CMD(0x70), adr);
733                 chip->oldstate = FL_ERASING;
734                 chip->state = FL_ERASE_SUSPENDING;
735                 chip->erase_suspended = 1;
736                 for (;;) {
737                         status = map_read(map, adr);
738                         if (map_word_andequal(map, status, status_OK, status_OK))
739                                 break;
740
741                         if (time_after(jiffies, timeo)) {
742                                 /* Urgh. Resume and pretend we weren't here.  */
743                                 map_write(map, CMD(0xd0), adr);
744                                 /* Make sure we're in 'read status' mode if it had finished */
745                                 map_write(map, CMD(0x70), adr);
746                                 chip->state = FL_ERASING;
747                                 chip->oldstate = FL_READY;
748                                 printk(KERN_ERR "%s: Chip not ready after erase "
749                                        "suspended: status = 0x%lx\n", map->name, status.x[0]);
750                                 return -EIO;
751                         }
752
753                         spin_unlock(chip->mutex);
754                         cfi_udelay(1);
755                         spin_lock(chip->mutex);
756                         /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
757                            So we can just loop here. */
758                 }
759                 chip->state = FL_STATUS;
760                 return 0;
761
762         case FL_XIP_WHILE_ERASING:
763                 if (mode != FL_READY && mode != FL_POINT &&
764                     (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
765                         goto sleep;
766                 chip->oldstate = chip->state;
767                 chip->state = FL_READY;
768                 return 0;
769
770         case FL_POINT:
771                 /* Only if there's no operation suspended... */
772                 if (mode == FL_READY && chip->oldstate == FL_READY)
773                         return 0;
774
775         default:
776         sleep:
777                 set_current_state(TASK_UNINTERRUPTIBLE);
778                 add_wait_queue(&chip->wq, &wait);
779                 spin_unlock(chip->mutex);
780                 schedule();
781                 remove_wait_queue(&chip->wq, &wait);
782                 spin_lock(chip->mutex);
783                 goto resettime;
784         }
785 }
786
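/*
 * Release a chip previously acquired with get_chip(): resume a
 * suspended erase if we own it, hand write ownership back to the
 * partition it was borrowed from, and wake up any waiters.  Called
 * with chip->mutex held.
 */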
787 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
788 {
789         struct cfi_private *cfi = map->fldrv_priv;
790
791         if (chip->priv) {
792                 struct flchip_shared *shared = chip->priv;
793                 spin_lock(&shared->lock);
794                 if (shared->writing == chip && chip->oldstate == FL_READY) {
795                         /* We own the ability to write, but we're done */
796                         shared->writing = shared->erasing;
797                         if (shared->writing && shared->writing != chip) {
798                                 /* give back ownership to who we loaned it from */
799                                 struct flchip *loaner = shared->writing;
800                                 spin_lock(loaner->mutex);
801                                 spin_unlock(&shared->lock);
802                                 spin_unlock(chip->mutex);
803                                 put_chip(map, loaner, loaner->start);
804                                 spin_lock(chip->mutex);
805                                 spin_unlock(loaner->mutex);
806                                 wake_up(&chip->wq);
807                                 return;
808                         }
809                         shared->erasing = NULL;
810                         shared->writing = NULL;
811                 } else if (shared->erasing == chip && shared->writing != chip) {
812                         /*
813                          * We own the ability to erase without the ability
814                          * to write, which means the erase was suspended
815                          * and some other partition is currently writing.
816                          * Don't let the switch below mess things up since
817                          * we don't have ownership to resume anything.
818                          */
819                         spin_unlock(&shared->lock);
820                         wake_up(&chip->wq);
821                         return;
822                 }
823                 spin_unlock(&shared->lock);
824         }
825
826         switch(chip->oldstate) {
827         case FL_ERASING:
828                 chip->state = chip->oldstate;
829                 /* What if one interleaved chip has finished and the
830                    other hasn't? The old code would leave the finished
831                    one in READY mode. That's bad, and caused -EROFS
832                    errors to be returned from do_erase_oneblock because
833                    that's the only bit it checked for at the time.
834                    As the state machine appears to explicitly allow
835                    sending the 0x70 (Read Status) command to an erasing
836                    chip and expecting it to be ignored, that's what we
837                    do. */
838                 map_write(map, CMD(0xd0), adr);
839                 map_write(map, CMD(0x70), adr);
840                 chip->oldstate = FL_READY;
841                 chip->state = FL_ERASING;
842                 break;
843
844         case FL_XIP_WHILE_ERASING:
845                 chip->state = chip->oldstate;
846                 chip->oldstate = FL_READY;
847                 break;
848
849         case FL_READY:
850         case FL_STATUS:
851         case FL_JEDEC_QUERY:
852                 /* We should really make set_vpp() count, rather than doing this */
853                 DISABLE_VPP(map);
854                 break;
855         default:
856                 printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
857         }
858         wake_up(&chip->wq);
859 }
860
861 #ifdef CONFIG_MTD_XIP
862
863 /*
864  * No interrupt whatsoever can be serviced while the flash isn't in array
865  * mode.  This is ensured by the xip_disable() and xip_enable() functions
866  * enclosing any code path where the flash is known not to be in array mode.
867  * And within a XIP disabled code path, only functions marked with __xipram
868  * may be called and nothing else (it's a good thing to inspect generated
869  * assembly to make sure inline functions were actually inlined and that gcc
870  * didn't emit calls to its own support functions). Configuring the MTD CFI
871  * support for a single buswidth and a single interleave is also recommended.
872  */
873
874 static void xip_disable(struct map_info *map, struct flchip *chip,
875                         unsigned long adr)
876 {
877         /* TODO: chips with no XIP use should ignore and return */
878         (void) map_read(map, adr); /* ensure mmu mapping is up to date */
879         local_irq_disable();
880 }
881
882 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
883                                 unsigned long adr)
884 {
885         struct cfi_private *cfi = map->fldrv_priv;
886         if (chip->state != FL_POINT && chip->state != FL_READY) {
887                 map_write(map, CMD(0xff), adr);
888                 chip->state = FL_READY;
889         }
890         (void) map_read(map, adr);
891         xip_iprefetch();
892         local_irq_enable();
893 }
894
895 /*
896  * When a delay is required for the flash operation to complete, the
897  * xip_udelay() function is polling for both the given timeout and pending
898  * (but still masked) hardware interrupts.  Whenever there is an interrupt
899  * pending then the flash erase or write operation is suspended, array mode
900  * restored and interrupts unmasked.  Task scheduling might also happen at that
901  * point.  The CPU eventually returns from the interrupt or the call to
902  * schedule() and the suspended flash operation is resumed for the remainder
903  * of the delay period.
904  *
905  * Warning: this function _will_ fool interrupt latency tracing tools.
906  */
907
908 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
909                                 unsigned long adr, int usec)
910 {
911         struct cfi_private *cfi = map->fldrv_priv;
912         struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
913         map_word status, OK = CMD(0x80);
914         unsigned long suspended, start = xip_currtime();
915         flstate_t oldstate, newstate;
916
917         do {
918                 cpu_relax();
919                 if (xip_irqpending() && cfip &&
920                     ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
921                      (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
922                     (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
923                         /*
924                          * Let's suspend the erase or write operation when
925                          * supported.  Note that we currently don't try to
926                          * suspend interleaved chips if there is already
927                          * another operation suspended (imagine what happens
928                          * when one chip was already done with the current
929                          * operation while another chip suspended it, then
930                          * we resume the whole thing at once).  Yes, it
931                          * can happen!
932                          */
933                         map_write(map, CMD(0xb0), adr);
934                         map_write(map, CMD(0x70), adr);
935                         usec -= xip_elapsed_since(start);
936                         suspended = xip_currtime();
937                         do {
938                                 if (xip_elapsed_since(suspended) > 100000) {
939                                         /*
940                                          * The chip doesn't want to suspend
941                                          * after waiting for 100 msecs.
942                                          * This is a critical error but there
943                                          * is not much we can do here.
944                                          */
945                                         return;
946                                 }
947                                 status = map_read(map, adr);
948                         } while (!map_word_andequal(map, status, OK, OK));
949
950                         /* Suspend succeeded */
951                         oldstate = chip->state;
952                         if (oldstate == FL_ERASING) {
953                                 if (!map_word_bitsset(map, status, CMD(0x40)))
954                                         break;
955                                 newstate = FL_XIP_WHILE_ERASING;
956                                 chip->erase_suspended = 1;
957                         } else {
958                                 if (!map_word_bitsset(map, status, CMD(0x04)))
959                                         break;
960                                 newstate = FL_XIP_WHILE_WRITING;
961                                 chip->write_suspended = 1;
962                         }
963                         chip->state = newstate;
964                         map_write(map, CMD(0xff), adr);
965                         (void) map_read(map, adr);
966                         asm volatile (".rep 8; nop; .endr");
967                         local_irq_enable();
968                         spin_unlock(chip->mutex);
969                         asm volatile (".rep 8; nop; .endr");
970                         cond_resched();
971
972                         /*
973                          * We're back.  However someone else might have
974                          * decided to go write to the chip if we are in
975                          * a suspended erase state.  If so let's wait
976                          * until it's done.
977                          */
978                         spin_lock(chip->mutex);
979                         while (chip->state != newstate) {
980                                 DECLARE_WAITQUEUE(wait, current);
981                                 set_current_state(TASK_UNINTERRUPTIBLE);
982                                 add_wait_queue(&chip->wq, &wait);
983                                 spin_unlock(chip->mutex);
984                                 schedule();
985                                 remove_wait_queue(&chip->wq, &wait);
986                                 spin_lock(chip->mutex);
987                         }
988                         /* Disallow XIP again */
989                         local_irq_disable();
990
991                         /* Resume the write or erase operation */
992                         map_write(map, CMD(0xd0), adr);
993                         map_write(map, CMD(0x70), adr);
994                         chip->state = oldstate;
995                         start = xip_currtime();
996                 } else if (usec >= 1000000/HZ) {
997                         /*
998                          * Try to save on CPU power when waiting delay
999                          * is at least a system timer tick period.
1000                          * No need to be extremely accurate here.
1001                          */
1002                         xip_cpu_idle();
1003                 }
1004                 status = map_read(map, adr);
1005         } while (!map_word_andequal(map, status, OK, OK)
1006                  && xip_elapsed_since(start) < usec);
1007 }
1008
1009 #define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)
1010
1011 /*
1012  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1013  * the flash is actively programming or erasing since we have to poll for
1014  * the operation to complete anyway.  We can't do that in a generic way with
1015  * a XIP setup so do it before the actual flash operation in this case
1016  * and stub it out from INVALIDATE_CACHE_UDELAY.
1017  */
1018 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1019         INVALIDATE_CACHED_RANGE(map, from, size)
1020
1021 #define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec)  \
1022         UDELAY(map, chip, cmd_adr, usec)
1023
1024 /*
1025  * Extra notes:
1026  *
1027  * Activating this XIP support changes the way the code works a bit.  For
1028  * example the code to suspend the current process when concurrent access
1029  * happens is never executed because xip_udelay() will always return with the
1030  * same chip state as it was entered with.  This is why there is no care for
1031  * the presence of add_wait_queue() or schedule() calls from within a couple
1032  * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
1033  * The queueing and scheduling are always happening within xip_udelay().
1034  *
1035  * Similarly, get_chip() and put_chip() just happen to always be executed
1036  * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. with the
1037  * flash in array mode, so most of the cases therein are never executed
1038  * and XIP is never disturbed.
1039  */
1040
1041 #else
1042
1043 #define xip_disable(map, chip, adr)
1044 #define xip_enable(map, chip, adr)
1045 #define XIP_INVAL_CACHED_RANGE(x...)
1046
1047 #define UDELAY(map, chip, adr, usec)  \
1048 do {  \
1049         spin_unlock(chip->mutex);  \
1050         cfi_udelay(usec);  \
1051         spin_lock(chip->mutex);  \
1052 } while (0)
1053
1054 #define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec)  \
1055 do {  \
1056         spin_unlock(chip->mutex);  \
1057         INVALIDATE_CACHED_RANGE(map, adr, len);  \
1058         cfi_udelay(usec);  \
1059         spin_lock(chip->mutex);  \
1060 } while (0)
1061
1062 #endif
1063
1064 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1065 {
1066         unsigned long cmd_addr;
1067         struct cfi_private *cfi = map->fldrv_priv;
1068         int ret = 0;
1069
1070         adr += chip->start;
1071
1072         /* Ensure cmd read/writes are aligned. */
1073         cmd_addr = adr & ~(map_bankwidth(map)-1);
1074
1075         spin_lock(chip->mutex);
1076
1077         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1078
1079         if (!ret) {
1080                 if (chip->state != FL_POINT && chip->state != FL_READY)
1081                         map_write(map, CMD(0xff), cmd_addr);
1082
1083                 chip->state = FL_POINT;
1084                 chip->ref_point_counter++;
1085         }
1086         spin_unlock(chip->mutex);
1087
1088         return ret;
1089 }
1090
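/*
 * point() hands the caller a pointer straight into the memory-mapped
 * flash instead of copying.  fixup_use_point() only installs these
 * methods for linear maps, and ref_point_counter keeps the chip pinned
 * in array mode until every outstanding point has been unpointed.
 */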
1091 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
1092 {
1093         struct map_info *map = mtd->priv;
1094         struct cfi_private *cfi = map->fldrv_priv;
1095         unsigned long ofs;
1096         int chipnum;
1097         int ret = 0;
1098
1099         if (!map->virt || (from + len > mtd->size))
1100                 return -EINVAL;
1101
1102         *mtdbuf = (void *)map->virt + from;
1103         *retlen = 0;
1104
1105         /* Now lock the chip(s) to POINT state */
1106
1107         /* ofs: offset within the first chip that the first read should start */
1108         chipnum = (from >> cfi->chipshift);
1109         ofs = from - (chipnum << cfi->chipshift);
1110
1111         while (len) {
1112                 unsigned long thislen;
1113
1114                 if (chipnum >= cfi->numchips)
1115                         break;
1116
1117                 if ((len + ofs -1) >> cfi->chipshift)
1118                         thislen = (1<<cfi->chipshift) - ofs;
1119                 else
1120                         thislen = len;
1121
1122                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1123                 if (ret)
1124                         break;
1125
1126                 *retlen += thislen;
1127                 len -= thislen;
1128
1129                 ofs = 0;
1130                 chipnum++;
1131         }
1132         return 0;
1133 }
1134
1135 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
1136 {
1137         struct map_info *map = mtd->priv;
1138         struct cfi_private *cfi = map->fldrv_priv;
1139         unsigned long ofs;
1140         int chipnum;
1141
1142         /* Now unlock the chip(s) POINT state */
1143
1144         /* ofs: offset within the first chip that the first read should start */
1145         chipnum = (from >> cfi->chipshift);
1146         ofs = from - (chipnum <<  cfi->chipshift);
1147
1148         while (len) {
1149                 unsigned long thislen;
1150                 struct flchip *chip;
1151
1152                 if (chipnum >= cfi->numchips)
1153                         break;
1154                 chip = &cfi->chips[chipnum];
1155
1156                 if ((len + ofs -1) >> cfi->chipshift)
1157                         thislen = (1<<cfi->chipshift) - ofs;
1158                 else
1159                         thislen = len;
1160
1161                 spin_lock(chip->mutex);
1162                 if (chip->state == FL_POINT) {
1163                         chip->ref_point_counter--;
1164                         if(chip->ref_point_counter == 0)
1165                                 chip->state = FL_READY;
1166                 } else
1167                         printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */
1168
1169                 put_chip(map, chip, chip->start);
1170                 spin_unlock(chip->mutex);
1171
1172                 len -= thislen;
1173                 ofs = 0;
1174                 chipnum++;
1175         }
1176 }
1177
1178 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1179 {
1180         unsigned long cmd_addr;
1181         struct cfi_private *cfi = map->fldrv_priv;
1182         int ret;
1183
1184         adr += chip->start;
1185
1186         /* Ensure cmd read/writes are aligned. */
1187         cmd_addr = adr & ~(map_bankwidth(map)-1);
1188
1189         spin_lock(chip->mutex);
1190         ret = get_chip(map, chip, cmd_addr, FL_READY);
1191         if (ret) {
1192                 spin_unlock(chip->mutex);
1193                 return ret;
1194         }
1195
1196         if (chip->state != FL_POINT && chip->state != FL_READY) {
1197                 map_write(map, CMD(0xff), cmd_addr);
1198
1199                 chip->state = FL_READY;
1200         }
1201
1202         map_copy_from(map, buf, adr, len);
1203
1204         put_chip(map, chip, cmd_addr);
1205
1206         spin_unlock(chip->mutex);
1207         return 0;
1208 }
1209
1210 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1211 {
1212         struct map_info *map = mtd->priv;
1213         struct cfi_private *cfi = map->fldrv_priv;
1214         unsigned long ofs;
1215         int chipnum;
1216         int ret = 0;
1217
1218         /* ofs: offset within the first chip that the first read should start */
1219         chipnum = (from >> cfi->chipshift);
1220         ofs = from - (chipnum <<  cfi->chipshift);
1221
1222         *retlen = 0;
1223
1224         while (len) {
1225                 unsigned long thislen;
1226
1227                 if (chipnum >= cfi->numchips)
1228                         break;
1229
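                /* If the request crosses a chip boundary, clamp this
                   pass at the end of the current chip. */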
1230                 if ((len + ofs -1) >> cfi->chipshift)
1231                         thislen = (1<<cfi->chipshift) - ofs;
1232                 else
1233                         thislen = len;
1234
1235                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1236                 if (ret)
1237                         break;
1238
1239                 *retlen += thislen;
1240                 len -= thislen;
1241                 buf += thislen;
1242
1243                 ofs = 0;
1244                 chipnum++;
1245         }
1246         return ret;
1247 }
1248
1249 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1250                                      unsigned long adr, map_word datum, int mode)
1251 {
1252         struct cfi_private *cfi = map->fldrv_priv;
1253         map_word status, status_OK, write_cmd;
1254         unsigned long timeo;
1255         int z, ret=0;
1256
1257         adr += chip->start;
1258
1259         /* Let's determine those according to the interleave only once */
1260         status_OK = CMD(0x80);
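        /*
         * Note: parts reporting command set 0x0200 take 0x41 rather
         * than the classic 0x40 as the word-program command.
         */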
1261         switch (mode) {
1262         case FL_WRITING:
1263                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1264                 break;
1265         case FL_OTP_WRITE:
1266                 write_cmd = CMD(0xc0);
1267                 break;
1268         default:
1269                 return -EINVAL;
1270         }
1271
1272         spin_lock(chip->mutex);
1273         ret = get_chip(map, chip, adr, mode);
1274         if (ret) {
1275                 spin_unlock(chip->mutex);
1276                 return ret;
1277         }
1278
1279         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1280         ENABLE_VPP(map);
1281         xip_disable(map, chip, adr);
1282         map_write(map, write_cmd, adr);
1283         map_write(map, datum, adr);
1284         chip->state = mode;
1285
1286         INVALIDATE_CACHE_UDELAY(map, chip, adr,
1287                                 adr, map_bankwidth(map),
1288                                 chip->word_write_time);
1289
1290         timeo = jiffies + (HZ/2);
1291         z = 0;
1292         for (;;) {
1293                 if (chip->state != mode) {
1294                         /* Someone's suspended the write. Sleep */
1295                         DECLARE_WAITQUEUE(wait, current);
1296
1297                         set_current_state(TASK_UNINTERRUPTIBLE);
1298                         add_wait_queue(&chip->wq, &wait);
1299                         spin_unlock(chip->mutex);
1300                         schedule();
1301                         remove_wait_queue(&chip->wq, &wait);
1302                         timeo = jiffies + (HZ / 2); /* FIXME */
1303                         spin_lock(chip->mutex);
1304                         continue;
1305                 }
1306
1307                 status = map_read(map, adr);
1308                 if (map_word_andequal(map, status, status_OK, status_OK))
1309                         break;
1310
1311                 /* OK Still waiting */
1312                 if (time_after(jiffies, timeo)) {
1313                         map_write(map, CMD(0x70), adr);
1314                         chip->state = FL_STATUS;
1315                         xip_enable(map, chip, adr);
1316                         printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1317                         ret = -EIO;
1318                         goto out;
1319                 }
1320
1321                 /* Latency issues. Drop the lock, wait a while and retry */
1322                 z++;
1323                 UDELAY(map, chip, adr, 1);
1324         }
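        /* Adaptively trim the programming delay: if the operation
           completed without any extra polling, shorten the next wait;
           if we had to poll more than once, lengthen it. */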
1325         if (!z) {
1326                 chip->word_write_time--;
1327                 if (!chip->word_write_time)
1328                         chip->word_write_time = 1;
1329         }
1330         if (z > 1)
1331                 chip->word_write_time++;
1332
1333         /* Done and happy. */
1334         chip->state = FL_STATUS;
1335
1336         /* check for errors */
1337         if (map_word_bitsset(map, status, CMD(0x1a))) {
1338                 unsigned long chipstatus = MERGESTATUS(status);
1339
1340                 /* reset status */
1341                 map_write(map, CMD(0x50), adr);
1342                 map_write(map, CMD(0x70), adr);
1343                 xip_enable(map, chip, adr);
1344
1345                 if (chipstatus & 0x02) {
1346                         ret = -EROFS;
1347                 } else if (chipstatus & 0x08) {
1348                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1349                         ret = -EIO;
1350                 } else {
1351                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1352                         ret = -EINVAL;
1353                 }
1354
1355                 goto out;
1356         }
1357
1358         xip_enable(map, chip, adr);
1359  out:   put_chip(map, chip, adr);
1360         spin_unlock(chip->mutex);
1361         return ret;
1362 }
1363
1364
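/*
 * Word-write entry point. Handles three phases: an unaligned head
 * (padded with 0xff so untouched bits stay erased), a bus-width-aligned
 * body, and an unaligned tail. Writes that cross a chip boundary are
 * split and continued on the next chip.
 */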
1365 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1366 {
1367         struct map_info *map = mtd->priv;
1368         struct cfi_private *cfi = map->fldrv_priv;
1369         int ret = 0;
1370         int chipnum;
1371         unsigned long ofs;
1372
1373         *retlen = 0;
1374         if (!len)
1375                 return 0;
1376
1377         chipnum = to >> cfi->chipshift;
1378         ofs = to - (chipnum << cfi->chipshift);
1379
1380         /* If it's not bus-aligned, do a partial word write first */
1381         if (ofs & (map_bankwidth(map)-1)) {
1382                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1383                 int gap = ofs - bus_ofs;
1384                 int n;
1385                 map_word datum;
1386
1387                 n = min_t(int, len, map_bankwidth(map)-gap);
1388                 datum = map_word_ff(map);
1389                 datum = map_word_load_partial(map, datum, buf, gap, n);
1390
1391                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1392                                                bus_ofs, datum, FL_WRITING);
1393                 if (ret)
1394                         return ret;
1395
1396                 len -= n;
1397                 ofs += n;
1398                 buf += n;
1399                 (*retlen) += n;
1400
1401                 if (ofs >> cfi->chipshift) {
1402                         chipnum++;
1403                         ofs = 0;
1404                         if (chipnum == cfi->numchips)
1405                                 return 0;
1406                 }
1407         }
1408
1409         while (len >= map_bankwidth(map)) {
1410                 map_word datum = map_word_load(map, buf);
1411
1412                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1413                                        ofs, datum, FL_WRITING);
1414                 if (ret)
1415                         return ret;
1416
1417                 ofs += map_bankwidth(map);
1418                 buf += map_bankwidth(map);
1419                 (*retlen) += map_bankwidth(map);
1420                 len -= map_bankwidth(map);
1421
1422                 if (ofs >> cfi->chipshift) {
1423                         chipnum++;
1424                         ofs = 0;
1425                         if (chipnum == cfi->numchips)
1426                                 return 0;
1427                 }
1428         }
1429
1430         if (len & (map_bankwidth(map)-1)) {
1431                 map_word datum;
1432
1433                 datum = map_word_ff(map);
1434                 datum = map_word_load_partial(map, datum, buf, 0, len);
1435
1436                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1437                                        ofs, datum, FL_WRITING);
1438                 if (ret)
1439                         return ret;
1440
1441                 (*retlen) += len;
1442         }
1443
1444         return 0;
1445 }
1446
1447
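/*
 * Buffered write of up to wbufsize bytes within a single write buffer.
 * Protocol: issue Write to Buffer (0xe8, or 0xe9 for P_ID 0x0200) and
 * poll until the buffer is available, write the word count minus one,
 * write the data words, send the Confirm command (0xd0), then poll the
 * status register for completion.
 */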
1448 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1449                                     unsigned long adr, const struct kvec **pvec,
1450                                     unsigned long *pvec_seek, int len)
1451 {
1452         struct cfi_private *cfi = map->fldrv_priv;
1453         map_word status, status_OK, write_cmd, datum;
1454         unsigned long cmd_adr, timeo;
1455         int wbufsize, z, ret=0, word_gap, words;
1456         const struct kvec *vec;
1457         unsigned long vec_seek;
1458
1459         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1460         adr += chip->start;
1461         cmd_adr = adr & ~(wbufsize-1);
1462
1463         /* Let's determine this according to the interleave only once */
1464         status_OK = CMD(0x80);
1465         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1466
1467         spin_lock(chip->mutex);
1468         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1469         if (ret) {
1470                 spin_unlock(chip->mutex);
1471                 return ret;
1472         }
1473
1474         XIP_INVAL_CACHED_RANGE(map, adr, len);
1475         ENABLE_VPP(map);
1476         xip_disable(map, chip, cmd_adr);
1477
1478         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1479            [...], the device will not accept any more Write to Buffer commands".
1480            So we must check here and reset those bits if they're set. Otherwise
1481            we're just pissing in the wind */
1482         if (chip->state != FL_STATUS)
1483                 map_write(map, CMD(0x70), cmd_adr);
1484         status = map_read(map, cmd_adr);
1485         if (map_word_bitsset(map, status, CMD(0x30))) {
1486                 xip_enable(map, chip, cmd_adr);
1487                 printk(KERN_WARNING "%s: SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", map->name, status.x[0]);
1488                 xip_disable(map, chip, cmd_adr);
1489                 map_write(map, CMD(0x50), cmd_adr);
1490                 map_write(map, CMD(0x70), cmd_adr);
1491         }
1492
1493         chip->state = FL_WRITING_TO_BUFFER;
1494
1495         z = 0;
1496         for (;;) {
1497                 map_write(map, write_cmd, cmd_adr);
1498
1499                 status = map_read(map, cmd_adr);
1500                 if (map_word_andequal(map, status, status_OK, status_OK))
1501                         break;
1502
1503                 UDELAY(map, chip, cmd_adr, 1);
1504
1505                 if (++z > 20) {
1506                         /* Argh. Not ready for write to buffer */
1507                         map_word Xstatus;
1508                         map_write(map, CMD(0x70), cmd_adr);
1509                         chip->state = FL_STATUS;
1510                         Xstatus = map_read(map, cmd_adr);
1511                         /* Odd. Clear status bits */
1512                         map_write(map, CMD(0x50), cmd_adr);
1513                         map_write(map, CMD(0x70), cmd_adr);
1514                         xip_enable(map, chip, cmd_adr);
1515                         printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
1516                                map->name, status.x[0], Xstatus.x[0]);
1517                         ret = -EIO;
1518                         goto out;
1519                 }
1520         }
1521
1522         /* Figure out the number of words to write */
1523         word_gap = (-adr & (map_bankwidth(map)-1));
1524         words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1525         if (!word_gap) {
1526                 words--;
1527         } else {
1528                 word_gap = map_bankwidth(map) - word_gap;
1529                 adr -= word_gap;
1530                 datum = map_word_ff(map);
1531         }
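        /* Example (bankwidth 4, assumed): a write starting at adr % 4 == 2
           gives word_gap = 2, so adr is rounded down by two bytes and the
           first bus word is pre-filled with 0xff, leaving the two leading
           bytes unprogrammed. */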
1532
1533         /* Write length of data to come */
1534         map_write(map, CMD(words), cmd_adr);
1535
1536         /* Write data */
1537         vec = *pvec;
1538         vec_seek = *pvec_seek;
1539         do {
1540                 int n = map_bankwidth(map) - word_gap;
1541                 if (n > vec->iov_len - vec_seek)
1542                         n = vec->iov_len - vec_seek;
1543                 if (n > len)
1544                         n = len;
1545
1546                 if (!word_gap && len < map_bankwidth(map))
1547                         datum = map_word_ff(map);
1548
1549                 datum = map_word_load_partial(map, datum,
1550                                               vec->iov_base + vec_seek,
1551                                               word_gap, n);
1552
1553                 len -= n;
1554                 word_gap += n;
1555                 if (!len || word_gap == map_bankwidth(map)) {
1556                         map_write(map, datum, adr);
1557                         adr += map_bankwidth(map);
1558                         word_gap = 0;
1559                 }
1560
1561                 vec_seek += n;
1562                 if (vec_seek == vec->iov_len) {
1563                         vec++;
1564                         vec_seek = 0;
1565                 }
1566         } while (len);
1567         *pvec = vec;
1568         *pvec_seek = vec_seek;
1569
1570         /* GO GO GO */
1571         map_write(map, CMD(0xd0), cmd_adr);
1572         chip->state = FL_WRITING;
1573
1574         INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr,
1575                                 adr, len,
1576                                 chip->buffer_write_time);
1577
1578         timeo = jiffies + (HZ/2);
1579         z = 0;
1580         for (;;) {
1581                 if (chip->state != FL_WRITING) {
1582                         /* Someone's suspended the write. Sleep */
1583                         DECLARE_WAITQUEUE(wait, current);
1584                         set_current_state(TASK_UNINTERRUPTIBLE);
1585                         add_wait_queue(&chip->wq, &wait);
1586                         spin_unlock(chip->mutex);
1587                         schedule();
1588                         remove_wait_queue(&chip->wq, &wait);
1589                         timeo = jiffies + (HZ / 2); /* FIXME */
1590                         spin_lock(chip->mutex);
1591                         continue;
1592                 }
1593
1594                 status = map_read(map, cmd_adr);
1595                 if (map_word_andequal(map, status, status_OK, status_OK))
1596                         break;
1597
1598                 /* OK Still waiting */
1599                 if (time_after(jiffies, timeo)) {
1600                         map_write(map, CMD(0x70), cmd_adr);
1601                         chip->state = FL_STATUS;
1602                         xip_enable(map, chip, cmd_adr);
1603                         printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1604                         ret = -EIO;
1605                         goto out;
1606                 }
1607
1608                 /* Latency issues. Drop the lock, wait a while and retry */
1609                 z++;
1610                 UDELAY(map, chip, cmd_adr, 1);
1611         }
1612         if (!z) {
1613                 chip->buffer_write_time--;
1614                 if (!chip->buffer_write_time)
1615                         chip->buffer_write_time = 1;
1616         }
1617         if (z > 1)
1618                 chip->buffer_write_time++;
1619
1620         /* Done and happy. */
1621         chip->state = FL_STATUS;
1622
1623         /* check for errors */
1624         if (map_word_bitsset(map, status, CMD(0x1a))) {
1625                 unsigned long chipstatus = MERGESTATUS(status);
1626
1627                 /* reset status */
1628                 map_write(map, CMD(0x50), cmd_adr);
1629                 map_write(map, CMD(0x70), cmd_adr);
1630                 xip_enable(map, chip, cmd_adr);
1631
1632                 if (chipstatus & 0x02) {
1633                         ret = -EROFS;
1634                 } else if (chipstatus & 0x08) {
1635                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1636                         ret = -EIO;
1637                 } else {
1638                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1639                         ret = -EINVAL;
1640                 }
1641
1642                 goto out;
1643         }
1644
1645         xip_enable(map, chip, cmd_adr);
1646  out:   put_chip(map, chip, cmd_adr);
1647         spin_unlock(chip->mutex);
1648         return ret;
1649 }
1650
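/*
 * Vectored write entry point: walks the kvec array and feeds
 * do_write_buffer() one write-buffer-aligned chunk at a time, so no
 * single programming operation crosses a write block (or chip)
 * boundary.
 */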
1651 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1652                                 unsigned long count, loff_t to, size_t *retlen)
1653 {
1654         struct map_info *map = mtd->priv;
1655         struct cfi_private *cfi = map->fldrv_priv;
1656         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1657         int ret = 0;
1658         int chipnum;
1659         unsigned long ofs, vec_seek, i;
1660         size_t len = 0;
1661
1662         for (i = 0; i < count; i++)
1663                 len += vecs[i].iov_len;
1664
1665         *retlen = 0;
1666         if (!len)
1667                 return 0;
1668
1669         chipnum = to >> cfi->chipshift;
1670         ofs = to - (chipnum << cfi->chipshift);
1671         vec_seek = 0;
1672
1673         do {
1674                 /* We must not cross write block boundaries */
1675                 int size = wbufsize - (ofs & (wbufsize-1));
1676
1677                 if (size > len)
1678                         size = len;
1679                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1680                                       ofs, &vecs, &vec_seek, size);
1681                 if (ret)
1682                         return ret;
1683
1684                 ofs += size;
1685                 (*retlen) += size;
1686                 len -= size;
1687
1688                 if (ofs >> cfi->chipshift) {
1689                         chipnum++;
1690                         ofs = 0;
1691                         if (chipnum == cfi->numchips)
1692                                 return 0;
1693                 }
1694
1695                 /* Be nice and reschedule with the chip in a usable state for other
1696                    processes. */
1697                 cond_resched();
1698
1699         } while (len);
1700
1701         return 0;
1702 }
1703
1704 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1705                                        size_t len, size_t *retlen, const u_char *buf)
1706 {
1707         struct kvec vec;
1708
1709         vec.iov_base = (void *) buf;
1710         vec.iov_len = len;
1711
1712         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1713 }
1714
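/*
 * Erase one block. Sequence: Clear Status (0x50), Block Erase (0x20)
 * plus Confirm (0xd0), then poll SR.7 for completion (up to 20s, with
 * the timeout restarted if the erase was suspended and resumed). On an
 * erase failure reported in SR.5 the block is retried up to three
 * times.
 */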
1715 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1716                                       unsigned long adr, int len, void *thunk)
1717 {
1718         struct cfi_private *cfi = map->fldrv_priv;
1719         map_word status, status_OK;
1720         unsigned long timeo;
1721         int retries = 3;
1722         DECLARE_WAITQUEUE(wait, current);
1723         int ret = 0;
1724
1725         adr += chip->start;
1726
1727         /* Let's determine this according to the interleave only once */
1728         status_OK = CMD(0x80);
1729
1730  retry:
1731         spin_lock(chip->mutex);
1732         ret = get_chip(map, chip, adr, FL_ERASING);
1733         if (ret) {
1734                 spin_unlock(chip->mutex);
1735                 return ret;
1736         }
1737
1738         XIP_INVAL_CACHED_RANGE(map, adr, len);
1739         ENABLE_VPP(map);
1740         xip_disable(map, chip, adr);
1741
1742         /* Clear the status register first */
1743         map_write(map, CMD(0x50), adr);
1744
1745         /* Now erase */
1746         map_write(map, CMD(0x20), adr);
1747         map_write(map, CMD(0xD0), adr);
1748         chip->state = FL_ERASING;
1749         chip->erase_suspended = 0;
1750
1751         INVALIDATE_CACHE_UDELAY(map, chip, adr,
1752                                 adr, len,
1753                                 chip->erase_time*1000/2);
1754
1755         /* FIXME. Use a timer to check this, and return immediately. */
1756         /* Once the state machine's known to be working I'll do that */
1757
1758         timeo = jiffies + (HZ*20);
1759         for (;;) {
1760                 if (chip->state != FL_ERASING) {
1761                         /* Someone's suspended the erase. Sleep */
1762                         set_current_state(TASK_UNINTERRUPTIBLE);
1763                         add_wait_queue(&chip->wq, &wait);
1764                         spin_unlock(chip->mutex);
1765                         schedule();
1766                         remove_wait_queue(&chip->wq, &wait);
1767                         spin_lock(chip->mutex);
1768                         continue;
1769                 }
1770                 if (chip->erase_suspended) {
1771                         /* This erase was suspended and resumed.
1772                            Adjust the timeout */
1773                         timeo = jiffies + (HZ*20); /* FIXME */
1774                         chip->erase_suspended = 0;
1775                 }
1776
1777                 status = map_read(map, adr);
1778                 if (map_word_andequal(map, status, status_OK, status_OK))
1779                         break;
1780
1781                 /* OK Still waiting */
1782                 if (time_after(jiffies, timeo)) {
1783                         map_write(map, CMD(0x70), adr);
1784                         chip->state = FL_STATUS;
1785                         xip_enable(map, chip, adr);
1786                         printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1787                         ret = -EIO;
1788                         goto out;
1789                 }
1790
1791                 /* Latency issues. Drop the lock, wait a while and retry */
1792                 UDELAY(map, chip, adr, 1000000/HZ);
1793         }
1794
1795         /* We've broken this before. It doesn't hurt to be safe */
1796         map_write(map, CMD(0x70), adr);
1797         chip->state = FL_STATUS;
1798         status = map_read(map, adr);
1799
1800         /* check for errors */
1801         if (map_word_bitsset(map, status, CMD(0x3a))) {
1802                 unsigned long chipstatus = MERGESTATUS(status);
1803
1804                 /* Reset the error bits */
1805                 map_write(map, CMD(0x50), adr);
1806                 map_write(map, CMD(0x70), adr);
1807                 xip_enable(map, chip, adr);
1808
1809                 if ((chipstatus & 0x30) == 0x30) {
1810                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1811                         ret = -EINVAL;
1812                 } else if (chipstatus & 0x02) {
1813                         /* Protection bit set */
1814                         ret = -EROFS;
1815                 } else if (chipstatus & 0x08) {
1816                         /* Voltage */
1817                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1818                         ret = -EIO;
1819                 } else if (chipstatus & 0x20 && retries--) {
1820                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1821                         timeo = jiffies + HZ;
1822                         put_chip(map, chip, adr);
1823                         spin_unlock(chip->mutex);
1824                         goto retry;
1825                 } else {
1826                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1827                         ret = -EIO;
1828                 }
1829
1830                 goto out;
1831         }
1832
1833         xip_enable(map, chip, adr);
1834  out:   put_chip(map, chip, adr);
1835         spin_unlock(chip->mutex);
1836         return ret;
1837 }
1838
1839 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1840 {
1841         unsigned long ofs, len;
1842         int ret;
1843
1844         ofs = instr->addr;
1845         len = instr->len;
1846
1847         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1848         if (ret)
1849                 return ret;
1850
1851         instr->state = MTD_ERASE_DONE;
1852         mtd_erase_callback(instr);
1853
1854         return 0;
1855 }
1856
1857 static void cfi_intelext_sync (struct mtd_info *mtd)
1858 {
1859         struct map_info *map = mtd->priv;
1860         struct cfi_private *cfi = map->fldrv_priv;
1861         int i;
1862         struct flchip *chip;
1863         int ret = 0;
1864
1865         for (i=0; !ret && i<cfi->numchips; i++) {
1866                 chip = &cfi->chips[i];
1867
1868                 spin_lock(chip->mutex);
1869                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1870
1871                 if (!ret) {
1872                         chip->oldstate = chip->state;
1873                         chip->state = FL_SYNCING;
1874                         /* No need to wake_up() on this state change -
1875                          * as the whole point is that nobody can do anything
1876                          * with the chip now anyway.
1877                          */
1878                 }
1879                 spin_unlock(chip->mutex);
1880         }
1881
1882         /* Unlock the chips again */
1883
1884         for (i--; i >= 0; i--) {
1885                 chip = &cfi->chips[i];
1886
1887                 spin_lock(chip->mutex);
1888
1889                 if (chip->state == FL_SYNCING) {
1890                         chip->state = chip->oldstate;
1891                         chip->oldstate = FL_READY;
1892                         wake_up(&chip->wq);
1893                 }
1894                 spin_unlock(chip->mutex);
1895         }
1896 }
1897
1898 #ifdef DEBUG_LOCK_BITS
1899 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1900                                                 struct flchip *chip,
1901                                                 unsigned long adr,
1902                                                 int len, void *thunk)
1903 {
1904         struct cfi_private *cfi = map->fldrv_priv;
1905         int status, ofs_factor = cfi->interleave * cfi->device_type;
1906
1907         adr += chip->start;
1908         xip_disable(map, chip, adr+(2*ofs_factor));
1909         map_write(map, CMD(0x90), adr+(2*ofs_factor));
1910         chip->state = FL_JEDEC_QUERY;
1911         status = cfi_read_query(map, adr+(2*ofs_factor));
1912         xip_enable(map, chip, 0);
1913         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1914                adr, status);
1915         return 0;
1916 }
1917 #endif
1918
1919 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1920 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1921
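/*
 * Lock or unlock one block: Set Block Lock Bit is 0x60 followed by
 * 0x01, Clear Block Lock Bits is 0x60 followed by 0xd0. Unless the
 * chip advertises Instant Individual Block Locking (FeatureSupport
 * bit 5) we wait one tick before polling the status register.
 */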
1922 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1923                                        unsigned long adr, int len, void *thunk)
1924 {
1925         struct cfi_private *cfi = map->fldrv_priv;
1926         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1927         map_word status, status_OK;
1928         unsigned long timeo = jiffies + HZ;
1929         int ret;
1930
1931         adr += chip->start;
1932
1933         /* Let's determine this according to the interleave only once */
1934         status_OK = CMD(0x80);
1935
1936         spin_lock(chip->mutex);
1937         ret = get_chip(map, chip, adr, FL_LOCKING);
1938         if (ret) {
1939                 spin_unlock(chip->mutex);
1940                 return ret;
1941         }
1942
1943         ENABLE_VPP(map);
1944         xip_disable(map, chip, adr);
1945
1946         map_write(map, CMD(0x60), adr);
1947         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1948                 map_write(map, CMD(0x01), adr);
1949                 chip->state = FL_LOCKING;
1950         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1951                 map_write(map, CMD(0xD0), adr);
1952                 chip->state = FL_UNLOCKING;
1953         } else
1954                 BUG();
1955
1956         /*
1957          * If Instant Individual Block Locking supported then no need
1958          * to delay.
1959          */
1960
1961         if (!extp || !(extp->FeatureSupport & (1 << 5)))
1962                 UDELAY(map, chip, adr, 1000000/HZ);
1963
1964         /* FIXME. Use a timer to check this, and return immediately. */
1965         /* Once the state machine's known to be working I'll do that */
1966
1967         timeo = jiffies + (HZ*20);
1968         for (;;) {
1969
1970                 status = map_read(map, adr);
1971                 if (map_word_andequal(map, status, status_OK, status_OK))
1972                         break;
1973
1974                 /* OK Still waiting */
1975                 if (time_after(jiffies, timeo)) {
1976                         map_write(map, CMD(0x70), adr);
1977                         chip->state = FL_STATUS;
1978                         xip_enable(map, chip, adr);
1979                         printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
1980                         put_chip(map, chip, adr);
1981                         spin_unlock(chip->mutex);
1982                         return -EIO;
1983                 }
1984
1985                 /* Latency issues. Drop the lock, wait a while and retry */
1986                 UDELAY(map, chip, adr, 1);
1987         }
1988
1989         /* Done and happy. */
1990         chip->state = FL_STATUS;
1991         xip_enable(map, chip, adr);
1992         put_chip(map, chip, adr);
1993         spin_unlock(chip->mutex);
1994         return 0;
1995 }
1996
1997 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1998 {
1999         int ret;
2000
2001 #ifdef DEBUG_LOCK_BITS
2002         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2003                __FUNCTION__, ofs, len);
2004         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2005                 ofs, len, 0);
2006 #endif
2007
2008         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2009                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2010
2011 #ifdef DEBUG_LOCK_BITS
2012         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2013                __FUNCTION__, ret);
2014         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2015                 ofs, len, 0);
2016 #endif
2017
2018         return ret;
2019 }
2020
2021 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2022 {
2023         int ret;
2024
2025 #ifdef DEBUG_LOCK_BITS
2026         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2027                __FUNCTION__, ofs, len);
2028         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2029                 ofs, len, 0);
2030 #endif
2031
2032         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2033                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2034
2035 #ifdef DEBUG_LOCK_BITS
2036         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2037                __FUNCTION__, ret);
2038         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2039                 ofs, len, 0);
2040 #endif
2041
2042         return ret;
2043 }
2044
2045 #ifdef CONFIG_MTD_OTP
2046
2047 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2048                         u_long data_offset, u_char *buf, u_int size,
2049                         u_long prot_offset, u_int groupno, u_int groupsize);
2050
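/*
 * Read OTP (protection register) data: the registers are only visible
 * in JEDEC query mode (0x90), so the cache is invalidated around the
 * copy to make sure we don't mix in stale array-mode data.
 */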
2051 static int __xipram
2052 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2053             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2054 {
2055         struct cfi_private *cfi = map->fldrv_priv;
2056         int ret;
2057
2058         spin_lock(chip->mutex);
2059         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2060         if (ret) {
2061                 spin_unlock(chip->mutex);
2062                 return ret;
2063         }
2064
2065         /* let's ensure we're not reading back cached data from array mode */
2066         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2067
2068         xip_disable(map, chip, chip->start);
2069         if (chip->state != FL_JEDEC_QUERY) {
2070                 map_write(map, CMD(0x90), chip->start);
2071                 chip->state = FL_JEDEC_QUERY;
2072         }
2073         map_copy_from(map, buf, chip->start + offset, size);
2074         xip_enable(map, chip, chip->start);
2075
2076         /* then ensure we don't keep OTP data in the cache */
2077         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2078
2079         put_chip(map, chip, chip->start);
2080         spin_unlock(chip->mutex);
2081         return 0;
2082 }
2083
2084 static int
2085 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2086              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2087 {
2088         int ret;
2089
2090         while (size) {
2091                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2092                 int gap = offset - bus_ofs;
2093                 int n = min_t(int, size, map_bankwidth(map)-gap);
2094                 map_word datum = map_word_ff(map);
2095
2096                 datum = map_word_load_partial(map, datum, buf, gap, n);
2097                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2098                 if (ret)
2099                         return ret;
2100
2101                 offset += n;
2102                 buf += n;
2103                 size -= n;
2104         }
2105
2106         return 0;
2107 }
2108
2109 static int
2110 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2111             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2112 {
2113         struct cfi_private *cfi = map->fldrv_priv;
2114         map_word datum;
2115
2116         /* make sure area matches group boundaries */
2117         if (size != grpsz)
2118                 return -EXDEV;
2119
2120         datum = map_word_ff(map);
2121         datum = map_word_clr(map, datum, CMD(1 << grpno));
2122         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2123 }
2124
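/*
 * Walk the OTP regions described in the extended query table and apply
 * `action' to each register group intersecting [from, from+len). With
 * a NULL action the walk instead fills `buf' with struct otp_info
 * records, as used by the get_*_prot_info entry points below.
 */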
2125 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2126                                  size_t *retlen, u_char *buf,
2127                                  otp_op_t action, int user_regs)
2128 {
2129         struct map_info *map = mtd->priv;
2130         struct cfi_private *cfi = map->fldrv_priv;
2131         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2132         struct flchip *chip;
2133         struct cfi_intelext_otpinfo *otp;
2134         u_long devsize, reg_prot_offset, data_offset;
2135         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2136         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2137         int ret;
2138
2139         *retlen = 0;
2140
2141         /* Check that we actually have some OTP registers */
2142         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2143                 return -ENODATA;
2144
2145         /* we need real chips here not virtual ones */
2146         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2147         chip_step = devsize >> cfi->chipshift;
2148         chip_num = 0;
2149
2150         /* Some chips have OTP located in the _top_ partition only.
2151            For example: Intel 28F256L18T (T means top-parameter device) */
2152         if (cfi->mfr == MANUFACTURER_INTEL) {
2153                 switch (cfi->id) {
2154                 case 0x880b:
2155                 case 0x880c:
2156                 case 0x880d:
2157                         chip_num = chip_step - 1;
2158                 }
2159         }
2160
2161         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2162                 chip = &cfi->chips[chip_num];
2163                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2164
2165                 /* first OTP region */
2166                 field = 0;
2167                 reg_prot_offset = extp->ProtRegAddr;
2168                 reg_fact_groups = 1;
2169                 reg_fact_size = 1 << extp->FactProtRegSize;
2170                 reg_user_groups = 1;
2171                 reg_user_size = 1 << extp->UserProtRegSize;
2172
2173                 while (len > 0) {
2174                         /* flash geometry fixup */
2175                         data_offset = reg_prot_offset + 1;
2176                         data_offset *= cfi->interleave * cfi->device_type;
2177                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2178                         reg_fact_size *= cfi->interleave;
2179                         reg_user_size *= cfi->interleave;
2180
2181                         if (user_regs) {
2182                                 groups = reg_user_groups;
2183                                 groupsize = reg_user_size;
2184                                 /* skip over factory reg area */
2185                                 groupno = reg_fact_groups;
2186                                 data_offset += reg_fact_groups * reg_fact_size;
2187                         } else {
2188                                 groups = reg_fact_groups;
2189                                 groupsize = reg_fact_size;
2190                                 groupno = 0;
2191                         }
2192
2193                         while (len > 0 && groups > 0) {
2194                                 if (!action) {
2195                                         /*
2196                                          * Special case: if action is NULL
2197                                          * we fill buf with otp_info records.
2198                                          */
2199                                         struct otp_info *otpinfo;
2200                                         map_word lockword;
2201                                         if (len < sizeof(struct otp_info))
2202                                                 return -ENOSPC;
2203                                         len -= sizeof(struct otp_info);
2204                                         ret = do_otp_read(map, chip,
2205                                                           reg_prot_offset,
2206                                                           (u_char *)&lockword,
2207                                                           map_bankwidth(map),
2208                                                           0, 0,  0);
2209                                         if (ret)
2210                                                 return ret;
2211                                         otpinfo = (struct otp_info *)buf;
2212                                         otpinfo->start = from;
2213                                         otpinfo->length = groupsize;
2214                                         otpinfo->locked =
2215                                            !map_word_bitsset(map, lockword,
2216                                                              CMD(1 << groupno));
2217                                         from += groupsize;
2218                                         buf += sizeof(*otpinfo);
2219                                         *retlen += sizeof(*otpinfo);
2220                                 } else if (from >= groupsize) {
2221                                         from -= groupsize;
2222                                         data_offset += groupsize;
2223                                 } else {
2224                                         int size = groupsize;
2225                                         data_offset += from;
2226                                         size -= from;
2227                                         from = 0;
2228                                         if (size > len)
2229                                                 size = len;
2230                                         ret = action(map, chip, data_offset,
2231                                                      buf, size, reg_prot_offset,
2232                                                      groupno, groupsize);
2233                                         if (ret < 0)
2234                                                 return ret;
2235                                         buf += size;
2236                                         len -= size;
2237                                         *retlen += size;
2238                                         data_offset += size;
2239                                 }
2240                                 groupno++;
2241                                 groups--;
2242                         }
2243
2244                         /* next OTP region */
2245                         if (++field == extp->NumProtectionFields)
2246                                 break;
2247                         reg_prot_offset = otp->ProtRegAddr;
2248                         reg_fact_groups = otp->FactGroups;
2249                         reg_fact_size = 1 << otp->FactProtRegSize;
2250                         reg_user_groups = otp->UserGroups;
2251                         reg_user_size = 1 << otp->UserProtRegSize;
2252                         otp++;
2253                 }
2254         }
2255
2256         return 0;
2257 }
2258
2259 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2260                                            size_t len, size_t *retlen,
2261                                             u_char *buf)
2262 {
2263         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2264                                      buf, do_otp_read, 0);
2265 }
2266
2267 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2268                                            size_t len, size_t *retlen,
2269                                             u_char *buf)
2270 {
2271         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2272                                      buf, do_otp_read, 1);
2273 }
2274
2275 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2276                                             size_t len, size_t *retlen,
2277                                              u_char *buf)
2278 {
2279         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2280                                      buf, do_otp_write, 1);
2281 }
2282
2283 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2284                                            loff_t from, size_t len)
2285 {
2286         size_t retlen;
2287         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2288                                      NULL, do_otp_lock, 1);
2289 }
2290
2291 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2292                                            struct otp_info *buf, size_t len)
2293 {
2294         size_t retlen;
2295         int ret;
2296
2297         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2298         return ret ? : retlen;
2299 }
2300
2301 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2302                                            struct otp_info *buf, size_t len)
2303 {
2304         size_t retlen;
2305         int ret;
2306
2307         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2308         return ret ? : retlen;
2309 }
2310
2311 #endif
2312
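/*
 * Power management: suspend only succeeds if every chip is idle
 * (FL_READY/FL_STATUS/FL_CFI_QUERY/FL_JEDEC_QUERY, with no suspended
 * operation pending resume); otherwise -EAGAIN is returned and any
 * chips already marked FL_PM_SUSPENDED are rolled back.
 */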
2313 static int cfi_intelext_suspend(struct mtd_info *mtd)
2314 {
2315         struct map_info *map = mtd->priv;
2316         struct cfi_private *cfi = map->fldrv_priv;
2317         int i;
2318         struct flchip *chip;
2319         int ret = 0;
2320
2321         for (i=0; !ret && i<cfi->numchips; i++) {
2322                 chip = &cfi->chips[i];
2323
2324                 spin_lock(chip->mutex);
2325
2326                 switch (chip->state) {
2327                 case FL_READY:
2328                 case FL_STATUS:
2329                 case FL_CFI_QUERY:
2330                 case FL_JEDEC_QUERY:
2331                         if (chip->oldstate == FL_READY) {
2332                                 chip->oldstate = chip->state;
2333                                 chip->state = FL_PM_SUSPENDED;
2334                                 /* No need to wake_up() on this state change -
2335                                  * as the whole point is that nobody can do anything
2336                                  * with the chip now anyway.
2337                                  */
2338                         } else {
2339                                 /* There seems to be an operation pending. We must wait for it. */
2340                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2341                                 ret = -EAGAIN;
2342                         }
2343                         break;
2344                 default:
2345                         /* Should we actually wait? Once upon a time these routines weren't
2346                            allowed to. Or should we return -EAGAIN, because the upper layers
2347                            ought to have already shut down anything which was using the device
2348                            anyway? The latter for now. */
2349                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2350                         ret = -EAGAIN;
2351                 case FL_PM_SUSPENDED:
2352                         break;
2353                 }
2354                 spin_unlock(chip->mutex);
2355         }
2356
2357         /* Unlock the chips again */
2358
2359         if (ret) {
2360                 for (i--; i >= 0; i--) {
2361                         chip = &cfi->chips[i];
2362
2363                         spin_lock(chip->mutex);
2364
2365                         if (chip->state == FL_PM_SUSPENDED) {
2366                                 /* No need to force it into a known state here,
2367                                    because we're returning failure, and it didn't
2368                                    get power cycled */
2369                                 chip->state = chip->oldstate;
2370                                 chip->oldstate = FL_READY;
2371                                 wake_up(&chip->wq);
2372                         }
2373                         spin_unlock(chip->mutex);
2374                 }
2375         }
2376
2377         return ret;
2378 }
2379
2380 static void cfi_intelext_resume(struct mtd_info *mtd)
2381 {
2382         struct map_info *map = mtd->priv;
2383         struct cfi_private *cfi = map->fldrv_priv;
2384         int i;
2385         struct flchip *chip;
2386
2387         for (i=0; i<cfi->numchips; i++) {
2388
2389                 chip = &cfi->chips[i];
2390
2391                 spin_lock(chip->mutex);
2392
2393                 /* Go to known state. Chip may have been power cycled */
2394                 if (chip->state == FL_PM_SUSPENDED) {
2395                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2396                         chip->oldstate = chip->state = FL_READY;
2397                         wake_up(&chip->wq);
2398                 }
2399
2400                 spin_unlock(chip->mutex);
2401         }
2402 }
2403
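/*
 * Shared by the reboot notifier and destroy: wait for each chip to go
 * idle and put it back into array mode (0xff) so a bootloader stored
 * in flash remains readable across a soft reboot.
 */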
2404 static int cfi_intelext_reset(struct mtd_info *mtd)
2405 {
2406         struct map_info *map = mtd->priv;
2407         struct cfi_private *cfi = map->fldrv_priv;
2408         int i, ret;
2409
2410         for (i=0; i < cfi->numchips; i++) {
2411                 struct flchip *chip = &cfi->chips[i];
2412
2413                 /* force the completion of any ongoing operation
2414                    and switch to array mode so any bootloader in
2415                    flash is accessible for soft reboot. */
2416                 spin_lock(chip->mutex);
2417                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2418                 if (!ret) {
2419                         map_write(map, CMD(0xff), chip->start);
2420                         chip->state = FL_READY;
2421                 }
2422                 spin_unlock(chip->mutex);
2423         }
2424
2425         return 0;
2426 }
2427
2428 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2429                                void *v)
2430 {
2431         struct mtd_info *mtd;
2432
2433         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2434         cfi_intelext_reset(mtd);
2435         return NOTIFY_DONE;
2436 }
2437
2438 static void cfi_intelext_destroy(struct mtd_info *mtd)
2439 {
2440         struct map_info *map = mtd->priv;
2441         struct cfi_private *cfi = map->fldrv_priv;
2442         cfi_intelext_reset(mtd);
2443         unregister_reboot_notifier(&mtd->reboot_notifier);
2444         kfree(cfi->cmdset_priv);
2445         kfree(cfi->cfiq);
2446         kfree(cfi->chips[0].priv);
2447         kfree(cfi);
2448         kfree(mtd->eraseregions);
2449 }
2450
2451 MODULE_LICENSE("GPL");
2452 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2453 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2454 MODULE_ALIAS("cfi_cmdset_0003");
2455 MODULE_ALIAS("cfi_cmdset_0200");