drivers/mtd/chips/cfi_cmdset_0001.c
/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.181 2005/08/06 04:16:48 nico Exp $
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging: turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */
static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        for (i=10; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear to be
         * common.  It is likely that the device IDs are as well.
         * This table picks up all the cases where we know that to
         * be true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};
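
/*
 * For reference: the fixup tables above are consumed by cfi_fixup()
 * (implemented in cfi_util.c, not in this file), which applies every
 * entry whose manufacturer and device IDs match the probed chip, with
 * CFI_MFR_ANY and CFI_ID_ANY acting as wildcards.  A minimal sketch of
 * that matching loop, assuming the struct cfi_fixup layout from
 * linux/mtd/cfi.h, is kept here (compiled out) purely for illustration:
 */
#if 0
static void example_fixup_walk(struct mtd_info *mtd, struct cfi_fixup *fixups)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_fixup *f;

        for (f = fixups; f->fixup; f++) {
                /* wildcard or exact match on manufacturer and device ID */
                if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
                    (f->id == CFI_ID_ANY || f->id == cfi->id))
                        f->fixup(mtd, f->param);
        }
}
#endif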

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

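                /*
                 * Layout of the version 1.3 variable-length 'extra' data
                 * as parsed below: (NumProtectionFields - 1) further
                 * cfi_intelext_otpinfo entries (the first lives in the
                 * fixed part of the structure), then 6 bytes of burst
                 * read info, then one byte holding the number of
                 * partition regions, then one cfi_intelext_regioninfo
                 * per region, each followed by (NumBlockTypes - 1)
                 * additional cfi_intelext_blockinfo entries.
                 */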
                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 6;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        memset(mtd, 0, sizeof(*mtd));
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
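
/*
 * Illustrative usage sketch (compiled out, not part of this driver):
 * board map drivers normally reach cfi_cmdset_0001() indirectly, by
 * registering a map_info and letting the generic CFI probe dispatch on
 * the primary vendor command set ID.  Everything named MYBOARD_* below
 * is a hypothetical placeholder.
 */
#if 0
static struct map_info myboard_map = {
        .name      = "myboard-nor",
        .phys      = MYBOARD_FLASH_BASE,        /* hypothetical */
        .size      = MYBOARD_FLASH_SIZE,        /* hypothetical */
        .bankwidth = 2,
};

static int __init myboard_mtd_init(void)
{
        struct mtd_info *mtd;

        myboard_map.virt = ioremap(myboard_map.phys, myboard_map.size);
        if (!myboard_map.virt)
                return -EIO;
        simple_map_init(&myboard_map);
        /* "cfi_probe" reads the CFI query table; a primary command set
           ID of 0x0001 ends up in cfi_cmdset_0001() above. */
        mtd = do_map_probe("cfi_probe", &myboard_map);
        if (!mtd) {
                iounmap(myboard_map.virt);
                return -ENXIO;
        }
        return add_mtd_device(mtd);
}
#endif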

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
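                /*
                 * Per the CFI spec, each 32-bit erase-region entry packs
                 * the number of blocks minus one into its low 16 bits and
                 * the block size in units of 256 bytes into its high 16
                 * bits.  E.g. 0x0020003f describes 64 blocks of 8 KiB
                 * (0x20 * 256), scaled below by the interleave factor.
                 */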
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if(mtd) {
                if(mtd->eraseregions)
                        kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += 6;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);
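                /*
                 * Example with hypothetical numbers: a 16 MiB chip
                 * (chipshift = 24) advertising 8 hardware partitions
                 * yields partshift = 24 - __ffs(8) = 21, i.e. eight
                 * 2 MiB virtual chips.  The erasesize check below
                 * rejects implausible partition counts.
                 */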

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
                /*
                 * OK. We have the possibility of contention on the
                 * write/erase operations which are global to the real
                 * chip and not per partition.  So let's fight it over in
                 * the partition which currently has authority on the
                 * operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read when its lock is taken.
                 * However any writes to it can only be made when the current
                 * owner's lock is also held.
                 */
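                /*
                 * Concretely: if partition A is mid-erase and partition B
                 * wants to write, B finds A as the contender below, takes
                 * A's mutex and recursively calls get_chip() on A, which
                 * suspends A's erase (when the chip supports it) before B
                 * is allowed to proceed.
                 */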
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                if (contender && contender != chip)
                        spin_unlock(contender->mutex);
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
                                       map->name, status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }
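                /* fall through: the chip is now ready */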

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}
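
/*
 * Typical calling convention for get_chip()/put_chip(), as used by the
 * chip access functions below (see do_read_onechip() for a complete
 * example):
 *
 *      spin_lock(chip->mutex);
 *      ret = get_chip(map, chip, cmd_addr, FL_READY);
 *      if (!ret) {
 *              ... issue commands / access the flash ...
 *              put_chip(map, chip, cmd_addr);
 *      }
 *      spin_unlock(chip->mutex);
 */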

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * Within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and for pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending, the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                                unsigned long adr, int usec)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long suspended, start = xip_currtime();
        flstate_t oldstate, newstate;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        usec -= xip_elapsed_since(start);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
        } while (!map_word_andequal(map, status, OK, OK)
                 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
        UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why no special care is
 * taken over the add_wait_queue() or schedule() calls appearing within a
 * couple of xip_disable()'d areas of code, like in do_erase_oneblock for
 * example.  The queueing and scheduling are always happening within
 * xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), where the flash is
 * in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
        spin_unlock(chip->mutex);  \
        cfi_udelay(usec);  \
        spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
        spin_unlock(chip->mutex);  \
        INVALIDATE_CACHED_RANGE(map, adr, len);  \
        cfi_udelay(usec);  \
        spin_lock(chip->mutex);  \
} while (0)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;

        *mtdbuf = (void *)map->virt + from;
        *retlen = 0;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

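                /* If the request runs past the end of this chip, clamp it:
                   e.g. with chipshift = 24 (16 MiB per chip), a 0x3000 byte
                   request starting at ofs 0xffe000 maps to 0x2000 bytes here
                   and 0x1000 bytes on the next chip. */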
                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                chipnum++;
        }
        return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        /* Now unlock the chip(s) POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                if (chipnum >= cfi->numchips)
                        break;
                chip = &cfi->chips[chipnum];

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if(chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
                        printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */

                put_chip(map, chip, chip->start);
                spin_unlock(chip->mutex);

                len -= thislen;
                ofs = 0;
                chipnum++;
        }
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        spin_unlock(chip->mutex);
        return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}
1214
1215 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1216                                      unsigned long adr, map_word datum, int mode)
1217 {
1218         struct cfi_private *cfi = map->fldrv_priv;
1219         map_word status, status_OK, write_cmd;
1220         unsigned long timeo;
1221         int z, ret=0;
1222
1223         adr += chip->start;
1224
1225         /* Let's determine this according to the interleave only once */
1226         status_OK = CMD(0x80);
1227         switch (mode) {
1228         case FL_WRITING:   write_cmd = CMD(0x40); break;
1229         case FL_OTP_WRITE: write_cmd = CMD(0xc0); break;
1230         default: return -EINVAL;
1231         }
1232
1233         spin_lock(chip->mutex);
1234         ret = get_chip(map, chip, adr, mode);
1235         if (ret) {
1236                 spin_unlock(chip->mutex);
1237                 return ret;
1238         }
1239
1240         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1241         ENABLE_VPP(map);
1242         xip_disable(map, chip, adr);
1243         map_write(map, write_cmd, adr);
1244         map_write(map, datum, adr);
1245         chip->state = mode;
1246
1247         INVALIDATE_CACHE_UDELAY(map, chip,
1248                                 adr, map_bankwidth(map),
1249                                 chip->word_write_time);
1250
1251         timeo = jiffies + (HZ/2);
1252         z = 0;
1253         for (;;) {
1254                 if (chip->state != mode) {
1255                         /* Someone's suspended the write. Sleep */
1256                         DECLARE_WAITQUEUE(wait, current);
1257
1258                         set_current_state(TASK_UNINTERRUPTIBLE);
1259                         add_wait_queue(&chip->wq, &wait);
1260                         spin_unlock(chip->mutex);
1261                         schedule();
1262                         remove_wait_queue(&chip->wq, &wait);
1263                         timeo = jiffies + (HZ / 2); /* FIXME */
1264                         spin_lock(chip->mutex);
1265                         continue;
1266                 }
1267
1268                 status = map_read(map, adr);
1269                 if (map_word_andequal(map, status, status_OK, status_OK))
1270                         break;
1271                 
1272                 /* OK Still waiting */
1273                 if (time_after(jiffies, timeo)) {
1274                         map_write(map, CMD(0x70), adr);
1275                         chip->state = FL_STATUS;
1276                         xip_enable(map, chip, adr);
1277                         printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1278                         ret = -EIO;
1279                         goto out;
1280                 }
1281
1282                 /* Latency issues. Drop the lock, wait a while and retry */
1283                 z++;
1284                 UDELAY(map, chip, adr, 1);
1285         }
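        /* adaptive timing: if no extra polling was needed, trim the
           programmed delay; if more than one poll was needed, lengthen it */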
1286         if (!z) {
1287                 chip->word_write_time--;
1288                 if (!chip->word_write_time)
1289                         chip->word_write_time = 1;
1290         }
1291         if (z > 1) 
1292                 chip->word_write_time++;
1293
1294         /* Done and happy. */
1295         chip->state = FL_STATUS;
1296
1297         /* check for errors */
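        /* 0x1a = SR.4 (program error) | SR.3 (VPP low) | SR.1 (block
           locked); MERGESTATUS() folds the interleaved copies into one */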
1298         if (map_word_bitsset(map, status, CMD(0x1a))) {
1299                 unsigned long chipstatus = MERGESTATUS(status);
1300
1301                 /* reset status */
1302                 map_write(map, CMD(0x50), adr);
1303                 map_write(map, CMD(0x70), adr);
1304                 xip_enable(map, chip, adr);
1305
1306                 if (chipstatus & 0x02) {
1307                         ret = -EROFS;
1308                 } else if (chipstatus & 0x08) {
1309                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1310                         ret = -EIO;
1311                 } else {
1312                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1313                         ret = -EINVAL;
1314                 }
1315
1316                 goto out;
1317         }
1318
1319         xip_enable(map, chip, adr);
1320  out:   put_chip(map, chip, adr);
1321         spin_unlock(chip->mutex);
1322         return ret;
1323 }
1324
1325
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1327 {
1328         struct map_info *map = mtd->priv;
1329         struct cfi_private *cfi = map->fldrv_priv;
1330         int ret = 0;
1331         int chipnum;
1332         unsigned long ofs;
1333
1334         *retlen = 0;
1335         if (!len)
1336                 return 0;
1337
1338         chipnum = to >> cfi->chipshift;
1339         ofs = to  - (chipnum << cfi->chipshift);
1340
        /* If it's not bus-aligned, do a partial word write for the leading bytes */
1342         if (ofs & (map_bankwidth(map)-1)) {
1343                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1344                 int gap = ofs - bus_ofs;
1345                 int n;
1346                 map_word datum;
1347
1348                 n = min_t(int, len, map_bankwidth(map)-gap);
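                /* pad with 0xff: programming can only clear bits, so the
                   bytes outside [gap, gap+n) are left unchanged */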
1349                 datum = map_word_ff(map);
1350                 datum = map_word_load_partial(map, datum, buf, gap, n);
1351
1352                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1353                                                bus_ofs, datum, FL_WRITING);
1354                 if (ret) 
1355                         return ret;
1356
1357                 len -= n;
1358                 ofs += n;
1359                 buf += n;
1360                 (*retlen) += n;
1361
1362                 if (ofs >> cfi->chipshift) {
                        chipnum++;
1364                         ofs = 0;
1365                         if (chipnum == cfi->numchips)
1366                                 return 0;
1367                 }
1368         }
1369         
1370         while(len >= map_bankwidth(map)) {
1371                 map_word datum = map_word_load(map, buf);
1372
1373                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1374                                        ofs, datum, FL_WRITING);
1375                 if (ret)
1376                         return ret;
1377
1378                 ofs += map_bankwidth(map);
1379                 buf += map_bankwidth(map);
1380                 (*retlen) += map_bankwidth(map);
1381                 len -= map_bankwidth(map);
1382
1383                 if (ofs >> cfi->chipshift) {
                        chipnum++;
1385                         ofs = 0;
1386                         if (chipnum == cfi->numchips)
1387                                 return 0;
1388                 }
1389         }
1390
1391         if (len & (map_bankwidth(map)-1)) {
1392                 map_word datum;
1393
1394                 datum = map_word_ff(map);
1395                 datum = map_word_load_partial(map, datum, buf, 0, len);
1396
1397                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1398                                        ofs, datum, FL_WRITING);
1399                 if (ret) 
1400                         return ret;
1401                 
1402                 (*retlen) += len;
1403         }
1404
1405         return 0;
1406 }
1407
1408
1409 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 
1410                                     unsigned long adr, const u_char *buf, int len)
1411 {
1412         struct cfi_private *cfi = map->fldrv_priv;
1413         map_word status, status_OK;
1414         unsigned long cmd_adr, timeo;
1415         int wbufsize, z, ret=0, bytes, words;
1416
1417         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1418         adr += chip->start;
1419         cmd_adr = adr & ~(wbufsize-1);
1420         
1421         /* Let's determine this according to the interleave only once */
1422         status_OK = CMD(0x80);
1423
1424         spin_lock(chip->mutex);
1425         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1426         if (ret) {
1427                 spin_unlock(chip->mutex);
1428                 return ret;
1429         }
1430
1431         XIP_INVAL_CACHED_RANGE(map, adr, len);
1432         ENABLE_VPP(map);
1433         xip_disable(map, chip, cmd_adr);
1434
        /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
           [...], the device will not accept any more Write to Buffer commands".
           So we must check here and reset those bits if they're set. Otherwise
           we're just pissing in the wind */
1439         if (chip->state != FL_STATUS)
1440                 map_write(map, CMD(0x70), cmd_adr);
1441         status = map_read(map, cmd_adr);
1442         if (map_word_bitsset(map, status, CMD(0x30))) {
1443                 xip_enable(map, chip, cmd_adr);
1444                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1445                 xip_disable(map, chip, cmd_adr);
1446                 map_write(map, CMD(0x50), cmd_adr);
1447                 map_write(map, CMD(0x70), cmd_adr);
1448         }
1449
1450         chip->state = FL_WRITING_TO_BUFFER;
1451
1452         z = 0;
1453         for (;;) {
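                /* 0xe8 = Write to Buffer; the following read returns the
                   extended status register, bit 7 = buffer available */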
1454                 map_write(map, CMD(0xe8), cmd_adr);
1455
1456                 status = map_read(map, cmd_adr);
1457                 if (map_word_andequal(map, status, status_OK, status_OK))
1458                         break;
1459
1460                 UDELAY(map, chip, cmd_adr, 1);
1461
1462                 if (++z > 20) {
1463                         /* Argh. Not ready for write to buffer */
1464                         map_word Xstatus;
1465                         map_write(map, CMD(0x70), cmd_adr);
1466                         chip->state = FL_STATUS;
1467                         Xstatus = map_read(map, cmd_adr);
1468                         /* Odd. Clear status bits */
1469                         map_write(map, CMD(0x50), cmd_adr);
1470                         map_write(map, CMD(0x70), cmd_adr);
1471                         xip_enable(map, chip, cmd_adr);
1472                         printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
1473                                map->name, status.x[0], Xstatus.x[0]);
1474                         ret = -EIO;
1475                         goto out;
1476                 }
1477         }
1478
1479         /* Write length of data to come */
1480         bytes = len & (map_bankwidth(map)-1);
1481         words = len / map_bankwidth(map);
        map_write(map, CMD(words - !bytes), cmd_adr);
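        /* the chip expects N-1 where N is the number of bus words to
           follow: N = words + (bytes != 0), so N-1 == words - !bytes */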
1483
1484         /* Write data */
1485         z = 0;
1486         while(z < words * map_bankwidth(map)) {
1487                 map_word datum = map_word_load(map, buf);
1488                 map_write(map, datum, adr+z);
1489
1490                 z += map_bankwidth(map);
1491                 buf += map_bankwidth(map);
1492         }
1493
1494         if (bytes) {
1495                 map_word datum;
1496
1497                 datum = map_word_ff(map);
1498                 datum = map_word_load_partial(map, datum, buf, 0, bytes);
1499                 map_write(map, datum, adr+z);
1500         }
1501
1502         /* GO GO GO */
1503         map_write(map, CMD(0xd0), cmd_adr);
1504         chip->state = FL_WRITING;
1505
1506         INVALIDATE_CACHE_UDELAY(map, chip, 
1507                                 cmd_adr, len,
1508                                 chip->buffer_write_time);
1509
1510         timeo = jiffies + (HZ/2);
1511         z = 0;
1512         for (;;) {
1513                 if (chip->state != FL_WRITING) {
1514                         /* Someone's suspended the write. Sleep */
1515                         DECLARE_WAITQUEUE(wait, current);
1516                         set_current_state(TASK_UNINTERRUPTIBLE);
1517                         add_wait_queue(&chip->wq, &wait);
1518                         spin_unlock(chip->mutex);
1519                         schedule();
1520                         remove_wait_queue(&chip->wq, &wait);
1521                         timeo = jiffies + (HZ / 2); /* FIXME */
1522                         spin_lock(chip->mutex);
1523                         continue;
1524                 }
1525
1526                 status = map_read(map, cmd_adr);
1527                 if (map_word_andequal(map, status, status_OK, status_OK))
1528                         break;
1529
1530                 /* OK Still waiting */
1531                 if (time_after(jiffies, timeo)) {
1532                         map_write(map, CMD(0x70), cmd_adr);
1533                         chip->state = FL_STATUS;
1534                         xip_enable(map, chip, cmd_adr);
1535                         printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1536                         ret = -EIO;
1537                         goto out;
1538                 }
1539                 
1540                 /* Latency issues. Drop the lock, wait a while and retry */
1541                 z++;
1542                 UDELAY(map, chip, cmd_adr, 1);
1543         }
1544         if (!z) {
1545                 chip->buffer_write_time--;
1546                 if (!chip->buffer_write_time)
1547                         chip->buffer_write_time = 1;
1548         }
1549         if (z > 1) 
1550                 chip->buffer_write_time++;
1551
1552         /* Done and happy. */
1553         chip->state = FL_STATUS;
1554
1555         /* check for errors */
1556         if (map_word_bitsset(map, status, CMD(0x1a))) {
1557                 unsigned long chipstatus = MERGESTATUS(status);
1558
1559                 /* reset status */
1560                 map_write(map, CMD(0x50), cmd_adr);
1561                 map_write(map, CMD(0x70), cmd_adr);
1562                 xip_enable(map, chip, cmd_adr);
1563
1564                 if (chipstatus & 0x02) {
1565                         ret = -EROFS;
1566                 } else if (chipstatus & 0x08) {
1567                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1568                         ret = -EIO;
1569                 } else {
1570                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1571                         ret = -EINVAL;
1572                 }
1573
1574                 goto out;
1575         }
1576
1577         xip_enable(map, chip, cmd_adr);
1578  out:   put_chip(map, chip, cmd_adr);
1579         spin_unlock(chip->mutex);
1580         return ret;
1581 }
1582
1583 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to, 
1584                                        size_t len, size_t *retlen, const u_char *buf)
1585 {
1586         struct map_info *map = mtd->priv;
1587         struct cfi_private *cfi = map->fldrv_priv;
1588         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1589         int ret = 0;
1590         int chipnum;
1591         unsigned long ofs;
1592
1593         *retlen = 0;
1594         if (!len)
1595                 return 0;
1596
1597         chipnum = to >> cfi->chipshift;
1598         ofs = to  - (chipnum << cfi->chipshift);
1599
1600         /* If it's not bus-aligned, do the first word write */
1601         if (ofs & (map_bankwidth(map)-1)) {
1602                 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1603                 if (local_len > len)
1604                         local_len = len;
1605                 ret = cfi_intelext_write_words(mtd, to, local_len,
1606                                                retlen, buf);
1607                 if (ret)
1608                         return ret;
1609                 ofs += local_len;
1610                 buf += local_len;
1611                 len -= local_len;
1612
1613                 if (ofs >> cfi->chipshift) {
                        chipnum++;
1615                         ofs = 0;
1616                         if (chipnum == cfi->numchips)
1617                                 return 0;
1618                 }
1619         }
1620
1621         while(len) {
1622                 /* We must not cross write block boundaries */
1623                 int size = wbufsize - (ofs & (wbufsize-1));
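                /* e.g. wbufsize = 32, ofs = 0x1c gives size = 4: write only
                   up to the buffer boundary and let the loop continue */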
1624
1625                 if (size > len)
1626                         size = len;
1627                 ret = do_write_buffer(map, &cfi->chips[chipnum], 
1628                                       ofs, buf, size);
1629                 if (ret)
1630                         return ret;
1631
1632                 ofs += size;
1633                 buf += size;
1634                 (*retlen) += size;
1635                 len -= size;
1636
1637                 if (ofs >> cfi->chipshift) {
                        chipnum++;
1639                         ofs = 0;
1640                         if (chipnum == cfi->numchips)
1641                                 return 0;
1642                 }
1643         }
1644         return 0;
1645 }
1646
1647 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1648                                       unsigned long adr, int len, void *thunk)
1649 {
1650         struct cfi_private *cfi = map->fldrv_priv;
1651         map_word status, status_OK;
1652         unsigned long timeo;
1653         int retries = 3;
1654         DECLARE_WAITQUEUE(wait, current);
1655         int ret = 0;
1656
1657         adr += chip->start;
1658
1659         /* Let's determine this according to the interleave only once */
1660         status_OK = CMD(0x80);
1661
1662  retry:
1663         spin_lock(chip->mutex);
1664         ret = get_chip(map, chip, adr, FL_ERASING);
1665         if (ret) {
1666                 spin_unlock(chip->mutex);
1667                 return ret;
1668         }
1669
1670         XIP_INVAL_CACHED_RANGE(map, adr, len);
1671         ENABLE_VPP(map);
1672         xip_disable(map, chip, adr);
1673
1674         /* Clear the status register first */
1675         map_write(map, CMD(0x50), adr);
1676
1677         /* Now erase */
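        /* 0x20 = block erase setup, 0xD0 = erase confirm */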
1678         map_write(map, CMD(0x20), adr);
1679         map_write(map, CMD(0xD0), adr);
1680         chip->state = FL_ERASING;
1681         chip->erase_suspended = 0;
1682
1683         INVALIDATE_CACHE_UDELAY(map, chip,
1684                                 adr, len,
1685                                 chip->erase_time*1000/2);
1686
1687         /* FIXME. Use a timer to check this, and return immediately. */
1688         /* Once the state machine's known to be working I'll do that */
1689
1690         timeo = jiffies + (HZ*20);
1691         for (;;) {
1692                 if (chip->state != FL_ERASING) {
1693                         /* Someone's suspended the erase. Sleep */
1694                         set_current_state(TASK_UNINTERRUPTIBLE);
1695                         add_wait_queue(&chip->wq, &wait);
1696                         spin_unlock(chip->mutex);
1697                         schedule();
1698                         remove_wait_queue(&chip->wq, &wait);
1699                         spin_lock(chip->mutex);
1700                         continue;
1701                 }
1702                 if (chip->erase_suspended) {
1703                         /* This erase was suspended and resumed.
1704                            Adjust the timeout */
1705                         timeo = jiffies + (HZ*20); /* FIXME */
1706                         chip->erase_suspended = 0;
1707                 }
1708
1709                 status = map_read(map, adr);
1710                 if (map_word_andequal(map, status, status_OK, status_OK))
1711                         break;
1712                 
1713                 /* OK Still waiting */
1714                 if (time_after(jiffies, timeo)) {
1715                         map_write(map, CMD(0x70), adr);
1716                         chip->state = FL_STATUS;
1717                         xip_enable(map, chip, adr);
1718                         printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1719                         ret = -EIO;
1720                         goto out;
1721                 }
1722                 
1723                 /* Latency issues. Drop the lock, wait a while and retry */
1724                 UDELAY(map, chip, adr, 1000000/HZ);
1725         }
1726
1727         /* We've broken this before. It doesn't hurt to be safe */
1728         map_write(map, CMD(0x70), adr);
1729         chip->state = FL_STATUS;
1730         status = map_read(map, adr);
1731
1732         /* check for errors */
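        /* 0x3a = SR.5 (erase error) | SR.4 (program error) | SR.3 (VPP
           low) | SR.1 (block locked) */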
1733         if (map_word_bitsset(map, status, CMD(0x3a))) {
1734                 unsigned long chipstatus = MERGESTATUS(status);
1735
1736                 /* Reset the error bits */
1737                 map_write(map, CMD(0x50), adr);
1738                 map_write(map, CMD(0x70), adr);
1739                 xip_enable(map, chip, adr);
1740
1741                 if ((chipstatus & 0x30) == 0x30) {
1742                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1743                         ret = -EINVAL;
1744                 } else if (chipstatus & 0x02) {
1745                         /* Protection bit set */
1746                         ret = -EROFS;
1747                 } else if (chipstatus & 0x8) {
1748                         /* Voltage */
1749                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1750                         ret = -EIO;
1751                 } else if (chipstatus & 0x20 && retries--) {
1752                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1753                         timeo = jiffies + HZ;
1754                         put_chip(map, chip, adr);
1755                         spin_unlock(chip->mutex);
1756                         goto retry;
1757                 } else {
1758                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1759                         ret = -EIO;
1760                 }
1761
1762                 goto out;
1763         }
1764
1765         xip_enable(map, chip, adr);
1766  out:   put_chip(map, chip, adr);
1767         spin_unlock(chip->mutex);
1768         return ret;
1769 }
1770
static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1772 {
1773         unsigned long ofs, len;
1774         int ret;
1775
1776         ofs = instr->addr;
1777         len = instr->len;
1778
1779         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1780         if (ret)
1781                 return ret;
1782
1783         instr->state = MTD_ERASE_DONE;
1784         mtd_erase_callback(instr);
1785         
1786         return 0;
1787 }
1788
1789 static void cfi_intelext_sync (struct mtd_info *mtd)
1790 {
1791         struct map_info *map = mtd->priv;
1792         struct cfi_private *cfi = map->fldrv_priv;
1793         int i;
1794         struct flchip *chip;
1795         int ret = 0;
1796
1797         for (i=0; !ret && i<cfi->numchips; i++) {
1798                 chip = &cfi->chips[i];
1799
1800                 spin_lock(chip->mutex);
1801                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1802
1803                 if (!ret) {
1804                         chip->oldstate = chip->state;
1805                         chip->state = FL_SYNCING;
1806                         /* No need to wake_up() on this state change - 
1807                          * as the whole point is that nobody can do anything
1808                          * with the chip now anyway.
1809                          */
1810                 }
1811                 spin_unlock(chip->mutex);
1812         }
1813
1814         /* Unlock the chips again */
1815
1816         for (i--; i >=0; i--) {
1817                 chip = &cfi->chips[i];
1818
1819                 spin_lock(chip->mutex);
1820                 
1821                 if (chip->state == FL_SYNCING) {
1822                         chip->state = chip->oldstate;
1823                         chip->oldstate = FL_READY;
1824                         wake_up(&chip->wq);
1825                 }
1826                 spin_unlock(chip->mutex);
1827         }
1828 }
1829
1830 #ifdef DEBUG_LOCK_BITS
1831 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1832                                                 struct flchip *chip,
1833                                                 unsigned long adr,
1834                                                 int len, void *thunk)
1835 {
1836         struct cfi_private *cfi = map->fldrv_priv;
1837         int status, ofs_factor = cfi->interleave * cfi->device_type;
1838
1839         adr += chip->start;
1840         xip_disable(map, chip, adr+(2*ofs_factor));
1841         map_write(map, CMD(0x90), adr+(2*ofs_factor));
1842         chip->state = FL_JEDEC_QUERY;
1843         status = cfi_read_query(map, adr+(2*ofs_factor));
1844         xip_enable(map, chip, 0);
1845         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1846                adr, status);
1847         return 0;
1848 }
1849 #endif
1850
1851 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1852 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1853
1854 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1855                                        unsigned long adr, int len, void *thunk)
1856 {
1857         struct cfi_private *cfi = map->fldrv_priv;
1858         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1859         map_word status, status_OK;
1860         unsigned long timeo = jiffies + HZ;
1861         int ret;
1862
1863         adr += chip->start;
1864
1865         /* Let's determine this according to the interleave only once */
1866         status_OK = CMD(0x80);
1867
1868         spin_lock(chip->mutex);
1869         ret = get_chip(map, chip, adr, FL_LOCKING);
1870         if (ret) {
1871                 spin_unlock(chip->mutex);
1872                 return ret;
1873         }
1874
1875         ENABLE_VPP(map);
1876         xip_disable(map, chip, adr);
1877         
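        /* 0x60 = lock setup; it is followed by 0x01 to set the block
           lock bit or 0xD0 to clear it */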
1878         map_write(map, CMD(0x60), adr);
1879         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1880                 map_write(map, CMD(0x01), adr);
1881                 chip->state = FL_LOCKING;
1882         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1883                 map_write(map, CMD(0xD0), adr);
1884                 chip->state = FL_UNLOCKING;
1885         } else
1886                 BUG();
1887
1888         /*
1889          * If Instant Individual Block Locking supported then no need
1890          * to delay.
1891          */
1892
1893         if (!extp || !(extp->FeatureSupport & (1 << 5)))
1894                 UDELAY(map, chip, adr, 1000000/HZ);
1895
1896         /* FIXME. Use a timer to check this, and return immediately. */
1897         /* Once the state machine's known to be working I'll do that */
1898
1899         timeo = jiffies + (HZ*20);
1900         for (;;) {
1901
1902                 status = map_read(map, adr);
1903                 if (map_word_andequal(map, status, status_OK, status_OK))
1904                         break;
1905                 
1906                 /* OK Still waiting */
1907                 if (time_after(jiffies, timeo)) {
1908                         map_write(map, CMD(0x70), adr);
1909                         chip->state = FL_STATUS;
1910                         xip_enable(map, chip, adr);
                        printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
1912                         put_chip(map, chip, adr);
1913                         spin_unlock(chip->mutex);
1914                         return -EIO;
1915                 }
1916                 
1917                 /* Latency issues. Drop the lock, wait a while and retry */
1918                 UDELAY(map, chip, adr, 1);
1919         }
1920         
1921         /* Done and happy. */
1922         chip->state = FL_STATUS;
1923         xip_enable(map, chip, adr);
1924         put_chip(map, chip, adr);
1925         spin_unlock(chip->mutex);
1926         return 0;
1927 }
1928
1929 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1930 {
1931         int ret;
1932
#ifdef DEBUG_LOCK_BITS
        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
               __FUNCTION__, (unsigned long long)ofs, len);
        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
                ofs, len, NULL);
#endif
1939
1940         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock, 
1941                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1942         
#ifdef DEBUG_LOCK_BITS
        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
               __FUNCTION__, ret);
        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
                ofs, len, NULL);
#endif
1949
1950         return ret;
1951 }
1952
1953 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1954 {
1955         int ret;
1956
#ifdef DEBUG_LOCK_BITS
        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
               __FUNCTION__, (unsigned long long)ofs, len);
        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
                ofs, len, NULL);
#endif
1963
1964         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1965                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1966         
#ifdef DEBUG_LOCK_BITS
        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
               __FUNCTION__, ret);
        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
                ofs, len, NULL);
#endif
1973         
1974         return ret;
1975 }
1976
1977 #ifdef CONFIG_MTD_OTP
1978
1979 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip, 
1980                         u_long data_offset, u_char *buf, u_int size,
1981                         u_long prot_offset, u_int groupno, u_int groupsize);
1982
1983 static int __xipram
1984 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
1985             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1986 {
1987         struct cfi_private *cfi = map->fldrv_priv;
1988         int ret;
1989
1990         spin_lock(chip->mutex);
1991         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
1992         if (ret) {
1993                 spin_unlock(chip->mutex);
1994                 return ret;
1995         }
1996
1997         /* let's ensure we're not reading back cached data from array mode */
1998         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1999
2000         xip_disable(map, chip, chip->start);
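        /* 0x90 selects identifier/query mode; the protection registers
           are then readable at their query-space offsets */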
2001         if (chip->state != FL_JEDEC_QUERY) {
2002                 map_write(map, CMD(0x90), chip->start);
2003                 chip->state = FL_JEDEC_QUERY;
2004         }
2005         map_copy_from(map, buf, chip->start + offset, size);
2006         xip_enable(map, chip, chip->start);
2007
2008         /* then ensure we don't keep OTP data in the cache */
2009         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2010
2011         put_chip(map, chip, chip->start);
2012         spin_unlock(chip->mutex);
2013         return 0;
2014 }
2015
2016 static int
2017 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2018              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2019 {
2020         int ret;
2021
2022         while (size) {
2023                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2024                 int gap = offset - bus_ofs;
2025                 int n = min_t(int, size, map_bankwidth(map)-gap);
2026                 map_word datum = map_word_ff(map);
2027
2028                 datum = map_word_load_partial(map, datum, buf, gap, n);
2029                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2030                 if (ret) 
2031                         return ret;
2032
2033                 offset += n;
2034                 buf += n;
2035                 size -= n;
2036         }
2037
2038         return 0;
2039 }
2040
2041 static int
2042 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2043             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2044 {
2045         struct cfi_private *cfi = map->fldrv_priv;
2046         map_word datum;
2047
2048         /* make sure area matches group boundaries */
2049         if (size != grpsz)
2050                 return -EXDEV;
2051
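        /* clearing bit <grpno> of the protection lock register locks the
           group permanently: OTP bits can only go from 1 to 0 */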
2052         datum = map_word_ff(map);
2053         datum = map_word_clr(map, datum, CMD(1 << grpno));
2054         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2055 }
2056
2057 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2058                                  size_t *retlen, u_char *buf,
2059                                  otp_op_t action, int user_regs)
2060 {
2061         struct map_info *map = mtd->priv;
2062         struct cfi_private *cfi = map->fldrv_priv;
2063         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2064         struct flchip *chip;
2065         struct cfi_intelext_otpinfo *otp;
2066         u_long devsize, reg_prot_offset, data_offset;
2067         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2068         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2069         int ret;
2070
2071         *retlen = 0;
2072
2073         /* Check that we actually have some OTP registers */
2074         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2075                 return -ENODATA;
2076
2077         /* we need real chips here not virtual ones */
2078         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2079         chip_step = devsize >> cfi->chipshift;
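        /* chip_step is the number of virtual chips per physical device,
           so the loop below visits each physical chip exactly once */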
2080         chip_num = 0;
2081
2082         /* Some chips have OTP located in the _top_ partition only.
2083            For example: Intel 28F256L18T (T means top-parameter device) */
2084         if (cfi->mfr == MANUFACTURER_INTEL) {
2085                 switch (cfi->id) {
2086                 case 0x880b:
2087                 case 0x880c:
2088                 case 0x880d:
2089                         chip_num = chip_step - 1;
2090                 }
2091         }
2092
2093         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2094                 chip = &cfi->chips[chip_num];
2095                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2096
2097                 /* first OTP region */
2098                 field = 0;
2099                 reg_prot_offset = extp->ProtRegAddr;
2100                 reg_fact_groups = 1;
2101                 reg_fact_size = 1 << extp->FactProtRegSize;
2102                 reg_user_groups = 1;
2103                 reg_user_size = 1 << extp->UserProtRegSize;
2104
2105                 while (len > 0) {
2106                         /* flash geometry fixup */
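                        /* the CFI fields are in device addresses; scale by
                           interleave and device width to get map offsets */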
2107                         data_offset = reg_prot_offset + 1;
2108                         data_offset *= cfi->interleave * cfi->device_type;
2109                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2110                         reg_fact_size *= cfi->interleave;
2111                         reg_user_size *= cfi->interleave;
2112
2113                         if (user_regs) {
2114                                 groups = reg_user_groups;
2115                                 groupsize = reg_user_size;
2116                                 /* skip over factory reg area */
2117                                 groupno = reg_fact_groups;
2118                                 data_offset += reg_fact_groups * reg_fact_size;
2119                         } else {
2120                                 groups = reg_fact_groups;
2121                                 groupsize = reg_fact_size;
2122                                 groupno = 0;
2123                         }
2124
2125                         while (len > 0 && groups > 0) {
2126                                 if (!action) {
2127                                         /*
2128                                          * Special case: if action is NULL
2129                                          * we fill buf with otp_info records.
2130                                          */
2131                                         struct otp_info *otpinfo;
2132                                         map_word lockword;
                                        /* len is a size_t: check before the
                                           subtraction to avoid wrap-around */
                                        if (len <= sizeof(struct otp_info))
                                                return -ENOSPC;
                                        len -= sizeof(struct otp_info);
2136                                         ret = do_otp_read(map, chip,
2137                                                           reg_prot_offset,
2138                                                           (u_char *)&lockword,
2139                                                           map_bankwidth(map),
2140                                                           0, 0,  0);
2141                                         if (ret)
2142                                                 return ret;
2143                                         otpinfo = (struct otp_info *)buf;
2144                                         otpinfo->start = from;
2145                                         otpinfo->length = groupsize;
2146                                         otpinfo->locked =
2147                                            !map_word_bitsset(map, lockword,
2148                                                              CMD(1 << groupno));
2149                                         from += groupsize;
2150                                         buf += sizeof(*otpinfo);
2151                                         *retlen += sizeof(*otpinfo);
2152                                 } else if (from >= groupsize) {
2153                                         from -= groupsize;
2154                                         data_offset += groupsize;
2155                                 } else {
2156                                         int size = groupsize;
2157                                         data_offset += from;
2158                                         size -= from;
2159                                         from = 0;
2160                                         if (size > len)
2161                                                 size = len;
2162                                         ret = action(map, chip, data_offset,
2163                                                      buf, size, reg_prot_offset,
2164                                                      groupno, groupsize);
2165                                         if (ret < 0)
2166                                                 return ret;
2167                                         buf += size;
2168                                         len -= size;
2169                                         *retlen += size;
2170                                         data_offset += size;
2171                                 }
2172                                 groupno++;
2173                                 groups--;
2174                         }
2175
2176                         /* next OTP region */
2177                         if (++field == extp->NumProtectionFields)
2178                                 break;
2179                         reg_prot_offset = otp->ProtRegAddr;
2180                         reg_fact_groups = otp->FactGroups;
2181                         reg_fact_size = 1 << otp->FactProtRegSize;
2182                         reg_user_groups = otp->UserGroups;
2183                         reg_user_size = 1 << otp->UserProtRegSize;
2184                         otp++;
2185                 }
2186         }
2187
2188         return 0;
2189 }
2190
2191 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2192                                            size_t len, size_t *retlen,
2193                                             u_char *buf)
2194 {
2195         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2196                                      buf, do_otp_read, 0);
2197 }
2198
2199 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2200                                            size_t len, size_t *retlen,
2201                                             u_char *buf)
2202 {
2203         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2204                                      buf, do_otp_read, 1);
2205 }
2206
2207 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2208                                             size_t len, size_t *retlen,
2209                                              u_char *buf)
2210 {
2211         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2212                                      buf, do_otp_write, 1);
2213 }
2214
2215 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2216                                            loff_t from, size_t len)
2217 {
2218         size_t retlen;
2219         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2220                                      NULL, do_otp_lock, 1);
2221 }
2222
2223 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, 
2224                                            struct otp_info *buf, size_t len)
2225 {
2226         size_t retlen;
2227         int ret;
2228
2229         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2230         return ret ? : retlen;
2231 }
2232
2233 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2234                                            struct otp_info *buf, size_t len)
2235 {
2236         size_t retlen;
2237         int ret;
2238
2239         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2240         return ret ? : retlen;
2241 }
2242
2243 #endif
2244
2245 static int cfi_intelext_suspend(struct mtd_info *mtd)
2246 {
2247         struct map_info *map = mtd->priv;
2248         struct cfi_private *cfi = map->fldrv_priv;
2249         int i;
2250         struct flchip *chip;
2251         int ret = 0;
2252
2253         for (i=0; !ret && i<cfi->numchips; i++) {
2254                 chip = &cfi->chips[i];
2255
2256                 spin_lock(chip->mutex);
2257
2258                 switch (chip->state) {
2259                 case FL_READY:
2260                 case FL_STATUS:
2261                 case FL_CFI_QUERY:
2262                 case FL_JEDEC_QUERY:
2263                         if (chip->oldstate == FL_READY) {
2264                                 chip->oldstate = chip->state;
2265                                 chip->state = FL_PM_SUSPENDED;
2266                                 /* No need to wake_up() on this state change - 
2267                                  * as the whole point is that nobody can do anything
2268                                  * with the chip now anyway.
2269                                  */
2270                         } else {
2271                                 /* There seems to be an operation pending. We must wait for it. */
2272                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2273                                 ret = -EAGAIN;
2274                         }
2275                         break;
                default:
                        /* Should we actually wait? Once upon a time these routines weren't
                           allowed to. Or should we return -EAGAIN, because the upper layers
                           ought to have already shut down anything which was using the device
                           anyway? The latter for now. */
                        printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
                        ret = -EAGAIN;
                        /* fall through */
                case FL_PM_SUSPENDED:
                        break;
2285                 }
2286                 spin_unlock(chip->mutex);
2287         }
2288
2289         /* Unlock the chips again */
2290
2291         if (ret) {
2292                 for (i--; i >=0; i--) {
2293                         chip = &cfi->chips[i];
2294                         
2295                         spin_lock(chip->mutex);
2296                         
2297                         if (chip->state == FL_PM_SUSPENDED) {
2298                                 /* No need to force it into a known state here,
2299                                    because we're returning failure, and it didn't
2300                                    get power cycled */
2301                                 chip->state = chip->oldstate;
2302                                 chip->oldstate = FL_READY;
2303                                 wake_up(&chip->wq);
2304                         }
2305                         spin_unlock(chip->mutex);
2306                 }
2307         } 
2308         
2309         return ret;
2310 }
2311
2312 static void cfi_intelext_resume(struct mtd_info *mtd)
2313 {
2314         struct map_info *map = mtd->priv;
2315         struct cfi_private *cfi = map->fldrv_priv;
2316         int i;
2317         struct flchip *chip;
2318
2319         for (i=0; i<cfi->numchips; i++) {
2320         
2321                 chip = &cfi->chips[i];
2322
2323                 spin_lock(chip->mutex);
2324                 
2325                 /* Go to known state. Chip may have been power cycled */
2326                 if (chip->state == FL_PM_SUSPENDED) {
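                        /* 0xFF puts the chip back into read array mode */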
2327                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2328                         chip->oldstate = chip->state = FL_READY;
2329                         wake_up(&chip->wq);
2330                 }
2331
2332                 spin_unlock(chip->mutex);
2333         }
2334 }
2335
2336 static int cfi_intelext_reset(struct mtd_info *mtd)
2337 {
2338         struct map_info *map = mtd->priv;
2339         struct cfi_private *cfi = map->fldrv_priv;
2340         int i, ret;
2341
2342         for (i=0; i < cfi->numchips; i++) {
2343                 struct flchip *chip = &cfi->chips[i];
2344
2345                 /* force the completion of any ongoing operation
2346                    and switch to array mode so any bootloader in 
2347                    flash is accessible for soft reboot. */
2348                 spin_lock(chip->mutex);
2349                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2350                 if (!ret) {
2351                         map_write(map, CMD(0xff), chip->start);
2352                         chip->state = FL_READY;
2353                 }
2354                 spin_unlock(chip->mutex);
2355         }
2356
2357         return 0;
2358 }
2359
2360 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2361                                void *v)
2362 {
2363         struct mtd_info *mtd;
2364
2365         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2366         cfi_intelext_reset(mtd);
2367         return NOTIFY_DONE;
2368 }
2369
2370 static void cfi_intelext_destroy(struct mtd_info *mtd)
2371 {
2372         struct map_info *map = mtd->priv;
2373         struct cfi_private *cfi = map->fldrv_priv;
2374         cfi_intelext_reset(mtd);
2375         unregister_reboot_notifier(&mtd->reboot_notifier);
2376         kfree(cfi->cmdset_priv);
2377         kfree(cfi->cfiq);
2378         kfree(cfi->chips[0].priv);
2379         kfree(cfi);
2380         kfree(mtd->eraseregions);
2381 }
2382
2383 static char im_name_1[]="cfi_cmdset_0001";
2384 static char im_name_3[]="cfi_cmdset_0003";
2385
2386 static int __init cfi_intelext_init(void)
2387 {
2388         inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
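        /* parts reporting command set 0003 (Intel Standard) are driven
           by this same code, hence the second registration */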
2389         inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
2390         return 0;
2391 }
2392
2393 static void __exit cfi_intelext_exit(void)
2394 {
2395         inter_module_unregister(im_name_1);
2396         inter_module_unregister(im_name_3);
2397 }
2398
2399 module_init(cfi_intelext_init);
2400 module_exit(cfi_intelext_exit);
2401
2402 MODULE_LICENSE("GPL");
2403 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2404 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");