/*
 * MTD map driver for AMD compatible flash chips (non-CFI)
 *
 * Author: Jonas Holmberg <jonas.holmberg@axis.com>
 *
 * $Id: amd_flash.c,v 1.28 2005/11/07 11:14:22 gleixner Exp $
 *
 * Copyright (c) 2001 Axis Communications AB
 *
 * This file is under GPL.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>

/* There's no limit. It exists only to avoid realloc. */
#define MAX_AMD_CHIPS 8

#define DEVICE_TYPE_X8  (8 / 8)
#define DEVICE_TYPE_X16 (16 / 8)
#define DEVICE_TYPE_X32 (32 / 8)

/* Addresses */
#define ADDR_MANUFACTURER               0x0000
#define ADDR_DEVICE_ID                  0x0001
#define ADDR_SECTOR_LOCK                0x0002
#define ADDR_HANDSHAKE                  0x0003
#define ADDR_UNLOCK_1                   0x0555
#define ADDR_UNLOCK_2                   0x02AA

/* Commands */
#define CMD_UNLOCK_DATA_1               0x00AA
#define CMD_UNLOCK_DATA_2               0x0055
#define CMD_MANUFACTURER_UNLOCK_DATA    0x0090
#define CMD_UNLOCK_BYPASS_MODE          0x0020
#define CMD_PROGRAM_UNLOCK_DATA         0x00A0
#define CMD_RESET_DATA                  0x00F0
#define CMD_SECTOR_ERASE_UNLOCK_DATA    0x0080
#define CMD_SECTOR_ERASE_UNLOCK_DATA_2  0x0030

#define CMD_UNLOCK_SECTOR               0x0060

/* Manufacturers */
#define MANUFACTURER_AMD        0x0001
#define MANUFACTURER_ATMEL      0x001F
#define MANUFACTURER_FUJITSU    0x0004
#define MANUFACTURER_ST         0x0020
#define MANUFACTURER_SST        0x00BF
#define MANUFACTURER_TOSHIBA    0x0098

/* AMD */
#define AM29F800BB      0x2258
#define AM29F800BT      0x22D6
#define AM29LV800BB     0x225B
#define AM29LV800BT     0x22DA
#define AM29LV160DT     0x22C4
#define AM29LV160DB     0x2249
#define AM29BDS323D     0x22D1

/* Atmel */
#define AT49xV16x       0x00C0
#define AT49xV16xT      0x00C2

/* Fujitsu */
#define MBM29LV160TE    0x22C4
#define MBM29LV160BE    0x2249
#define MBM29LV800BB    0x225B

/* ST - www.st.com */
#define M29W800T        0x00D7
#define M29W160DT       0x22C4
#define M29W160DB       0x2249

/* SST */
#define SST39LF800      0x2781
#define SST39LF160      0x2782

/* Toshiba */
#define TC58FVT160      0x00C2
#define TC58FVB160      0x0043

#define D6_MASK 0x40

struct amd_flash_private {
        int device_type;
        int interleave;
        int numchips;
        unsigned long chipshift;
        struct flchip chips[0];
};

struct amd_flash_info {
        const __u16 mfr_id;
        const __u16 dev_id;
        const char *name;
        const u_long size;
        const int numeraseregions;
        const struct mtd_erase_region_info regions[4];
};



static int amd_flash_read(struct mtd_info *, loff_t, size_t, size_t *,
                          u_char *);
static int amd_flash_write(struct mtd_info *, loff_t, size_t, size_t *,
                           const u_char *);
static int amd_flash_erase(struct mtd_info *, struct erase_info *);
static void amd_flash_sync(struct mtd_info *);
static int amd_flash_suspend(struct mtd_info *);
static void amd_flash_resume(struct mtd_info *);
static void amd_flash_destroy(struct mtd_info *);
static struct mtd_info *amd_flash_probe(struct map_info *map);


static struct mtd_chip_driver amd_flash_chipdrv = {
        .probe = amd_flash_probe,
        .destroy = amd_flash_destroy,
        .name = "amd_flash",
        .module = THIS_MODULE
};

static inline __u32 wide_read(struct map_info *map, __u32 addr)
{
        if (map->buswidth == 1) {
                return map_read8(map, addr);
        } else if (map->buswidth == 2) {
                return map_read16(map, addr);
        } else if (map->buswidth == 4) {
                return map_read32(map, addr);
        }

        return 0;
}

static inline void wide_write(struct map_info *map, __u32 val, __u32 addr)
{
        if (map->buswidth == 1) {
                map_write8(map, val, addr);
        } else if (map->buswidth == 2) {
                map_write16(map, val, addr);
        } else if (map->buswidth == 4) {
                map_write32(map, val, addr);
        }
}

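/*
 * When two x16 chips are interleaved on a 32-bit bus, the same command has
 * to reach both chips, so mirror it into the upper half-word as well.
 */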
static inline __u32 make_cmd(struct map_info *map, __u32 cmd)
{
        const struct amd_flash_private *private = map->fldrv_priv;
        if ((private->interleave == 2) &&
            (private->device_type == DEVICE_TYPE_X16)) {
                cmd |= (cmd << 16);
        }

        return cmd;
}

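/*
 * Standard two-cycle unlock sequence: 0xAA to address 0x555 followed by
 * 0x55 to address 0x2AA, with the addresses scaled by the bus width.
 */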
static inline void send_unlock(struct map_info *map, unsigned long base)
{
        wide_write(map, (CMD_UNLOCK_DATA_1 << 16) | CMD_UNLOCK_DATA_1,
                   base + (map->buswidth * ADDR_UNLOCK_1));
        wide_write(map, (CMD_UNLOCK_DATA_2 << 16) | CMD_UNLOCK_DATA_2,
                   base + (map->buswidth * ADDR_UNLOCK_2));
}

static inline void send_cmd(struct map_info *map, unsigned long base, __u32 cmd)
{
        send_unlock(map, base);
        wide_write(map, make_cmd(map, cmd),
                   base + (map->buswidth * ADDR_UNLOCK_1));
}

static inline void send_cmd_to_addr(struct map_info *map, unsigned long base,
                                    __u32 cmd, unsigned long addr)
{
        send_unlock(map, base);
        wide_write(map, make_cmd(map, cmd), addr);
}

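/*
 * Poll the DQ6 toggle bit: it flips on every read while an embedded
 * program/erase operation is in progress and stops toggling once the
 * operation has completed.
 */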
static inline int flash_is_busy(struct map_info *map, unsigned long addr,
                                int interleave)
{

        if ((interleave == 2) && (map->buswidth == 4)) {
                __u32 read1, read2;

                read1 = wide_read(map, addr);
                read2 = wide_read(map, addr);

                return (((read1 >> 16) & D6_MASK) !=
                        ((read2 >> 16) & D6_MASK)) ||
                       (((read1 & 0xffff) & D6_MASK) !=
                        ((read2 & 0xffff) & D6_MASK));
        }

        return ((wide_read(map, addr) & D6_MASK) !=
                (wide_read(map, addr) & D6_MASK));
}

static inline void unlock_sector(struct map_info *map, unsigned long sect_addr,
                                 int unlock)
{
        /* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */
        int SLA = unlock ?
                (sect_addr |  (0x40 * map->buswidth)) :
                (sect_addr & ~(0x40 * map->buswidth));

        __u32 cmd = make_cmd(map, CMD_UNLOCK_SECTOR);

        wide_write(map, make_cmd(map, CMD_RESET_DATA), 0);
        wide_write(map, cmd, SLA); /* 1st cycle: write cmd to any address */
        wide_write(map, cmd, SLA); /* 2nd cycle: write cmd to any address */
        wide_write(map, cmd, SLA); /* 3rd cycle: write cmd to SLA */
}

static inline int is_sector_locked(struct map_info *map,
                                   unsigned long sect_addr)
{
        int status;

        wide_write(map, CMD_RESET_DATA, 0);
        send_cmd(map, sect_addr, CMD_MANUFACTURER_UNLOCK_DATA);

        /* status is 0x0000 for unlocked and 0x0001 for locked */
        status = wide_read(map, sect_addr + (map->buswidth * ADDR_SECTOR_LOCK));
        wide_write(map, CMD_RESET_DATA, 0);
        return status;
}

static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
                               int is_unlock)
{
        struct map_info *map;
        struct mtd_erase_region_info *merip;
        int eraseoffset, erasesize, eraseblocks;
        int i;
        int retval = 0;
        int lock_status;

        map = mtd->priv;

        /* Walk the whole chip sector by sector and check whether each
           sector overlaps the given range. */
        for (i = 0; i < mtd->numeraseregions; i++) {
                merip = &mtd->eraseregions[i];

                eraseoffset = merip->offset;
                erasesize = merip->erasesize;
                eraseblocks = merip->numblocks;

                if (ofs > eraseoffset + erasesize)
                        continue;

                while (eraseblocks > 0) {
                        if (ofs < eraseoffset + erasesize && ofs + len > eraseoffset) {
                                unlock_sector(map, eraseoffset, is_unlock);

                                lock_status = is_sector_locked(map, eraseoffset);

                                if (is_unlock && lock_status) {
                                        printk(KERN_ERR "Cannot unlock sector at address %x length %x\n",
                                               eraseoffset, merip->erasesize);
                                        retval = -1;
                                } else if (!is_unlock && !lock_status) {
                                        printk(KERN_ERR "Cannot lock sector at address %x length %x\n",
                                               eraseoffset, merip->erasesize);
                                        retval = -1;
                                }
                        }
                        eraseoffset += erasesize;
                        eraseblocks--;
                }
        }
        return retval;
}

static int amd_flash_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
        return amd_flash_do_unlock(mtd, ofs, len, 1);
}

static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
        return amd_flash_do_unlock(mtd, ofs, len, 0);
}


/*
 * Reads JEDEC manufacturer ID and device ID and returns the index of the first
 * matching table entry (-1 if not found or alias for already found chip).
 */
static int probe_new_chip(struct mtd_info *mtd, __u32 base,
                          struct flchip *chips,
                          struct amd_flash_private *private,
                          const struct amd_flash_info *table, int table_size)
{
        __u32 mfr_id;
        __u32 dev_id;
        struct map_info *map = mtd->priv;
        struct amd_flash_private temp;
        int i;

        temp.device_type = DEVICE_TYPE_X16;     /* Assume X16 (FIXME) */
        temp.interleave = 2;
        map->fldrv_priv = &temp;

        /* Enter autoselect mode. */
        send_cmd(map, base, CMD_RESET_DATA);
        send_cmd(map, base, CMD_MANUFACTURER_UNLOCK_DATA);

        mfr_id = wide_read(map, base + (map->buswidth * ADDR_MANUFACTURER));
        dev_id = wide_read(map, base + (map->buswidth * ADDR_DEVICE_ID));

        if ((map->buswidth == 4) && ((mfr_id >> 16) == (mfr_id & 0xffff)) &&
            ((dev_id >> 16) == (dev_id & 0xffff))) {
                mfr_id &= 0xffff;
                dev_id &= 0xffff;
        } else {
                temp.interleave = 1;
        }

        for (i = 0; i < table_size; i++) {
                if ((mfr_id == table[i].mfr_id) &&
                    (dev_id == table[i].dev_id)) {
                        if (chips) {
                                int j;

                                /* Is this an alias for an already found chip?
                                 * In that case that chip should be in
                                 * autoselect mode now.
                                 */
                                for (j = 0; j < private->numchips; j++) {
                                        __u32 mfr_id_other;
                                        __u32 dev_id_other;

                                        mfr_id_other =
                                                wide_read(map, chips[j].start +
                                                               (map->buswidth *
                                                                ADDR_MANUFACTURER
                                                               ));
                                        dev_id_other =
                                                wide_read(map, chips[j].start +
                                                               (map->buswidth *
                                                                ADDR_DEVICE_ID));
                                        if (temp.interleave == 2) {
                                                mfr_id_other &= 0xffff;
                                                dev_id_other &= 0xffff;
                                        }
                                        if ((mfr_id_other == mfr_id) &&
                                            (dev_id_other == dev_id)) {

                                                /* Exit autoselect mode. */
                                                send_cmd(map, base,
                                                         CMD_RESET_DATA);

                                                return -1;
                                        }
                                }

                                if (private->numchips == MAX_AMD_CHIPS) {
                                        printk(KERN_WARNING
                                               "%s: Too many flash chips "
                                               "detected. Increase "
                                               "MAX_AMD_CHIPS from %d.\n",
                                               map->name, MAX_AMD_CHIPS);

                                        return -1;
                                }

                                chips[private->numchips].start = base;
                                chips[private->numchips].state = FL_READY;
                                chips[private->numchips].mutex =
                                        &chips[private->numchips]._spinlock;
                                private->numchips++;
                        }

                        printk(KERN_INFO "%s: Found %d x %ldMiB %s at 0x%x\n",
                               map->name, temp.interleave,
                               (table[i].size)/(1024*1024), table[i].name, base);

                        mtd->size += table[i].size * temp.interleave;
                        mtd->numeraseregions += table[i].numeraseregions;

                        break;
                }
        }

        /* Exit autoselect mode. */
        send_cmd(map, base, CMD_RESET_DATA);

        if (i == table_size) {
                printk(KERN_DEBUG "%s: unknown flash device at 0x%x, "
                       "mfr id 0x%x, dev id 0x%x\n", map->name,
                       base, mfr_id, dev_id);
                map->fldrv_priv = NULL;

                return -1;
        }

        private->device_type = temp.device_type;
        private->interleave = temp.interleave;

        return i;
}



static struct mtd_info *amd_flash_probe(struct map_info *map)
{
        static const struct amd_flash_info table[] = {
        {
                .mfr_id = MANUFACTURER_AMD,
                .dev_id = AM29LV160DT,
                .name = "AMD AM29LV160DT",
                .size = 0x00200000,
                .numeraseregions = 4,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
                        { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks =  1 },
                        { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks =  2 },
                        { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks =  1 }
                }
        }, {
                .mfr_id = MANUFACTURER_AMD,
                .dev_id = AM29LV160DB,
                .name = "AMD AM29LV160DB",
                .size = 0x00200000,
                .numeraseregions = 4,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
                        { .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
                        { .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
                        { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
                }
        }, {
                .mfr_id = MANUFACTURER_TOSHIBA,
                .dev_id = TC58FVT160,
                .name = "Toshiba TC58FVT160",
                .size = 0x00200000,
                .numeraseregions = 4,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
                        { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks =  1 },
                        { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks =  2 },
                        { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks =  1 }
                }
        }, {
                .mfr_id = MANUFACTURER_FUJITSU,
                .dev_id = MBM29LV160TE,
                .name = "Fujitsu MBM29LV160TE",
                .size = 0x00200000,
                .numeraseregions = 4,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
                        { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks =  1 },
                        { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks =  2 },
                        { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks =  1 }
                }
        }, {
                .mfr_id = MANUFACTURER_TOSHIBA,
                .dev_id = TC58FVB160,
                .name = "Toshiba TC58FVB160",
                .size = 0x00200000,
                .numeraseregions = 4,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
                        { .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
                        { .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
                        { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
                }
        }, {
                .mfr_id = MANUFACTURER_FUJITSU,
                .dev_id = MBM29LV160BE,
                .name = "Fujitsu MBM29LV160BE",
                .size = 0x00200000,
                .numeraseregions = 4,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
                        { .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
                        { .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
                        { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
                }
        }, {
                .mfr_id = MANUFACTURER_AMD,
                .dev_id = AM29LV800BB,
                .name = "AMD AM29LV800BB",
                .size = 0x00100000,
                .numeraseregions = 4,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
                        { .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
                        { .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
                        { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
                }
        }, {
                .mfr_id = MANUFACTURER_AMD,
                .dev_id = AM29F800BB,
                .name = "AMD AM29F800BB",
                .size = 0x00100000,
                .numeraseregions = 4,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
                        { .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
                        { .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
                        { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
                }
        }, {
                .mfr_id = MANUFACTURER_AMD,
                .dev_id = AM29LV800BT,
                .name = "AMD AM29LV800BT",
                .size = 0x00100000,
                .numeraseregions = 4,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
                        { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks =  1 },
                        { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks =  2 },
                        { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks =  1 }
                }
        }, {
                .mfr_id = MANUFACTURER_AMD,
                .dev_id = AM29F800BT,
                .name = "AMD AM29F800BT",
                .size = 0x00100000,
                .numeraseregions = 4,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
                        { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks =  1 },
                        { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks =  2 },
                        { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks =  1 }
                }
        }, {
                .mfr_id = MANUFACTURER_AMD,
                .dev_id = AM29LV800BB,
                .name = "AMD AM29LV800BB",
                .size = 0x00100000,
                .numeraseregions = 4,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
                        { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks =  1 },
                        { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks =  2 },
                        { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks =  1 }
                }
        }, {
                .mfr_id = MANUFACTURER_FUJITSU,
                .dev_id = MBM29LV800BB,
                .name = "Fujitsu MBM29LV800BB",
                .size = 0x00100000,
                .numeraseregions = 4,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
                        { .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
                        { .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
                        { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
                }
        }, {
                .mfr_id = MANUFACTURER_ST,
                .dev_id = M29W800T,
                .name = "ST M29W800T",
                .size = 0x00100000,
                .numeraseregions = 4,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
                        { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks =  1 },
                        { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks =  2 },
                        { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks =  1 }
                }
        }, {
                .mfr_id = MANUFACTURER_ST,
                .dev_id = M29W160DT,
                .name = "ST M29W160DT",
                .size = 0x00200000,
                .numeraseregions = 4,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
                        { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks =  1 },
                        { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks =  2 },
                        { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks =  1 }
                }
        }, {
                .mfr_id = MANUFACTURER_ST,
                .dev_id = M29W160DB,
                .name = "ST M29W160DB",
                .size = 0x00200000,
                .numeraseregions = 4,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
                        { .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
                        { .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
                        { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
                }
        }, {
                .mfr_id = MANUFACTURER_AMD,
                .dev_id = AM29BDS323D,
                .name = "AMD AM29BDS323D",
                .size = 0x00400000,
                .numeraseregions = 3,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 48 },
                        { .offset = 0x300000, .erasesize = 0x10000, .numblocks = 15 },
                        { .offset = 0x3f0000, .erasesize = 0x02000, .numblocks =  8 },
                }
        }, {
                .mfr_id = MANUFACTURER_ATMEL,
                .dev_id = AT49xV16x,
                .name = "Atmel AT49xV16x",
                .size = 0x00200000,
                .numeraseregions = 2,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x02000, .numblocks =  8 },
                        { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
                }
        }, {
                .mfr_id = MANUFACTURER_ATMEL,
                .dev_id = AT49xV16xT,
                .name = "Atmel AT49xV16xT",
                .size = 0x00200000,
                .numeraseregions = 2,
                .regions = {
                        { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
                        { .offset = 0x1F0000, .erasesize = 0x02000, .numblocks =  8 }
                }
        }
        };

        struct mtd_info *mtd;
        struct flchip chips[MAX_AMD_CHIPS];
        int table_pos[MAX_AMD_CHIPS];
        struct amd_flash_private temp;
        struct amd_flash_private *private;
        u_long size;
        unsigned long base;
        int i;
        int reg_idx;
        int offset;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_WARNING
                       "%s: kzalloc failed for info structure\n", map->name);
                return NULL;
        }
        mtd->priv = map;

        memset(&temp, 0, sizeof(temp));

        printk(KERN_NOTICE "%s: Probing for AMD compatible flash...\n", map->name);

        if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table,
                                           ARRAY_SIZE(table)))
            == -1) {
                printk(KERN_WARNING
                       "%s: Found no AMD compatible device at location zero\n",
                       map->name);
                kfree(mtd);

                return NULL;
        }

        chips[0].start = 0;
        chips[0].state = FL_READY;
        chips[0].mutex = &chips[0]._spinlock;
        temp.numchips = 1;
        for (size = mtd->size; size > 1; size >>= 1) {
                temp.chipshift++;
        }
        switch (temp.interleave) {
                case 2:
                        temp.chipshift += 1;
                        break;
                case 4:
                        temp.chipshift += 2;
                        break;
        }

        /* Find out if there are any more chips in the map. */
        for (base = (1 << temp.chipshift);
             base < map->size;
             base += (1 << temp.chipshift)) {
                int numchips = temp.numchips;
                table_pos[numchips] = probe_new_chip(mtd, base, chips,
                        &temp, table, ARRAY_SIZE(table));
        }

        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
                                    mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_WARNING "%s: Failed to allocate "
                       "memory for MTD erase region info\n", map->name);
                kfree(mtd);
                map->fldrv_priv = NULL;
                return NULL;
        }

        reg_idx = 0;
        offset = 0;
        for (i = 0; i < temp.numchips; i++) {
                int dev_size;
                int j;

                dev_size = 0;
                for (j = 0; j < table[table_pos[i]].numeraseregions; j++) {
                        mtd->eraseregions[reg_idx].offset = offset +
                                (table[table_pos[i]].regions[j].offset *
                                 temp.interleave);
                        mtd->eraseregions[reg_idx].erasesize =
                                table[table_pos[i]].regions[j].erasesize *
                                temp.interleave;
                        mtd->eraseregions[reg_idx].numblocks =
                                table[table_pos[i]].regions[j].numblocks;
                        if (mtd->erasesize <
                            mtd->eraseregions[reg_idx].erasesize) {
                                mtd->erasesize =
                                        mtd->eraseregions[reg_idx].erasesize;
                        }
                        dev_size += mtd->eraseregions[reg_idx].erasesize *
                                    mtd->eraseregions[reg_idx].numblocks;
                        reg_idx++;
                }
                offset += dev_size;
        }
        mtd->type = MTD_NORFLASH;
        mtd->writesize = 1;
        mtd->flags = MTD_CAP_NORFLASH;
        mtd->name = map->name;
        mtd->erase = amd_flash_erase;
        mtd->read = amd_flash_read;
        mtd->write = amd_flash_write;
        mtd->sync = amd_flash_sync;
        mtd->suspend = amd_flash_suspend;
        mtd->resume = amd_flash_resume;
        mtd->lock = amd_flash_lock;
        mtd->unlock = amd_flash_unlock;

        private = kmalloc(sizeof(*private) + (sizeof(struct flchip) *
                                              temp.numchips), GFP_KERNEL);
        if (!private) {
                printk(KERN_WARNING
                       "%s: kmalloc failed for private structure\n", map->name);
                kfree(mtd);
                map->fldrv_priv = NULL;
                return NULL;
        }
        memcpy(private, &temp, sizeof(temp));
        memcpy(private->chips, chips,
               sizeof(struct flchip) * private->numchips);
        for (i = 0; i < private->numchips; i++) {
                init_waitqueue_head(&private->chips[i].wq);
                spin_lock_init(&private->chips[i]._spinlock);
        }

        map->fldrv_priv = private;

        map->fldrv = &amd_flash_chipdrv;

        __module_get(THIS_MODULE);
        return mtd;
}



static inline int read_one_chip(struct map_info *map, struct flchip *chip,
                               loff_t adr, size_t len, u_char *buf)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long timeo = jiffies + HZ;

retry:
        spin_lock_bh(chip->mutex);

        if (chip->state != FL_READY){
                printk(KERN_INFO "%s: waiting for chip to read, state = %d\n",
                       map->name, chip->state);
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);

                spin_unlock_bh(chip->mutex);

                schedule();
                remove_wait_queue(&chip->wq, &wait);

                if(signal_pending(current)) {
                        return -EINTR;
                }

                timeo = jiffies + HZ;

                goto retry;
        }

        adr += chip->start;

        chip->state = FL_READY;

        map_copy_from(map, buf, adr, len);

        wake_up(&chip->wq);
        spin_unlock_bh(chip->mutex);

        return 0;
}



static int amd_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
                          size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct amd_flash_private *private = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        if ((from + len) > mtd->size) {
                printk(KERN_WARNING "%s: read request past end of device "
                       "(0x%lx)\n", map->name, (unsigned long)from + len);

                return -EINVAL;
        }

        /* Offset within the first chip at which the first read should start. */
        chipnum = (from >> private->chipshift);
        ofs = from - (chipnum << private->chipshift);

        *retlen = 0;

        while (len) {
                unsigned long this_len;

                if (chipnum >= private->numchips) {
                        break;
                }

                if ((len + ofs - 1) >> private->chipshift) {
                        this_len = (1 << private->chipshift) - ofs;
                } else {
                        this_len = len;
                }

                ret = read_one_chip(map, &private->chips[chipnum], ofs,
                                    this_len, buf);
                if (ret) {
                        break;
                }

                *retlen += this_len;
                len -= this_len;
                buf += this_len;

                ofs = 0;
                chipnum++;
        }

        return ret;
}



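/*
 * Program one bus word: send the program command, poll DQ6 until the chip
 * goes ready (or we give up) and then verify the word that was written.
 */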
static int write_one_word(struct map_info *map, struct flchip *chip,
                          unsigned long adr, __u32 datum)
{
        unsigned long timeo = jiffies + HZ;
        struct amd_flash_private *private = map->fldrv_priv;
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;
        int times_left;

retry:
        spin_lock_bh(chip->mutex);

        if (chip->state != FL_READY){
                printk(KERN_INFO "%s: waiting for chip to write, state = %d\n",
                       map->name, chip->state);
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);

                spin_unlock_bh(chip->mutex);

                schedule();
                remove_wait_queue(&chip->wq, &wait);
                printk(KERN_INFO "%s: woke up to write\n", map->name);
                if(signal_pending(current))
                        return -EINTR;

                timeo = jiffies + HZ;

                goto retry;
        }

        chip->state = FL_WRITING;

        adr += chip->start;
        ENABLE_VPP(map);
        send_cmd(map, chip->start, CMD_PROGRAM_UNLOCK_DATA);
        wide_write(map, datum, adr);

        times_left = 500000;
        while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
                if (need_resched()) {
                        spin_unlock_bh(chip->mutex);
                        schedule();
                        spin_lock_bh(chip->mutex);
                }
        }

        if (!times_left) {
                printk(KERN_WARNING "%s: write to 0x%lx timed out!\n",
                       map->name, adr);
                ret = -EIO;
        } else {
                __u32 verify;
                if ((verify = wide_read(map, adr)) != datum) {
                        printk(KERN_WARNING "%s: write to 0x%lx failed. "
                               "datum = %x, verify = %x\n",
                               map->name, adr, datum, verify);
                        ret = -EIO;
                }
        }

        DISABLE_VPP(map);
        chip->state = FL_READY;
        wake_up(&chip->wq);
        spin_unlock_bh(chip->mutex);

        return ret;
}



static int amd_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
                           size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct amd_flash_private *private = map->fldrv_priv;
        int ret = 0;
        int chipnum;
        unsigned long ofs;
        unsigned long chipstart;

        *retlen = 0;
        if (!len) {
                return 0;
        }

        chipnum = to >> private->chipshift;
        ofs = to - (chipnum << private->chipshift);
        chipstart = private->chips[chipnum].start;

        /* If the start is not bus-aligned, do a partial first write. */
        if (ofs & (map->buswidth - 1)) {
                unsigned long bus_ofs = ofs & ~(map->buswidth - 1);
                int i = ofs - bus_ofs;
                int n = 0;
                u_char tmp_buf[4];
                __u32 datum;

                map_copy_from(map, tmp_buf,
                               bus_ofs + private->chips[chipnum].start,
                               map->buswidth);
                while (len && i < map->buswidth) {
                        tmp_buf[i++] = buf[n++];
                        len--;
                }

                if (map->buswidth == 2) {
                        datum = *(__u16*)tmp_buf;
                } else if (map->buswidth == 4) {
                        datum = *(__u32*)tmp_buf;
                } else {
                        return -EINVAL;  /* should never happen, but be safe */
                }

                ret = write_one_word(map, &private->chips[chipnum], bus_ofs,
                                     datum);
                if (ret) {
                        return ret;
                }

                ofs += n;
                buf += n;
                (*retlen) += n;

                if (ofs >> private->chipshift) {
                        chipnum++;
                        ofs = 0;
                        if (chipnum == private->numchips) {
                                return 0;
                        }
                }
        }

        /* We are now aligned, write as much as possible. */
        while (len >= map->buswidth) {
                __u32 datum;

                if (map->buswidth == 1) {
                        datum = *(__u8*)buf;
                } else if (map->buswidth == 2) {
                        datum = *(__u16*)buf;
                } else if (map->buswidth == 4) {
                        datum = *(__u32*)buf;
                } else {
                        return -EINVAL;
                }

                ret = write_one_word(map, &private->chips[chipnum], ofs, datum);

                if (ret) {
                        return ret;
                }

                ofs += map->buswidth;
                buf += map->buswidth;
                (*retlen) += map->buswidth;
                len -= map->buswidth;

                if (ofs >> private->chipshift) {
                        chipnum++;
                        ofs = 0;
                        if (chipnum == private->numchips) {
                                return 0;
                        }
                        chipstart = private->chips[chipnum].start;
                }
        }

        if (len & (map->buswidth - 1)) {
                int i = 0, n = 0;
                u_char tmp_buf[4];
                __u32 datum;

                map_copy_from(map, tmp_buf,
                               ofs + private->chips[chipnum].start,
                               map->buswidth);
                while (len--) {
                        tmp_buf[i++] = buf[n++];
                }

                if (map->buswidth == 2) {
                        datum = *(__u16*)tmp_buf;
                } else if (map->buswidth == 4) {
                        datum = *(__u32*)tmp_buf;
                } else {
                        return -EINVAL;  /* should never happen, but be safe */
                }

                ret = write_one_word(map, &private->chips[chipnum], ofs, datum);

                if (ret) {
                        return ret;
                }

                (*retlen) += n;
        }

        return 0;
}



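/*
 * Erase one sector at 'adr': issue the sector erase command sequence, poll
 * DQ6 until the chip finishes (with a timeout) and then verify that the
 * whole sector reads back as erased.
 */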
static inline int erase_one_block(struct map_info *map, struct flchip *chip,
                                  unsigned long adr, u_long size)
{
        unsigned long timeo = jiffies + HZ;
        struct amd_flash_private *private = map->fldrv_priv;
        DECLARE_WAITQUEUE(wait, current);

retry:
        spin_lock_bh(chip->mutex);

        if (chip->state != FL_READY){
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);

                spin_unlock_bh(chip->mutex);

                schedule();
                remove_wait_queue(&chip->wq, &wait);

                if (signal_pending(current)) {
                        return -EINTR;
                }

                timeo = jiffies + HZ;

                goto retry;
        }

        chip->state = FL_ERASING;

        adr += chip->start;
        ENABLE_VPP(map);
        send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA);
        send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr);

        timeo = jiffies + (HZ * 20);

        spin_unlock_bh(chip->mutex);
        msleep(1000);
        spin_lock_bh(chip->mutex);

        while (flash_is_busy(map, adr, private->interleave)) {

                if (chip->state != FL_ERASING) {
                        /* Someone's suspended the erase. Sleep */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);

                        spin_unlock_bh(chip->mutex);
                        printk(KERN_INFO "%s: erase suspended. Sleeping\n",
                               map->name);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);

                        if (signal_pending(current)) {
                                return -EINTR;
                        }

                        timeo = jiffies + (HZ*2); /* FIXME */
                        spin_lock_bh(chip->mutex);
                        continue;
                }

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        chip->state = FL_READY;
                        spin_unlock_bh(chip->mutex);
                        printk(KERN_WARNING "%s: waiting for erase to complete "
                               "timed out.\n", map->name);
                        DISABLE_VPP(map);

                        return -EIO;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock_bh(chip->mutex);

                if (need_resched())
                        schedule();
                else
                        udelay(1);

                spin_lock_bh(chip->mutex);
        }

        /* Verify that the whole sector reads back as erased (0xFF). */
        {
                unsigned long address;
                int error = 0;
                __u8 verify;

                for (address = adr; address < (adr + size); address++) {
                        if ((verify = map_read8(map, address)) != 0xFF) {
                                error = 1;
                                break;
                        }
                }
                if (error) {
                        chip->state = FL_READY;
                        spin_unlock_bh(chip->mutex);
                        printk(KERN_WARNING
                               "%s: verify error at 0x%lx, size %ld.\n",
                               map->name, address, size);
                        DISABLE_VPP(map);

                        return -EIO;
                }
        }

        DISABLE_VPP(map);
        chip->state = FL_READY;
        wake_up(&chip->wq);
        spin_unlock_bh(chip->mutex);

        return 0;
}



static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        struct map_info *map = mtd->priv;
        struct amd_flash_private *private = map->fldrv_priv;
        unsigned long adr, len;
        int chipnum;
        int ret = 0;
        int i;
        int first;
        struct mtd_erase_region_info *regions = mtd->eraseregions;

        if (instr->addr > mtd->size) {
                return -EINVAL;
        }

        if ((instr->len + instr->addr) > mtd->size) {
                return -EINVAL;
        }

        /* Check that both start and end of the requested erase are
         * aligned with the erasesize at the appropriate addresses.
         */

        i = 0;

        /* Skip all erase regions which are ended before the start of
           the requested erase. Actually, to save on the calculations,
           we skip to the first erase region which starts after the
           start of the requested erase, and then go back one.
        */

        while ((i < mtd->numeraseregions) &&
               (instr->addr >= regions[i].offset)) {
                i++;
        }
        i--;

        /* OK, now i is pointing at the erase region in which this
         * erase request starts. Check the start of the requested
         * erase range is aligned with the erase size which is in
         * effect here.
         */

        if (instr->addr & (regions[i].erasesize-1)) {
                return -EINVAL;
        }

        /* Remember the erase region we start on. */

        first = i;

        /* Next, check that the end of the requested erase is aligned
         * with the erase region at that address.
         */

        while ((i < mtd->numeraseregions) &&
               ((instr->addr + instr->len) >= regions[i].offset)) {
                i++;
        }

        /* As before, drop back one to point at the region in which
         * the address actually falls.
         */

        i--;

        if ((instr->addr + instr->len) & (regions[i].erasesize-1)) {
                return -EINVAL;
        }

        chipnum = instr->addr >> private->chipshift;
        adr = instr->addr - (chipnum << private->chipshift);
        len = instr->len;

        i = first;

        while (len) {
                ret = erase_one_block(map, &private->chips[chipnum], adr,
                                      regions[i].erasesize);

                if (ret) {
                        return ret;
                }

                adr += regions[i].erasesize;
                len -= regions[i].erasesize;

                if ((adr % (1 << private->chipshift)) ==
                    ((regions[i].offset + (regions[i].erasesize *
                                           regions[i].numblocks))
                     % (1 << private->chipshift))) {
                        i++;
                }

                if (adr >> private->chipshift) {
                        adr = 0;
                        chipnum++;
                        if (chipnum >= private->numchips) {
                                break;
                        }
                }
        }

        instr->state = MTD_ERASE_DONE;
        mtd_erase_callback(instr);

        return 0;
}



static void amd_flash_sync(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct amd_flash_private *private = map->fldrv_priv;
        int i;
        struct flchip *chip;
        int ret = 0;
        DECLARE_WAITQUEUE(wait, current);

        for (i = 0; !ret && (i < private->numchips); i++) {
                chip = &private->chips[i];

        retry:
                spin_lock_bh(chip->mutex);

                switch(chip->state) {
                case FL_READY:
                case FL_STATUS:
                case FL_CFI_QUERY:
                case FL_JEDEC_QUERY:
                        chip->oldstate = chip->state;
                        chip->state = FL_SYNCING;
                        /* No need to wake_up() on this state change -
                         * as the whole point is that nobody can do anything
                         * with the chip now anyway.
                         */
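                        /* fall through */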
                case FL_SYNCING:
                        spin_unlock_bh(chip->mutex);
                        break;

                default:
                        /* Not an idle state */
                        add_wait_queue(&chip->wq, &wait);

                        spin_unlock_bh(chip->mutex);

                        schedule();

                        remove_wait_queue(&chip->wq, &wait);

                        goto retry;
                }
        }

        /* Unlock the chips again */
        for (i--; i >= 0; i--) {
                chip = &private->chips[i];

                spin_lock_bh(chip->mutex);

                if (chip->state == FL_SYNCING) {
                        chip->state = chip->oldstate;
                        wake_up(&chip->wq);
                }
                spin_unlock_bh(chip->mutex);
        }
}



static int amd_flash_suspend(struct mtd_info *mtd)
{
        printk(KERN_NOTICE "amd_flash_suspend(): not implemented!\n");
        return -EINVAL;
}



static void amd_flash_resume(struct mtd_info *mtd)
{
        printk(KERN_NOTICE "amd_flash_resume(): not implemented!\n");
}



static void amd_flash_destroy(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct amd_flash_private *private = map->fldrv_priv;
        kfree(private);
}

int __init amd_flash_init(void)
{
        register_mtd_chip_driver(&amd_flash_chipdrv);
        return 0;
}

void __exit amd_flash_exit(void)
{
        unregister_mtd_chip_driver(&amd_flash_chipdrv);
}

module_init(amd_flash_init);
module_exit(amd_flash_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>");
MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips");