drivers/ide/arm/icside.c
/*
 * Copyright (c) 1996-2004 Russell King.
 *
 * Please note that this platform does not support 32-bit IDE IO.
 */

#include <linux/string.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/io.h>

#include <asm/dma.h>
#include <asm/ecard.h>

#define DRV_NAME "icside"

#define ICS_IDENT_OFFSET                0x2280

#define ICS_ARCIN_V5_INTRSTAT           0x0000
#define ICS_ARCIN_V5_INTROFFSET         0x0004
#define ICS_ARCIN_V5_IDEOFFSET          0x2800
#define ICS_ARCIN_V5_IDEALTOFFSET       0x2b80
#define ICS_ARCIN_V5_IDESTEPPING        6

#define ICS_ARCIN_V6_IDEOFFSET_1        0x2000
#define ICS_ARCIN_V6_INTROFFSET_1       0x2200
#define ICS_ARCIN_V6_INTRSTAT_1         0x2290
#define ICS_ARCIN_V6_IDEALTOFFSET_1     0x2380
#define ICS_ARCIN_V6_IDEOFFSET_2        0x3000
#define ICS_ARCIN_V6_INTROFFSET_2       0x3200
#define ICS_ARCIN_V6_INTRSTAT_2         0x3290
#define ICS_ARCIN_V6_IDEALTOFFSET_2     0x3380
#define ICS_ARCIN_V6_IDESTEPPING        6

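/*
 * Register layout descriptor for one IDE channel: the taskfile
 * registers sit 1 << stepping bytes apart starting at dataoffset,
 * with the control register at ctrloffset (see icside_setup_ports()).
 */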
44 struct cardinfo {
45         unsigned int dataoffset;
46         unsigned int ctrloffset;
47         unsigned int stepping;
48 };
49
50 static struct cardinfo icside_cardinfo_v5 = {
51         .dataoffset     = ICS_ARCIN_V5_IDEOFFSET,
52         .ctrloffset     = ICS_ARCIN_V5_IDEALTOFFSET,
53         .stepping       = ICS_ARCIN_V5_IDESTEPPING,
54 };
55
56 static struct cardinfo icside_cardinfo_v6_1 = {
57         .dataoffset     = ICS_ARCIN_V6_IDEOFFSET_1,
58         .ctrloffset     = ICS_ARCIN_V6_IDEALTOFFSET_1,
59         .stepping       = ICS_ARCIN_V6_IDESTEPPING,
60 };
61
62 static struct cardinfo icside_cardinfo_v6_2 = {
63         .dataoffset     = ICS_ARCIN_V6_IDEOFFSET_2,
64         .ctrloffset     = ICS_ARCIN_V6_IDEALTOFFSET_2,
65         .stepping       = ICS_ARCIN_V6_IDESTEPPING,
66 };
67
68 struct icside_state {
69         unsigned int channel;
70         unsigned int enabled;
71         void __iomem *irq_port;
72         void __iomem *ioc_base;
73         unsigned int sel;
74         unsigned int type;
75         struct ide_host *host;
76 };
77
78 #define ICS_TYPE_A3IN   0
79 #define ICS_TYPE_A3USER 1
80 #define ICS_TYPE_V6     3
81 #define ICS_TYPE_V5     15
82 #define ICS_TYPE_NOTYPE ((unsigned int)-1)
83
84 /* ---------------- Version 5 PCB Support Functions --------------------- */
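/*
 * As used by the helpers below, a write to ICS_ARCIN_V5_INTROFFSET
 * enables the card interrupt and a read from the same location
 * disables it.
 */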
/* Prototype: icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose  : enable interrupts from card
 */
static void icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
        struct icside_state *state = ec->irq_data;

        writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}

/* Prototype: icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose  : disable interrupts from card
 */
static void icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
        struct icside_state *state = ec->irq_data;

        readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}

static const expansioncard_ops_t icside_ops_arcin_v5 = {
        .irqenable      = icside_irqenable_arcin_v5,
        .irqdisable     = icside_irqdisable_arcin_v5,
};


/* ---------------- Version 6 PCB Support Functions --------------------- */
/* Prototype: icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose  : enable interrupts from card
 */
static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
        struct icside_state *state = ec->irq_data;
        void __iomem *base = state->irq_port;

        state->enabled = 1;

        switch (state->channel) {
        case 0:
                writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
                readb(base + ICS_ARCIN_V6_INTROFFSET_2);
                break;
        case 1:
                writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
                readb(base + ICS_ARCIN_V6_INTROFFSET_1);
                break;
        }
}

/* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose  : disable interrupts from card
 */
static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
        struct icside_state *state = ec->irq_data;

        state->enabled = 0;

        readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
        readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
}

/* Prototype: icside_irqpending_arcin_v6(struct expansion_card *ec)
 * Purpose  : detect an active interrupt from card
 */
static int icside_irqpending_arcin_v6(struct expansion_card *ec)
{
        struct icside_state *state = ec->irq_data;

        return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
               readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
}

static const expansioncard_ops_t icside_ops_arcin_v6 = {
        .irqenable      = icside_irqenable_arcin_v6,
        .irqdisable     = icside_irqdisable_arcin_v6,
        .irqpending     = icside_irqpending_arcin_v6,
};

/*
 * Handle routing of interrupts.  This is called before
 * we write the command to the drive.
 */
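/*
 * Only the channel about to receive a command is left unmasked; the
 * other channel's interrupt is masked by reading its INTROFFSET
 * register, mirroring the enable/disable helpers above.
 */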
static void icside_maskproc(ide_drive_t *drive, int mask)
{
        ide_hwif_t *hwif = HWIF(drive);
        struct expansion_card *ec = ECARD_DEV(hwif->dev);
        struct icside_state *state = ecard_get_drvdata(ec);
        unsigned long flags;

        local_irq_save(flags);

        state->channel = hwif->channel;

        if (state->enabled && !mask) {
                switch (hwif->channel) {
                case 0:
                        writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
                        readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
                        break;
                case 1:
                        writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
                        readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
                        break;
                }
        } else {
                readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
                readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
        }

        local_irq_restore(flags);
}

static const struct ide_port_ops icside_v6_no_dma_port_ops = {
        .maskproc               = icside_maskproc,
};

#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
/*
 * SG-DMA support.
 *
 * Similar to the BM-DMA, but we use the RiscPC's IOMD DMA controllers.
 * There is only one DMA controller per card, which means that only
 * one drive can be accessed at one time.  NOTE! We do not enforce that
 * here, but we rely on the main IDE driver spotting that both
 * interfaces use the same IRQ, which should guarantee this.
 */

/*
 * Configure the IOMD to give the appropriate timings for the transfer
 * mode being requested.  We take the advice of the ATA standards, and
 * calculate the cycle time based on the transfer mode, and the EIDE
 * MW DMA specs that the drive provides in the IDENTIFY command.
 *
 * We have the following IOMD DMA modes to choose from (all timings in ns):
 *
 *      Type    Active          Recovery        Cycle
 *      A       250 (250)       312 (550)       562 (800)
 *      B       187             250             437
 *      C       125 (125)       125 (375)       250 (500)
 *      D       62              125             187
 *
 * (figures in brackets are actual measured timings)
 *
 * However, we also need to take care of the read/write active and
 * recovery timings:
 *
 *                      Read    Write
 *      Mode    Active  -- Recovery --  Cycle   IOMD type
 *      MW0     215     50      215     480     A
 *      MW1     80      50      50      150     C
 *      MW2     70      25      25      120     C
 */
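/*
 * Example: MW_DMA_2 is driven with a 250ns cycle, transferring 16 bits
 * (2 bytes) per cycle, so the peak rate reported below works out as
 * 2000 / 250 = 8 MB/s.
 */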
static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
{
        int cycle_time, use_dma_info = 0;

        switch (xfer_mode) {
        case XFER_MW_DMA_2:
                cycle_time = 250;
                use_dma_info = 1;
                break;

        case XFER_MW_DMA_1:
                cycle_time = 250;
                use_dma_info = 1;
                break;

        case XFER_MW_DMA_0:
                cycle_time = 480;
                break;

        case XFER_SW_DMA_2:
        case XFER_SW_DMA_1:
        case XFER_SW_DMA_0:
                cycle_time = 480;
                break;
        }

        /*
         * If we're going to be doing MW_DMA_1 or MW_DMA_2, we should
         * take care to note the values in the ID...
         */
        if (use_dma_info && drive->id->eide_dma_time > cycle_time)
                cycle_time = drive->id->eide_dma_time;

        drive->drive_data = cycle_time;

        printk("%s: %s selected (peak %dMB/s)\n", drive->name,
                ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data);
}

static const struct ide_port_ops icside_v6_port_ops = {
        .set_dma_mode           = icside_set_dma_mode,
        .maskproc               = icside_maskproc,
};

static void icside_dma_host_set(ide_drive_t *drive, int on)
{
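        /*
         * There is no host-side DMA enable bit to flip here; the DMA
         * channel is enabled per transfer in icside_dma_start().
         */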
}

static int icside_dma_end(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        struct expansion_card *ec = ECARD_DEV(hwif->dev);

        drive->waiting_for_dma = 0;

        disable_dma(ec->dma);

        /* Teardown mappings after DMA has completed. */
        ide_destroy_dmatable(drive);

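        /* A non-zero residue means the transfer did not complete. */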
        return get_dma_residue(ec->dma) != 0;
}

static void icside_dma_start(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        struct expansion_card *ec = ECARD_DEV(hwif->dev);

        /* We can not enable DMA on both channels simultaneously. */
        BUG_ON(dma_channel_active(ec->dma));
        enable_dma(ec->dma);
}

static int icside_dma_setup(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        struct expansion_card *ec = ECARD_DEV(hwif->dev);
        struct icside_state *state = ecard_get_drvdata(ec);
        struct request *rq = hwif->hwgroup->rq;
        unsigned int dma_mode;

        if (rq_data_dir(rq))
                dma_mode = DMA_MODE_WRITE;
        else
                dma_mode = DMA_MODE_READ;

        /*
         * We can not enable DMA on both channels.
         */
        BUG_ON(dma_channel_active(ec->dma));

        hwif->sg_nents = ide_build_sglist(drive, rq);

        /*
         * Ensure that we have the right interrupt routed.
         */
        icside_maskproc(drive, 0);

        /*
         * Route the DMA signals to the correct interface.
         */
        writeb(state->sel | hwif->channel, state->ioc_base);

        /*
         * Select the correct timing for this drive.
         */
        set_dma_speed(ec->dma, drive->drive_data);

        /*
         * Tell the DMA engine about the SG table and
         * data direction.
         */
        set_dma_sg(ec->dma, hwif->sg_table, hwif->sg_nents);
        set_dma_mode(ec->dma, dma_mode);

        drive->waiting_for_dma = 1;

        return 0;
}

static void icside_dma_exec_cmd(ide_drive_t *drive, u8 cmd)
{
        /* issue cmd to drive */
        ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD, NULL);
}

static int icside_dma_test_irq(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        struct expansion_card *ec = ECARD_DEV(hwif->dev);
        struct icside_state *state = ecard_get_drvdata(ec);

        return readb(state->irq_port +
                     (hwif->channel ?
                        ICS_ARCIN_V6_INTRSTAT_2 :
                        ICS_ARCIN_V6_INTRSTAT_1)) & 1;
}

static void icside_dma_timeout(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;

        printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

        if (icside_dma_test_irq(drive))
                return;

        ide_dump_status(drive, "DMA timeout", hwif->tp_ops->read_status(hwif));

        icside_dma_end(drive);
}

static void icside_dma_lost_irq(ide_drive_t *drive)
{
        printk(KERN_ERR "%s: IRQ lost\n", drive->name);
}

static int icside_dma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
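        /*
         * The IOMD DMA engine is fed the scatterlist directly in
         * icside_dma_setup(), so no PRD table is allocated here.
         */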
        hwif->dmatable_cpu      = NULL;
        hwif->dmatable_dma      = 0;

        return 0;
}

static const struct ide_dma_ops icside_v6_dma_ops = {
        .dma_host_set           = icside_dma_host_set,
        .dma_setup              = icside_dma_setup,
        .dma_exec_cmd           = icside_dma_exec_cmd,
        .dma_start              = icside_dma_start,
        .dma_end                = icside_dma_end,
        .dma_test_irq           = icside_dma_test_irq,
        .dma_timeout            = icside_dma_timeout,
        .dma_lost_irq           = icside_dma_lost_irq,
};
#else
#define icside_v6_dma_ops NULL
#endif

static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
        return -EOPNOTSUPP;
}

static void icside_setup_ports(hw_regs_t *hw, void __iomem *base,
                               struct cardinfo *info, struct expansion_card *ec)
{
        unsigned long port = (unsigned long)base + info->dataoffset;

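        /* With a stepping of 6 the taskfile registers are 1 << 6 = 0x40 bytes apart. */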
        hw->io_ports.data_addr   = port;
        hw->io_ports.error_addr  = port + (1 << info->stepping);
        hw->io_ports.nsect_addr  = port + (2 << info->stepping);
        hw->io_ports.lbal_addr   = port + (3 << info->stepping);
        hw->io_ports.lbam_addr   = port + (4 << info->stepping);
        hw->io_ports.lbah_addr   = port + (5 << info->stepping);
        hw->io_ports.device_addr = port + (6 << info->stepping);
        hw->io_ports.status_addr = port + (7 << info->stepping);
        hw->io_ports.ctl_addr    = (unsigned long)base + info->ctrloffset;

        hw->irq = ec->irq;
        hw->dev = &ec->dev;
        hw->chipset = ide_acorn;
}

static int __init
icside_register_v5(struct icside_state *state, struct expansion_card *ec)
{
        void __iomem *base;
        struct ide_host *host;
        hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
        int ret;

        base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
        if (!base)
                return -ENOMEM;

        state->irq_port = base;

        ec->irqaddr  = base + ICS_ARCIN_V5_INTRSTAT;
        ec->irqmask  = 1;

        ecard_setirq(ec, &icside_ops_arcin_v5, state);

        /*
         * Be on the safe side - disable interrupts
         */
        icside_irqdisable_arcin_v5(ec, 0);

        icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec);

        host = ide_host_alloc(NULL, hws);
        if (host == NULL)
                return -ENODEV;

        state->host = host;

        ecard_set_drvdata(ec, state);

        ret = ide_host_register(host, NULL, hws);
        if (ret)
                goto err_free;

        return 0;
err_free:
        ide_host_free(host);
        ecard_set_drvdata(ec, NULL);
        return ret;
}

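/*
 * DMA is disabled by default; icside_register_v6() switches in
 * icside_dma_init() and the DMA-capable port ops only if the card's
 * DMA channel can be claimed.
 */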
static const struct ide_port_info icside_v6_port_info __initdata = {
        .init_dma               = icside_dma_off_init,
        .port_ops               = &icside_v6_no_dma_port_ops,
        .dma_ops                = &icside_v6_dma_ops,
        .host_flags             = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
        .mwdma_mask             = ATA_MWDMA2,
        .swdma_mask             = ATA_SWDMA2,
};

static int __init
icside_register_v6(struct icside_state *state, struct expansion_card *ec)
{
        void __iomem *ioc_base, *easi_base;
        struct ide_host *host;
        unsigned int sel = 0;
        int ret;
        hw_regs_t hw[2], *hws[] = { &hw[0], &hw[1], NULL, NULL };
        struct ide_port_info d = icside_v6_port_info;

        ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
        if (!ioc_base) {
                ret = -ENOMEM;
                goto out;
        }

        easi_base = ioc_base;

        if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
                easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0);
                if (!easi_base) {
                        ret = -ENOMEM;
                        goto out;
                }

                /*
                 * Enable access to the EASI region.
                 */
                sel = 1 << 5;
        }

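        /*
         * Latch the EASI select bit; icside_dma_setup() later rewrites
         * this register with the channel number OR'd in to route the
         * DMA signals.
         */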
        writeb(sel, ioc_base);

        ecard_setirq(ec, &icside_ops_arcin_v6, state);

        state->irq_port   = easi_base;
        state->ioc_base   = ioc_base;
        state->sel        = sel;

        /*
         * Be on the safe side - disable interrupts
         */
        icside_irqdisable_arcin_v6(ec, 0);

        icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec);
        icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec);

        host = ide_host_alloc(&d, hws);
        if (host == NULL)
                return -ENODEV;

        state->host = host;

        ecard_set_drvdata(ec, state);

        if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
                d.init_dma = icside_dma_init;
                d.port_ops = &icside_v6_port_ops;
        } else
                d.dma_ops = NULL;

        ret = ide_host_register(host, NULL, hws);
        if (ret)
                goto err_free;

        return 0;
err_free:
        ide_host_free(host);
        if (d.dma_ops)
                free_dma(ec->dma);
        ecard_set_drvdata(ec, NULL);
out:
        return ret;
}

static int __devinit
icside_probe(struct expansion_card *ec, const struct ecard_id *id)
{
        struct icside_state *state;
        void __iomem *idmem;
        int ret;

        ret = ecard_request_resources(ec);
        if (ret)
                goto out;

        state = kzalloc(sizeof(struct icside_state), GFP_KERNEL);
        if (!state) {
                ret = -ENOMEM;
                goto release;
        }

        state->type     = ICS_TYPE_NOTYPE;

        idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
        if (idmem) {
                unsigned int type;

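                /* The card type is presented as one bit in each of four ID bytes. */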
                type = readb(idmem + ICS_IDENT_OFFSET) & 1;
                type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
                type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
                type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
                ecardm_iounmap(ec, idmem);

                state->type = type;
        }

        switch (state->type) {
        case ICS_TYPE_A3IN:
                dev_warn(&ec->dev, "A3IN unsupported\n");
                ret = -ENODEV;
                break;

        case ICS_TYPE_A3USER:
                dev_warn(&ec->dev, "A3USER unsupported\n");
                ret = -ENODEV;
                break;

        case ICS_TYPE_V5:
                ret = icside_register_v5(state, ec);
                break;

        case ICS_TYPE_V6:
                ret = icside_register_v6(state, ec);
                break;

        default:
                dev_warn(&ec->dev, "unknown interface type\n");
                ret = -ENODEV;
                break;
        }

        if (ret == 0)
                goto out;

        kfree(state);
 release:
        ecard_release_resources(ec);
 out:
        return ret;
}

static void __devexit icside_remove(struct expansion_card *ec)
{
        struct icside_state *state = ecard_get_drvdata(ec);

        switch (state->type) {
        case ICS_TYPE_V5:
                /* FIXME: tell IDE to stop using the interface */

                /* Disable interrupts */
                icside_irqdisable_arcin_v5(ec, 0);
                break;

        case ICS_TYPE_V6:
                /* FIXME: tell IDE to stop using the interface */
                if (ec->dma != NO_DMA)
                        free_dma(ec->dma);

                /* Disable interrupts */
                icside_irqdisable_arcin_v6(ec, 0);

                /* Reset the ROM pointer/EASI selection */
                writeb(0, state->ioc_base);
                break;
        }

        ecard_set_drvdata(ec, NULL);

        kfree(state);
        ecard_release_resources(ec);
}

static void icside_shutdown(struct expansion_card *ec)
{
        struct icside_state *state = ecard_get_drvdata(ec);
        unsigned long flags;

        /*
         * Disable interrupts from this card.  We need to do
         * this before disabling EASI since we may be accessing
         * this register via that region.
         */
        local_irq_save(flags);
        ec->ops->irqdisable(ec, 0);
        local_irq_restore(flags);

        /*
         * Reset the ROM pointer so that we can read the ROM
         * after a soft reboot.  This also disables access to
         * the IDE taskfile via the EASI region.
         */
        if (state->ioc_base)
                writeb(0, state->ioc_base);
}

static const struct ecard_id icside_ids[] = {
        { MANU_ICS,  PROD_ICS_IDE  },
        { MANU_ICS2, PROD_ICS2_IDE },
        { 0xffff, 0xffff }
};

static struct ecard_driver icside_driver = {
        .probe          = icside_probe,
        .remove         = __devexit_p(icside_remove),
        .shutdown       = icside_shutdown,
        .id_table       = icside_ids,
        .drv = {
                .name   = "icside",
        },
};

static int __init icside_init(void)
{
        return ecard_register_driver(&icside_driver);
}

static void __exit icside_exit(void)
{
        ecard_unregister_driver(&icside_driver);
}

MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ICS IDE driver");

module_init(icside_init);
module_exit(icside_exit);