/*
 * Driver for Atmel AT32 and AT91 SPI Controllers
 *
 * Copyright (C) 2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>

#include <asm/io.h>
#include <asm/arch/board.h>
#include <asm/arch/gpio.h>

#ifdef CONFIG_ARCH_AT91
#include <asm/arch/cpu.h>
#endif

#include "atmel_spi.h"

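/*
 * Register access goes through the spi_readl()/spi_writel() helpers
 * defined in atmel_spi.h, together with the SPI_BIT/SPI_BF/SPI_BFINS
 * bitfield macros used throughout this file.  As a rough sketch of
 * what those accessors amount to (see the header for the real thing):
 *
 *      #define spi_readl(port, reg) \
 *              __raw_readl((port)->regs + SPI_##reg)
 *      #define spi_writel(port, reg, value) \
 *              __raw_writel((value), (port)->regs + SPI_##reg)
 */
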
/*
 * The core SPI transfer engine just talks to a register bank to set up
 * DMA transfers; transfer queue progress is driven by IRQs.  The clock
 * framework provides the base clock, subdivided for each spi_device.
 *
 * Newer controllers, marked with "new_1" flag, have:
 *  - CR.LASTXFER
 *  - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
 *  - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
 *  - SPI_CSRx.CSAAT
 *  - SPI_CSRx.SBCR allows faster clocking
 */
struct atmel_spi {
        spinlock_t              lock;

        void __iomem            *regs;
        int                     irq;
        struct clk              *clk;
        struct platform_device  *pdev;
        unsigned                new_1:1;

        u8                      stopping;
        struct list_head        queue;
        struct spi_transfer     *current_transfer;
        unsigned long           remaining_bytes;

        void                    *buffer;
        dma_addr_t              buffer_dma;
};

#define BUFFER_SIZE             PAGE_SIZE
#define INVALID_DMA_ADDRESS     0xffffffff

/*
 * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
 * they assume that spi slave device state will not change on deselect, so
 * that automagic deselection is OK.  Not so!  Workaround uses nCSx pins
 * as GPIOs; or newer controllers have CSAAT and friends.
 *
 * Since the CSAAT functionality is a bit weird on newer controllers
 * as well, we use GPIO to control nCSx pins on all controllers.
 */

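/*
 * Board code hands us the nCSx GPIO number through
 * spi_board_info.controller_data.  A minimal sketch of such a board
 * table (the modalias and GPIO number here are made-up examples):
 *
 *      static struct spi_board_info board_spi_info[] __initdata = {
 *              {
 *                      .modalias        = "mtd_dataflash",
 *                      .max_speed_hz    = 8000000,
 *                      .bus_num         = 0,
 *                      .chip_select     = 1,
 *                      .controller_data = (void *) EXAMPLE_NPCS1_GPIO,
 *              },
 *      };
 *
 *      spi_register_board_info(board_spi_info, ARRAY_SIZE(board_spi_info));
 */
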
static inline void cs_activate(struct spi_device *spi)
{
        unsigned gpio = (unsigned) spi->controller_data;
        unsigned active = spi->mode & SPI_CS_HIGH;

        dev_dbg(&spi->dev, "activate %u%s\n", gpio, active ? " (high)" : "");
        gpio_set_value(gpio, active);
}

static inline void cs_deactivate(struct spi_device *spi)
{
        unsigned gpio = (unsigned) spi->controller_data;
        unsigned active = spi->mode & SPI_CS_HIGH;

        dev_dbg(&spi->dev, "DEactivate %u%s\n", gpio, active ? " (low)" : "");
        gpio_set_value(gpio, !active);
}

/*
 * Submit next transfer for DMA.
 * lock is held, spi irq is blocked
 */
static void atmel_spi_next_xfer(struct spi_master *master,
                                struct spi_message *msg)
{
        struct atmel_spi        *as = spi_master_get_devdata(master);
        struct spi_transfer     *xfer;
        u32                     len;
        dma_addr_t              tx_dma, rx_dma;

        xfer = as->current_transfer;
        if (!xfer || as->remaining_bytes == 0) {
                if (xfer)
                        xfer = list_entry(xfer->transfer_list.next,
                                        struct spi_transfer, transfer_list);
                else
                        xfer = list_entry(msg->transfers.next,
                                        struct spi_transfer, transfer_list);
                as->remaining_bytes = xfer->len;
                as->current_transfer = xfer;
        }

        len = as->remaining_bytes;

        tx_dma = xfer->tx_dma;
        rx_dma = xfer->rx_dma;

        /* use scratch buffer only when rx or tx data is unspecified */
        if (rx_dma == INVALID_DMA_ADDRESS) {
                rx_dma = as->buffer_dma;
                if (len > BUFFER_SIZE)
                        len = BUFFER_SIZE;
        }
        if (tx_dma == INVALID_DMA_ADDRESS) {
                tx_dma = as->buffer_dma;
                if (len > BUFFER_SIZE)
                        len = BUFFER_SIZE;
                memset(as->buffer, 0, len);
                dma_sync_single_for_device(&as->pdev->dev,
                                as->buffer_dma, len, DMA_TO_DEVICE);
        }

        spi_writel(as, RPR, rx_dma);
        spi_writel(as, TPR, tx_dma);

        as->remaining_bytes -= len;
        if (msg->spi->bits_per_word > 8)
                len >>= 1;

        /* REVISIT: when xfer->delay_usecs == 0, the PDC "next transfer"
         * mechanism might help avoid the IRQ latency between transfers.
         *
         * We're also waiting for ENDRX before we start the next
         * transfer because we need to handle some difficult timing
         * issues otherwise. If we wait for ENDTX in one transfer and
         * then start waiting for ENDRX in the next, it's difficult
         * to tell the difference between the ENDRX interrupt we're
         * actually waiting for and the ENDRX interrupt of the
         * previous transfer.
         *
         * It should be doable, though. Just not now...
         */
        spi_writel(as, TNCR, 0);
        spi_writel(as, RNCR, 0);
        spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));

        dev_dbg(&msg->spi->dev,
                "  start xfer %p: len %u tx %p/%08x rx %p/%08x imr %03x\n",
                xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
                xfer->rx_buf, xfer->rx_dma, spi_readl(as, IMR));

        spi_writel(as, TCR, len);
        spi_writel(as, RCR, len);
        spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}

static void atmel_spi_next_message(struct spi_master *master)
{
        struct atmel_spi        *as = spi_master_get_devdata(master);
        struct spi_message      *msg;
        u32                     mr;

        BUG_ON(as->current_transfer);

        msg = list_entry(as->queue.next, struct spi_message, queue);

        /* Select the chip */
        mr = spi_readl(as, MR);
        mr = SPI_BFINS(PCS, ~(1 << msg->spi->chip_select), mr);
        spi_writel(as, MR, mr);
        cs_activate(msg->spi);

        atmel_spi_next_xfer(master, msg);
}

static void
atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
{
        xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
        if (xfer->tx_buf)
                xfer->tx_dma = dma_map_single(&as->pdev->dev,
                                (void *) xfer->tx_buf, xfer->len,
                                DMA_TO_DEVICE);
        if (xfer->rx_buf)
                xfer->rx_dma = dma_map_single(&as->pdev->dev,
                                xfer->rx_buf, xfer->len,
                                DMA_FROM_DEVICE);
}

static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
                                     struct spi_transfer *xfer)
{
        if (xfer->tx_dma != INVALID_DMA_ADDRESS)
                dma_unmap_single(master->cdev.dev, xfer->tx_dma,
                                 xfer->len, DMA_TO_DEVICE);
        if (xfer->rx_dma != INVALID_DMA_ADDRESS)
                dma_unmap_single(master->cdev.dev, xfer->rx_dma,
                                 xfer->len, DMA_FROM_DEVICE);
}

static void
atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
                   struct spi_message *msg, int status)
{
        cs_deactivate(msg->spi);
        list_del(&msg->queue);
        msg->status = status;

        dev_dbg(master->cdev.dev,
                "xfer complete: %u bytes transferred\n",
                msg->actual_length);

        spin_unlock(&as->lock);
        msg->complete(msg->context);
        spin_lock(&as->lock);

        as->current_transfer = NULL;

        /* continue if needed */
        if (list_empty(&as->queue) || as->stopping)
                spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
        else
                atmel_spi_next_message(master);
}

static irqreturn_t
atmel_spi_interrupt(int irq, void *dev_id)
{
        struct spi_master       *master = dev_id;
        struct atmel_spi        *as = spi_master_get_devdata(master);
        struct spi_message      *msg;
        struct spi_transfer     *xfer;
        u32                     status, pending, imr;
        int                     ret = IRQ_NONE;

        spin_lock(&as->lock);

        xfer = as->current_transfer;
        msg = list_entry(as->queue.next, struct spi_message, queue);

        imr = spi_readl(as, IMR);
        status = spi_readl(as, SR);
        pending = status & imr;

        if (pending & SPI_BIT(OVRES)) {
                int timeout;

                ret = IRQ_HANDLED;

                spi_writel(as, IDR, (SPI_BIT(ENDTX) | SPI_BIT(ENDRX)
                                     | SPI_BIT(OVRES)));

                /*
                 * When we get an overrun, we disregard the current
                 * transfer. Data will not be copied back from any
                 * bounce buffer and msg->actual_length will not be
                 * updated with the last xfer.
                 *
                 * We will also not process any remaining transfers in
                 * the message.
                 *
                 * First, stop the transfer and unmap the DMA buffers.
                 */
                spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
                if (!msg->is_dma_mapped)
                        atmel_spi_dma_unmap_xfer(master, xfer);

                /* REVISIT: udelay in irq is unfriendly */
                if (xfer->delay_usecs)
                        udelay(xfer->delay_usecs);

                dev_warn(master->cdev.dev, "fifo overrun (%u/%u remaining)\n",
                         spi_readl(as, TCR), spi_readl(as, RCR));

                /*
                 * Clean up DMA registers and make sure the data
                 * registers are empty.
                 */
                spi_writel(as, RNCR, 0);
                spi_writel(as, TNCR, 0);
                spi_writel(as, RCR, 0);
                spi_writel(as, TCR, 0);
                for (timeout = 1000; timeout; timeout--)
                        if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
                                break;
                if (!timeout)
                        dev_warn(master->cdev.dev,
                                 "timeout waiting for TXEMPTY\n");
                while (spi_readl(as, SR) & SPI_BIT(RDRF))
                        spi_readl(as, RDR);

                /* Clear any overrun happening while cleaning up */
                spi_readl(as, SR);

                atmel_spi_msg_done(master, as, msg, -EIO);
        } else if (pending & SPI_BIT(ENDRX)) {
                ret = IRQ_HANDLED;

                spi_writel(as, IDR, pending);

                if (as->remaining_bytes == 0) {
                        msg->actual_length += xfer->len;

                        if (!msg->is_dma_mapped)
                                atmel_spi_dma_unmap_xfer(master, xfer);

                        /* REVISIT: udelay in irq is unfriendly */
                        if (xfer->delay_usecs)
                                udelay(xfer->delay_usecs);

                        if (msg->transfers.prev == &xfer->transfer_list) {
                                /* report completed message */
                                atmel_spi_msg_done(master, as, msg, 0);
                        } else {
                                if (xfer->cs_change) {
                                        cs_deactivate(msg->spi);
                                        udelay(1);
                                        cs_activate(msg->spi);
                                }

                                /*
                                 * Not done yet. Submit the next transfer.
                                 *
                                 * FIXME handle protocol options for xfer
                                 */
                                atmel_spi_next_xfer(master, msg);
                        }
                } else {
                        /*
                         * Keep going, we still have data to send in
                         * the current transfer.
                         */
                        atmel_spi_next_xfer(master, msg);
                }
        }

        spin_unlock(&as->lock);

        return ret;
}

#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)

static int atmel_spi_setup(struct spi_device *spi)
{
        struct atmel_spi        *as;
        u32                     scbr, csr;
        unsigned int            bits = spi->bits_per_word;
        unsigned long           bus_hz, sck_hz;
        unsigned int            npcs_pin;
        int                     ret;

        as = spi_master_get_devdata(spi->master);

        if (as->stopping)
                return -ESHUTDOWN;

        if (spi->chip_select >= spi->master->num_chipselect) {
                dev_dbg(&spi->dev,
                                "setup: invalid chipselect %u (%u defined)\n",
                                spi->chip_select, spi->master->num_chipselect);
                return -EINVAL;
        }

        if (bits == 0)
                bits = 8;
        if (bits < 8 || bits > 16) {
                dev_dbg(&spi->dev,
                                "setup: invalid bits_per_word %u (8 to 16)\n",
                                bits);
                return -EINVAL;
        }

        if (spi->mode & ~MODEBITS) {
                dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
                        spi->mode & ~MODEBITS);
                return -EINVAL;
        }

        /* speed zero convention is used by some upper layers */
        bus_hz = clk_get_rate(as->clk);
        if (spi->max_speed_hz) {
                /* assume div32/fdiv/mbz == 0 */
                if (!as->new_1)
                        bus_hz /= 2;
                scbr = ((bus_hz + spi->max_speed_hz - 1)
                        / spi->max_speed_hz);
                if (scbr >= (1 << SPI_SCBR_SIZE)) {
                        dev_dbg(&spi->dev, "setup: %u Hz too slow, scbr %u\n",
                                        spi->max_speed_hz, scbr);
                        return -EINVAL;
                }
        } else
                scbr = 0xff;
        sck_hz = bus_hz / scbr;

        csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8);
        if (spi->mode & SPI_CPOL)
                csr |= SPI_BIT(CPOL);
        if (!(spi->mode & SPI_CPHA))
                csr |= SPI_BIT(NCPHA);

        /* TODO: DLYBS and DLYBCT */
        csr |= SPI_BF(DLYBS, 10);
        csr |= SPI_BF(DLYBCT, 10);

        /* chipselect must have been muxed as GPIO (e.g. in board setup) */
        npcs_pin = (unsigned int)spi->controller_data;
        if (!spi->controller_state) {
                ret = gpio_request(npcs_pin, "spi_npcs");
                if (ret)
                        return ret;
                spi->controller_state = (void *)npcs_pin;
                /* start deselected: drive nCS to its inactive level */
                gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
        }

        dev_dbg(&spi->dev,
                "setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n",
                sck_hz, bits, spi->mode, spi->chip_select, csr);

        spi_writel(as, CSR0 + 4 * spi->chip_select, csr);

        return 0;
}

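/*
 * Worked example of the SCBR math in atmel_spi_setup(): assuming (for
 * illustration only) a 60 MHz bus clock on a "new_1" controller and
 * spi->max_speed_hz = 8000000, the round-up division gives
 *
 *      scbr   = (60000000 + 8000000 - 1) / 8000000 = 8
 *      sck_hz = 60000000 / 8 = 7500000
 *
 * i.e. the divisor is rounded up so the actual SCK (7.5 MHz) never
 * exceeds the device's stated maximum.
 */
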
static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
        struct atmel_spi        *as;
        struct spi_transfer     *xfer;
        unsigned long           flags;
        struct device           *controller = spi->master->cdev.dev;

        as = spi_master_get_devdata(spi->master);

        dev_dbg(controller, "new message %p submitted for %s\n",
                        msg, spi->dev.bus_id);

        if (unlikely(list_empty(&msg->transfers)
                        || !spi->max_speed_hz))
                return -EINVAL;

        if (as->stopping)
                return -ESHUTDOWN;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!(xfer->tx_buf || xfer->rx_buf)) {
                        dev_dbg(&spi->dev, "missing rx or tx buf\n");
                        return -EINVAL;
                }

                /* FIXME implement these protocol options!! */
                if (xfer->bits_per_word || xfer->speed_hz) {
                        dev_dbg(&spi->dev, "no protocol options yet\n");
                        return -ENOPROTOOPT;
                }
        }

        /* scrub dcache "early" */
        if (!msg->is_dma_mapped) {
                list_for_each_entry(xfer, &msg->transfers, transfer_list)
                        atmel_spi_dma_map_xfer(as, xfer);
        }

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                dev_dbg(controller,
                        "  xfer %p: len %u tx %p/%08x rx %p/%08x\n",
                        xfer, xfer->len,
                        xfer->tx_buf, xfer->tx_dma,
                        xfer->rx_buf, xfer->rx_dma);
        }

        msg->status = -EINPROGRESS;
        msg->actual_length = 0;

        spin_lock_irqsave(&as->lock, flags);
        list_add_tail(&msg->queue, &as->queue);
        if (!as->current_transfer)
                atmel_spi_next_message(spi->master);
        spin_unlock_irqrestore(&as->lock, flags);

        return 0;
}

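/*
 * For reference, messages reach atmel_spi_transfer() through the SPI
 * core rather than by direct calls.  A minimal sketch of a caller,
 * where cmd_buf is a made-up driver buffer that must stay valid until
 * the completion callback runs (it gets DMA-mapped above):
 *
 *      struct spi_transfer t = {
 *              .tx_buf = cmd_buf,
 *              .len    = sizeof(cmd_buf),
 *      };
 *      struct spi_message m;
 *
 *      spi_message_init(&m);
 *      spi_message_add_tail(&t, &m);
 *      status = spi_async(spi, &m);
 *
 * spi_async() ends up calling master->transfer, i.e. this function.
 */
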
static void atmel_spi_cleanup(struct spi_device *spi)
{
        if (spi->controller_state)
                gpio_free((unsigned int)spi->controller_data);
}

/*-------------------------------------------------------------------------*/

static int __init atmel_spi_probe(struct platform_device *pdev)
{
        struct resource         *regs;
        int                     irq;
        struct clk              *clk;
        int                     ret;
        struct spi_master       *master;
        struct atmel_spi        *as;

        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!regs)
                return -ENXIO;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        clk = clk_get(&pdev->dev, "spi_clk");
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        /* setup spi core then atmel-specific driver state */
        ret = -ENOMEM;
        master = spi_alloc_master(&pdev->dev, sizeof *as);
        if (!master)
                goto out_free;

        master->bus_num = pdev->id;
        master->num_chipselect = 4;
        master->setup = atmel_spi_setup;
        master->transfer = atmel_spi_transfer;
        master->cleanup = atmel_spi_cleanup;
        platform_set_drvdata(pdev, master);

        as = spi_master_get_devdata(master);

        as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
                                        &as->buffer_dma, GFP_KERNEL);
        if (!as->buffer)
                goto out_free;

        spin_lock_init(&as->lock);
        INIT_LIST_HEAD(&as->queue);
        as->pdev = pdev;
        as->regs = ioremap(regs->start, (regs->end - regs->start) + 1);
        if (!as->regs)
                goto out_free_buffer;
        as->irq = irq;
        as->clk = clk;
#ifdef CONFIG_ARCH_AT91
        if (!cpu_is_at91rm9200())
                as->new_1 = 1;
#endif

        ret = request_irq(irq, atmel_spi_interrupt, 0,
                        pdev->dev.bus_id, master);
        if (ret)
                goto out_unmap_regs;

        /* Initialize the hardware */
        clk_enable(clk);
        spi_writel(as, CR, SPI_BIT(SWRST));
        spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
        spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
        spi_writel(as, CR, SPI_BIT(SPIEN));

        /* go! */
        dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
                        (unsigned long)regs->start, irq);

        ret = spi_register_master(master);
        if (ret)
                goto out_reset_hw;

        return 0;

out_reset_hw:
        spi_writel(as, CR, SPI_BIT(SWRST));
        clk_disable(clk);
        free_irq(irq, master);
out_unmap_regs:
        iounmap(as->regs);
out_free_buffer:
        dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
                        as->buffer_dma);
out_free:
        clk_put(clk);
        spi_master_put(master);
        return ret;
}

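/*
 * atmel_spi_probe() expects board/SoC code to have registered a
 * platform device named "atmel_spi" with one MEM resource, one IRQ,
 * and a clock named "spi_clk".  A hedged sketch, with placeholder
 * base address and IRQ number (real values come from the SoC's own
 * devices file):
 *
 *      static struct resource spi0_resources[] = {
 *              {
 *                      .start  = 0xfffc8000,
 *                      .end    = 0xfffc8000 + 0x3fff,
 *                      .flags  = IORESOURCE_MEM,
 *              },
 *              {
 *                      .start  = 13,
 *                      .end    = 13,
 *                      .flags  = IORESOURCE_IRQ,
 *              },
 *      };
 *
 *      static struct platform_device atmel_spi0_device = {
 *              .name           = "atmel_spi",
 *              .id             = 0,
 *              .resource       = spi0_resources,
 *              .num_resources  = ARRAY_SIZE(spi0_resources),
 *      };
 *
 *      platform_device_register(&atmel_spi0_device);
 */
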
static int __exit atmel_spi_remove(struct platform_device *pdev)
{
        struct spi_master       *master = platform_get_drvdata(pdev);
        struct atmel_spi        *as = spi_master_get_devdata(master);
        struct spi_message      *msg, *n;

        /* reset the hardware and block queue progress */
        spin_lock_irq(&as->lock);
        as->stopping = 1;
        spi_writel(as, CR, SPI_BIT(SWRST));
        spi_readl(as, SR);
        spin_unlock_irq(&as->lock);

        /* Terminate remaining queued transfers; iterate safely, since
         * the completion callback may free the message.
         */
        list_for_each_entry_safe(msg, n, &as->queue, queue) {
                /* REVISIT unmapping the dma is a NOP on ARM and AVR32
                 * but we shouldn't depend on that...
                 */
                list_del(&msg->queue);
                msg->status = -ESHUTDOWN;
                msg->complete(msg->context);
        }

        dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
                        as->buffer_dma);

        clk_disable(as->clk);
        clk_put(as->clk);
        free_irq(as->irq, master);
        iounmap(as->regs);

        spi_unregister_master(master);

        return 0;
}

#ifdef  CONFIG_PM

static int atmel_spi_suspend(struct platform_device *pdev, pm_message_t mesg)
{
        struct spi_master       *master = platform_get_drvdata(pdev);
        struct atmel_spi        *as = spi_master_get_devdata(master);

        clk_disable(as->clk);
        return 0;
}

static int atmel_spi_resume(struct platform_device *pdev)
{
        struct spi_master       *master = platform_get_drvdata(pdev);
        struct atmel_spi        *as = spi_master_get_devdata(master);

        clk_enable(as->clk);
        return 0;
}

#else
#define atmel_spi_suspend       NULL
#define atmel_spi_resume        NULL
#endif


static struct platform_driver atmel_spi_driver = {
        .driver         = {
                .name   = "atmel_spi",
                .owner  = THIS_MODULE,
        },
        .suspend        = atmel_spi_suspend,
        .resume         = atmel_spi_resume,
        .remove         = __exit_p(atmel_spi_remove),
};

static int __init atmel_spi_init(void)
{
        return platform_driver_probe(&atmel_spi_driver, atmel_spi_probe);
}
module_init(atmel_spi_init);

static void __exit atmel_spi_exit(void)
{
        platform_driver_unregister(&atmel_spi_driver);
}
module_exit(atmel_spi_exit);

MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
MODULE_LICENSE("GPL");