[pandora-kernel.git] drivers/spi/davinci_spi.c
1 /*
2  * Copyright (C) 2009 Texas Instruments.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
17  */
18
19 #include <linux/interrupt.h>
20 #include <linux/io.h>
21 #include <linux/gpio.h>
22 #include <linux/module.h>
23 #include <linux/delay.h>
24 #include <linux/platform_device.h>
25 #include <linux/err.h>
26 #include <linux/clk.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/spi/spi.h>
29 #include <linux/spi/spi_bitbang.h>
30 #include <linux/slab.h>
31
32 #include <mach/spi.h>
33 #include <mach/edma.h>
34
35 #define SPI_NO_RESOURCE         ((resource_size_t)-1)
36
37 #define SPI_MAX_CHIPSELECT      2
38
39 #define CS_DEFAULT      0xFF
40
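/* Size of the scratch buffer (tmp_buf) used as a dummy TX source for receive-only DMA */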
41 #define SPI_BUFSIZ      (SMP_CACHE_BYTES + 1)
42 #define DAVINCI_DMA_DATA_TYPE_S8        0x01
43 #define DAVINCI_DMA_DATA_TYPE_S16       0x02
44 #define DAVINCI_DMA_DATA_TYPE_S32       0x04
45
46 #define SPIFMT_PHASE_MASK       BIT(16)
47 #define SPIFMT_POLARITY_MASK    BIT(17)
48 #define SPIFMT_DISTIMER_MASK    BIT(18)
49 #define SPIFMT_SHIFTDIR_MASK    BIT(20)
50 #define SPIFMT_WAITENA_MASK     BIT(21)
51 #define SPIFMT_PARITYENA_MASK   BIT(22)
52 #define SPIFMT_ODD_PARITY_MASK  BIT(23)
53 #define SPIFMT_WDELAY_MASK      0x3f000000u
54 #define SPIFMT_WDELAY_SHIFT     24
55 #define SPIFMT_CHARLEN_MASK     0x0000001Fu
56
57 /* SPIGCR1 */
58 #define SPIGCR1_SPIENA_MASK     0x01000000u
59
60 /* SPIPC0 */
61 #define SPIPC0_DIFUN_MASK       BIT(11)         /* MISO */
62 #define SPIPC0_DOFUN_MASK       BIT(10)         /* MOSI */
63 #define SPIPC0_CLKFUN_MASK      BIT(9)          /* CLK */
64 #define SPIPC0_SPIENA_MASK      BIT(8)          /* nREADY */
65 #define SPIPC0_EN1FUN_MASK      BIT(1)
66 #define SPIPC0_EN0FUN_MASK      BIT(0)
67
68 #define SPIINT_MASKALL          0x0101035F
69 #define SPI_INTLVL_1            0x000001FFu
70 #define SPI_INTLVL_0            0x00000000u
71
72 /* SPIDAT1 */
73 #define SPIDAT1_CSHOLD_SHIFT    28
74 #define SPIDAT1_CSNR_SHIFT      16
75 #define SPIGCR1_CLKMOD_MASK     BIT(1)
76 #define SPIGCR1_MASTER_MASK     BIT(0)
77 #define SPIGCR1_LOOPBACK_MASK   BIT(16)
78
79 /* SPIBUF */
80 #define SPIBUF_TXFULL_MASK      BIT(29)
81 #define SPIBUF_RXEMPTY_MASK     BIT(31)
82
83 /* Error Masks */
84 #define SPIFLG_DLEN_ERR_MASK            BIT(0)
85 #define SPIFLG_TIMEOUT_MASK             BIT(1)
86 #define SPIFLG_PARERR_MASK              BIT(2)
87 #define SPIFLG_DESYNC_MASK              BIT(3)
88 #define SPIFLG_BITERR_MASK              BIT(4)
89 #define SPIFLG_OVRRUN_MASK              BIT(6)
90 #define SPIFLG_RX_INTR_MASK             BIT(8)
91 #define SPIFLG_TX_INTR_MASK             BIT(9)
92 #define SPIFLG_BUF_INIT_ACTIVE_MASK     BIT(24)
93 #define SPIFLG_MASK                     (SPIFLG_DLEN_ERR_MASK \
94                                 | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
95                                 | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
96                                 | SPIFLG_OVRRUN_MASK | SPIFLG_RX_INTR_MASK \
97                                 | SPIFLG_TX_INTR_MASK \
98                                 | SPIFLG_BUF_INIT_ACTIVE_MASK)
99
100 #define SPIINT_DLEN_ERR_INTR    BIT(0)
101 #define SPIINT_TIMEOUT_INTR     BIT(1)
102 #define SPIINT_PARERR_INTR      BIT(2)
103 #define SPIINT_DESYNC_INTR      BIT(3)
104 #define SPIINT_BITERR_INTR      BIT(4)
105 #define SPIINT_OVRRUN_INTR      BIT(6)
106 #define SPIINT_RX_INTR          BIT(8)
107 #define SPIINT_TX_INTR          BIT(9)
108 #define SPIINT_DMA_REQ_EN       BIT(16)
109 #define SPIINT_ENABLE_HIGHZ     BIT(24)
110
111 #define SPI_T2CDELAY_SHIFT      16
112 #define SPI_C2TDELAY_SHIFT      24
113
114 /* SPI Controller registers */
115 #define SPIGCR0         0x00
116 #define SPIGCR1         0x04
117 #define SPIINT          0x08
118 #define SPILVL          0x0c
119 #define SPIFLG          0x10
120 #define SPIPC0          0x14
121 #define SPIPC1          0x18
122 #define SPIPC2          0x1c
123 #define SPIPC3          0x20
124 #define SPIPC4          0x24
125 #define SPIPC5          0x28
126 #define SPIPC6          0x2c
127 #define SPIPC7          0x30
128 #define SPIPC8          0x34
129 #define SPIDAT0         0x38
130 #define SPIDAT1         0x3c
131 #define SPIBUF          0x40
132 #define SPIEMU          0x44
133 #define SPIDELAY        0x48
134 #define SPIDEF          0x4c
135 #define SPIFMT0         0x50
136 #define SPIFMT1         0x54
137 #define SPIFMT2         0x58
138 #define SPIFMT3         0x5c
139 #define TGINTVEC0       0x60
140 #define TGINTVEC1       0x64
141
142 struct davinci_spi_slave {
143         u32     cmd_to_write;
144         u32     clk_ctrl_to_write;
145         u32     bytes_per_word;
146         u8      active_cs;
147 };
148
149 /* We have 2 DMA channels per CS, one for RX and one for TX */
150 struct davinci_spi_dma {
151         int                     dma_tx_channel;
152         int                     dma_rx_channel;
153         int                     dma_tx_sync_dev;
154         int                     dma_rx_sync_dev;
155         enum dma_event_q        eventq;
156
157         struct completion       dma_tx_completion;
158         struct completion       dma_rx_completion;
159 };
160
161 /* SPI Controller driver's private data. */
162 struct davinci_spi {
163         struct spi_bitbang      bitbang;
164         struct clk              *clk;
165
166         u8                      version;
167         resource_size_t         pbase;
168         void __iomem            *base;
169         size_t                  region_size;
170         u32                     irq;
171         struct completion       done;
172
173         const void              *tx;
174         void                    *rx;
175         u8                      *tmp_buf;
176         int                     count;
177         struct davinci_spi_dma  *dma_channels;
178         struct davinci_spi_platform_data *pdata;
179
180         void                    (*get_rx)(u32 rx_data, struct davinci_spi *);
181         u32                     (*get_tx)(struct davinci_spi *);
182
183         struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT];
184 };
185
186 static unsigned use_dma;
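/* Set from platform data at probe time; non-zero selects the EDMA transfer path */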
187
188 static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi)
189 {
190         u8 *rx = davinci_spi->rx;
191
192         *rx++ = (u8)data;
193         davinci_spi->rx = rx;
194 }
195
196 static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi)
197 {
198         u16 *rx = davinci_spi->rx;
199
200         *rx++ = (u16)data;
201         davinci_spi->rx = rx;
202 }
203
204 static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi)
205 {
206         u32 data;
207         const u8 *tx = davinci_spi->tx;
208
209         data = *tx++;
210         davinci_spi->tx = tx;
211         return data;
212 }
213
214 static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi)
215 {
216         u32 data;
217         const u16 *tx = davinci_spi->tx;
218
219         data = *tx++;
220         davinci_spi->tx = tx;
221         return data;
222 }
223
224 static inline void set_io_bits(void __iomem *addr, u32 bits)
225 {
226         u32 v = ioread32(addr);
227
228         v |= bits;
229         iowrite32(v, addr);
230 }
231
232 static inline void clear_io_bits(void __iomem *addr, u32 bits)
233 {
234         u32 v = ioread32(addr);
235
236         v &= ~bits;
237         iowrite32(v, addr);
238 }
239
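/*
 * SPIFMT0..SPIFMT3 are laid out contiguously (4 bytes apart), so the
 * chip select number indexes the per-chipselect format register directly.
 */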
240 static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
241 {
242         set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
243 }
244
245 static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
246 {
247         clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
248 }
249
250 static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable)
251 {
252         struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
253
254         if (enable)
255                 set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
256         else
257                 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
258 }
259
260 /*
261  * Interface to control the chip select signal
262  */
263 static void davinci_spi_chipselect(struct spi_device *spi, int value)
264 {
265         struct davinci_spi *davinci_spi;
266         struct davinci_spi_platform_data *pdata;
267         u32 data1_reg_val = 0;
268
269         davinci_spi = spi_master_get_devdata(spi->master);
270         pdata = davinci_spi->pdata;
271
272         /*
273          * Board specific chip select logic decides the polarity and cs
274          * line for the controller
275          */
276         if (value == BITBANG_CS_INACTIVE) {
277                 set_io_bits(davinci_spi->base + SPIDEF, CS_DEFAULT);
278
279                 data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT;
280                 iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
281
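                /* Wait for any in-flight word to be received before returning */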
282                 while ((ioread32(davinci_spi->base + SPIBUF)
283                                         & SPIBUF_RXEMPTY_MASK) == 0)
284                         cpu_relax();
285         }
286 }
287
288 /**
289  * davinci_spi_setup_transfer - determine the transfer method
290  * @spi: spi device on which the data transfer is to be done
291  * @t: spi transfer in which the transfer info is filled
292  *
293  * This function determines the data transfer method (8- or 16-bit) and
294  * sets the per-chipselect format register (character length and clock
295  * prescaler) according to the SPI slave device frequency.
296  */
297 static int davinci_spi_setup_transfer(struct spi_device *spi,
298                 struct spi_transfer *t)
299 {
300
301         struct davinci_spi *davinci_spi;
302         struct davinci_spi_platform_data *pdata;
303         u8 bits_per_word = 0;
304         u32 hz = 0, prescale;
305
306         davinci_spi = spi_master_get_devdata(spi->master);
307         pdata = davinci_spi->pdata;
308
309         if (t) {
310                 bits_per_word = t->bits_per_word;
311                 hz = t->speed_hz;
312         }
313
314         /* if bits_per_word is not set, fall back to the device default */
315         if (!bits_per_word)
316                 bits_per_word = spi->bits_per_word;
317
318         /*
319          * Assign function pointer to appropriate transfer method
320          * 8bit, 16bit or 32bit transfer
321          */
322         if (bits_per_word <= 8 && bits_per_word >= 2) {
323                 davinci_spi->get_rx = davinci_spi_rx_buf_u8;
324                 davinci_spi->get_tx = davinci_spi_tx_buf_u8;
325                 davinci_spi->slave[spi->chip_select].bytes_per_word = 1;
326         } else if (bits_per_word <= 16 && bits_per_word >= 2) {
327                 davinci_spi->get_rx = davinci_spi_rx_buf_u16;
328                 davinci_spi->get_tx = davinci_spi_tx_buf_u16;
329                 davinci_spi->slave[spi->chip_select].bytes_per_word = 2;
330         } else
331                 return -EINVAL;
332
333         if (!hz)
334                 hz = spi->max_speed_hz;
335
336         clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK,
337                         spi->chip_select);
338         set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f,
339                         spi->chip_select);
340
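        /*
         * The prescaler occupies bits 8..15 of SPIFMTn; the resulting bit
         * rate works out to the module clock divided by (prescale + 1),
         * which is the formula the calculation below assumes.
         */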
341         prescale = ((clk_get_rate(davinci_spi->clk) / hz) - 1) & 0xff;
342
343         clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select);
344         set_fmt_bits(davinci_spi->base, prescale << 8, spi->chip_select);
345
346         return 0;
347 }
348
349 static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data)
350 {
351         struct spi_device *spi = (struct spi_device *)data;
352         struct davinci_spi *davinci_spi;
353         struct davinci_spi_dma *davinci_spi_dma;
354         struct davinci_spi_platform_data *pdata;
355
356         davinci_spi = spi_master_get_devdata(spi->master);
357         davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
358         pdata = davinci_spi->pdata;
359
360         if (ch_status == DMA_COMPLETE)
361                 edma_stop(davinci_spi_dma->dma_rx_channel);
362         else
363                 edma_clean_channel(davinci_spi_dma->dma_rx_channel);
364
365         complete(&davinci_spi_dma->dma_rx_completion);
366         /* We must disable the DMA RX request */
367         davinci_spi_set_dma_req(spi, 0);
368 }
369
370 static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data)
371 {
372         struct spi_device *spi = (struct spi_device *)data;
373         struct davinci_spi *davinci_spi;
374         struct davinci_spi_dma *davinci_spi_dma;
375         struct davinci_spi_platform_data *pdata;
376
377         davinci_spi = spi_master_get_devdata(spi->master);
378         davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
379         pdata = davinci_spi->pdata;
380
381         if (ch_status == DMA_COMPLETE)
382                 edma_stop(davinci_spi_dma->dma_tx_channel);
383         else
384                 edma_clean_channel(davinci_spi_dma->dma_tx_channel);
385
386         complete(&davinci_spi_dma->dma_tx_completion);
387         /* We must disable the DMA TX request */
388         davinci_spi_set_dma_req(spi, 0);
389 }
390
391 static int davinci_spi_request_dma(struct spi_device *spi)
392 {
393         struct davinci_spi *davinci_spi;
394         struct davinci_spi_dma *davinci_spi_dma;
395         struct davinci_spi_platform_data *pdata;
396         struct device *sdev;
397         int r;
398
399         davinci_spi = spi_master_get_devdata(spi->master);
400         davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
401         pdata = davinci_spi->pdata;
402         sdev = davinci_spi->bitbang.master->dev.parent;
403
404         r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev,
405                                 davinci_spi_dma_rx_callback, spi,
406                                 davinci_spi_dma->eventq);
407         if (r < 0) {
408                 dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n");
409                 return -EAGAIN;
410         }
411         davinci_spi_dma->dma_rx_channel = r;
412         r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev,
413                                 davinci_spi_dma_tx_callback, spi,
414                                 davinci_spi_dma->eventq);
415         if (r < 0) {
416                 edma_free_channel(davinci_spi_dma->dma_rx_channel);
417                 davinci_spi_dma->dma_rx_channel = -1;
418                 dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n");
419                 return -EAGAIN;
420         }
421         davinci_spi_dma->dma_tx_channel = r;
422
423         return 0;
424 }
425
426 /**
427  * davinci_spi_setup - set the default transfer method
428  * @spi: spi device on which the data transfer is to be done
429  *
430  * This function sets the default transfer method.
431  */
432
433 static int davinci_spi_setup(struct spi_device *spi)
434 {
435         int retval;
436         struct davinci_spi *davinci_spi;
437         struct davinci_spi_dma *davinci_spi_dma;
438         struct device *sdev;
439
440         davinci_spi = spi_master_get_devdata(spi->master);
441         sdev = davinci_spi->bitbang.master->dev.parent;
442
443         /* if bits_per_word is zero, default to 8 */
444         if (!spi->bits_per_word)
445                 spi->bits_per_word = 8;
446
447         davinci_spi->slave[spi->chip_select].cmd_to_write = 0;
448
449         if (use_dma && davinci_spi->dma_channels) {
450                 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
451
452                 if ((davinci_spi_dma->dma_rx_channel == -1)
453                                 || (davinci_spi_dma->dma_tx_channel == -1)) {
454                         retval = davinci_spi_request_dma(spi);
455                         if (retval < 0)
456                                 return retval;
457                 }
458         }
459
460         /*
461          * The SPI module on DaVinci and DA8xx operates between
462          * 600 kHz and 50 MHz
463          */
464         if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) {
465                 dev_dbg(sdev, "Operating frequency is not in acceptable "
466                                 "range\n");
467                 return -EINVAL;
468         }
469
470         /*
471          * Set up SPIFMTn register, unique to this chipselect.
472          *
473          * NOTE: we could do all of these with one write.  Also, some
474          * of the "version 2" features are found in chips that don't
475          * support all of them...
476          */
477         if (spi->mode & SPI_LSB_FIRST)
478                 set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
479                                 spi->chip_select);
480         else
481                 clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
482                                 spi->chip_select);
483
484         if (spi->mode & SPI_CPOL)
485                 set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
486                                 spi->chip_select);
487         else
488                 clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
489                                 spi->chip_select);
490
491         if (!(spi->mode & SPI_CPHA))
492                 set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
493                                 spi->chip_select);
494         else
495                 clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
496                                 spi->chip_select);
497
498         /*
499          * Version 1 hardware supports two basic SPI modes:
500          *  - Standard SPI mode uses 4 pins, with chipselect
501          *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
502          *      (distinct from SPI_3WIRE, with just one data wire;
503          *      or similar variants without MOSI or without MISO)
504          *
505          * Version 2 hardware supports an optional handshaking signal,
506          * so it can support two more modes:
507          *  - 5 pin SPI variant is standard SPI plus SPI_READY
508          *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
509          */
510
511         if (davinci_spi->version == SPI_VERSION_2) {
512                 clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK,
513                                 spi->chip_select);
514                 set_fmt_bits(davinci_spi->base,
515                                 (davinci_spi->pdata->wdelay
516                                                 << SPIFMT_WDELAY_SHIFT)
517                                         & SPIFMT_WDELAY_MASK,
518                                 spi->chip_select);
519
520                 if (davinci_spi->pdata->odd_parity)
521                         set_fmt_bits(davinci_spi->base,
522                                         SPIFMT_ODD_PARITY_MASK,
523                                         spi->chip_select);
524                 else
525                         clear_fmt_bits(davinci_spi->base,
526                                         SPIFMT_ODD_PARITY_MASK,
527                                         spi->chip_select);
528
529                 if (davinci_spi->pdata->parity_enable)
530                         set_fmt_bits(davinci_spi->base,
531                                         SPIFMT_PARITYENA_MASK,
532                                         spi->chip_select);
533                 else
534                         clear_fmt_bits(davinci_spi->base,
535                                         SPIFMT_PARITYENA_MASK,
536                                         spi->chip_select);
537
538                 if (davinci_spi->pdata->wait_enable)
539                         set_fmt_bits(davinci_spi->base,
540                                         SPIFMT_WAITENA_MASK,
541                                         spi->chip_select);
542                 else
543                         clear_fmt_bits(davinci_spi->base,
544                                         SPIFMT_WAITENA_MASK,
545                                         spi->chip_select);
546
547                 if (davinci_spi->pdata->timer_disable)
548                         set_fmt_bits(davinci_spi->base,
549                                         SPIFMT_DISTIMER_MASK,
550                                         spi->chip_select);
551                 else
552                         clear_fmt_bits(davinci_spi->base,
553                                         SPIFMT_DISTIMER_MASK,
554                                         spi->chip_select);
555         }
556
557         retval = davinci_spi_setup_transfer(spi, NULL);
558
559         return retval;
560 }
561
562 static void davinci_spi_cleanup(struct spi_device *spi)
563 {
564         struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
565         struct davinci_spi_dma *davinci_spi_dma;
566
569         if (use_dma && davinci_spi->dma_channels) {
570                 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
571
572                 if ((davinci_spi_dma->dma_rx_channel != -1)
573                                 && (davinci_spi_dma->dma_tx_channel != -1)) {
574                         edma_free_channel(davinci_spi_dma->dma_tx_channel);
575                         edma_free_channel(davinci_spi_dma->dma_rx_channel);
576                 }
577         }
578 }
579
580 static int davinci_spi_bufs_prep(struct spi_device *spi,
581                                  struct davinci_spi *davinci_spi)
582 {
583         int op_mode = 0;
584
585         /*
586          * REVISIT  unless devices disagree about SPI_LOOP or
587          * SPI_READY (SPI_NO_CS only allows one device!), this
588          * should not need to be done before each message...
589          * optimize for both flags staying cleared.
590          */
591
592         op_mode = SPIPC0_DIFUN_MASK
593                 | SPIPC0_DOFUN_MASK
594                 | SPIPC0_CLKFUN_MASK;
595         if (!(spi->mode & SPI_NO_CS))
596                 op_mode |= 1 << spi->chip_select;
597         if (spi->mode & SPI_READY)
598                 op_mode |= SPIPC0_SPIENA_MASK;
599
600         iowrite32(op_mode, davinci_spi->base + SPIPC0);
601
602         if (spi->mode & SPI_LOOP)
603                 set_io_bits(davinci_spi->base + SPIGCR1,
604                                 SPIGCR1_LOOPBACK_MASK);
605         else
606                 clear_io_bits(davinci_spi->base + SPIGCR1,
607                                 SPIGCR1_LOOPBACK_MASK);
608
609         return 0;
610 }
611
612 static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
613                                    int int_status)
614 {
615         struct device *sdev = davinci_spi->bitbang.master->dev.parent;
616
617         if (int_status & SPIFLG_TIMEOUT_MASK) {
618                 dev_dbg(sdev, "SPI Time-out Error\n");
619                 return -ETIMEDOUT;
620         }
621         if (int_status & SPIFLG_DESYNC_MASK) {
622                 dev_dbg(sdev, "SPI Desynchronization Error\n");
623                 return -EIO;
624         }
625         if (int_status & SPIFLG_BITERR_MASK) {
626                 dev_dbg(sdev, "SPI Bit error\n");
627                 return -EIO;
628         }
629
630         if (davinci_spi->version == SPI_VERSION_2) {
631                 if (int_status & SPIFLG_DLEN_ERR_MASK) {
632                         dev_dbg(sdev, "SPI Data Length Error\n");
633                         return -EIO;
634                 }
635                 if (int_status & SPIFLG_PARERR_MASK) {
636                         dev_dbg(sdev, "SPI Parity Error\n");
637                         return -EIO;
638                 }
639                 if (int_status & SPIFLG_OVRRUN_MASK) {
640                         dev_dbg(sdev, "SPI Data Overrun error\n");
641                         return -EIO;
642                 }
643                 if (int_status & SPIFLG_TX_INTR_MASK) {
644                         dev_dbg(sdev, "SPI TX intr bit set\n");
645                         return -EIO;
646                 }
647                 if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
648                         dev_dbg(sdev, "SPI Buffer Init Active\n");
649                         return -EBUSY;
650                 }
651         }
652
653         return 0;
654 }
655
656 /**
657  * davinci_spi_bufs_pio - handle a data transfer in PIO mode
658  * @spi: spi device on which the data transfer is to be done
659  * @t: spi transfer in which the transfer info is filled
660  *
661  * This function writes the data to be transferred into the data register
662  * of the SPI controller and then waits until the transfer completes.
664  */
665 static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
666 {
667         struct davinci_spi *davinci_spi;
668         int int_status, count, ret;
669         u8 conv, tmp;
670         u32 tx_data, data1_reg_val;
671         u32 buf_val, flg_val;
672         struct davinci_spi_platform_data *pdata;
673
674         davinci_spi = spi_master_get_devdata(spi->master);
675         pdata = davinci_spi->pdata;
676
677         davinci_spi->tx = t->tx_buf;
678         davinci_spi->rx = t->rx_buf;
679
680         /* convert len to words based on bits_per_word */
681         conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
682         davinci_spi->count = t->len / conv;
683
684         INIT_COMPLETION(davinci_spi->done);
685
686         ret = davinci_spi_bufs_prep(spi, davinci_spi);
687         if (ret)
688                 return ret;
689
690         /* Enable SPI */
691         set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
692
693         iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
694                         (pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
695                         davinci_spi->base + SPIDELAY);
696
697         count = davinci_spi->count;
698         data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;
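        /*
         * The CSNR field of SPIDAT1 takes an active-low chip select
         * pattern: keep every bit high except the one for this device so
         * only its chip select is asserted.
         */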
699         tmp = ~(0x1 << spi->chip_select);
700
701         clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);
702
703         data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;
704
705         while ((ioread32(davinci_spi->base + SPIBUF)
706                                 & SPIBUF_RXEMPTY_MASK) == 0)
707                 cpu_relax();
708
709         /* Transmit if a TX buffer was supplied; otherwise receive only */
710         if (t->tx_buf) {
711                 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
712
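                /*
                 * PIO transmit loop: write one word whenever the TX buffer
                 * has room, then read back the word that was clocked in
                 * (the bus is full duplex) before moving on.
                 */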
713                 while (1) {
714                         tx_data = davinci_spi->get_tx(davinci_spi);
715
716                         data1_reg_val &= ~(0xFFFF);
717                         data1_reg_val |= (0xFFFF & tx_data);
718
719                         buf_val = ioread32(davinci_spi->base + SPIBUF);
720                         if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
721                                 iowrite32(data1_reg_val,
722                                                 davinci_spi->base + SPIDAT1);
723
724                                 count--;
725                         }
726                         while (ioread32(davinci_spi->base + SPIBUF)
727                                         & SPIBUF_RXEMPTY_MASK)
728                                 cpu_relax();
729
730                         /* read back the received word */
731                         if (t->rx_buf) {
732                                 buf_val = ioread32(davinci_spi->base + SPIBUF);
733                                 davinci_spi->get_rx(buf_val, davinci_spi);
734                         }
735                         if (count <= 0)
736                                 break;
737                 }
738         } else {
739                 if (pdata->poll_mode) {
740                         while (1) {
741                                 /* keeps the serial clock going */
742                                 if ((ioread32(davinci_spi->base + SPIBUF)
743                                                 & SPIBUF_TXFULL_MASK) == 0)
744                                         iowrite32(data1_reg_val,
745                                                 davinci_spi->base + SPIDAT1);
746
747                                 while (ioread32(davinci_spi->base + SPIBUF) &
748                                                 SPIBUF_RXEMPTY_MASK)
749                                         cpu_relax();
750
751                                 flg_val = ioread32(davinci_spi->base + SPIFLG);
752                                 buf_val = ioread32(davinci_spi->base + SPIBUF);
753
754                                 davinci_spi->get_rx(buf_val, davinci_spi);
755
756                                 count--;
757                                 if (count <= 0)
758                                         break;
759                         }
760                 } else {        /* Receive in Interrupt mode */
761                         int i;
762
763                         for (i = 0; i < davinci_spi->count; i++) {
764                                 set_io_bits(davinci_spi->base + SPIINT,
765                                                 SPIINT_BITERR_INTR
766                                                 | SPIINT_OVRRUN_INTR
767                                                 | SPIINT_RX_INTR);
768
769                                 iowrite32(data1_reg_val,
770                                                 davinci_spi->base + SPIDAT1);
771
772                                 while (ioread32(davinci_spi->base + SPIINT) &
773                                                 SPIINT_RX_INTR)
774                                         cpu_relax();
775                         }
776                         iowrite32((data1_reg_val & 0x0ffcffff),
777                                         davinci_spi->base + SPIDAT1);
778                 }
779         }
780
781         /*
782          * Check for bit error, desync error, parity error, timeout error and
783          * receive overflow errors
784          */
785         int_status = ioread32(davinci_spi->base + SPIFLG);
786
787         ret = davinci_spi_check_error(davinci_spi, int_status);
788         if (ret != 0)
789                 return ret;
790
791         /* SPI Framework maintains the count only in bytes so convert back */
792         davinci_spi->count *= conv;
793
794         return t->len;
795 }
796
801 static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
802 {
803         struct davinci_spi *davinci_spi;
804         int int_status = 0;
805         int count, temp_count;
806         u8 conv = 1;
807         u8 tmp;
808         u32 data1_reg_val;
809         struct davinci_spi_dma *davinci_spi_dma;
810         int word_len, data_type, ret;
811         unsigned long tx_reg, rx_reg;
812         struct davinci_spi_platform_data *pdata;
813         struct device *sdev;
814
815         davinci_spi = spi_master_get_devdata(spi->master);
816         pdata = davinci_spi->pdata;
817         sdev = davinci_spi->bitbang.master->dev.parent;
818
819         davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
820
821         tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
822         rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;
823
824         davinci_spi->tx = t->tx_buf;
825         davinci_spi->rx = t->rx_buf;
826
827         /* convert len to words based on bits_per_word */
828         conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
829         davinci_spi->count = t->len / conv;
830
831         INIT_COMPLETION(davinci_spi->done);
832
833         init_completion(&davinci_spi_dma->dma_rx_completion);
834         init_completion(&davinci_spi_dma->dma_tx_completion);
835
836         word_len = conv * 8;
837
838         if (word_len <= 8)
839                 data_type = DAVINCI_DMA_DATA_TYPE_S8;
840         else if (word_len <= 16)
841                 data_type = DAVINCI_DMA_DATA_TYPE_S16;
842         else if (word_len <= 32)
843                 data_type = DAVINCI_DMA_DATA_TYPE_S32;
844         else
845                 return -EINVAL;
846
847         ret = davinci_spi_bufs_prep(spi, davinci_spi);
848         if (ret)
849                 return ret;
850
851         /* Program the C2T and T2C chip select delays if required */
852         iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
853                         (pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
854                         davinci_spi->base + SPIDELAY);
855
856         count = davinci_spi->count;     /* the number of elements */
857         data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;
858
859         /* CS default = 0xFF */
860         tmp = ~(0x1 << spi->chip_select);
861
862         clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);
863
864         data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;
865
866         /* disable all interrupts for dma transfers */
867         clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
868         /* Disable SPI to write configuration bits in SPIDAT */
869         clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
870         iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
871         /* Enable SPI */
872         set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
873
874         while ((ioread32(davinci_spi->base + SPIBUF)
875                                 & SPIBUF_RXEMPTY_MASK) == 0)
876                 cpu_relax();
877
879         if (t->tx_buf) {
880                 t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
881                                 DMA_TO_DEVICE);
882                 if (dma_mapping_error(&spi->dev, t->tx_dma)) {
883                         dev_dbg(sdev, "Unable to DMA map a %d bytes"
884                                 " TX buffer\n", count);
885                         return -ENOMEM;
886                 }
887                 temp_count = count;
888         } else {
889                 /* We need TX clocking for RX transaction */
890                 t->tx_dma = dma_map_single(&spi->dev,
891                                 (void *)davinci_spi->tmp_buf, count + 1,
892                                 DMA_TO_DEVICE);
893                 if (dma_mapping_error(&spi->dev, t->tx_dma)) {
894                         dev_dbg(sdev, "Unable to DMA map a %d bytes"
895                                 " TX tmp buffer\n", count);
896                         return -ENOMEM;
897                 }
898                 temp_count = count + 1;
899         }
900
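        /*
         * TX EDMA setup: temp_count elements of data_type bytes each are
         * moved from the incrementing source buffer to the fixed SPIDAT1
         * register.
         */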
901         edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
902                                         data_type, temp_count, 1, 0, ASYNC);
903         edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT);
904         edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT);
905         edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
906         edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);
907
908         if (t->rx_buf) {
909                 /* initiate transaction */
910                 iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
911
912                 t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
913                                 DMA_FROM_DEVICE);
914                 if (dma_mapping_error(&spi->dev, t->rx_dma)) {
915                         dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
916                                         count);
917                         if (t->tx_buf != NULL)
918                                 dma_unmap_single(&spi->dev, t->tx_dma,
919                                                  count, DMA_TO_DEVICE);
920                         return -ENOMEM;
921                 }
922                 edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
923                                 data_type, count, 1, 0, ASYNC);
924                 edma_set_src(davinci_spi_dma->dma_rx_channel,
925                                 rx_reg, INCR, W8BIT);
926                 edma_set_dest(davinci_spi_dma->dma_rx_channel,
927                                 t->rx_dma, INCR, W8BIT);
928                 edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
929                 edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
930                                 data_type, 0);
931         }
932
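        /*
         * Start the EDMA channels first, then enable the SPI DMA request
         * line so the controller can begin pacing the transfer.
         */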
933         if ((t->tx_buf) || (t->rx_buf))
934                 edma_start(davinci_spi_dma->dma_tx_channel);
935
936         if (t->rx_buf)
937                 edma_start(davinci_spi_dma->dma_rx_channel);
938
939         if ((t->rx_buf) || (t->tx_buf))
940                 davinci_spi_set_dma_req(spi, 1);
941
942         if (t->tx_buf)
943                 wait_for_completion_interruptible(
944                                 &davinci_spi_dma->dma_tx_completion);
945
946         if (t->rx_buf)
947                 wait_for_completion_interruptible(
948                                 &davinci_spi_dma->dma_rx_completion);
949
950         dma_unmap_single(&spi->dev, t->tx_dma, temp_count, DMA_TO_DEVICE);
951
952         if (t->rx_buf)
953                 dma_unmap_single(&spi->dev, t->rx_dma, count, DMA_FROM_DEVICE);
954
955         /*
956          * Check for bit error, desync error, parity error, timeout error and
957          * receive overflow errors
958          */
959         int_status = ioread32(davinci_spi->base + SPIFLG);
960
961         ret = davinci_spi_check_error(davinci_spi, int_status);
962         if (ret != 0)
963                 return ret;
964
965         /* SPI Framework maintains the count only in bytes so convert back */
966         davinci_spi->count *= conv;
967
968         return t->len;
969 }
970
971 /**
972  * davinci_spi_irq - IRQ handler for DaVinci SPI
973  * @irq: IRQ number for this SPI Master
974  * @context_data: structure for SPI Master controller davinci_spi
975  */
976 static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
977 {
978         struct davinci_spi *davinci_spi = context_data;
979         u32 int_status, rx_data = 0;
980         irqreturn_t ret = IRQ_NONE;
981
982         int_status = ioread32(davinci_spi->base + SPIFLG);
983
984         while ((int_status & SPIFLG_RX_INTR_MASK)) {
985                 if (likely(int_status & SPIFLG_RX_INTR_MASK)) {
986                         ret = IRQ_HANDLED;
987
988                         rx_data = ioread32(davinci_spi->base + SPIBUF);
989                         davinci_spi->get_rx(rx_data, davinci_spi);
990
991                         /* Disable Receive Interrupt */
992                         iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR),
993                                         davinci_spi->base + SPIINT);
994                 } else
995                         (void)davinci_spi_check_error(davinci_spi, int_status);
996
997                 int_status = ioread32(davinci_spi->base + SPIFLG);
998         }
999
1000         return ret;
1001 }
1002
1003 /**
1004  * davinci_spi_probe - probe function for SPI Master Controller
1005  * @pdev: platform_device structure which contains platform specific data
1006  */
1007 static int davinci_spi_probe(struct platform_device *pdev)
1008 {
1009         struct spi_master *master;
1010         struct davinci_spi *davinci_spi;
1011         struct davinci_spi_platform_data *pdata;
1012         struct resource *r, *mem;
1013         resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
1014         resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
1015         resource_size_t dma_eventq = SPI_NO_RESOURCE;
1016         int i = 0, ret = 0;
1017
1018         pdata = pdev->dev.platform_data;
1019         if (pdata == NULL) {
1020                 ret = -ENODEV;
1021                 goto err;
1022         }
1023
1024         master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
1025         if (master == NULL) {
1026                 ret = -ENOMEM;
1027                 goto err;
1028         }
1029
1030         dev_set_drvdata(&pdev->dev, master);
1031
1032         davinci_spi = spi_master_get_devdata(master);
1033         if (davinci_spi == NULL) {
1034                 ret = -ENOENT;
1035                 goto free_master;
1036         }
1037
1038         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1039         if (r == NULL) {
1040                 ret = -ENOENT;
1041                 goto free_master;
1042         }
1043
1044         davinci_spi->pbase = r->start;
1045         davinci_spi->region_size = resource_size(r);
1046         davinci_spi->pdata = pdata;
1047
1048         mem = request_mem_region(r->start, davinci_spi->region_size,
1049                                         pdev->name);
1050         if (mem == NULL) {
1051                 ret = -EBUSY;
1052                 goto free_master;
1053         }
1054
1055         davinci_spi->base = ioremap(r->start, davinci_spi->region_size);
1057         if (davinci_spi->base == NULL) {
1058                 ret = -ENOMEM;
1059                 goto release_region;
1060         }
1061
1062         davinci_spi->irq = platform_get_irq(pdev, 0);
1063         if (davinci_spi->irq <= 0) {
1064                 ret = -EINVAL;
1065                 goto unmap_io;
1066         }
1067
1068         ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED,
1069                           dev_name(&pdev->dev), davinci_spi);
1070         if (ret)
1071                 goto unmap_io;
1072
1073         /* Allocate tmp_buf, used as a dummy TX source for receive-only DMA */
1074         davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL);
1075         if (davinci_spi->tmp_buf == NULL) {
1076                 ret = -ENOMEM;
1077                 goto irq_free;
1078         }
1079
1080         davinci_spi->bitbang.master = spi_master_get(master);
1081         if (davinci_spi->bitbang.master == NULL) {
1082                 ret = -ENODEV;
1083                 goto free_tmp_buf;
1084         }
1085
1086         davinci_spi->clk = clk_get(&pdev->dev, NULL);
1087         if (IS_ERR(davinci_spi->clk)) {
1088                 ret = -ENODEV;
1089                 goto put_master;
1090         }
1091         clk_enable(davinci_spi->clk);
1092
1094         master->bus_num = pdev->id;
1095         master->num_chipselect = pdata->num_chipselect;
1096         master->setup = davinci_spi_setup;
1097         master->cleanup = davinci_spi_cleanup;
1098
1099         davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
1100         davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;
1101
1102         davinci_spi->version = pdata->version;
1103         use_dma = pdata->use_dma;
1104
1105         davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
1106         if (davinci_spi->version == SPI_VERSION_2)
1107                 davinci_spi->bitbang.flags |= SPI_READY;
1108
1109         if (use_dma) {
1110                 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1111                 if (r)
1112                         dma_rx_chan = r->start;
1113                 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1114                 if (r)
1115                         dma_tx_chan = r->start;
1116                 r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
1117                 if (r)
1118                         dma_eventq = r->start;
1119         }
1120
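        /* Fall back to PIO if DMA was not requested or any DMA resource is missing */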
1121         if (!use_dma ||
1122             dma_rx_chan == SPI_NO_RESOURCE ||
1123             dma_tx_chan == SPI_NO_RESOURCE ||
1124             dma_eventq  == SPI_NO_RESOURCE) {
1125                 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
1126                 use_dma = 0;
1127         } else {
1128                 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;
1129                 davinci_spi->dma_channels = kzalloc(master->num_chipselect
1130                                 * sizeof(struct davinci_spi_dma), GFP_KERNEL);
1131                 if (davinci_spi->dma_channels == NULL) {
1132                         ret = -ENOMEM;
1133                         goto free_clk;
1134                 }
1135
1136                 for (i = 0; i < master->num_chipselect; i++) {
1137                         davinci_spi->dma_channels[i].dma_rx_channel = -1;
1138                         davinci_spi->dma_channels[i].dma_rx_sync_dev =
1139                                 dma_rx_chan;
1140                         davinci_spi->dma_channels[i].dma_tx_channel = -1;
1141                         davinci_spi->dma_channels[i].dma_tx_sync_dev =
1142                                 dma_tx_chan;
1143                         davinci_spi->dma_channels[i].eventq = dma_eventq;
1144                 }
1145                 dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
1146                                 "Using RX channel = %d, TX channel = %d and "
1147                                 "event queue = %d\n", dma_rx_chan, dma_tx_chan,
1148                                 dma_eventq);
1149         }
1150
1151         davinci_spi->get_rx = davinci_spi_rx_buf_u8;
1152         davinci_spi->get_tx = davinci_spi_tx_buf_u8;
1153
1154         init_completion(&davinci_spi->done);
1155
1156         /* Reset the SPI module: hold it in reset, then release it */
1157         iowrite32(0, davinci_spi->base + SPIGCR0);
1158         udelay(100);
1159         iowrite32(1, davinci_spi->base + SPIGCR0);
1160
1161         /* Clock internal */
1162         if (davinci_spi->pdata->clk_internal)
1163                 set_io_bits(davinci_spi->base + SPIGCR1,
1164                                 SPIGCR1_CLKMOD_MASK);
1165         else
1166                 clear_io_bits(davinci_spi->base + SPIGCR1,
1167                                 SPIGCR1_CLKMOD_MASK);
1168
1169         /* master mode default */
1170         set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
1171
1172         if (davinci_spi->pdata->intr_level)
1173                 iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL);
1174         else
1175                 iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL);
1176
1177         ret = spi_bitbang_start(&davinci_spi->bitbang);
1178         if (ret)
1179                 goto free_clk;
1180
1181         dev_info(&pdev->dev, "Controller at 0x%p\n", davinci_spi->base);
1182
1183         if (!pdata->poll_mode)
1184                 dev_info(&pdev->dev, "Operating in interrupt mode"
1185                         " using IRQ %d\n", davinci_spi->irq);
1186
1187         return ret;
1188
1189 free_clk:
1190         clk_disable(davinci_spi->clk);
1191         clk_put(davinci_spi->clk);
1192 put_master:
1193         spi_master_put(master);
1194 free_tmp_buf:
1195         kfree(davinci_spi->tmp_buf);
1196 irq_free:
1197         free_irq(davinci_spi->irq, davinci_spi);
1198 unmap_io:
1199         iounmap(davinci_spi->base);
1200 release_region:
1201         release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1202 free_master:
1203         kfree(master);
1204 err:
1205         return ret;
1206 }
1207
1208 /**
1209  * davinci_spi_remove - remove function for SPI Master Controller
1210  * @pdev: platform_device structure which contains platform specific data
1211  *
1212  * This function reverses the actions of the davinci_spi_probe function.
1213  * It will free the IRQ and SPI controller's memory region.
1214  * It will also call spi_bitbang_stop to destroy the work queue which was
1215  * created by spi_bitbang_start.
1216  */
1217 static int __exit davinci_spi_remove(struct platform_device *pdev)
1218 {
1219         struct davinci_spi *davinci_spi;
1220         struct spi_master *master;
1221
1222         master = dev_get_drvdata(&pdev->dev);
1223         davinci_spi = spi_master_get_devdata(master);
1224
1225         spi_bitbang_stop(&davinci_spi->bitbang);
1226
1227         clk_disable(davinci_spi->clk);
1228         clk_put(davinci_spi->clk);
1229         spi_master_put(master);
1230         kfree(davinci_spi->tmp_buf);
1231         free_irq(davinci_spi->irq, davinci_spi);
1232         iounmap(davinci_spi->base);
1233         release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1234
1235         return 0;
1236 }
1237
1238 static struct platform_driver davinci_spi_driver = {
1239         .driver.name = "spi_davinci",
1240         .remove = __exit_p(davinci_spi_remove),
1241 };
1242
1243 static int __init davinci_spi_init(void)
1244 {
1245         return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe);
1246 }
1247 module_init(davinci_spi_init);
1248
1249 static void __exit davinci_spi_exit(void)
1250 {
1251         platform_driver_unregister(&davinci_spi_driver);
1252 }
1253 module_exit(davinci_spi_exit);
1254
1255 MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
1256 MODULE_LICENSE("GPL");