drivers/spi/davinci_spi.c
/*
 * Copyright (C) 2009 Texas Instruments.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>

#include <mach/spi.h>
#include <mach/edma.h>

#define SPI_NO_RESOURCE         ((resource_size_t)-1)

#define SPI_MAX_CHIPSELECT      2

#define CS_DEFAULT      0xFF

#define SPI_BUFSIZ      (SMP_CACHE_BYTES + 1)
#define DAVINCI_DMA_DATA_TYPE_S8        0x01
#define DAVINCI_DMA_DATA_TYPE_S16       0x02
#define DAVINCI_DMA_DATA_TYPE_S32       0x04

#define SPIFMT_PHASE_MASK       BIT(16)
#define SPIFMT_POLARITY_MASK    BIT(17)
#define SPIFMT_DISTIMER_MASK    BIT(18)
#define SPIFMT_SHIFTDIR_MASK    BIT(20)
#define SPIFMT_WAITENA_MASK     BIT(21)
#define SPIFMT_PARITYENA_MASK   BIT(22)
#define SPIFMT_ODD_PARITY_MASK  BIT(23)
#define SPIFMT_WDELAY_MASK      0x3f000000u
#define SPIFMT_WDELAY_SHIFT     24
#define SPIFMT_CHARLEN_MASK     0x0000001Fu

/* SPIGCR1 */
#define SPIGCR1_SPIENA_MASK     0x01000000u

/* SPIPC0 */
#define SPIPC0_DIFUN_MASK       BIT(11)         /* MISO */
#define SPIPC0_DOFUN_MASK       BIT(10)         /* MOSI */
#define SPIPC0_CLKFUN_MASK      BIT(9)          /* CLK */
#define SPIPC0_SPIENA_MASK      BIT(8)          /* nREADY */
#define SPIPC0_EN1FUN_MASK      BIT(1)
#define SPIPC0_EN0FUN_MASK      BIT(0)

#define SPIINT_MASKALL          0x0101035F
#define SPI_INTLVL_1            0x000001FFu
#define SPI_INTLVL_0            0x00000000u

/* SPIDAT1 */
#define SPIDAT1_CSHOLD_SHIFT    28
#define SPIDAT1_CSNR_SHIFT      16
#define SPIGCR1_CLKMOD_MASK     BIT(1)
#define SPIGCR1_MASTER_MASK     BIT(0)
#define SPIGCR1_LOOPBACK_MASK   BIT(16)

/* SPIBUF */
#define SPIBUF_TXFULL_MASK      BIT(29)
#define SPIBUF_RXEMPTY_MASK     BIT(31)

/* Error Masks */
#define SPIFLG_DLEN_ERR_MASK            BIT(0)
#define SPIFLG_TIMEOUT_MASK             BIT(1)
#define SPIFLG_PARERR_MASK              BIT(2)
#define SPIFLG_DESYNC_MASK              BIT(3)
#define SPIFLG_BITERR_MASK              BIT(4)
#define SPIFLG_OVRRUN_MASK              BIT(6)
#define SPIFLG_RX_INTR_MASK             BIT(8)
#define SPIFLG_TX_INTR_MASK             BIT(9)
#define SPIFLG_BUF_INIT_ACTIVE_MASK     BIT(24)
#define SPIFLG_MASK                     (SPIFLG_DLEN_ERR_MASK \
                                | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
                                | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
                                | SPIFLG_OVRRUN_MASK | SPIFLG_RX_INTR_MASK \
                                | SPIFLG_TX_INTR_MASK \
                                | SPIFLG_BUF_INIT_ACTIVE_MASK)

#define SPIINT_DLEN_ERR_INTR    BIT(0)
#define SPIINT_TIMEOUT_INTR     BIT(1)
#define SPIINT_PARERR_INTR      BIT(2)
#define SPIINT_DESYNC_INTR      BIT(3)
#define SPIINT_BITERR_INTR      BIT(4)
#define SPIINT_OVRRUN_INTR      BIT(6)
#define SPIINT_RX_INTR          BIT(8)
#define SPIINT_TX_INTR          BIT(9)
#define SPIINT_DMA_REQ_EN       BIT(16)
#define SPIINT_ENABLE_HIGHZ     BIT(24)

#define SPI_T2CDELAY_SHIFT      16
#define SPI_C2TDELAY_SHIFT      24

/* SPI Controller registers */
#define SPIGCR0         0x00
#define SPIGCR1         0x04
#define SPIINT          0x08
#define SPILVL          0x0c
#define SPIFLG          0x10
#define SPIPC0          0x14
#define SPIPC1          0x18
#define SPIPC2          0x1c
#define SPIPC3          0x20
#define SPIPC4          0x24
#define SPIPC5          0x28
#define SPIPC6          0x2c
#define SPIPC7          0x30
#define SPIPC8          0x34
#define SPIDAT0         0x38
#define SPIDAT1         0x3c
#define SPIBUF          0x40
#define SPIEMU          0x44
#define SPIDELAY        0x48
#define SPIDEF          0x4c
#define SPIFMT0         0x50
#define SPIFMT1         0x54
#define SPIFMT2         0x58
#define SPIFMT3         0x5c
#define TGINTVEC0       0x60
#define TGINTVEC1       0x64

struct davinci_spi_slave {
        u32     cmd_to_write;
        u32     clk_ctrl_to_write;
        u32     bytes_per_word;
        u8      active_cs;
};

/* We have 2 DMA channels per CS, one for RX and one for TX */
struct davinci_spi_dma {
        int                     dma_tx_channel;
        int                     dma_rx_channel;
        int                     dma_tx_sync_dev;
        int                     dma_rx_sync_dev;
        enum dma_event_q        eventq;

        struct completion       dma_tx_completion;
        struct completion       dma_rx_completion;
};

/* SPI Controller driver's private data. */
struct davinci_spi {
        struct spi_bitbang      bitbang;
        struct clk              *clk;

        u8                      version;
        resource_size_t         pbase;
        void __iomem            *base;
        size_t                  region_size;
        u32                     irq;
        struct completion       done;

        const void              *tx;
        void                    *rx;
        u8                      *tmp_buf;
        int                     count;
        struct davinci_spi_dma  *dma_channels;
        struct davinci_spi_platform_data *pdata;

        void                    (*get_rx)(u32 rx_data, struct davinci_spi *);
        u32                     (*get_tx)(struct davinci_spi *);

        struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT];
};

static unsigned use_dma;

static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi)
{
        u8 *rx = davinci_spi->rx;

        *rx++ = (u8)data;
        davinci_spi->rx = rx;
}

static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi)
{
        u16 *rx = davinci_spi->rx;

        *rx++ = (u16)data;
        davinci_spi->rx = rx;
}

static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi)
{
        u32 data;
        const u8 *tx = davinci_spi->tx;

        data = *tx++;
        davinci_spi->tx = tx;
        return data;
}

static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi)
{
        u32 data;
        const u16 *tx = davinci_spi->tx;

        data = *tx++;
        davinci_spi->tx = tx;
        return data;
}

static inline void set_io_bits(void __iomem *addr, u32 bits)
{
        u32 v = ioread32(addr);

        v |= bits;
        iowrite32(v, addr);
}

static inline void clear_io_bits(void __iomem *addr, u32 bits)
{
        u32 v = ioread32(addr);

        v &= ~bits;
        iowrite32(v, addr);
}

static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
{
        set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
}

static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
{
        clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
}

static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable)
{
        struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);

        if (enable)
                set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
        else
                clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
}

/*
 * Interface to control the chip select signal
 */
static void davinci_spi_chipselect(struct spi_device *spi, int value)
{
        struct davinci_spi *davinci_spi;
        struct davinci_spi_platform_data *pdata;
        u32 data1_reg_val = 0;

        davinci_spi = spi_master_get_devdata(spi->master);
        pdata = davinci_spi->pdata;

        /*
         * Board specific chip select logic decides the polarity and cs
         * line for the controller
         */
        if (value == BITBANG_CS_INACTIVE) {
                set_io_bits(davinci_spi->base + SPIDEF, CS_DEFAULT);

                data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT;
                iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);

                while ((ioread32(davinci_spi->base + SPIBUF)
                                        & SPIBUF_RXEMPTY_MASK) == 0)
                        cpu_relax();
        }
}

/**
 * davinci_spi_setup_transfer - determine the data transfer method
 * @spi: spi device on which data transfer is to be done
 * @t: spi transfer in which the transfer info is filled
 *
 * This function determines the data transfer method (8- or 16-bit transfer).
 * It also sets the SPI clock control register according to the
 * SPI slave device's frequency.
 */
static int davinci_spi_setup_transfer(struct spi_device *spi,
                struct spi_transfer *t)
{
        struct davinci_spi *davinci_spi;
        struct davinci_spi_platform_data *pdata;
        u8 bits_per_word = 0;
        u32 hz = 0, prescale;

        davinci_spi = spi_master_get_devdata(spi->master);
        pdata = davinci_spi->pdata;

        if (t) {
                bits_per_word = t->bits_per_word;
                hz = t->speed_hz;
        }

        /* if bits_per_word is not set, use the spi_device default */
        if (!bits_per_word)
                bits_per_word = spi->bits_per_word;

        /*
         * Assign function pointer to appropriate transfer method
         * (8-bit or 16-bit transfer)
         */
        if (bits_per_word <= 8 && bits_per_word >= 2) {
                davinci_spi->get_rx = davinci_spi_rx_buf_u8;
                davinci_spi->get_tx = davinci_spi_tx_buf_u8;
                davinci_spi->slave[spi->chip_select].bytes_per_word = 1;
        } else if (bits_per_word <= 16 && bits_per_word >= 2) {
                davinci_spi->get_rx = davinci_spi_rx_buf_u16;
                davinci_spi->get_tx = davinci_spi_tx_buf_u16;
                davinci_spi->slave[spi->chip_select].bytes_per_word = 2;
        } else
                return -EINVAL;

        if (!hz)
                hz = spi->max_speed_hz;

        clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK,
                        spi->chip_select);
        set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f,
                        spi->chip_select);

        prescale = ((clk_get_rate(davinci_spi->clk) / hz) - 1) & 0xff;

        clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select);
        set_fmt_bits(davinci_spi->base, prescale << 8, spi->chip_select);

        return 0;
}
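
/*
 * Example of the prescaler math above (assuming, purely for illustration,
 * a 150 MHz SPI module clock; the real rate comes from clk_get_rate()):
 *
 *   requested 10 MHz:  prescale = (150000000 / 10000000) - 1 = 14
 *                      actual SCLK = 150000000 / (14 + 1) = 10 MHz
 *   requested 4 MHz:   prescale = (150000000 / 4000000) - 1 = 36
 *                      actual SCLK = 150000000 / (36 + 1) ~= 4.05 MHz
 *
 * Because the division truncates, the programmed clock can end up slightly
 * above the requested rate, and because the result is masked to 8 bits,
 * requests far below clk/256 wrap rather than being clamped.
 */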

static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data)
{
        struct spi_device *spi = (struct spi_device *)data;
        struct davinci_spi *davinci_spi;
        struct davinci_spi_dma *davinci_spi_dma;
        struct davinci_spi_platform_data *pdata;

        davinci_spi = spi_master_get_devdata(spi->master);
        davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
        pdata = davinci_spi->pdata;

        if (ch_status == DMA_COMPLETE)
                edma_stop(davinci_spi_dma->dma_rx_channel);
        else
                edma_clean_channel(davinci_spi_dma->dma_rx_channel);

        complete(&davinci_spi_dma->dma_rx_completion);
        /* We must disable the DMA RX request */
        davinci_spi_set_dma_req(spi, 0);
}

static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data)
{
        struct spi_device *spi = (struct spi_device *)data;
        struct davinci_spi *davinci_spi;
        struct davinci_spi_dma *davinci_spi_dma;
        struct davinci_spi_platform_data *pdata;

        davinci_spi = spi_master_get_devdata(spi->master);
        davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
        pdata = davinci_spi->pdata;

        if (ch_status == DMA_COMPLETE)
                edma_stop(davinci_spi_dma->dma_tx_channel);
        else
                edma_clean_channel(davinci_spi_dma->dma_tx_channel);

        complete(&davinci_spi_dma->dma_tx_completion);
        /* We must disable the DMA TX request */
        davinci_spi_set_dma_req(spi, 0);
}

static int davinci_spi_request_dma(struct spi_device *spi)
{
        struct davinci_spi *davinci_spi;
        struct davinci_spi_dma *davinci_spi_dma;
        struct davinci_spi_platform_data *pdata;
        struct device *sdev;
        int r;

        davinci_spi = spi_master_get_devdata(spi->master);
        davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
        pdata = davinci_spi->pdata;
        sdev = davinci_spi->bitbang.master->dev.parent;

        r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev,
                                davinci_spi_dma_rx_callback, spi,
                                davinci_spi_dma->eventq);
        if (r < 0) {
                dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n");
                return -EAGAIN;
        }
        davinci_spi_dma->dma_rx_channel = r;
        r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev,
                                davinci_spi_dma_tx_callback, spi,
                                davinci_spi_dma->eventq);
        if (r < 0) {
                edma_free_channel(davinci_spi_dma->dma_rx_channel);
                davinci_spi_dma->dma_rx_channel = -1;
                dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n");
                return -EAGAIN;
        }
        davinci_spi_dma->dma_tx_channel = r;

        return 0;
}

/**
 * davinci_spi_setup - set up the default transfer method
 * @spi: spi device on which data transfer is to be done
 *
 * This function sets the default transfer method.
 */
static int davinci_spi_setup(struct spi_device *spi)
{
        int retval;
        struct davinci_spi *davinci_spi;
        struct davinci_spi_dma *davinci_spi_dma;
        struct device *sdev;

        davinci_spi = spi_master_get_devdata(spi->master);
        sdev = davinci_spi->bitbang.master->dev.parent;

        /* if the bits per word length is zero, default to 8 */
        if (!spi->bits_per_word)
                spi->bits_per_word = 8;

        davinci_spi->slave[spi->chip_select].cmd_to_write = 0;

        if (use_dma && davinci_spi->dma_channels) {
                davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

                if ((davinci_spi_dma->dma_rx_channel == -1)
                                || (davinci_spi_dma->dma_tx_channel == -1)) {
                        retval = davinci_spi_request_dma(spi);
                        if (retval < 0)
                                return retval;
                }
        }

        /*
         * SPI in DaVinci and DA8xx operate between
         * 600 KHz and 50 MHz
         */
        if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) {
                dev_dbg(sdev, "Operating frequency is not in acceptable "
                                "range\n");
                return -EINVAL;
        }

        /*
         * Set up SPIFMTn register, unique to this chipselect.
         *
         * NOTE: we could do all of these with one write.  Also, some
         * of the "version 2" features are found in chips that don't
         * support all of them...
         */
        if (spi->mode & SPI_LSB_FIRST)
                set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
                                spi->chip_select);
        else
                clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
                                spi->chip_select);

        if (spi->mode & SPI_CPOL)
                set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
                                spi->chip_select);
        else
                clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
                                spi->chip_select);

        if (!(spi->mode & SPI_CPHA))
                set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
                                spi->chip_select);
        else
                clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
                                spi->chip_select);

        /*
         * Version 1 hardware supports two basic SPI modes:
         *  - Standard SPI mode uses 4 pins, with chipselect
         *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
         *      (distinct from SPI_3WIRE, with just one data wire;
         *      or similar variants without MOSI or without MISO)
         *
         * Version 2 hardware supports an optional handshaking signal,
         * so it can support two more modes:
         *  - 5 pin SPI variant is standard SPI plus SPI_READY
         *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
         */

        if (davinci_spi->version == SPI_VERSION_2) {
                clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK,
                                spi->chip_select);
                set_fmt_bits(davinci_spi->base,
                                (davinci_spi->pdata->wdelay
                                                << SPIFMT_WDELAY_SHIFT)
                                        & SPIFMT_WDELAY_MASK,
                                spi->chip_select);

                if (davinci_spi->pdata->odd_parity)
                        set_fmt_bits(davinci_spi->base,
                                        SPIFMT_ODD_PARITY_MASK,
                                        spi->chip_select);
                else
                        clear_fmt_bits(davinci_spi->base,
                                        SPIFMT_ODD_PARITY_MASK,
                                        spi->chip_select);

                if (davinci_spi->pdata->parity_enable)
                        set_fmt_bits(davinci_spi->base,
                                        SPIFMT_PARITYENA_MASK,
                                        spi->chip_select);
                else
                        clear_fmt_bits(davinci_spi->base,
                                        SPIFMT_PARITYENA_MASK,
                                        spi->chip_select);

                if (davinci_spi->pdata->wait_enable)
                        set_fmt_bits(davinci_spi->base,
                                        SPIFMT_WAITENA_MASK,
                                        spi->chip_select);
                else
                        clear_fmt_bits(davinci_spi->base,
                                        SPIFMT_WAITENA_MASK,
                                        spi->chip_select);

                if (davinci_spi->pdata->timer_disable)
                        set_fmt_bits(davinci_spi->base,
                                        SPIFMT_DISTIMER_MASK,
                                        spi->chip_select);
                else
                        clear_fmt_bits(davinci_spi->base,
                                        SPIFMT_DISTIMER_MASK,
                                        spi->chip_select);
        }

        retval = davinci_spi_setup_transfer(spi, NULL);

        return retval;
}
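
/*
 * How the mode bits handled above normally reach this driver: a board file
 * registers an spi_board_info entry whose .mode flags (SPI_CPOL, SPI_CPHA,
 * SPI_LSB_FIRST, ...) end up in spi->mode before davinci_spi_setup() runs.
 * A minimal, illustrative sketch (the device name and clock rate are
 * made up for the example):
 *
 *     static struct spi_board_info board_spi_info[] __initdata = {
 *             {
 *                     .modalias       = "some_spi_device",
 *                     .max_speed_hz   = 10000000,   // within 600 kHz..50 MHz
 *                     .bus_num        = 0,
 *                     .chip_select    = 0,
 *                     .mode           = SPI_MODE_3, // CPOL = 1, CPHA = 1
 *             },
 *     };
 *
 *     spi_register_board_info(board_spi_info, ARRAY_SIZE(board_spi_info));
 */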

static void davinci_spi_cleanup(struct spi_device *spi)
{
        struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
        struct davinci_spi_dma *davinci_spi_dma;

        if (use_dma && davinci_spi->dma_channels) {
                davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

                if ((davinci_spi_dma->dma_rx_channel != -1)
                                && (davinci_spi_dma->dma_tx_channel != -1)) {
                        edma_free_channel(davinci_spi_dma->dma_tx_channel);
                        edma_free_channel(davinci_spi_dma->dma_rx_channel);
                }
        }
}

static int davinci_spi_bufs_prep(struct spi_device *spi,
                                 struct davinci_spi *davinci_spi)
{
        int op_mode = 0;

        /*
         * REVISIT  unless devices disagree about SPI_LOOP or
         * SPI_READY (SPI_NO_CS only allows one device!), this
         * should not need to be done before each message...
         * optimize for both flags staying cleared.
         */

        op_mode = SPIPC0_DIFUN_MASK
                | SPIPC0_DOFUN_MASK
                | SPIPC0_CLKFUN_MASK;
        if (!(spi->mode & SPI_NO_CS))
                op_mode |= 1 << spi->chip_select;
        if (spi->mode & SPI_READY)
                op_mode |= SPIPC0_SPIENA_MASK;

        iowrite32(op_mode, davinci_spi->base + SPIPC0);

        if (spi->mode & SPI_LOOP)
                set_io_bits(davinci_spi->base + SPIGCR1,
                                SPIGCR1_LOOPBACK_MASK);
        else
                clear_io_bits(davinci_spi->base + SPIGCR1,
                                SPIGCR1_LOOPBACK_MASK);

        return 0;
}

static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
                                   int int_status)
{
        struct device *sdev = davinci_spi->bitbang.master->dev.parent;

        if (int_status & SPIFLG_TIMEOUT_MASK) {
                dev_dbg(sdev, "SPI Time-out Error\n");
                return -ETIMEDOUT;
        }
        if (int_status & SPIFLG_DESYNC_MASK) {
                dev_dbg(sdev, "SPI Desynchronization Error\n");
                return -EIO;
        }
        if (int_status & SPIFLG_BITERR_MASK) {
                dev_dbg(sdev, "SPI Bit error\n");
                return -EIO;
        }

        if (davinci_spi->version == SPI_VERSION_2) {
                if (int_status & SPIFLG_DLEN_ERR_MASK) {
                        dev_dbg(sdev, "SPI Data Length Error\n");
                        return -EIO;
                }
                if (int_status & SPIFLG_PARERR_MASK) {
                        dev_dbg(sdev, "SPI Parity Error\n");
                        return -EIO;
                }
                if (int_status & SPIFLG_OVRRUN_MASK) {
                        dev_dbg(sdev, "SPI Data Overrun error\n");
                        return -EIO;
                }
                if (int_status & SPIFLG_TX_INTR_MASK) {
                        dev_dbg(sdev, "SPI TX intr bit set\n");
                        return -EIO;
                }
                if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
                        dev_dbg(sdev, "SPI Buffer Init Active\n");
                        return -EBUSY;
                }
        }

        return 0;
}

/**
 * davinci_spi_bufs_pio - handle a transfer using PIO
 * @spi: spi device on which data transfer is to be done
 * @t: spi transfer in which the transfer info is filled
 *
 * This function puts the data to be transferred into the data register
 * of the SPI controller and then waits until the completion is marked
 * by the IRQ handler.
 */
static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
{
        struct davinci_spi *davinci_spi;
        int int_status, count, ret;
        u8 conv, tmp;
        u32 tx_data, data1_reg_val;
        u32 buf_val, flg_val;
        struct davinci_spi_platform_data *pdata;

        davinci_spi = spi_master_get_devdata(spi->master);
        pdata = davinci_spi->pdata;

        davinci_spi->tx = t->tx_buf;
        davinci_spi->rx = t->rx_buf;

        /* convert len to words based on bits_per_word */
        conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
        davinci_spi->count = t->len / conv;

        INIT_COMPLETION(davinci_spi->done);

        ret = davinci_spi_bufs_prep(spi, davinci_spi);
        if (ret)
                return ret;

        /* Enable SPI */
        set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

        iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
                        (pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
                        davinci_spi->base + SPIDELAY);

        count = davinci_spi->count;
        data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;
        tmp = ~(0x1 << spi->chip_select);

        clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);

        data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;

        while ((ioread32(davinci_spi->base + SPIBUF)
                                & SPIBUF_RXEMPTY_MASK) == 0)
                cpu_relax();

        /* Determine the command to execute READ or WRITE */
        if (t->tx_buf) {
                clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);

                while (1) {
                        tx_data = davinci_spi->get_tx(davinci_spi);

                        data1_reg_val &= ~(0xFFFF);
                        data1_reg_val |= (0xFFFF & tx_data);

                        buf_val = ioread32(davinci_spi->base + SPIBUF);
                        if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
                                iowrite32(data1_reg_val,
                                                davinci_spi->base + SPIDAT1);

                                count--;
                        }
                        while (ioread32(davinci_spi->base + SPIBUF)
                                        & SPIBUF_RXEMPTY_MASK)
                                cpu_relax();

                        /* getting the returned byte */
                        if (t->rx_buf) {
                                buf_val = ioread32(davinci_spi->base + SPIBUF);
                                davinci_spi->get_rx(buf_val, davinci_spi);
                        }
                        if (count <= 0)
                                break;
                }
        } else {
                if (pdata->poll_mode) {
                        while (1) {
                                /* keeps the serial clock going */
                                if ((ioread32(davinci_spi->base + SPIBUF)
                                                & SPIBUF_TXFULL_MASK) == 0)
                                        iowrite32(data1_reg_val,
                                                davinci_spi->base + SPIDAT1);

                                while (ioread32(davinci_spi->base + SPIBUF) &
                                                SPIBUF_RXEMPTY_MASK)
                                        cpu_relax();

                                flg_val = ioread32(davinci_spi->base + SPIFLG);
                                buf_val = ioread32(davinci_spi->base + SPIBUF);

                                davinci_spi->get_rx(buf_val, davinci_spi);

                                count--;
                                if (count <= 0)
                                        break;
                        }
                } else {        /* Receive in Interrupt mode */
                        int i;

                        for (i = 0; i < davinci_spi->count; i++) {
                                set_io_bits(davinci_spi->base + SPIINT,
                                                SPIINT_BITERR_INTR
                                                | SPIINT_OVRRUN_INTR
                                                | SPIINT_RX_INTR);

                                iowrite32(data1_reg_val,
                                                davinci_spi->base + SPIDAT1);

                                while (ioread32(davinci_spi->base + SPIINT) &
                                                SPIINT_RX_INTR)
                                        cpu_relax();
                        }
                        iowrite32((data1_reg_val & 0x0ffcffff),
                                        davinci_spi->base + SPIDAT1);
                }
        }

        /*
         * Check for bit error, desync error, parity error, timeout error
         * and receive overflow errors
         */
        int_status = ioread32(davinci_spi->base + SPIFLG);

        ret = davinci_spi_check_error(davinci_spi, int_status);
        if (ret != 0)
                return ret;

        /* SPI Framework maintains the count only in bytes so convert back */
        davinci_spi->count *= conv;

        return t->len;
}
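
/*
 * Layout of the SPIDAT1 word assembled in the PIO path above (taken from
 * the shifts and masks used in this file):
 *
 *   bit 28         CSHOLD  - keep the chip select asserted between words
 *   bits 23..16    CSNR    - per-CS default levels; for chip_select = 0,
 *                            tmp = ~(1 << 0) = 0xFE, so only CS0 is driven
 *   bits 15..0     TXDATA  - the word to transmit (masked with 0xFFFF)
 *
 * Purely as an illustration, a one-byte write of 0xA5 on chip select 0
 * with cs_hold = 1 programs SPIDAT1 = (1 << 28) | (0xFE << 16) | 0x00A5
 * = 0x10FE00A5.
 */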

#define DAVINCI_DMA_DATA_TYPE_S8        0x01
#define DAVINCI_DMA_DATA_TYPE_S16       0x02
#define DAVINCI_DMA_DATA_TYPE_S32       0x04

static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
{
        struct davinci_spi *davinci_spi;
        int int_status = 0;
        int count, temp_count;
        u8 conv = 1;
        u8 tmp;
        u32 data1_reg_val;
        struct davinci_spi_dma *davinci_spi_dma;
        int word_len, data_type, ret;
        unsigned long tx_reg, rx_reg;
        struct davinci_spi_platform_data *pdata;
        struct device *sdev;

        davinci_spi = spi_master_get_devdata(spi->master);
        pdata = davinci_spi->pdata;
        sdev = davinci_spi->bitbang.master->dev.parent;

        davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

        tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
        rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;

        davinci_spi->tx = t->tx_buf;
        davinci_spi->rx = t->rx_buf;

        /* convert len to words based on bits_per_word */
        conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
        davinci_spi->count = t->len / conv;

        INIT_COMPLETION(davinci_spi->done);

        init_completion(&davinci_spi_dma->dma_rx_completion);
        init_completion(&davinci_spi_dma->dma_tx_completion);

        word_len = conv * 8;

        if (word_len <= 8)
                data_type = DAVINCI_DMA_DATA_TYPE_S8;
        else if (word_len <= 16)
                data_type = DAVINCI_DMA_DATA_TYPE_S16;
        else if (word_len <= 32)
                data_type = DAVINCI_DMA_DATA_TYPE_S32;
        else
                return -EINVAL;

        ret = davinci_spi_bufs_prep(spi, davinci_spi);
        if (ret)
                return ret;

        /* Put delay val if required */
        iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
                        (pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
                        davinci_spi->base + SPIDELAY);

        count = davinci_spi->count;     /* the number of elements */
        data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;

        /* CS default = 0xFF */
        tmp = ~(0x1 << spi->chip_select);

        clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);

        data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;

        /* disable all interrupts for dma transfers */
        clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
        /* Disable SPI to write configuration bits in SPIDAT */
        clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
        iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
        /* Enable SPI */
        set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

        while ((ioread32(davinci_spi->base + SPIBUF)
                                & SPIBUF_RXEMPTY_MASK) == 0)
                cpu_relax();

        if (t->tx_buf) {
                t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
                                DMA_TO_DEVICE);
                if (dma_mapping_error(&spi->dev, t->tx_dma)) {
                        dev_dbg(sdev, "Unable to DMA map a %d bytes"
                                " TX buffer\n", count);
                        return -ENOMEM;
                }
                temp_count = count;
        } else {
                /* We need TX clocking for RX transaction */
                t->tx_dma = dma_map_single(&spi->dev,
                                (void *)davinci_spi->tmp_buf, count + 1,
                                DMA_TO_DEVICE);
                if (dma_mapping_error(&spi->dev, t->tx_dma)) {
                        dev_dbg(sdev, "Unable to DMA map a %d bytes"
                                " TX tmp buffer\n", count);
                        return -ENOMEM;
                }
                temp_count = count + 1;
        }

        edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
                                        data_type, temp_count, 1, 0, ASYNC);
        edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT);
        edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT);
        edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
        edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);

        if (t->rx_buf) {
                /* initiate transaction */
                iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);

                t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
                                DMA_FROM_DEVICE);
                if (dma_mapping_error(&spi->dev, t->rx_dma)) {
                        dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
                                        count);
                        if (t->tx_buf != NULL)
                                dma_unmap_single(&spi->dev, t->tx_dma,
                                                 count, DMA_TO_DEVICE);
                        return -ENOMEM;
                }
                edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
                                data_type, count, 1, 0, ASYNC);
                edma_set_src(davinci_spi_dma->dma_rx_channel,
                                rx_reg, INCR, W8BIT);
                edma_set_dest(davinci_spi_dma->dma_rx_channel,
                                t->rx_dma, INCR, W8BIT);
                edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
                edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
                                data_type, 0);
        }

        if ((t->tx_buf) || (t->rx_buf))
                edma_start(davinci_spi_dma->dma_tx_channel);

        if (t->rx_buf)
                edma_start(davinci_spi_dma->dma_rx_channel);

        if ((t->rx_buf) || (t->tx_buf))
                davinci_spi_set_dma_req(spi, 1);

        if (t->tx_buf)
                wait_for_completion_interruptible(
                                &davinci_spi_dma->dma_tx_completion);

        if (t->rx_buf)
                wait_for_completion_interruptible(
                                &davinci_spi_dma->dma_rx_completion);

        dma_unmap_single(&spi->dev, t->tx_dma, temp_count, DMA_TO_DEVICE);

        if (t->rx_buf)
                dma_unmap_single(&spi->dev, t->rx_dma, count, DMA_FROM_DEVICE);

        /*
         * Check for bit error, desync error, parity error, timeout error
         * and receive overflow errors
         */
        int_status = ioread32(davinci_spi->base + SPIFLG);

        ret = davinci_spi_check_error(davinci_spi, int_status);
        if (ret != 0)
                return ret;

        /* SPI Framework maintains the count only in bytes so convert back */
        davinci_spi->count *= conv;

        return t->len;
}

/**
 * davinci_spi_irq - IRQ handler for DaVinci SPI
 * @irq: IRQ number for this SPI Master
 * @context_data: structure for SPI Master controller davinci_spi
 */
static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
{
        struct davinci_spi *davinci_spi = context_data;
        u32 int_status, rx_data = 0;
        irqreturn_t ret = IRQ_NONE;

        int_status = ioread32(davinci_spi->base + SPIFLG);

        while ((int_status & SPIFLG_RX_INTR_MASK)) {
                if (likely(int_status & SPIFLG_RX_INTR_MASK)) {
                        ret = IRQ_HANDLED;

                        rx_data = ioread32(davinci_spi->base + SPIBUF);
                        davinci_spi->get_rx(rx_data, davinci_spi);

                        /* Disable Receive Interrupt */
                        iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR),
                                        davinci_spi->base + SPIINT);
                } else
                        (void)davinci_spi_check_error(davinci_spi, int_status);

                int_status = ioread32(davinci_spi->base + SPIFLG);
        }

        return ret;
}

/**
 * davinci_spi_probe - probe function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 */
static int davinci_spi_probe(struct platform_device *pdev)
{
        struct spi_master *master;
        struct davinci_spi *davinci_spi;
        struct davinci_spi_platform_data *pdata;
        struct resource *r, *mem;
        resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
        resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
        resource_size_t dma_eventq = SPI_NO_RESOURCE;
        int i = 0, ret = 0;

        pdata = pdev->dev.platform_data;
        if (pdata == NULL) {
                ret = -ENODEV;
                goto err;
        }

        master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
        if (master == NULL) {
                ret = -ENOMEM;
                goto err;
        }

        dev_set_drvdata(&pdev->dev, master);

        davinci_spi = spi_master_get_devdata(master);
        if (davinci_spi == NULL) {
                ret = -ENOENT;
                goto free_master;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (r == NULL) {
                ret = -ENOENT;
                goto free_master;
        }

        davinci_spi->pbase = r->start;
        davinci_spi->region_size = resource_size(r);
        davinci_spi->pdata = pdata;

        mem = request_mem_region(r->start, davinci_spi->region_size,
                                        pdev->name);
        if (mem == NULL) {
                ret = -EBUSY;
                goto free_master;
        }

        davinci_spi->base = ioremap(r->start, davinci_spi->region_size);
        if (davinci_spi->base == NULL) {
                ret = -ENOMEM;
                goto release_region;
        }

        davinci_spi->irq = platform_get_irq(pdev, 0);
        if (davinci_spi->irq <= 0) {
                ret = -EINVAL;
                goto unmap_io;
        }

        ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED,
                          dev_name(&pdev->dev), davinci_spi);
        if (ret)
                goto unmap_io;

        /* Allocate tmp_buf for tx_buf */
        davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL);
        if (davinci_spi->tmp_buf == NULL) {
                ret = -ENOMEM;
                goto irq_free;
        }

        davinci_spi->bitbang.master = spi_master_get(master);
        if (davinci_spi->bitbang.master == NULL) {
                ret = -ENODEV;
                goto free_tmp_buf;
        }

        davinci_spi->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(davinci_spi->clk)) {
                ret = -ENODEV;
                goto put_master;
        }
        clk_enable(davinci_spi->clk);

        master->bus_num = pdev->id;
        master->num_chipselect = pdata->num_chipselect;
        master->setup = davinci_spi_setup;
        master->cleanup = davinci_spi_cleanup;

        davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
        davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;

        davinci_spi->version = pdata->version;
        use_dma = pdata->use_dma;

        davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
        if (davinci_spi->version == SPI_VERSION_2)
                davinci_spi->bitbang.flags |= SPI_READY;

        if (use_dma) {
                r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
                if (r)
                        dma_rx_chan = r->start;
                r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
                if (r)
                        dma_tx_chan = r->start;
                r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
                if (r)
                        dma_eventq = r->start;
        }

        if (!use_dma ||
            dma_rx_chan == SPI_NO_RESOURCE ||
            dma_tx_chan == SPI_NO_RESOURCE ||
            dma_eventq  == SPI_NO_RESOURCE) {
                davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
                use_dma = 0;
        } else {
                davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;
                davinci_spi->dma_channels = kzalloc(master->num_chipselect
                                * sizeof(struct davinci_spi_dma), GFP_KERNEL);
                if (davinci_spi->dma_channels == NULL) {
                        ret = -ENOMEM;
                        goto free_clk;
                }

                for (i = 0; i < master->num_chipselect; i++) {
                        davinci_spi->dma_channels[i].dma_rx_channel = -1;
                        davinci_spi->dma_channels[i].dma_rx_sync_dev =
                                dma_rx_chan;
                        davinci_spi->dma_channels[i].dma_tx_channel = -1;
                        davinci_spi->dma_channels[i].dma_tx_sync_dev =
                                dma_tx_chan;
                        davinci_spi->dma_channels[i].eventq = dma_eventq;
                }
                dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
                                "Using RX channel = %d, TX channel = %d and "
                                "event queue = %d\n", dma_rx_chan, dma_tx_chan,
                                dma_eventq);
        }

        davinci_spi->get_rx = davinci_spi_rx_buf_u8;
        davinci_spi->get_tx = davinci_spi_tx_buf_u8;

        init_completion(&davinci_spi->done);

        /* Reset In/OUT SPI module */
        iowrite32(0, davinci_spi->base + SPIGCR0);
        udelay(100);
        iowrite32(1, davinci_spi->base + SPIGCR0);

        /* Clock internal */
        if (davinci_spi->pdata->clk_internal)
                set_io_bits(davinci_spi->base + SPIGCR1,
                                SPIGCR1_CLKMOD_MASK);
        else
                clear_io_bits(davinci_spi->base + SPIGCR1,
                                SPIGCR1_CLKMOD_MASK);

        /* master mode default */
        set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK);

        if (davinci_spi->pdata->intr_level)
                iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL);
        else
                iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL);

        ret = spi_bitbang_start(&davinci_spi->bitbang);
        if (ret)
                goto free_clk;

        dev_info(&pdev->dev, "Controller at 0x%p\n", davinci_spi->base);

        if (!pdata->poll_mode)
                dev_info(&pdev->dev, "Operating in interrupt mode"
                        " using IRQ %d\n", davinci_spi->irq);

        return ret;

free_clk:
        clk_disable(davinci_spi->clk);
        clk_put(davinci_spi->clk);
put_master:
        spi_master_put(master);
free_tmp_buf:
        kfree(davinci_spi->tmp_buf);
irq_free:
        free_irq(davinci_spi->irq, davinci_spi);
unmap_io:
        iounmap(davinci_spi->base);
release_region:
        release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
free_master:
        kfree(master);
err:
        return ret;
}
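
/*
 * What the probe above expects from the platform code, sketched as an
 * illustration only (the register address, IRQ and EDMA channel numbers
 * below are invented; real values live in the board/SoC support files):
 *
 *     static struct resource example_spi_resources[] = {
 *             { .start = 0x01c66000, .end = 0x01c667ff,     // SPI registers
 *               .flags = IORESOURCE_MEM },
 *             { .start = 42, .end = 42,                     // SPI interrupt
 *               .flags = IORESOURCE_IRQ },
 *             { .start = 14, .end = 14,                     // RX EDMA channel
 *               .flags = IORESOURCE_DMA },
 *             { .start = 15, .end = 15,                     // TX EDMA channel
 *               .flags = IORESOURCE_DMA },
 *             { .start = EVENTQ_1, .end = EVENTQ_1,         // EDMA event queue
 *               .flags = IORESOURCE_DMA },
 *     };
 *
 *     static struct davinci_spi_platform_data example_spi_pdata = {
 *             .version        = SPI_VERSION_1,
 *             .num_chipselect = 2,
 *             .use_dma        = 1,
 *             .clk_internal   = 1,
 *     };
 *
 *     static struct platform_device example_spi_device = {
 *             .name           = "spi_davinci",
 *             .id             = 0,
 *             .resource       = example_spi_resources,
 *             .num_resources  = ARRAY_SIZE(example_spi_resources),
 *             .dev            = { .platform_data = &example_spi_pdata },
 *     };
 */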

/**
 * davinci_spi_remove - remove function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 *
 * This function reverses the actions of davinci_spi_probe(): it frees the
 * IRQ and the SPI controller's memory region, and calls spi_bitbang_stop()
 * to destroy the work queue that was created by spi_bitbang_start().
 */
static int __exit davinci_spi_remove(struct platform_device *pdev)
{
        struct davinci_spi *davinci_spi;
        struct spi_master *master;

        master = dev_get_drvdata(&pdev->dev);
        davinci_spi = spi_master_get_devdata(master);

        spi_bitbang_stop(&davinci_spi->bitbang);

        clk_disable(davinci_spi->clk);
        clk_put(davinci_spi->clk);
        spi_master_put(master);
        kfree(davinci_spi->tmp_buf);
        free_irq(davinci_spi->irq, davinci_spi);
        iounmap(davinci_spi->base);
        release_mem_region(davinci_spi->pbase, davinci_spi->region_size);

        return 0;
}

static struct platform_driver davinci_spi_driver = {
        .driver.name = "spi_davinci",
        .remove = __exit_p(davinci_spi_remove),
};

static int __init davinci_spi_init(void)
{
        return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe);
}
module_init(davinci_spi_init);

static void __exit davinci_spi_exit(void)
{
        platform_driver_unregister(&davinci_spi_driver);
}
module_exit(davinci_spi_exit);

MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
MODULE_LICENSE("GPL");