mmc: sh_mmcif: protect against a theoretical race
[pandora-kernel.git] / drivers / mmc / host / sh_mmcif.c
1 /*
2  * MMCIF eMMC driver.
3  *
4  * Copyright (C) 2010 Renesas Solutions Corp.
5  * Yusuke Goda <yusuke.goda.sx@renesas.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License.
10  *
11  *
12  * TODO
13  *  1. DMA
14  *  2. Power management
15  *  3. Handle MMC errors better
16  *
17  */
18
19 #include <linux/clk.h>
20 #include <linux/completion.h>
21 #include <linux/delay.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/dmaengine.h>
24 #include <linux/mmc/card.h>
25 #include <linux/mmc/core.h>
26 #include <linux/mmc/host.h>
27 #include <linux/mmc/mmc.h>
28 #include <linux/mmc/sdio.h>
29 #include <linux/mmc/sh_mmcif.h>
30 #include <linux/pagemap.h>
31 #include <linux/platform_device.h>
32 #include <linux/spinlock.h>
33
#define DRIVER_NAME	"sh_mmcif"
#define DRIVER_VERSION	"2010-04-28"

/* CE_CMD_SET: command index, response type and transfer configuration */
#define CMD_MASK		0x3f000000	/* command index field */
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))	/* no response */
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY		(1 << 21) /* R1b */
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE		(1 << 8) /* 1: not receive CRC status */
#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit bus */
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit bus */
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit bus */

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK		(1 << 0)	/* abort the running command sequence */

/* CE_BLOCK_SET: low half is the block size, high half the block count */
#define BLOCK_SIZE_MASK		0x0000ffff

/* CE_INT: interrupt status bits */
#define INT_CCSDE		(1 << 29)
#define INT_CMD12DRE		(1 << 26)
#define INT_CMD12RBE		(1 << 25)
#define INT_CMD12CRE		(1 << 24)
#define INT_DTRANE		(1 << 23)
#define INT_BUFRE		(1 << 22)
#define INT_BUFWEN		(1 << 21)
#define INT_BUFREN		(1 << 20)
#define INT_CCSRCV		(1 << 19)
#define INT_RBSYE		(1 << 17)
#define INT_CRSPE		(1 << 16)
#define INT_CMDVIO		(1 << 15)
#define INT_BUFVIO		(1 << 14)
#define INT_WDATERR		(1 << 11)
#define INT_RDATERR		(1 << 10)
#define INT_RIDXERR		(1 << 9)
#define INT_RSPERR		(1 << 8)
#define INT_CCSTO		(1 << 5)
#define INT_CRCSTO		(1 << 4)
#define INT_WDATTO		(1 << 3)
#define INT_RDATTO		(1 << 2)
#define INT_RBSYTO		(1 << 1)
#define INT_RSPTO		(1 << 0)
/* union of every error/timeout status bit */
#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |    \
				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

/* CE_INT_MASK: 1 enables the corresponding interrupt, 0 masks everything */
#define MASK_ALL		0x00000000
#define MASK_MCCSDE		(1 << 29)
#define MASK_MCMD12DRE		(1 << 26)
#define MASK_MCMD12RBE		(1 << 25)
#define MASK_MCMD12CRE		(1 << 24)
#define MASK_MDTRANE		(1 << 23)
#define MASK_MBUFRE		(1 << 22)
#define MASK_MBUFWEN		(1 << 21)
#define MASK_MBUFREN		(1 << 20)
#define MASK_MCCSRCV		(1 << 19)
#define MASK_MRBSYE		(1 << 17)
#define MASK_MCRSPE		(1 << 16)
#define MASK_MCMDVIO		(1 << 15)
#define MASK_MBUFVIO		(1 << 14)
#define MASK_MWDATERR		(1 << 11)
#define MASK_MRDATERR		(1 << 10)
#define MASK_MRIDXERR		(1 << 9)
#define MASK_MRSPERR		(1 << 8)
#define MASK_MCCSTO		(1 << 5)
#define MASK_MCRCSTO		(1 << 4)
#define MASK_MWDATTO		(1 << 3)
#define MASK_MRDATTO		(1 << 2)
#define MASK_MRBSYTO		(1 << 1)
#define MASK_MRSPTO		(1 << 0)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ		(1 << 31)	/* a command sequence is in progress */

/* CE_HOST_STS2: detailed error cause, split into CRC and timeout groups */
#define STS2_CRCSTE		(1 << 31)
#define STS2_CRC16E		(1 << 30)
#define STS2_AC12CRCE		(1 << 29)
#define STS2_RSPCRC7E		(1 << 28)
#define STS2_CRCSTEBE		(1 << 27)
#define STS2_RDATEBE		(1 << 26)
#define STS2_AC12REBE		(1 << 25)
#define STS2_RSPEBE		(1 << 24)
#define STS2_AC12IDXE		(1 << 23)
#define STS2_RSPIDXE		(1 << 22)
#define STS2_CCSTO		(1 << 15)
#define STS2_RDATTO		(1 << 14)
#define STS2_DATBSYTO		(1 << 13)
#define STS2_CRCSTTO		(1 << 12)
#define STS2_AC12BSYTO		(1 << 11)
#define STS2_RSPBSYTO		(1 << 10)
#define STS2_AC12RSPTO		(1 << 9)
#define STS2_RSPTO		(1 << 8)
#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
				 STS2_DATBSYTO | STS2_CRCSTTO |		\
				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
				 STS2_AC12RSPTO | STS2_RSPTO)

/* Target bus clock rates per card/transfer mode */
#define CLKDEV_EMMC_DATA	52000000 /* 52MHz */
#define CLKDEV_MMC_DATA		20000000 /* 20MHz */
#define CLKDEV_INIT		400000   /* 400 KHz */
156
/*
 * Host state machine, guarded by sh_mmcif_host::lock.  sh_mmcif_request()
 * rejects new requests unless the host is STATE_IDLE.
 */
enum mmcif_state {
	STATE_IDLE,	/* ready to accept a request */
	STATE_REQUEST,	/* an mmc_request is being processed */
	STATE_IOS,	/* presumably set while ->set_ios runs - not visible in this chunk */
};
162
/* Per-controller instance data, allocated as mmc_priv() of the mmc_host */
struct sh_mmcif_host {
	struct mmc_host *mmc;		/* MMC core handle */
	struct mmc_data *data;		/* data phase of the current request, if any */
	struct platform_device *pd;	/* owning platform device (for dev_* logging) */
	struct clk *hclk;		/* interface clock */
	unsigned int clk;		/* base clock rate the bus divider works from */
	int bus_width;			/* MMC_BUS_WIDTH_{1,4,8} */
	bool sd_error;			/* error latched for the current command;
					 * presumably set by the IRQ handler - not
					 * visible in this chunk */
	long timeout;			/* jiffies timeout for completion waits */
	void __iomem *addr;		/* mapped register base */
	struct completion intr_wait;	/* signalled on each expected interrupt */
	enum mmcif_state state;		/* see enum mmcif_state */
	spinlock_t lock;		/* protects 'state' */

	/* DMA support */
	struct dma_chan		*chan_rx;	/* NULL => PIO reads */
	struct dma_chan		*chan_tx;	/* NULL => PIO writes */
	struct completion	dma_complete;	/* signalled by mmcif_dma_complete() */
	bool			dma_active;	/* a DMA descriptor is in flight */
};
183
184 static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
185                                         unsigned int reg, u32 val)
186 {
187         writel(val | readl(host->addr + reg), host->addr + reg);
188 }
189
190 static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
191                                         unsigned int reg, u32 val)
192 {
193         writel(~val & readl(host->addr + reg), host->addr + reg);
194 }
195
/*
 * dmaengine completion callback for both directions.  Unmaps the request's
 * scatterlist (direction chosen from host->data->flags) and wakes the
 * waiter blocked on host->dma_complete in sh_mmcif_start_cmd().
 */
static void mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	dev_dbg(&host->pd->dev, "Command completed\n");

	/* host->data identifies the transfer; bail loudly if it vanished */
	if (WARN(!host->data, "%s: NULL data in DMA completion!\n",
		 dev_name(&host->pd->dev)))
		return;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->data->sg, host->data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->data->sg, host->data->sg_len,
			     DMA_TO_DEVICE);

	complete(&host->dma_complete);
}
216
/*
 * Map the current request's scatterlist and submit a slave DMA read
 * (card -> memory) descriptor.  On any failure (mapping or descriptor
 * preparation) both DMA channels are released and the host permanently
 * falls back to PIO; the caller then transfers via sh_mmcif_data_trans().
 */
static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct scatterlist *sg = host->data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		/* mark DMA in flight before submitting the descriptor */
		host->dma_active = true;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		/* enable DMA read access to the data buffer */
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, host->data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->data->sg_len);
}
264
/*
 * Map the current request's scatterlist and submit a slave DMA write
 * (memory -> card) descriptor.  Mirror image of sh_mmcif_start_dma_rx():
 * on failure both channels are released and the driver reverts to PIO.
 */
static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct scatterlist *sg = host->data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		/* mark DMA in flight before submitting the descriptor */
		host->dma_active = true;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		/* enable DMA write access to the data buffer */
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, host->data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}
312
313 static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
314 {
315         dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
316         chan->private = arg;
317         return true;
318 }
319
320 static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
321                                  struct sh_mmcif_plat_data *pdata)
322 {
323         host->dma_active = false;
324
325         /* We can only either use DMA for both Tx and Rx or not use it at all */
326         if (pdata->dma) {
327                 dma_cap_mask_t mask;
328
329                 dma_cap_zero(mask);
330                 dma_cap_set(DMA_SLAVE, mask);
331
332                 host->chan_tx = dma_request_channel(mask, sh_mmcif_filter,
333                                                     &pdata->dma->chan_priv_tx);
334                 dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
335                         host->chan_tx);
336
337                 if (!host->chan_tx)
338                         return;
339
340                 host->chan_rx = dma_request_channel(mask, sh_mmcif_filter,
341                                                     &pdata->dma->chan_priv_rx);
342                 dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
343                         host->chan_rx);
344
345                 if (!host->chan_rx) {
346                         dma_release_channel(host->chan_tx);
347                         host->chan_tx = NULL;
348                         return;
349                 }
350
351                 init_completion(&host->dma_complete);
352         }
353 }
354
355 static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
356 {
357         sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
358         /* Descriptors are freed automatically */
359         if (host->chan_tx) {
360                 struct dma_chan *chan = host->chan_tx;
361                 host->chan_tx = NULL;
362                 dma_release_channel(chan);
363         }
364         if (host->chan_rx) {
365                 struct dma_chan *chan = host->chan_rx;
366                 host->chan_rx = NULL;
367                 dma_release_channel(chan);
368         }
369
370         host->dma_active = false;
371 }
372
/*
 * Program the MMC bus clock to (at most) 'clk' Hz.  A zero 'clk' leaves
 * the clock gated off.  Either the peripheral clock is used directly
 * (sup_pclk board option, when the requested rate equals host->clk) or a
 * power-of-two divider of host->clk is written into the CLK_CLEAR field.
 */
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

	/* gate the clock and wipe the old divider before reprogramming */
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;
	if (p->sup_pclk && clk == host->clk)
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
	else
		/* divider exponent goes into bits 16+, masked to the field */
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
			(ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16));

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}
390
/*
 * Soft-reset the controller.  The 0x010f0000 bits of CE_CLK_CTRL (clock
 * divider/enable state) are captured first and restored after the reset
 * pulse, together with the default command timeouts.
 */
static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	/* pulse the soft-reset bit */
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}
404
405 static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
406 {
407         u32 state1, state2;
408         int ret, timeout = 10000000;
409
410         host->sd_error = false;
411
412         state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
413         state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
414         dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
415         dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);
416
417         if (state1 & STS1_CMDSEQ) {
418                 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
419                 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
420                 while (1) {
421                         timeout--;
422                         if (timeout < 0) {
423                                 dev_err(&host->pd->dev,
424                                         "Forceed end of command sequence timeout err\n");
425                                 return -EIO;
426                         }
427                         if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
428                                                                 & STS1_CMDSEQ))
429                                 break;
430                         mdelay(1);
431                 }
432                 sh_mmcif_sync_reset(host);
433                 dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
434                 return -EIO;
435         }
436
437         if (state2 & STS2_CRC_ERR) {
438                 dev_dbg(&host->pd->dev, ": Happened CRC error\n");
439                 ret = -EIO;
440         } else if (state2 & STS2_TIMEOUT_ERR) {
441                 dev_dbg(&host->pd->dev, ": Happened Timeout error\n");
442                 ret = -ETIMEDOUT;
443         } else {
444                 dev_dbg(&host->pd->dev, ": Happened End/Index error\n");
445                 ret = -EIO;
446         }
447         return ret;
448 }
449
/*
 * PIO read of a single block.  Waits (interruptibly) for the buffer-read-
 * enable interrupt, drains the data FIFO word by word, then waits for the
 * buffer-read-end interrupt.  Returns 0 or a negative errno from
 * sh_mmcif_error_manage().
 */
static int sh_mmcif_single_read(struct sh_mmcif_host *host,
					struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 blocksize, i, *p = sg_virt(data->sg);

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error)
		return sh_mmcif_error_manage(host);

	/* "+ 3" rounds the byte count up to whole 32-bit words for the /4 */
	blocksize = (BLOCK_SIZE_MASK &
			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
	for (i = 0; i < blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error)
		return sh_mmcif_error_manage(host);

	return 0;
}
478
/*
 * PIO read of a multi-block transfer: for every scatterlist entry, wait
 * for buffer-read-enable and drain one block at a time.
 *
 * NOTE(review): data->sg is advanced in place, mutating the request's
 * scatterlist; also assumes each sg entry's length is a multiple of the
 * block size - confirm against the callers' sg construction.
 */
static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
					struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 blocksize, i, j, sec, *p;

	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
						     MMCIF_CE_BLOCK_SET);
	for (j = 0; j < data->sg_len; j++) {
		p = sg_virt(data->sg);
		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
			/* buf read enable */
			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
				host->timeout);

			if (time <= 0 || host->sd_error)
				return sh_mmcif_error_manage(host);

			for (i = 0; i < blocksize / 4; i++)
				*p++ = sh_mmcif_readl(host->addr,
						      MMCIF_CE_DATA);
		}
		if (j < data->sg_len - 1)
			data->sg++;
	}
	return 0;
}
508
/*
 * PIO write of a single block.  Waits for buffer-write-enable, pushes the
 * block into the data FIFO word by word, then waits for the data-transfer-
 * end interrupt.  Returns 0 or a negative errno from
 * sh_mmcif_error_manage().
 */
static int sh_mmcif_single_write(struct sh_mmcif_host *host,
					struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 blocksize, i, *p = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	/* buf write enable */
	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error)
		return sh_mmcif_error_manage(host);

	/* "+ 3" rounds the byte count up to whole 32-bit words for the /4 */
	blocksize = (BLOCK_SIZE_MASK &
			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
	for (i = 0; i < blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);

	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error)
		return sh_mmcif_error_manage(host);

	return 0;
}
539
/*
 * PIO write of a multi-block transfer: for every scatterlist entry, wait
 * for buffer-write-enable and push one block at a time.
 *
 * NOTE(review): like sh_mmcif_multi_read(), this advances data->sg in
 * place and assumes each sg entry's length is a multiple of the block
 * size - confirm against the callers' sg construction.
 */
static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
						struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 i, sec, j, blocksize, *p;

	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
						     MMCIF_CE_BLOCK_SET);

	for (j = 0; j < data->sg_len; j++) {
		p = sg_virt(data->sg);
		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
			/* buf write enable */
			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
				host->timeout);

			if (time <= 0 || host->sd_error)
				return sh_mmcif_error_manage(host);

			for (i = 0; i < blocksize / 4; i++)
				sh_mmcif_writel(host->addr,
						MMCIF_CE_DATA, *p++);
		}
		if (j < data->sg_len - 1)
			data->sg++;
	}
	return 0;
}
570
571 static void sh_mmcif_get_response(struct sh_mmcif_host *host,
572                                                 struct mmc_command *cmd)
573 {
574         if (cmd->flags & MMC_RSP_136) {
575                 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
576                 cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
577                 cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
578                 cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
579         } else
580                 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
581 }
582
/* Fetch the response of the auto-issued CMD12 (stop transmission). */
static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}
588
589 static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
590                 struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
591 {
592         u32 tmp = 0;
593
594         /* Response Type check */
595         switch (mmc_resp_type(cmd)) {
596         case MMC_RSP_NONE:
597                 tmp |= CMD_SET_RTYP_NO;
598                 break;
599         case MMC_RSP_R1:
600         case MMC_RSP_R1B:
601         case MMC_RSP_R3:
602                 tmp |= CMD_SET_RTYP_6B;
603                 break;
604         case MMC_RSP_R2:
605                 tmp |= CMD_SET_RTYP_17B;
606                 break;
607         default:
608                 dev_err(&host->pd->dev, "Unsupported response type.\n");
609                 break;
610         }
611         switch (opc) {
612         /* RBSY */
613         case MMC_SWITCH:
614         case MMC_STOP_TRANSMISSION:
615         case MMC_SET_WRITE_PROT:
616         case MMC_CLR_WRITE_PROT:
617         case MMC_ERASE:
618         case MMC_GEN_CMD:
619                 tmp |= CMD_SET_RBSY;
620                 break;
621         }
622         /* WDAT / DATW */
623         if (host->data) {
624                 tmp |= CMD_SET_WDAT;
625                 switch (host->bus_width) {
626                 case MMC_BUS_WIDTH_1:
627                         tmp |= CMD_SET_DATW_1;
628                         break;
629                 case MMC_BUS_WIDTH_4:
630                         tmp |= CMD_SET_DATW_4;
631                         break;
632                 case MMC_BUS_WIDTH_8:
633                         tmp |= CMD_SET_DATW_8;
634                         break;
635                 default:
636                         dev_err(&host->pd->dev, "Unsupported bus width.\n");
637                         break;
638                 }
639         }
640         /* DWEN */
641         if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
642                 tmp |= CMD_SET_DWEN;
643         /* CMLTE/CMD12EN */
644         if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
645                 tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
646                 sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
647                                         mrq->data->blocks << 16);
648         }
649         /* RIDXC[1:0] check bits */
650         if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
651             opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
652                 tmp |= CMD_SET_RIDXC_BITS;
653         /* RCRC7C[1:0] check bits */
654         if (opc == MMC_SEND_OP_COND)
655                 tmp |= CMD_SET_CRC7C_BITS;
656         /* RCRC7C[1:0] internal CRC7 */
657         if (opc == MMC_ALL_SEND_CID ||
658                 opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
659                 tmp |= CMD_SET_CRC7C_INTERNAL;
660
661         return opc = ((opc << 24) | tmp);
662 }
663
664 static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
665                                 struct mmc_request *mrq, u32 opc)
666 {
667         int ret;
668
669         switch (opc) {
670         case MMC_READ_MULTIPLE_BLOCK:
671                 ret = sh_mmcif_multi_read(host, mrq);
672                 break;
673         case MMC_WRITE_MULTIPLE_BLOCK:
674                 ret = sh_mmcif_multi_write(host, mrq);
675                 break;
676         case MMC_WRITE_BLOCK:
677                 ret = sh_mmcif_single_write(host, mrq);
678                 break;
679         case MMC_READ_SINGLE_BLOCK:
680         case MMC_SEND_EXT_CSD:
681                 ret = sh_mmcif_single_read(host, mrq);
682                 break;
683         default:
684                 dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
685                 ret = -EINVAL;
686                 break;
687         }
688         return ret;
689 }
690
691 static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
692                         struct mmc_request *mrq, struct mmc_command *cmd)
693 {
694         long time;
695         int ret = 0, mask = 0;
696         u32 opc = cmd->opcode;
697
698         switch (opc) {
699         /* respons busy check */
700         case MMC_SWITCH:
701         case MMC_STOP_TRANSMISSION:
702         case MMC_SET_WRITE_PROT:
703         case MMC_CLR_WRITE_PROT:
704         case MMC_ERASE:
705         case MMC_GEN_CMD:
706                 mask = MASK_MRBSYE;
707                 break;
708         default:
709                 mask = MASK_MCRSPE;
710                 break;
711         }
712         mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
713                 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
714                 MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
715                 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;
716
717         if (host->data) {
718                 sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
719                 sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
720                                 mrq->data->blksz);
721         }
722         opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);
723
724         sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
725         sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
726         /* set arg */
727         sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
728         /* set cmd */
729         sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);
730
731         time = wait_for_completion_interruptible_timeout(&host->intr_wait,
732                 host->timeout);
733         if (time <= 0) {
734                 cmd->error = sh_mmcif_error_manage(host);
735                 return;
736         }
737         if (host->sd_error) {
738                 switch (cmd->opcode) {
739                 case MMC_ALL_SEND_CID:
740                 case MMC_SELECT_CARD:
741                 case MMC_APP_CMD:
742                         cmd->error = -ETIMEDOUT;
743                         break;
744                 default:
745                         dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n",
746                                         cmd->opcode);
747                         cmd->error = sh_mmcif_error_manage(host);
748                         break;
749                 }
750                 host->sd_error = false;
751                 return;
752         }
753         if (!(cmd->flags & MMC_RSP_PRESENT)) {
754                 cmd->error = 0;
755                 return;
756         }
757         sh_mmcif_get_response(host, cmd);
758         if (host->data) {
759                 if (!host->dma_active) {
760                         ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
761                 } else {
762                         long time =
763                                 wait_for_completion_interruptible_timeout(&host->dma_complete,
764                                                                           host->timeout);
765                         if (!time)
766                                 ret = -ETIMEDOUT;
767                         else if (time < 0)
768                                 ret = time;
769                         sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
770                                         BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
771                         host->dma_active = false;
772                 }
773                 if (ret < 0)
774                         mrq->data->bytes_xfered = 0;
775                 else
776                         mrq->data->bytes_xfered =
777                                 mrq->data->blocks * mrq->data->blksz;
778         }
779         cmd->error = ret;
780 }
781
782 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
783                 struct mmc_request *mrq, struct mmc_command *cmd)
784 {
785         long time;
786
787         if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
788                 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
789         else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
790                 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
791         else {
792                 dev_err(&host->pd->dev, "unsupported stop cmd\n");
793                 cmd->error = sh_mmcif_error_manage(host);
794                 return;
795         }
796
797         time = wait_for_completion_interruptible_timeout(&host->intr_wait,
798                         host->timeout);
799         if (time <= 0 || host->sd_error) {
800                 cmd->error = sh_mmcif_error_manage(host);
801                 return;
802         }
803         sh_mmcif_get_cmd12response(host, cmd);
804         cmd->error = 0;
805 }
806
807 static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
808 {
809         struct sh_mmcif_host *host = mmc_priv(mmc);
810         unsigned long flags;
811
812         spin_lock_irqsave(&host->lock, flags);
813         if (host->state != STATE_IDLE) {
814                 spin_unlock_irqrestore(&host->lock, flags);
815                 mrq->cmd->error = -EAGAIN;
816                 mmc_request_done(mmc, mrq);
817                 return;
818         }
819
820         host->state = STATE_REQUEST;
821         spin_unlock_irqrestore(&host->lock, flags);
822
823         switch (mrq->cmd->opcode) {
824         /* MMCIF does not support SD/SDIO command */
825         case SD_IO_SEND_OP_COND:
826         case MMC_APP_CMD:
827                 host->state = STATE_IDLE;
828                 mrq->cmd->error = -ETIMEDOUT;
829                 mmc_request_done(mmc, mrq);
830                 return;
831         case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
832                 if (!mrq->data) {
833                         /* send_if_cond cmd (not support) */
834                         host->state = STATE_IDLE;
835                         mrq->cmd->error = -ETIMEDOUT;
836                         mmc_request_done(mmc, mrq);
837                         return;
838                 }
839                 break;
840         default:
841                 break;
842         }
843         host->data = mrq->data;
844         if (mrq->data) {
845                 if (mrq->data->flags & MMC_DATA_READ) {
846                         if (host->chan_rx)
847                                 sh_mmcif_start_dma_rx(host);
848                 } else {
849                         if (host->chan_tx)
850                                 sh_mmcif_start_dma_tx(host);
851                 }
852         }
853         sh_mmcif_start_cmd(host, mrq, mrq->cmd);
854         host->data = NULL;
855
856         if (!mrq->cmd->error && mrq->stop)
857                 sh_mmcif_stop_cmd(host, mrq, mrq->stop);
858         host->state = STATE_IDLE;
859         mmc_request_done(mmc, mrq);
860 }
861
862 static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
863 {
864         struct sh_mmcif_host *host = mmc_priv(mmc);
865         struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
866         unsigned long flags;
867
868         spin_lock_irqsave(&host->lock, flags);
869         if (host->state != STATE_IDLE) {
870                 spin_unlock_irqrestore(&host->lock, flags);
871                 return;
872         }
873
874         host->state = STATE_IOS;
875         spin_unlock_irqrestore(&host->lock, flags);
876
877         if (ios->power_mode == MMC_POWER_UP) {
878                 if (p->set_pwr)
879                         p->set_pwr(host->pd, ios->power_mode);
880         } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
881                 /* clock stop */
882                 sh_mmcif_clock_control(host, 0);
883                 if (ios->power_mode == MMC_POWER_OFF && p->down_pwr)
884                         p->down_pwr(host->pd);
885                 host->state = STATE_IDLE;
886                 return;
887         }
888
889         if (ios->clock)
890                 sh_mmcif_clock_control(host, ios->clock);
891
892         host->bus_width = ios->bus_width;
893         host->state = STATE_IDLE;
894 }
895
896 static int sh_mmcif_get_cd(struct mmc_host *mmc)
897 {
898         struct sh_mmcif_host *host = mmc_priv(mmc);
899         struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
900
901         if (!p->get_cd)
902                 return -ENOSYS;
903         else
904                 return p->get_cd(host->pd);
905 }
906
/* MMC core callbacks for this host controller */
static struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= sh_mmcif_get_cd,
};
912
/* Kick off an immediate card-detect rescan (delay of 0 jiffies). */
static void sh_mmcif_detect(struct mmc_host *mmc)
{
	mmc_detect_change(mmc, 0);
}
917
/*
 * Shared interrupt handler for both the "error" and "int" IRQ lines.
 *
 * For each recognised status bit the handler acknowledges the event in
 * CE_INT (by writing the inverse bit pattern) and re-masks it in
 * CE_INT_MASK; any state covered by INT_ERR_STS, or an entirely
 * unrecognised state, is treated as an error and flagged via
 * host->sd_error for the waiting request path to pick up.
 *
 * NOTE(review): exactly one branch is handled per invocation (else-if
 * chain), and the branch order appears deliberate — do not reorder.
 */
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	u32 state;
	int err = 0;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);

	if (state & INT_RBSYE) {
		/* R1b busy-end also clears any pending response-end */
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_RBSYE | INT_CRSPE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
	} else if (state & INT_CRSPE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
	} else if (state & INT_BUFREN) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
	} else if (state & INT_BUFWEN) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
	} else if (state & INT_CMD12DRE) {
		/* auto-CMD12 after multi-block read: ack all related events */
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
			~(INT_CMD12DRE | INT_CMD12RBE |
			  INT_CMD12CRE | INT_BUFRE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
	} else if (state & INT_BUFRE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	} else if (state & INT_DTRANE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_DTRANE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	} else if (state & INT_CMD12RBE) {
		/* auto-CMD12 after multi-block write */
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_CMD12RBE | INT_CMD12CRE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
	} else if (state & INT_ERR_STS) {
		/* err interrupts */
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
		err = 1;
	} else {
		dev_dbg(&host->pd->dev, "Not support int\n");
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
		err = 1;
	}
	if (err) {
		/* Picked up (and cleared) by the waiting request path */
		host->sd_error = true;
		dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
	}
	/* Stray CMD12RBE/CMD12CRE alone can fire with no waiter: don't wake */
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
		complete(&host->intr_wait);
	else
		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);

	return IRQ_HANDLED;
}
976
977 static int __devinit sh_mmcif_probe(struct platform_device *pdev)
978 {
979         int ret = 0, irq[2];
980         struct mmc_host *mmc;
981         struct sh_mmcif_host *host;
982         struct sh_mmcif_plat_data *pd;
983         struct resource *res;
984         void __iomem *reg;
985         char clk_name[8];
986
987         irq[0] = platform_get_irq(pdev, 0);
988         irq[1] = platform_get_irq(pdev, 1);
989         if (irq[0] < 0 || irq[1] < 0) {
990                 dev_err(&pdev->dev, "Get irq error\n");
991                 return -ENXIO;
992         }
993         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
994         if (!res) {
995                 dev_err(&pdev->dev, "platform_get_resource error.\n");
996                 return -ENXIO;
997         }
998         reg = ioremap(res->start, resource_size(res));
999         if (!reg) {
1000                 dev_err(&pdev->dev, "ioremap error.\n");
1001                 return -ENOMEM;
1002         }
1003         pd = pdev->dev.platform_data;
1004         if (!pd) {
1005                 dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
1006                 ret = -ENXIO;
1007                 goto clean_up;
1008         }
1009         mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
1010         if (!mmc) {
1011                 ret = -ENOMEM;
1012                 goto clean_up;
1013         }
1014         host            = mmc_priv(mmc);
1015         host->mmc       = mmc;
1016         host->addr      = reg;
1017         host->timeout   = 1000;
1018
1019         snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
1020         host->hclk = clk_get(&pdev->dev, clk_name);
1021         if (IS_ERR(host->hclk)) {
1022                 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
1023                 ret = PTR_ERR(host->hclk);
1024                 goto clean_up1;
1025         }
1026         clk_enable(host->hclk);
1027         host->clk = clk_get_rate(host->hclk);
1028         host->pd = pdev;
1029
1030         init_completion(&host->intr_wait);
1031         spin_lock_init(&host->lock);
1032
1033         mmc->ops = &sh_mmcif_ops;
1034         mmc->f_max = host->clk;
1035         /* close to 400KHz */
1036         if (mmc->f_max < 51200000)
1037                 mmc->f_min = mmc->f_max / 128;
1038         else if (mmc->f_max < 102400000)
1039                 mmc->f_min = mmc->f_max / 256;
1040         else
1041                 mmc->f_min = mmc->f_max / 512;
1042         if (pd->ocr)
1043                 mmc->ocr_avail = pd->ocr;
1044         mmc->caps = MMC_CAP_MMC_HIGHSPEED;
1045         if (pd->caps)
1046                 mmc->caps |= pd->caps;
1047         mmc->max_segs = 32;
1048         mmc->max_blk_size = 512;
1049         mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
1050         mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
1051         mmc->max_seg_size = mmc->max_req_size;
1052
1053         sh_mmcif_sync_reset(host);
1054         platform_set_drvdata(pdev, host);
1055
1056         /* See if we also get DMA */
1057         sh_mmcif_request_dma(host, pd);
1058
1059         mmc_add_host(mmc);
1060
1061         sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1062
1063         ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
1064         if (ret) {
1065                 dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
1066                 goto clean_up2;
1067         }
1068         ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
1069         if (ret) {
1070                 free_irq(irq[0], host);
1071                 dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
1072                 goto clean_up2;
1073         }
1074
1075         sh_mmcif_detect(host->mmc);
1076
1077         dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
1078         dev_dbg(&pdev->dev, "chip ver H'%04x\n",
1079                 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
1080         return ret;
1081
1082 clean_up2:
1083         clk_disable(host->hclk);
1084 clean_up1:
1085         mmc_free_host(mmc);
1086 clean_up:
1087         if (reg)
1088                 iounmap(reg);
1089         return ret;
1090 }
1091
1092 static int __devexit sh_mmcif_remove(struct platform_device *pdev)
1093 {
1094         struct sh_mmcif_host *host = platform_get_drvdata(pdev);
1095         int irq[2];
1096
1097         mmc_remove_host(host->mmc);
1098         sh_mmcif_release_dma(host);
1099
1100         sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1101
1102         if (host->addr)
1103                 iounmap(host->addr);
1104
1105         irq[0] = platform_get_irq(pdev, 0);
1106         irq[1] = platform_get_irq(pdev, 1);
1107
1108         free_irq(irq[0], host);
1109         free_irq(irq[1], host);
1110
1111         platform_set_drvdata(pdev, NULL);
1112
1113         clk_disable(host->hclk);
1114         mmc_free_host(host->mmc);
1115
1116         return 0;
1117 }
1118
/* Platform driver glue; matched against the "sh_mmcif" device name */
static struct platform_driver sh_mmcif_driver = {
	.probe		= sh_mmcif_probe,
	.remove		= sh_mmcif_remove,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};
1126
/* Module entry point: register the platform driver. */
static int __init sh_mmcif_init(void)
{
	return platform_driver_register(&sh_mmcif_driver);
}
1131
/* Module exit point: unregister the platform driver. */
static void __exit sh_mmcif_exit(void)
{
	platform_driver_unregister(&sh_mmcif_driver);
}
1136
1137 module_init(sh_mmcif_init);
1138 module_exit(sh_mmcif_exit);
1139
1140
1141 MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
1142 MODULE_LICENSE("GPL");
1143 MODULE_ALIAS("platform:" DRIVER_NAME);
1144 MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");