/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * TODO
 *  1. DMA
 *  2. Power management
 *  3. Handle MMC errors better
 *
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#define DRIVER_NAME     "sh_mmcif"
#define DRIVER_VERSION  "2010-04-28"

/* CE_CMD_SET */
#define CMD_MASK                0x3f000000
#define CMD_SET_RTYP_NO         ((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B         ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B        ((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY            (1 << 21) /* R1b */
#define CMD_SET_CCSEN           (1 << 20)
#define CMD_SET_WDAT            (1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN            (1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE           (1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN         (1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX     ((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS      ((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO        ((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C           ((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS      ((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL  ((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C          (1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE          (1 << 8) /* 1: not receive CRC status */
#define CMD_SET_TBIT            (1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM            (1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH            (1 << 5)
#define CMD_SET_DATW_1          ((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4          ((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8          ((1 << 1) | (0 << 0)) /* 8bit */
/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK          (1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK         0x0000ffff

/* CE_INT */
#define INT_CCSDE               (1 << 29)
#define INT_CMD12DRE            (1 << 26)
#define INT_CMD12RBE            (1 << 25)
#define INT_CMD12CRE            (1 << 24)
#define INT_DTRANE              (1 << 23)
#define INT_BUFRE               (1 << 22)
#define INT_BUFWEN              (1 << 21)
#define INT_BUFREN              (1 << 20)
#define INT_CCSRCV              (1 << 19)
#define INT_RBSYE               (1 << 17)
#define INT_CRSPE               (1 << 16)
#define INT_CMDVIO              (1 << 15)
#define INT_BUFVIO              (1 << 14)
#define INT_WDATERR             (1 << 11)
#define INT_RDATERR             (1 << 10)
#define INT_RIDXERR             (1 << 9)
#define INT_RSPERR              (1 << 8)
#define INT_CCSTO               (1 << 5)
#define INT_CRCSTO              (1 << 4)
#define INT_WDATTO              (1 << 3)
#define INT_RDATTO              (1 << 2)
#define INT_RBSYTO              (1 << 1)
#define INT_RSPTO               (1 << 0)
#define INT_ERR_STS             (INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
                                 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
                                 INT_CCSTO | INT_CRCSTO | INT_WDATTO |    \
                                 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

/* CE_INT_MASK */
#define MASK_ALL                0x00000000
#define MASK_MCCSDE             (1 << 29)
#define MASK_MCMD12DRE          (1 << 26)
#define MASK_MCMD12RBE          (1 << 25)
#define MASK_MCMD12CRE          (1 << 24)
#define MASK_MDTRANE            (1 << 23)
#define MASK_MBUFRE             (1 << 22)
#define MASK_MBUFWEN            (1 << 21)
#define MASK_MBUFREN            (1 << 20)
#define MASK_MCCSRCV            (1 << 19)
#define MASK_MRBSYE             (1 << 17)
#define MASK_MCRSPE             (1 << 16)
#define MASK_MCMDVIO            (1 << 15)
#define MASK_MBUFVIO            (1 << 14)
#define MASK_MWDATERR           (1 << 11)
#define MASK_MRDATERR           (1 << 10)
#define MASK_MRIDXERR           (1 << 9)
#define MASK_MRSPERR            (1 << 8)
#define MASK_MCCSTO             (1 << 5)
#define MASK_MCRCSTO            (1 << 4)
#define MASK_MWDATTO            (1 << 3)
#define MASK_MRDATTO            (1 << 2)
#define MASK_MRBSYTO            (1 << 1)
#define MASK_MRSPTO             (1 << 0)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ             (1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE             (1 << 31)
#define STS2_CRC16E             (1 << 30)
#define STS2_AC12CRCE           (1 << 29)
#define STS2_RSPCRC7E           (1 << 28)
#define STS2_CRCSTEBE           (1 << 27)
#define STS2_RDATEBE            (1 << 26)
#define STS2_AC12REBE           (1 << 25)
#define STS2_RSPEBE             (1 << 24)
#define STS2_AC12IDXE           (1 << 23)
#define STS2_RSPIDXE            (1 << 22)
#define STS2_CCSTO              (1 << 15)
#define STS2_RDATTO             (1 << 14)
#define STS2_DATBSYTO           (1 << 13)
#define STS2_CRCSTTO            (1 << 12)
#define STS2_AC12BSYTO          (1 << 11)
#define STS2_RSPBSYTO           (1 << 10)
#define STS2_AC12RSPTO          (1 << 9)
#define STS2_RSPTO              (1 << 8)
#define STS2_CRC_ERR            (STS2_CRCSTE | STS2_CRC16E |            \
                                 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR        (STS2_CCSTO | STS2_RDATTO |             \
                                 STS2_DATBSYTO | STS2_CRCSTTO |         \
                                 STS2_AC12BSYTO | STS2_RSPBSYTO |       \
                                 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA        52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA         20000000 /* 20 MHz */
#define CLKDEV_INIT             400000   /* 400 kHz */

enum mmcif_state {
        STATE_IDLE,
        STATE_REQUEST,
        STATE_IOS,
};

struct sh_mmcif_host {
        struct mmc_host *mmc;
        struct mmc_data *data;
        struct platform_device *pd;
        struct sh_dmae_slave dma_slave_tx;
        struct sh_dmae_slave dma_slave_rx;
        struct clk *hclk;
        unsigned int clk;
        int bus_width;
        bool sd_error;
        long timeout;
        void __iomem *addr;
        struct completion intr_wait;
        enum mmcif_state state;
        spinlock_t lock;
        bool power;
        bool card_present;

        /* DMA support */
        struct dma_chan         *chan_rx;
        struct dma_chan         *chan_tx;
        struct completion       dma_complete;
        bool                    dma_active;
};

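/*
 * Read-modify-write helpers for the 32-bit MMCIF control registers.
 * Note: these are not atomic against the interrupt handler; callers
 * presumably rely on request serialization through host->state.
 */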
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
                                        unsigned int reg, u32 val)
{
        writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
                                        unsigned int reg, u32 val)
{
        writel(~val & readl(host->addr + reg), host->addr + reg);
}

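/* dmaengine completion callback: unmap the scatterlist and wake the waiter. */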
static void mmcif_dma_complete(void *arg)
{
        struct sh_mmcif_host *host = arg;
        dev_dbg(&host->pd->dev, "DMA transfer completed\n");

        if (WARN(!host->data, "%s: NULL data in DMA completion!\n",
                 dev_name(&host->pd->dev)))
                return;

        if (host->data->flags & MMC_DATA_READ)
                dma_unmap_sg(host->chan_rx->device->dev,
                             host->data->sg, host->data->sg_len,
                             DMA_FROM_DEVICE);
        else
                dma_unmap_sg(host->chan_tx->device->dev,
                             host->data->sg, host->data->sg_len,
                             DMA_TO_DEVICE);

        complete(&host->dma_complete);
}

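/*
 * Map the request's scatterlist and hand it to the Rx DMA channel. If
 * mapping or descriptor preparation fails, both channels are released
 * and the driver falls back to PIO until the card is powered up again.
 */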
static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
        struct scatterlist *sg = host->data->sg;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_rx;
        dma_cookie_t cookie = -EINVAL;
        int ret;

        ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
                         DMA_FROM_DEVICE);
        if (ret > 0) {
                host->dma_active = true;
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
                        DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }

        if (desc) {
                desc->callback = mmcif_dma_complete;
                desc->callback_param = host;
                cookie = dmaengine_submit(desc);
                sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
                dma_async_issue_pending(chan);
        }
        dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
                __func__, host->data->sg_len, ret, cookie);

        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_rx = NULL;
                host->dma_active = false;
                dma_release_channel(chan);
                /* Free the Tx channel too */
                chan = host->chan_tx;
                if (chan) {
                        host->chan_tx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pd->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
        }

        dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
                desc, cookie, host->data->sg_len);
}

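/* Tx counterpart of sh_mmcif_start_dma_rx(), with the same PIO fallback. */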
static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
        struct scatterlist *sg = host->data->sg;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_tx;
        dma_cookie_t cookie = -EINVAL;
        int ret;

        ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
                         DMA_TO_DEVICE);
        if (ret > 0) {
                host->dma_active = true;
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
                        DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }

        if (desc) {
                desc->callback = mmcif_dma_complete;
                desc->callback_param = host;
                cookie = dmaengine_submit(desc);
                sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
                dma_async_issue_pending(chan);
        }
        dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
                __func__, host->data->sg_len, ret, cookie);

        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_tx = NULL;
                host->dma_active = false;
                dma_release_channel(chan);
                /* Free the Rx channel too */
                chan = host->chan_rx;
                if (chan) {
                        host->chan_rx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pd->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
        }

        dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
                desc, cookie);
}

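/*
 * dma_request_channel() filter: accept any channel offered and pass the
 * sh_dmae_slave configuration to it via chan->private.
 */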
static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
{
        dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
        chan->private = arg;
        return true;
}

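/*
 * Try to acquire a Tx/Rx DMA channel pair. DMA is only used if both
 * channels are available; otherwise the driver silently stays on PIO.
 */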
static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
                                 struct sh_mmcif_plat_data *pdata)
{
        struct sh_dmae_slave *tx, *rx;
        host->dma_active = false;

        /* We can only either use DMA for both Tx and Rx or not use it at all */
        if (pdata->dma) {
                dev_warn(&host->pd->dev,
                         "Update your platform to use embedded DMA slave IDs\n");
                tx = &pdata->dma->chan_priv_tx;
                rx = &pdata->dma->chan_priv_rx;
        } else {
                tx = &host->dma_slave_tx;
                tx->slave_id = pdata->slave_id_tx;
                rx = &host->dma_slave_rx;
                rx->slave_id = pdata->slave_id_rx;
        }
        if (tx->slave_id > 0 && rx->slave_id > 0) {
                dma_cap_mask_t mask;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                host->chan_tx = dma_request_channel(mask, sh_mmcif_filter, tx);
                dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
                        host->chan_tx);

                if (!host->chan_tx)
                        return;

                host->chan_rx = dma_request_channel(mask, sh_mmcif_filter, rx);
                dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
                        host->chan_rx);

                if (!host->chan_rx) {
                        dma_release_channel(host->chan_tx);
                        host->chan_tx = NULL;
                        return;
                }

                init_completion(&host->dma_complete);
        }
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
        sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
        /* Descriptors are freed automatically */
        if (host->chan_tx) {
                struct dma_chan *chan = host->chan_tx;
                host->chan_tx = NULL;
                dma_release_channel(chan);
        }
        if (host->chan_rx) {
                struct dma_chan *chan = host->chan_rx;
                host->chan_rx = NULL;
                dma_release_channel(chan);
        }

        host->dma_active = false;
}

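/*
 * Program the card clock: either pass the peripheral clock through
 * unchanged (CLK_SUP_PCLK), or set the divider field to
 * log2(host->clk / clk) rounded down, masked by CLK_CLEAR.
 */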
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
        struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

        sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
        sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

        if (!clk)
                return;
        if (p->sup_pclk && clk == host->clk)
                sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
        else
                sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
                        (ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16));

        sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}

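/*
 * Soft-reset the interface, apparently keeping the clock divider/select
 * bits, then restore the timeout settings and byte-swapped buffer access.
 */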
static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
        u32 tmp;

        tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

        sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
        sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
        sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
                SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
        /* byte swap on */
        sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

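/*
 * Recover from an error interrupt: break any in-flight command
 * sequence, reset the interface, and map the status registers to an
 * errno for the MMC core.
 */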
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
        u32 state1, state2;
        int ret, timeout = 10000000;

        host->sd_error = false;

        state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
        state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
        dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
        dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);

        if (state1 & STS1_CMDSEQ) {
                sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
                sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
                while (1) {
                        timeout--;
                        if (timeout < 0) {
                                dev_err(&host->pd->dev,
                                        "Forced end of command sequence timed out\n");
                                return -EIO;
                        }
                        if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
                                                                & STS1_CMDSEQ))
                                break;
                        mdelay(1);
                }
                sh_mmcif_sync_reset(host);
                dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
                return -EIO;
        }

        if (state2 & STS2_CRC_ERR) {
                dev_dbg(&host->pd->dev, "CRC error\n");
                ret = -EIO;
        } else if (state2 & STS2_TIMEOUT_ERR) {
                dev_dbg(&host->pd->dev, "Timeout error\n");
                ret = -ETIMEDOUT;
        } else {
                dev_dbg(&host->pd->dev, "End/Index error\n");
                ret = -EIO;
        }
        return ret;
}

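/*
 * PIO data path. Each helper below arms the relevant buffer interrupt,
 * sleeps on intr_wait, then moves one block at a time through the
 * 32-bit CE_DATA window.
 */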
static int sh_mmcif_single_read(struct sh_mmcif_host *host,
                                        struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;
        long time;
        u32 blocksize, i, *p = sg_virt(data->sg);

        /* buf read enable */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                        host->timeout);
        if (time <= 0 || host->sd_error)
                return sh_mmcif_error_manage(host);

        blocksize = (BLOCK_SIZE_MASK &
                        sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
        for (i = 0; i < blocksize / 4; i++)
                *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

        /* buffer read end */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                        host->timeout);
        if (time <= 0 || host->sd_error)
                return sh_mmcif_error_manage(host);

        return 0;
}

static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
                                        struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;
        long time;
        u32 blocksize, i, j, sec, *p;

        blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
                                                     MMCIF_CE_BLOCK_SET);
        for (j = 0; j < data->sg_len; j++) {
                p = sg_virt(data->sg);
                for (sec = 0; sec < data->sg->length / blocksize; sec++) {
                        /* buf read enable */
                        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
                        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                                host->timeout);

                        if (time <= 0 || host->sd_error)
                                return sh_mmcif_error_manage(host);

                        for (i = 0; i < blocksize / 4; i++)
                                *p++ = sh_mmcif_readl(host->addr,
                                                      MMCIF_CE_DATA);
                }
                if (j < data->sg_len - 1)
                        data->sg++;
        }
        return 0;
}

static int sh_mmcif_single_write(struct sh_mmcif_host *host,
                                        struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;
        long time;
        u32 blocksize, i, *p = sg_virt(data->sg);

        /* buf write enable */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                        host->timeout);
        if (time <= 0 || host->sd_error)
                return sh_mmcif_error_manage(host);

        blocksize = (BLOCK_SIZE_MASK &
                        sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
        for (i = 0; i < blocksize / 4; i++)
                sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

        /* buffer write end */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);

        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                        host->timeout);
        if (time <= 0 || host->sd_error)
                return sh_mmcif_error_manage(host);

        return 0;
}

static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
                                                struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;
        long time;
        u32 i, sec, j, blocksize, *p;

        blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
                                                     MMCIF_CE_BLOCK_SET);

        for (j = 0; j < data->sg_len; j++) {
                p = sg_virt(data->sg);
                for (sec = 0; sec < data->sg->length / blocksize; sec++) {
                        /* buf write enable */
                        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
                        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                                host->timeout);

                        if (time <= 0 || host->sd_error)
                                return sh_mmcif_error_manage(host);

                        for (i = 0; i < blocksize / 4; i++)
                                sh_mmcif_writel(host->addr,
                                                MMCIF_CE_DATA, *p++);
                }
                if (j < data->sg_len - 1)
                        data->sg++;
        }
        return 0;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
                                                struct mmc_command *cmd)
{
        if (cmd->flags & MMC_RSP_136) {
                cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
                cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
                cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
                cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
        } else
                cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
                                                struct mmc_command *cmd)
{
        cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

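/*
 * Build the CE_CMD_SET word for a command: response type, busy
 * handling, data direction and bus width, multi-block and index/CRC
 * check bits are derived from the mmc_command and current host state.
 */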
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
                struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
{
        u32 tmp = 0;

        /* Response Type check */
        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE:
                tmp |= CMD_SET_RTYP_NO;
                break;
        case MMC_RSP_R1:
        case MMC_RSP_R1B:
        case MMC_RSP_R3:
                tmp |= CMD_SET_RTYP_6B;
                break;
        case MMC_RSP_R2:
                tmp |= CMD_SET_RTYP_17B;
                break;
        default:
                dev_err(&host->pd->dev, "Unsupported response type.\n");
                break;
        }
        switch (opc) {
        /* RBSY */
        case MMC_SWITCH:
        case MMC_STOP_TRANSMISSION:
        case MMC_SET_WRITE_PROT:
        case MMC_CLR_WRITE_PROT:
        case MMC_ERASE:
        case MMC_GEN_CMD:
                tmp |= CMD_SET_RBSY;
                break;
        }
        /* WDAT / DATW */
        if (host->data) {
                tmp |= CMD_SET_WDAT;
                switch (host->bus_width) {
                case MMC_BUS_WIDTH_1:
                        tmp |= CMD_SET_DATW_1;
                        break;
                case MMC_BUS_WIDTH_4:
                        tmp |= CMD_SET_DATW_4;
                        break;
                case MMC_BUS_WIDTH_8:
                        tmp |= CMD_SET_DATW_8;
                        break;
                default:
                        dev_err(&host->pd->dev, "Unsupported bus width.\n");
                        break;
                }
        }
        /* DWEN */
        if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
                tmp |= CMD_SET_DWEN;
        /* CMLTE/CMD12EN */
        if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
                tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
                sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
                                        mrq->data->blocks << 16);
        }
        /* RIDXC[1:0] check bits */
        if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
            opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
                tmp |= CMD_SET_RIDXC_BITS;
        /* RCRC7C[1:0] check bits */
        if (opc == MMC_SEND_OP_COND)
                tmp |= CMD_SET_CRC7C_BITS;
        /* RCRC7C[1:0] internal CRC7 */
        if (opc == MMC_ALL_SEND_CID ||
                opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
                tmp |= CMD_SET_CRC7C_INTERNAL;

        return (opc << 24) | tmp;
}

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
                                struct mmc_request *mrq, u32 opc)
{
        int ret;

        switch (opc) {
        case MMC_READ_MULTIPLE_BLOCK:
                ret = sh_mmcif_multi_read(host, mrq);
                break;
        case MMC_WRITE_MULTIPLE_BLOCK:
                ret = sh_mmcif_multi_write(host, mrq);
                break;
        case MMC_WRITE_BLOCK:
                ret = sh_mmcif_single_write(host, mrq);
                break;
        case MMC_READ_SINGLE_BLOCK:
        case MMC_SEND_EXT_CSD:
                ret = sh_mmcif_single_read(host, mrq);
                break;
        default:
                dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
                ret = -EINVAL;
                break;
        }
        return ret;
}

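/*
 * Issue a single command: program block size/count and the interrupt
 * mask, write the argument and CE_CMD_SET, then wait for the response
 * and run the data phase (PIO loop or DMA completion wait).
 */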
static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
                        struct mmc_request *mrq, struct mmc_command *cmd)
{
        long time;
        int ret = 0, mask = 0;
        u32 opc = cmd->opcode;

        switch (opc) {
        /* response busy check */
        case MMC_SWITCH:
        case MMC_STOP_TRANSMISSION:
        case MMC_SET_WRITE_PROT:
        case MMC_CLR_WRITE_PROT:
        case MMC_ERASE:
        case MMC_GEN_CMD:
                mask = MASK_MRBSYE;
                break;
        default:
                mask = MASK_MCRSPE;
                break;
        }
        mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
                MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
                MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
                MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;

        if (host->data) {
                sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
                sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
                                mrq->data->blksz);
        }
        opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);

        sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
        /* set arg */
        sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
        /* set cmd */
        sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                host->timeout);
        if (time <= 0) {
                cmd->error = sh_mmcif_error_manage(host);
                return;
        }
        if (host->sd_error) {
                switch (cmd->opcode) {
                case MMC_ALL_SEND_CID:
                case MMC_SELECT_CARD:
                case MMC_APP_CMD:
                        cmd->error = -ETIMEDOUT;
                        break;
                default:
                        dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n",
                                        cmd->opcode);
                        cmd->error = sh_mmcif_error_manage(host);
                        break;
                }
                host->sd_error = false;
                return;
        }
        if (!(cmd->flags & MMC_RSP_PRESENT)) {
                cmd->error = 0;
                return;
        }
        sh_mmcif_get_response(host, cmd);
        if (host->data) {
                if (!host->dma_active) {
                        ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
                } else {
                        long time =
                                wait_for_completion_interruptible_timeout(&host->dma_complete,
                                                                          host->timeout);
                        if (!time)
                                ret = -ETIMEDOUT;
                        else if (time < 0)
                                ret = time;
                        sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
                                        BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
                        host->dma_active = false;
                }
                if (ret < 0)
                        mrq->data->bytes_xfered = 0;
                else
                        mrq->data->bytes_xfered =
                                mrq->data->blocks * mrq->data->blksz;
        }
        cmd->error = ret;
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
                struct mmc_request *mrq, struct mmc_command *cmd)
{
        long time;

        if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
                sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
        else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
                sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
        else {
                dev_err(&host->pd->dev, "unsupported stop cmd\n");
                cmd->error = sh_mmcif_error_manage(host);
                return;
        }

        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                        host->timeout);
        if (time <= 0 || host->sd_error) {
                cmd->error = sh_mmcif_error_manage(host);
                return;
        }
        sh_mmcif_get_cmd12response(host, cmd);
        cmd->error = 0;
}

static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct sh_mmcif_host *host = mmc_priv(mmc);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (host->state != STATE_IDLE) {
                spin_unlock_irqrestore(&host->lock, flags);
                mrq->cmd->error = -EAGAIN;
                mmc_request_done(mmc, mrq);
                return;
        }

        host->state = STATE_REQUEST;
        spin_unlock_irqrestore(&host->lock, flags);

        switch (mrq->cmd->opcode) {
        /* MMCIF does not support SD/SDIO commands */
        case SD_IO_SEND_OP_COND:
        case MMC_APP_CMD:
                host->state = STATE_IDLE;
                mrq->cmd->error = -ETIMEDOUT;
                mmc_request_done(mmc, mrq);
                return;
        case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
                if (!mrq->data) {
                        /* send_if_cond cmd (not supported) */
                        host->state = STATE_IDLE;
                        mrq->cmd->error = -ETIMEDOUT;
                        mmc_request_done(mmc, mrq);
                        return;
                }
                break;
        default:
                break;
        }
        host->data = mrq->data;
        if (mrq->data) {
                if (mrq->data->flags & MMC_DATA_READ) {
                        if (host->chan_rx)
                                sh_mmcif_start_dma_rx(host);
                } else {
                        if (host->chan_tx)
                                sh_mmcif_start_dma_tx(host);
                }
        }
        sh_mmcif_start_cmd(host, mrq, mrq->cmd);
        host->data = NULL;

        if (!mrq->cmd->error && mrq->stop)
                sh_mmcif_stop_cmd(host, mrq, mrq->stop);
        host->state = STATE_IDLE;
        mmc_request_done(mmc, mrq);
}

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct sh_mmcif_host *host = mmc_priv(mmc);
        struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (host->state != STATE_IDLE) {
                spin_unlock_irqrestore(&host->lock, flags);
                return;
        }

        host->state = STATE_IOS;
        spin_unlock_irqrestore(&host->lock, flags);

        if (ios->power_mode == MMC_POWER_UP) {
                if (!host->card_present) {
                        /* See if we also get DMA */
                        sh_mmcif_request_dma(host, host->pd->dev.platform_data);
                        host->card_present = true;
                }
        } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
                /* clock stop */
                sh_mmcif_clock_control(host, 0);
                if (ios->power_mode == MMC_POWER_OFF) {
                        if (host->card_present) {
                                sh_mmcif_release_dma(host);
                                host->card_present = false;
                        }
                }
                if (host->power) {
                        pm_runtime_put(&host->pd->dev);
                        host->power = false;
                        if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
                                p->down_pwr(host->pd);
                }
                host->state = STATE_IDLE;
                return;
        }

        if (ios->clock) {
                if (!host->power) {
                        if (p->set_pwr)
                                p->set_pwr(host->pd, ios->power_mode);
                        pm_runtime_get_sync(&host->pd->dev);
                        host->power = true;
                        sh_mmcif_sync_reset(host);
                }
                sh_mmcif_clock_control(host, ios->clock);
        }

        host->bus_width = ios->bus_width;
        host->state = STATE_IDLE;
}

static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
        struct sh_mmcif_host *host = mmc_priv(mmc);
        struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

        if (!p->get_cd)
                return -ENOSYS;
        else
                return p->get_cd(host->pd);
}

static struct mmc_host_ops sh_mmcif_ops = {
        .request        = sh_mmcif_request,
        .set_ios        = sh_mmcif_set_ios,
        .get_cd         = sh_mmcif_get_cd,
};

static void sh_mmcif_detect(struct mmc_host *mmc)
{
        mmc_detect_change(mmc, 0);
}

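/*
 * Handler shared by both MMCIF interrupt lines: acknowledge and mask
 * the event, flag errors for the waiting task and complete intr_wait.
 */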
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
        struct sh_mmcif_host *host = dev_id;
        u32 state;
        int err = 0;

        state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);

        if (state & INT_RBSYE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT,
                                ~(INT_RBSYE | INT_CRSPE));
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
        } else if (state & INT_CRSPE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
        } else if (state & INT_BUFREN) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
        } else if (state & INT_BUFWEN) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
        } else if (state & INT_CMD12DRE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT,
                        ~(INT_CMD12DRE | INT_CMD12RBE |
                          INT_CMD12CRE | INT_BUFRE));
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
        } else if (state & INT_BUFRE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
        } else if (state & INT_DTRANE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_DTRANE);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
        } else if (state & INT_CMD12RBE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT,
                                ~(INT_CMD12RBE | INT_CMD12CRE));
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
        } else if (state & INT_ERR_STS) {
                /* err interrupts */
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
                err = 1;
        } else {
                dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
                err = 1;
        }
        if (err) {
                host->sd_error = true;
                dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
        }
        if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
                complete(&host->intr_wait);
        else
                dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);

        return IRQ_HANDLED;
}

static int __devinit sh_mmcif_probe(struct platform_device *pdev)
{
        int ret = 0, irq[2];
        struct mmc_host *mmc;
        struct sh_mmcif_host *host;
        struct sh_mmcif_plat_data *pd;
        struct resource *res;
        void __iomem *reg;
        char clk_name[8];

        irq[0] = platform_get_irq(pdev, 0);
        irq[1] = platform_get_irq(pdev, 1);
        if (irq[0] < 0 || irq[1] < 0) {
                dev_err(&pdev->dev, "Get irq error\n");
                return -ENXIO;
        }
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "platform_get_resource error.\n");
                return -ENXIO;
        }
        reg = ioremap(res->start, resource_size(res));
        if (!reg) {
                dev_err(&pdev->dev, "ioremap error.\n");
                return -ENOMEM;
        }
        pd = pdev->dev.platform_data;
        if (!pd) {
                dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
                ret = -ENXIO;
                goto clean_up;
        }
        mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
        if (!mmc) {
                ret = -ENOMEM;
                goto clean_up;
        }
        host            = mmc_priv(mmc);
        host->mmc       = mmc;
        host->addr      = reg;
        host->timeout   = 1000;

        snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
        host->hclk = clk_get(&pdev->dev, clk_name);
        if (IS_ERR(host->hclk)) {
                dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
                ret = PTR_ERR(host->hclk);
                goto clean_up1;
        }
        clk_enable(host->hclk);
        host->clk = clk_get_rate(host->hclk);
        host->pd = pdev;

        init_completion(&host->intr_wait);
        spin_lock_init(&host->lock);

        mmc->ops = &sh_mmcif_ops;
        mmc->f_max = host->clk;
        /* close to 400 kHz */
        if (mmc->f_max < 51200000)
                mmc->f_min = mmc->f_max / 128;
        else if (mmc->f_max < 102400000)
                mmc->f_min = mmc->f_max / 256;
        else
                mmc->f_min = mmc->f_max / 512;
        if (pd->ocr)
                mmc->ocr_avail = pd->ocr;
        mmc->caps = MMC_CAP_MMC_HIGHSPEED;
        if (pd->caps)
                mmc->caps |= pd->caps;
        mmc->max_segs = 32;
        mmc->max_blk_size = 512;
        mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
        mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
        mmc->max_seg_size = mmc->max_req_size;

        sh_mmcif_sync_reset(host);
        platform_set_drvdata(pdev, host);

        pm_runtime_enable(&pdev->dev);
        host->power = false;

        ret = pm_runtime_resume(&pdev->dev);
        if (ret < 0)
                goto clean_up2;

        mmc_add_host(mmc);

        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

        ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
        if (ret) {
                dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
                goto clean_up3;
        }
        ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
        if (ret) {
                free_irq(irq[0], host);
                dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
                goto clean_up3;
        }

        sh_mmcif_detect(host->mmc);

        dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
        dev_dbg(&pdev->dev, "chip ver H'%04x\n",
                sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
        return ret;

clean_up3:
        mmc_remove_host(mmc);
        pm_runtime_suspend(&pdev->dev);
clean_up2:
        pm_runtime_disable(&pdev->dev);
        clk_disable(host->hclk);
clean_up1:
        mmc_free_host(mmc);
clean_up:
        if (reg)
                iounmap(reg);
        return ret;
}

static int __devexit sh_mmcif_remove(struct platform_device *pdev)
{
        struct sh_mmcif_host *host = platform_get_drvdata(pdev);
        int irq[2];

        pm_runtime_get_sync(&pdev->dev);

        mmc_remove_host(host->mmc);
        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

        if (host->addr)
                iounmap(host->addr);

        irq[0] = platform_get_irq(pdev, 0);
        irq[1] = platform_get_irq(pdev, 1);

        free_irq(irq[0], host);
        free_irq(irq[1], host);

        platform_set_drvdata(pdev, NULL);

        clk_disable(host->hclk);
        mmc_free_host(host->mmc);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}

#ifdef CONFIG_PM
static int sh_mmcif_suspend(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct sh_mmcif_host *host = platform_get_drvdata(pdev);
        int ret = mmc_suspend_host(host->mmc);

        if (!ret) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
                clk_disable(host->hclk);
        }

        return ret;
}

static int sh_mmcif_resume(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct sh_mmcif_host *host = platform_get_drvdata(pdev);

        clk_enable(host->hclk);

        return mmc_resume_host(host->mmc);
}
#else
#define sh_mmcif_suspend        NULL
#define sh_mmcif_resume         NULL
#endif  /* CONFIG_PM */

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
        .suspend = sh_mmcif_suspend,
        .resume = sh_mmcif_resume,
};

static struct platform_driver sh_mmcif_driver = {
        .probe          = sh_mmcif_probe,
        .remove         = sh_mmcif_remove,
        .driver         = {
                .name   = DRIVER_NAME,
                .pm     = &sh_mmcif_dev_pm_ops,
        },
};

static int __init sh_mmcif_init(void)
{
        return platform_driver_register(&sh_mmcif_driver);
}

static void __exit sh_mmcif_exit(void)
{
        platform_driver_unregister(&sh_mmcif_driver);
}

module_init(sh_mmcif_init);
module_exit(sh_mmcif_exit);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");