 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 * 3. Handle MMC errors better
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#define DRIVER_NAME	"sh_mmcif"
#define DRIVER_VERSION	"2010-04-28"

#define CMD_MASK		0x3f000000
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY		(1 << 21) /* R1b */
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE		(1 << 8) /* 1: do not receive CRC status */
#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM		(1 << 6) /* 1: open-drain */
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */

#define CMD_CTRL_BREAK		(1 << 0)

#define BLOCK_SIZE_MASK		0x0000ffff

#define INT_CCSDE		(1 << 29)
#define INT_CMD12DRE		(1 << 26)
#define INT_CMD12RBE		(1 << 25)
#define INT_CMD12CRE		(1 << 24)
#define INT_DTRANE		(1 << 23)
#define INT_BUFRE		(1 << 22)
#define INT_BUFWEN		(1 << 21)
#define INT_BUFREN		(1 << 20)
#define INT_CCSRCV		(1 << 19)
#define INT_RBSYE		(1 << 17)
#define INT_CRSPE		(1 << 16)
#define INT_CMDVIO		(1 << 15)
#define INT_BUFVIO		(1 << 14)
#define INT_WDATERR		(1 << 11)
#define INT_RDATERR		(1 << 10)
#define INT_RIDXERR		(1 << 9)
#define INT_RSPERR		(1 << 8)
#define INT_CCSTO		(1 << 5)
#define INT_CRCSTO		(1 << 4)
#define INT_WDATTO		(1 << 3)
#define INT_RDATTO		(1 << 2)
#define INT_RBSYTO		(1 << 1)
#define INT_RSPTO		(1 << 0)
#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR | \
				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
				 INT_CCSTO | INT_CRCSTO | INT_WDATTO | \
				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)
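
/*
 * Any bit in INT_ERR_STS marks the current transfer as failed; the
 * interrupt handler records such an event in host->sd_error.
 */
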
#define MASK_ALL		0x00000000
#define MASK_MCCSDE		(1 << 29)
#define MASK_MCMD12DRE		(1 << 26)
#define MASK_MCMD12RBE		(1 << 25)
#define MASK_MCMD12CRE		(1 << 24)
#define MASK_MDTRANE		(1 << 23)
#define MASK_MBUFRE		(1 << 22)
#define MASK_MBUFWEN		(1 << 21)
#define MASK_MBUFREN		(1 << 20)
#define MASK_MCCSRCV		(1 << 19)
#define MASK_MRBSYE		(1 << 17)
#define MASK_MCRSPE		(1 << 16)
#define MASK_MCMDVIO		(1 << 15)
#define MASK_MBUFVIO		(1 << 14)
#define MASK_MWDATERR		(1 << 11)
#define MASK_MRDATERR		(1 << 10)
#define MASK_MRIDXERR		(1 << 9)
#define MASK_MRSPERR		(1 << 8)
#define MASK_MCCSTO		(1 << 5)
#define MASK_MCRCSTO		(1 << 4)
#define MASK_MWDATTO		(1 << 3)
#define MASK_MRDATTO		(1 << 2)
#define MASK_MRBSYTO		(1 << 1)
#define MASK_MRSPTO		(1 << 0)
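
/*
 * A set bit in CE_INT_MASK enables the matching CE_INT source, so
 * writing MASK_ALL (all zeroes) disables every interrupt.
 */
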
#define STS1_CMDSEQ		(1 << 31)

#define STS2_CRCSTE		(1 << 31)
#define STS2_CRC16E		(1 << 30)
#define STS2_AC12CRCE		(1 << 29)
#define STS2_RSPCRC7E		(1 << 28)
#define STS2_CRCSTEBE		(1 << 27)
#define STS2_RDATEBE		(1 << 26)
#define STS2_AC12REBE		(1 << 25)
#define STS2_RSPEBE		(1 << 24)
#define STS2_AC12IDXE		(1 << 23)
#define STS2_RSPIDXE		(1 << 22)
#define STS2_CCSTO		(1 << 15)
#define STS2_RDATTO		(1 << 14)
#define STS2_DATBSYTO		(1 << 13)
#define STS2_CRCSTTO		(1 << 12)
#define STS2_AC12BSYTO		(1 << 11)
#define STS2_RSPBSYTO		(1 << 10)
#define STS2_AC12RSPTO		(1 << 9)
#define STS2_RSPTO		(1 << 8)
#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E | \
				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO | \
				 STS2_DATBSYTO | STS2_CRCSTTO | \
				 STS2_AC12BSYTO | STS2_RSPBSYTO | \
				 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA	52000000 /* 52MHz */
#define CLKDEV_MMC_DATA		20000000 /* 20MHz */
#define CLKDEV_INIT		400000   /* 400kHz */
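
/*
 * Nominal bus clock targets: 52MHz for high-speed eMMC data, 20MHz for
 * normal MMC data, and 400kHz while the card is being identified.
 */
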
struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_data *data;
	struct platform_device *pd;
	struct clk *hclk;
	unsigned int clk;
	int bus_width;
	bool sd_error;
	long timeout;
	void __iomem *addr;
	struct completion intr_wait;
	enum mmcif_state state;
	spinlock_t lock;

	/* DMA support */
	struct dma_chan *chan_rx;
	struct dma_chan *chan_tx;
	struct completion dma_complete;
	bool dma_active;
};

static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}

static void mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;

	dev_dbg(&host->pd->dev, "DMA transfer completed\n");

	if (WARN(!host->data, "%s: NULL data in DMA completion!\n",
		 dev_name(&host->pd->dev)))
		return;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->data->sg, host->data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->data->sg, host->data->sg_len,
			     DMA_TO_DEVICE);

	complete(&host->dma_complete);
}
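
/*
 * Set up a DMA read: map the scatterlist, build a slave descriptor and
 * submit it; on any failure both channels are released and the driver
 * falls back to PIO.
 */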
static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct scatterlist *sg = host->data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, host->data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct scatterlist *sg = host->data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, host->data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);

	chan->private = arg;
	return true;
}

static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
				 struct sh_mmcif_plat_data *pdata)
{
	host->dma_active = false;

	/* Either use DMA for both Tx and Rx, or don't use it at all */
	if (pdata->dma) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, sh_mmcif_filter,
						    &pdata->dma->chan_priv_tx);
		dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, sh_mmcif_filter,
						    &pdata->dma->chan_priv_rx);
		dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx) {
			dma_release_channel(host->chan_tx);
			host->chan_tx = NULL;
			return;
		}

		init_completion(&host->dma_complete);
	}
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);

	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}

static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;
	if (p->sup_pclk && clk == host->clk)
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
	else
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
				(ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16));
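
	/*
	 * The divider field (CLK_CLEAR, bits [19:16]) receives the log2 of
	 * the HCLK/target ratio, rounded down to a power of two. Worked
	 * example (rates assumed for illustration): hclk = 100MHz,
	 * clk = 20MHz -> ratio 5 -> rounded down to 4 -> field = ilog2(4) = 2.
	 */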

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
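
	/*
	 * Pulsing SOFT_RST via CE_VERSION resets the interface; the saved
	 * CLK_CTRL bits and the default timeout values are restored below.
	 */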
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
			SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	u32 state1, state2;
	int ret, timeout = 10000000;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		while (1) {
			timeout--;
			if (timeout < 0) {
				dev_err(&host->pd->dev,
					"Forced end of command sequence: timeout error\n");
				return -EIO;
			}
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
							      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_dbg(&host->pd->dev, "CRC error happened\n");
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_dbg(&host->pd->dev, "timeout error happened\n");
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(&host->pd->dev, "end/index error happened\n");
		ret = -EIO;
	}
	return ret;
}

static int sh_mmcif_single_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 blocksize, i, *p = sg_virt(data->sg);

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error)
		return sh_mmcif_error_manage(host);

	blocksize = (BLOCK_SIZE_MASK &
		     sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
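	/* +3 rounds the byte count up to a whole number of 32-bit FIFO reads */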
	for (i = 0; i < blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error)
		return sh_mmcif_error_manage(host);

	return 0;
}

static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
			       struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 blocksize, i, j, sec, *p;

	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
						     MMCIF_CE_BLOCK_SET);
	for (j = 0; j < data->sg_len; j++) {
		p = sg_virt(data->sg);
		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
			/* buf read enable */
			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
					host->timeout);

			if (time <= 0 || host->sd_error)
				return sh_mmcif_error_manage(host);

			for (i = 0; i < blocksize / 4; i++)
				*p++ = sh_mmcif_readl(host->addr,
						      MMCIF_CE_DATA);
		}
		if (j < data->sg_len - 1)
			data->sg++;
	}
	return 0;
}

static int sh_mmcif_single_write(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 blocksize, i, *p = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	/* buf write enable */
	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error)
		return sh_mmcif_error_manage(host);

	blocksize = (BLOCK_SIZE_MASK &
		     sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
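	/* as in the read path, +3 rounds the count up to whole 32-bit words */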
	for (i = 0; i < blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);

	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error)
		return sh_mmcif_error_manage(host);

	return 0;
}

static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 i, sec, j, blocksize, *p;

	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
						     MMCIF_CE_BLOCK_SET);

	for (j = 0; j < data->sg_len; j++) {
		p = sg_virt(data->sg);
		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
			/* buf write enable */
			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
					host->timeout);

			if (time <= 0 || host->sd_error)
				return sh_mmcif_error_manage(host);

			for (i = 0; i < blocksize / 4; i++)
				sh_mmcif_writel(host->addr,
						MMCIF_CE_DATA, *p++);
		}
		if (j < data->sg_len - 1)
			data->sg++;
	}
	return 0;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
				  struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
				       struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
{
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(&host->pd->dev, "Unsupported response type.\n");
		break;
	}
	switch (opc) {
	/* RBSY */
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
		tmp |= CMD_SET_RBSY;
		break;
	}
	/* WDAT / DATW */
	if (host->data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(&host->pd->dev, "Unsupported bus width.\n");
			break;
		}
	}
	/* DWEN */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				mrq->data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;
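
	/*
	 * The command index lands in bits [29:24] of CE_CMD_SET (cf.
	 * CMD_MASK); the response, data and check flags assembled above
	 * fill the lower bits.
	 */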
	return (opc << 24) | tmp;
}

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
			       struct mmc_request *mrq, u32 opc)
{
	int ret;

	switch (opc) {
	case MMC_READ_MULTIPLE_BLOCK:
		ret = sh_mmcif_multi_read(host, mrq);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		ret = sh_mmcif_multi_write(host, mrq);
		break;
	case MMC_WRITE_BLOCK:
		ret = sh_mmcif_single_write(host, mrq);
		break;
	case MMC_READ_SINGLE_BLOCK:
	case MMC_SEND_EXT_CSD:
		ret = sh_mmcif_single_read(host, mrq);
		break;
	default:
		dev_err(&host->pd->dev, "unsupported cmd = d'%08d\n", opc);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			       struct mmc_request *mrq, struct mmc_command *cmd)
{
	long time;
	int ret = 0, mask = 0;
	u32 opc = cmd->opcode;

	/* response busy check */
	switch (opc) {
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
		mask = MASK_MRBSYE;
		break;
	default:
		mask = MASK_MCRSPE;
		break;
	}
	mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
		MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
		MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
		MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;

	if (host->data) {
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);
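
	/*
	 * CE_INT status bits are cleared by writing 0: 0xD80430C0 keeps only
	 * what appear to be reserved bit positions set, so this one write
	 * clears every status flag used by the driver.
	 */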
	sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0) {
		cmd->error = sh_mmcif_error_manage(host);
		return;
	}
	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
		case MMC_APP_CMD:
			cmd->error = -ETIMEDOUT;
			break;
		default:
			dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n",
				cmd->opcode);
			cmd->error = sh_mmcif_error_manage(host);
			break;
		}
		host->sd_error = false;
		return;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return;
	}
	sh_mmcif_get_response(host, cmd);
	if (host->data) {
		if (!host->dma_active) {
			ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
		} else {
			time = wait_for_completion_interruptible_timeout(&host->dma_complete,
					host->timeout);
			if (!time)
				ret = -ETIMEDOUT;
			else if (time < 0)
				ret = time;
			sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
					BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
			host->dma_active = false;
		}
		if (ret < 0)
			mrq->data->bytes_xfered = 0;
		else
			mrq->data->bytes_xfered =
				mrq->data->blocks * mrq->data->blksz;
	}
	cmd->error = ret;
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
			      struct mmc_request *mrq, struct mmc_command *cmd)
{
	long time;

	if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
	else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
	else {
		dev_err(&host->pd->dev, "unsupported stop cmd\n");
		cmd->error = sh_mmcif_error_manage(host);
		return;
	}

	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error) {
		cmd->error = sh_mmcif_error_manage(host);
		return;
	}
	sh_mmcif_get_cmd12response(host, cmd);
	cmd->error = 0;
}

static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
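	/* only one request may be in flight at a time */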
	if (host->state != STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (mrq->cmd->opcode) {
	/* MMCIF does not support SD/SDIO commands */
	case SD_IO_SEND_OP_COND:
	case MMC_APP_CMD:
		host->state = STATE_IDLE;
		mrq->cmd->error = -ETIMEDOUT;
		mmc_request_done(mmc, mrq);
		return;
	case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
		if (!mrq->data) {
			/* SEND_IF_COND is not supported */
			host->state = STATE_IDLE;
			mrq->cmd->error = -ETIMEDOUT;
			mmc_request_done(mmc, mrq);
			return;
		}
		break;
	default:
		break;
	}

	host->data = mrq->data;
	if (mrq->data) {
		if (mrq->data->flags & MMC_DATA_READ) {
			if (host->chan_rx)
				sh_mmcif_start_dma_rx(host);
		} else {
			if (host->chan_tx)
				sh_mmcif_start_dma_tx(host);
		}
	}
	sh_mmcif_start_cmd(host, mrq, mrq->cmd);
	host->data = NULL;

	if (!mrq->cmd->error && mrq->stop)
		sh_mmcif_stop_cmd(host, mrq, mrq->stop);
	host->state = STATE_IDLE;
	mmc_request_done(mmc, mrq);
}

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	if (ios->power_mode == MMC_POWER_UP) {
		if (p->set_pwr)
			p->set_pwr(host->pd, ios->power_mode);
	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* clock stop */
		sh_mmcif_clock_control(host, 0);
		if (ios->power_mode == MMC_POWER_OFF && p->down_pwr)
			p->down_pwr(host->pd);
		host->state = STATE_IDLE;
		return;
	}

	if (ios->clock)
		sh_mmcif_clock_control(host, ios->clock);

	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}

static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

	if (!p->get_cd)
		return -ENOSYS;
	else
		return p->get_cd(host->pd);
}

static struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= sh_mmcif_get_cd,
};

static void sh_mmcif_detect(struct mmc_host *mmc)
{
	mmc_detect_change(mmc, 0);
}

static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	u32 state;
	int err = 0;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
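
	/*
	 * Each serviced source below is acknowledged by writing its CE_INT
	 * bits back as 0 (the ~-masks: write-0-to-clear) and is then
	 * disabled in CE_INT_MASK.
	 */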
	if (state & INT_RBSYE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_RBSYE | INT_CRSPE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
	} else if (state & INT_CRSPE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
	} else if (state & INT_BUFREN) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
	} else if (state & INT_BUFWEN) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
	} else if (state & INT_CMD12DRE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_CMD12DRE | INT_CMD12RBE |
				  INT_CMD12CRE | INT_BUFRE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
	} else if (state & INT_BUFRE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	} else if (state & INT_DTRANE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_DTRANE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	} else if (state & INT_CMD12RBE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_CMD12RBE | INT_CMD12CRE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
	} else if (state & INT_ERR_STS) {
		/* error interrupts */
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
		err = 1;
	} else {
		dev_dbg(&host->pd->dev, "unsupported interrupt\n");
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
		err = 1;
	}
	if (err) {
		host->sd_error = true;
		dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
	}
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
		complete(&host->intr_wait);
	else
		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);

	return IRQ_HANDLED;
}

static int __devinit sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct sh_mmcif_plat_data *pd;
	struct resource *res;
	void __iomem *reg;
	char clk_name[8];

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);
	if (irq[0] < 0 || irq[1] < 0) {
		dev_err(&pdev->dev, "Get irq error\n");
		return -ENXIO;
	}
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "platform_get_resource error.\n");
		return -ENXIO;
	}
	reg = ioremap(res->start, resource_size(res));
	if (!reg) {
		dev_err(&pdev->dev, "ioremap error.\n");
		return -ENOMEM;
	}
	pd = pdev->dev.platform_data;
	if (!pd) {
		dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
		ret = -ENXIO;
		goto clean_up;
	}
	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto clean_up;
	}
	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->addr = reg;
	host->timeout = 1000;

	snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
	host->hclk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(host->hclk)) {
		dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
		ret = PTR_ERR(host->hclk);
		goto clean_up1;
	}
	clk_enable(host->hclk);
	host->clk = clk_get_rate(host->hclk);
	host->pd = pdev;

	init_completion(&host->intr_wait);
	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	mmc->f_max = host->clk;
	/* close to 400kHz */
	if (mmc->f_max < 51200000)
		mmc->f_min = mmc->f_max / 128;
	else if (mmc->f_max < 102400000)
		mmc->f_min = mmc->f_max / 256;
	else
		mmc->f_min = mmc->f_max / 512;
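	/*
	 * f_min is f_max over a power of two so the clock divider can always
	 * reach it; the ratios above keep f_min close to the 400kHz
	 * identification clock for typical parent clock rates.
	 */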

	mmc->ocr_avail = pd->ocr;
	mmc->caps = MMC_CAP_MMC_HIGHSPEED;
	if (pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	sh_mmcif_sync_reset(host);
	platform_set_drvdata(pdev, host);

	/* See if we also get DMA */
	sh_mmcif_request_dma(host, pd);

	mmc_add_host(mmc);

	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
		goto clean_up2;
	}
	ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
	if (ret) {
		free_irq(irq[0], host);
		dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
		goto clean_up2;
	}

	sh_mmcif_detect(host->mmc);

	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
	dev_dbg(&pdev->dev, "chip ver H'%04x\n",
		sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
	return ret;

clean_up2:
	clk_disable(host->hclk);
clean_up1:
	mmc_free_host(mmc);
clean_up:
	if (reg)
		iounmap(reg);
	return ret;
}

static int __devexit sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
	int irq[2];

	mmc_remove_host(host->mmc);
	sh_mmcif_release_dma(host);

	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	if (host->addr)
		iounmap(host->addr);

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);

	free_irq(irq[0], host);
	free_irq(irq[1], host);

	platform_set_drvdata(pdev, NULL);

	clk_disable(host->hclk);
	mmc_free_host(host->mmc);

	return 0;
}

static struct platform_driver sh_mmcif_driver = {
	.probe		= sh_mmcif_probe,
	.remove		= sh_mmcif_remove,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};

static int __init sh_mmcif_init(void)
{
	return platform_driver_register(&sh_mmcif_driver);
}

static void __exit sh_mmcif_exit(void)
{
	platform_driver_unregister(&sh_mmcif_driver);
}

module_init(sh_mmcif_init);
module_exit(sh_mmcif_exit);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");