/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"
static struct workqueue_struct *workqueue;

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
int use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume.  Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
int mmc_assume_removable;
#else
int mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}

/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries) {
		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			mmc_hostname(host), cmd->opcode, err);

		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	} else {
		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		mmc_host_clk_gate(host);
	}
}

EXPORT_SYMBOL(mmc_request_done);

static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	mmc_host_clk_ungate(host);
	led_trigger_event(host->led, LED_FULL);
	host->ops->request(host, mrq);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;
	mmc_start_request(host, mrq);
}

static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	wait_for_completion(&mrq->completion);
}

/**
 *	mmc_pre_req - Prepare for a new request
 *	@host: MMC host to prepare command
 *	@mrq: MMC request to prepare for
 *	@is_first_req: true if there is no previous started request
 *                     that may run in parallel to this call, otherwise false
 *
 *	mmc_pre_req() is called prior to mmc_start_req() to let the
 *	host prepare for the new request. Preparation of a request may be
 *	performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
			bool is_first_req)
{
	if (host->ops->pre_req)
		host->ops->pre_req(host, mrq, is_first_req);
}

/**
 *	mmc_post_req - Post process a completed request
 *	@host: MMC host to post process command
 *	@mrq: MMC request to post process for
 *	@err: Error, if non zero, clean up any resources made in pre_req
 *
 *	Let the host post process a completed request. Post processing of
 *	a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req)
		host->ops->post_req(host, mrq, err);
}

/**
 *	mmc_start_req - start a non-blocking request
 *	@host: MMC host to start command
 *	@areq: async request to start
 *	@error: out parameter returns 0 for success, otherwise non zero
 *
 *	Start a new MMC custom command request for a host.
 *	If there is an ongoing async request, wait for completion
 *	of that request before starting the new one, then return.
 *	Does not wait for the new request to complete.
 *
 *	Returns the completed request, NULL in case of none completed.
 *	Wait for an ongoing request (previously started) to complete and
 *	return the completed request. If there is no ongoing request, NULL
 *	is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	if (host->areq) {
		mmc_wait_for_req_done(host, host->areq->mrq);
		err = host->areq->err_check(host->card, host->areq);
		if (err) {
			mmc_post_req(host, host->areq->mrq, 0);
			if (areq)
				mmc_post_req(host, areq->mrq, -EINVAL);

			host->areq = NULL;
			goto out;
		}
	}

	if (areq)
		__mmc_start_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	host->areq = areq;
 out:
	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);

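/*
 * Illustrative sketch (editor's addition, not part of this file): how a
 * block driver can pipeline transfers with mmc_start_req().  While the
 * controller works on one request, the next one is prepared (e.g. DMA
 * mapped) via the host's ->pre_req() hook.  my_issue() is hypothetical.
 */
#if 0
static struct mmc_async_req *my_issue(struct mmc_host *host,
				      struct mmc_async_req *next)
{
	int err;

	/*
	 * Hand the prepared 'next' request to the core.  The return
	 * value is the request started on the *previous* call, now
	 * complete (NULL on the first call); err holds its status.
	 */
	return mmc_start_req(host, next, &err);
}
#endif
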
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. Does not attempt to parse the
 *	response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);

/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete.  Return any error that occurred while the command
 *	was executing.  Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {0};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);

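/*
 * Illustrative sketch (editor's addition, not part of this file):
 * issuing a bare command with mmc_wait_for_cmd() on a non-SPI host.
 * This mirrors how the core itself queries card status; the caller
 * must already have claimed the host.  my_send_status() is hypothetical.
 */
#if 0
static int my_send_status(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 3);
	if (!err)
		*status = cmd.resp[0];
	return err;
}
#endif
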
/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The limit is really 250 ms, but that is
			 * insufficient for some crappy cards.
			 */
			limit_us = 300000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);

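/*
 * Worked example (editor's illustration, assuming typical CSD values):
 * an SD card with tacc_ns = 1500000 (1.5 ms) and r2w_factor = 2 gets,
 * for a write, mult = 100 << 2 = 400, so timeout_ns = 600000000
 * (600 ms).  That exceeds the 300 ms write limit_us above, so the
 * timeout is clamped to 300 ms with timeout_clks = 0.
 */
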
/**
 *	mmc_align_data_size - pads a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer
 *	@sz: original transfer size
 *
 *	Pads the original data size with a number of extra bytes in
 *	order to avoid controller bugs and/or performance hits
 *	(e.g. some controllers revert to PIO for certain sizes).
 *
 *	Returns the improved size, which might be unmodified.
 *
 *	Note that this function is only relevant when issuing a
 *	single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);

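/*
 * Example (editor's illustration): sz = 13 becomes ((13 + 3) / 4) * 4
 * = 16, while an already aligned sz = 16 is returned unchanged.
 */
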
/**
 *	mmc_host_enable - enable a host.
 *	@host: mmc host to enable
 *
 *	Hosts that support power saving can use the 'enable' and 'disable'
 *	methods to exit and enter power saving states. For more information
 *	see comments for struct mmc_host_ops.
 */
int mmc_host_enable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (host->nesting_cnt++)
		return 0;

	cancel_delayed_work_sync(&host->disable);

	if (host->enabled)
		return 0;

	if (host->ops->enable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->enable(host);
		host->en_dis_recurs = 0;

		if (err) {
			pr_debug("%s: enable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
	}
	host->enabled = 1;
	return 0;
}
EXPORT_SYMBOL(mmc_host_enable);

static int mmc_host_do_disable(struct mmc_host *host, int lazy)
{
	if (host->ops->disable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->disable(host, lazy);
		host->en_dis_recurs = 0;

		if (err < 0) {
			pr_debug("%s: disable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
		if (err > 0) {
			unsigned long delay = msecs_to_jiffies(err);

			mmc_schedule_delayed_work(&host->disable, delay);
		}
	}
	host->enabled = 0;
	return 0;
}

/**
 *	mmc_host_disable - disable a host.
 *	@host: mmc host to disable
 *
 *	Hosts that support power saving can use the 'enable' and 'disable'
 *	methods to exit and enter power saving states. For more information
 *	see comments for struct mmc_host_ops.
 */
int mmc_host_disable(struct mmc_host *host)
{
	int err;

	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (--host->nesting_cnt)
		return 0;

	if (!host->enabled)
		return 0;

	err = mmc_host_do_disable(host, 0);
	return err;
}
EXPORT_SYMBOL(mmc_host_disable);

/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non-NULL and
 *	dereferences to a non-zero value then this will return prematurely
 *	with that non-zero value without acquiring the lock.  Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (!stop)
		mmc_host_enable(host);
	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);

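/*
 * Illustrative sketch (editor's addition, not part of this file):
 * typical claim/release bracketing around card commands, here via the
 * mmc_claim_host() wrapper (which passes a NULL abort pointer to
 * __mmc_claim_host()).  my_query_card() is hypothetical.
 */
#if 0
static int my_query_card(struct mmc_card *card)
{
	int err;

	mmc_claim_host(card->host);	/* exclusive access, may sleep */
	err = mmc_set_blocklen(card, 512);
	mmc_release_host(card->host);
	return err;
}
#endif
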
/**
 *	mmc_try_claim_host - try exclusively to claim a host
 *	@host: mmc host to claim
 *
 *	Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);

/**
 *	mmc_do_release_host - release a claimed host
 *	@host: mmc host to release
 *
 *	If you successfully claimed a host, this function will
 *	release it again.
 */
void mmc_do_release_host(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}
EXPORT_SYMBOL(mmc_do_release_host);

void mmc_host_deeper_disable(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, disable.work);

	/* If the host is claimed then we do not want to disable it anymore */
	if (!mmc_try_claim_host(host))
		return;
	mmc_host_do_disable(host, 1);
	mmc_do_release_host(host);
}

/**
 *	mmc_host_lazy_disable - lazily disable a host.
 *	@host: mmc host to disable
 *
 *	Hosts that support power saving can use the 'enable' and 'disable'
 *	methods to exit and enter power saving states. For more information
 *	see comments for struct mmc_host_ops.
 */
int mmc_host_lazy_disable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (--host->nesting_cnt)
		return 0;

	if (!host->enabled)
		return 0;

	if (host->disable_delay) {
		mmc_schedule_delayed_work(&host->disable,
				msecs_to_jiffies(host->disable_delay));
		return 0;
	} else
		return mmc_host_do_disable(host, 1);
}
EXPORT_SYMBOL(mmc_host_lazy_disable);

/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release a MMC host, allowing others to claim the host
 *	for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	WARN_ON(!host->claimed);

	mmc_host_lazy_disable(host);

	mmc_do_release_host(host);
}

EXPORT_SYMBOL(mmc_release_host);

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
 */
void mmc_gate_clock(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_old = host->ios.clock;
	host->ios.clock = 0;
	host->clk_gated = true;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_set_ios(host);
}

/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
	/*
	 * We should previously have gated the clock, so the clock shall
	 * be 0 here! The clock may however be 0 during initialization,
	 * when some request operations are performed before setting
	 * the frequency. When ungate is requested in that situation
	 * we just ignore the call.
	 */
	if (host->clk_old) {
		BUG_ON(host->ios.clock);
		/* This call will also set host->clk_gated to false */
		mmc_set_clock(host, host->clk_old);
	}
}

void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;

	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd:	voltage (mV)
 * @low_bits:	prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min:	minimum voltage value (mV)
 * @vdd_max:	maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);

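/*
 * Worked example (editor's illustration): for [3300:3400] mV,
 * mmc_vdd_to_ocrbitnum(3400, false) yields bit 22 (MMC_VDD_34_35) and
 * mmc_vdd_to_ocrbitnum(3300, true) yields bit 20 (MMC_VDD_32_33), so
 * the loop sets bits 20-22: MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35, matching the kernel-doc note above.
 */
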
#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int vdd_uV;
		int vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		int tmp;
		int voltage;

		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/* avoid needless changes to this voltage; the regulator
		 * might not allow this operation
		 */
		voltage = regulator_get_voltage(supply);
		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);

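/*
 * Illustrative sketch (editor's addition, not part of this file): a
 * host driver's set_ios() hook driving its vmmc supply through
 * mmc_regulator_set_ocr().  'struct my_host' and my_set_ios() are
 * hypothetical.
 */
#if 0
struct my_host {
	struct regulator *vmmc;
};

static void my_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct my_host *host = mmc_priv(mmc);

	/* ios->vdd is the OCR bit number; zero powers the card off */
	if (host->vmmc)
		mmc_regulator_set_ocr(mmc, host->vmmc, ios->vdd);
}
#endif
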
#endif /* CONFIG_REGULATOR */

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		ocr &= 3 << bit;

		host->ios.vdd = bit;
		mmc_set_ios(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
				mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}

int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
{
	struct mmc_command cmd = {0};
	int err = 0;

	BUG_ON(!host);

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
		cmd.opcode = SD_SWITCH_VOLTAGE;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			return err;

		if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
			return -EIO;
	}

	host->ios.signal_voltage = signal_voltage;

	if (host->ops->start_signal_voltage_switch)
		err = host->ops->start_signal_voltage_switch(host, &host->ios);

	return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
}

/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);
}

static void mmc_power_off(struct mmc_host *host)
{
	host->ios.clock = 0;
	host->ios.vdd = 0;

	/*
	 * Reset ocr mask to be the highest possible voltage supported for
	 * this mmc host. This value will be used at next power up.
	 */
	host->ocr = 1 << (fls(host->ocr_avail) - 1);

	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host. Assumes that there are
 * no interesting cards left, so the bus is powered down.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_power_off(host);

	mmc_bus_put(host);
}

/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);

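/*
 * Illustrative sketch (editor's addition, not part of this file): a
 * host driver's card-detect interrupt handler typically just debounces
 * and defers to mmc_detect_change().  The handler name and the 200 ms
 * debounce value are hypothetical.
 */
#if 0
static irqreturn_t my_cd_irq(int irq, void *dev_id)
{
	struct mmc_host *mmc = dev_id;

	/* debounce the slot switch before rescanning the bus */
	mmc_detect_change(mmc, msecs_to_jiffies(200));
	return IRQ_HANDLED;
}
#endif
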
void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly hugely over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.  For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value.  For modern cards it
	 * will end up being 4MiB.  Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}

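/*
 * Worked example (editor's illustration): a 4 GiB card that reports
 * neither an SD Allocation Unit nor an MMC High Capacity Erase Size
 * yields sz = 4096 (MiB) above, so pref_erase becomes 4 MiB expressed
 * in 512-byte sectors (8192), rounded up to a multiple of erase_size
 * if necessary.
 */
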
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (mmc_host_clk_rate(card->host) / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

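/*
 * Worked example (editor's illustration): erasing 16 allocation units
 * on an SD card with ssr.erase_timeout = 250 ms and erase_offset =
 * 1000 ms gives 250 * 16 + 1000 = 5000 ms, while a card without SSR
 * timing data erasing 4 write blocks gets max(250 * 4, 1000) = 1000 ms.
 */
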
static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}

static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2.  For MMC, the
	 * erase group size is almost certainly also power of 2, but it does not
	 * seem to insist on that in the JEDEC standard, so we fall back to
	 * division in that case.  SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands.  Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			printk(KERN_ERR "error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
out:
	return err;
}

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);

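/*
 * Illustrative sketch (editor's addition, not part of this file):
 * discarding a sector range, preferring TRIM when the card supports
 * it.  Alignment only matters for secure erase; plain MMC_ERASE_ARG
 * requests are rounded to erase-group boundaries by mmc_erase()
 * itself.  my_discard() is hypothetical.
 */
#if 0
static int my_discard(struct mmc_card *card, unsigned int from,
		      unsigned int nr)
{
	unsigned int arg = mmc_can_trim(card) ? MMC_TRIM_ARG :
						MMC_ERASE_ARG;
	int err;

	mmc_claim_host(card->host);
	err = mmc_erase(card, from, nr, arg);
	mmc_release_host(card->host);
	return err;
}
#endif
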
int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;

	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;

	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_discard_to)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	if (qty == 1)
		return 1;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;

	return max_discard;
}

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_discard_to)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency which can change.  In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_discard_to);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);

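/*
 * Illustrative sketch (editor's addition, not part of this file): block
 * layer glue typically feeds this value into the request queue's
 * discard limits.  A minimal version of that wiring, with a
 * hypothetical function name and assuming <linux/blkdev.h>:
 */
#if 0
static void my_setup_discard(struct request_queue *q,
			     struct mmc_card *card)
{
	unsigned int max_discard = mmc_calc_max_discard(card);

	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0)
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
}
#endif
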
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host);

	/*
	 * sdio_reset sends CMD52 to reset card.  Since we do not know
	 * if the card is being re-initialized, just send it.  CMD52
	 * should be ignored by SD/eMMC cards.
	 */
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!mmc_attach_sdio(host))
		return 0;
	if (!mmc_attach_sd(host))
		return 0;
	if (!mmc_attach_mmc(host))
		return 0;

	mmc_power_off(host);
	return -EIO;
}

void mmc_rescan(struct work_struct *work)
{
	static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);
	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

 out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
	mmc_power_off(host);
	mmc_detect_change(host, 0);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work_sync(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

int mmc_card_awake(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
		err = host->bus_ops->awake(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
		err = host->bus_ops->sleep(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_sleep);

int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);

#ifdef CONFIG_PM

/**
 *	mmc_suspend_host - suspend a host
 *	@host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->suspend)
			err = host->bus_ops->suspend(host);
		if (err == -ENOSYS || !host->bus_ops->resume) {
			/*
			 * We simply "remove" the card in this case.
			 * It will be redetected on resume.
			 */
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);
			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_release_host(host);
			host->pm_flags = 0;
			err = 0;
		}
	}
	mmc_bus_put(host);

	if (!err && !mmc_card_keep_power(host))
		mmc_power_off(host);

	return err;
}

EXPORT_SYMBOL(mmc_suspend_host);

/**
 *	mmc_resume_host - resume a previously suspended host
 *	@host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (!mmc_card_keep_power(host)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
			/*
			 * Tell runtime PM core we just powered up the card,
			 * since it still believes the card is powered off.
			 * Note that currently runtime PM is only enabled
			 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
			 */
			if (mmc_card_sdio(host->card) &&
			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
				pm_runtime_disable(&host->card->dev);
				pm_runtime_set_active(&host->card->dev);
				pm_runtime_enable(&host->card->dev);
			}
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			printk(KERN_WARNING "%s: error %d during resume "
					    "(card was removed?)\n",
					    mmc_hostname(host), err);
			err = 0;
		}
	}
	host->pm_flags &= ~MMC_PM_KEEP_POWER;
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);

/*
 * Do the card removal on suspend if card is assumed removable.
 * Do that in pm notifier while userspace isn't yet frozen, so we
 * will be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		mmc_claim_host(host);

		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_detach_bus(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);

	}

	return 0;
}
#endif

static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");