/*
 * linux/drivers/mmc/core/mmc.c
 *
 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/stat.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "bus.h"
#include "mmc_ops.h"
#include "sd_ops.h"
static const unsigned int tran_exp[] = {
	10000,		100000,		1000000,	10000000,
	0,		0,		0,		0
};

static const unsigned char tran_mant[] = {
	0,	10,	12,	13,	15,	20,	25,	30,
	35,	40,	45,	50,	55,	60,	70,	80,
};

static const unsigned int tacc_exp[] = {
	1,	10,	100,	1000,	10000,	100000,	1000000, 10000000,
};

static const unsigned int tacc_mant[] = {
	0,	10,	12,	13,	15,	20,	25,	30,
	35,	40,	45,	50,	55,	60,	70,	80,
};
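/*
 * TRAN_SPEED and TAAC are encoded in the CSD as a 3-bit exponent (an index
 * into the *_exp tables) and a 4-bit mantissa (an index into the *_mant
 * tables).  The mantissa tables are pre-multiplied by 10, so tran_exp[]
 * holds the rate unit divided by 10, and the TAAC decode in
 * mmc_decode_csd() divides the product by 10 (rounding up) to get back to
 * nanoseconds.
 */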
#define UNSTUFF_BITS(resp,start,size)					\
	({								\
		const int __size = size;				\
		const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1;	\
		const int __off = 3 - ((start) / 32);			\
		const int __shft = (start) & 31;			\
		u32 __res;						\
									\
		__res = resp[__off] >> __shft;				\
		if (__size + __shft > 32)				\
			__res |= resp[__off-1] << ((32 - __shft) % 32);	\
		__res & __mask;						\
	})
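/*
 * Worked example: UNSTUFF_BITS(resp, 120, 8) extracts bits [127:120] of a
 * 128-bit response.  __off = 3 - 120/32 = 0 and __shft = 24, so the value
 * is the top byte of resp[0]; the response words are stored with the most
 * significant word first.
 */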
/*
 * Given the decoded CSD structure, decode the raw CID to our CID structure.
 */
static int mmc_decode_cid(struct mmc_card *card)
{
	u32 *resp = card->raw_cid;

	/*
	 * The selection of the format here is based upon published
	 * specs from sandisk and from what people have reported.
	 */
	switch (card->csd.mmca_vsn) {
	case 0: /* MMC v1.0 - v1.2 */
	case 1: /* MMC v1.4 */
		card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
		card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
		card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
		card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
		card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
		card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
		card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
		card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
		card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
		card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
		card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
		card->cid.month = UNSTUFF_BITS(resp, 12, 4);
		card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
		break;

	case 2: /* MMC v2.0 - v2.2 */
	case 3: /* MMC v3.1 - v3.3 */
	case 4: /* MMC v4 */
		card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
		card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
		card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
		card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
		card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
		card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
		card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
		card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
		card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
		card->cid.month = UNSTUFF_BITS(resp, 12, 4);
		card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
		break;

	default:
		pr_err("%s: card has unknown MMCA version %d\n",
			mmc_hostname(card->host), card->csd.mmca_vsn);
		return -EINVAL;
	}

	return 0;
}
static void mmc_set_erase_size(struct mmc_card *card)
{
	if (card->ext_csd.erase_group_def & 1)
		card->erase_size = card->ext_csd.hc_erase_size;
	else
		card->erase_size = card->csd.erase_size;

	mmc_init_erase(card);
}
/*
 * Given a 128-bit response, decode to our card CSD structure.
 */
static int mmc_decode_csd(struct mmc_card *card)
{
	struct mmc_csd *csd = &card->csd;
	unsigned int e, m, a, b;
	u32 *resp = card->raw_csd;

	/*
	 * We only understand CSD structure v1.1 and v1.2.
	 * v1.2 has extra information in bits 15, 11 and 10.
	 * We also support eMMC v4.4 & v4.41.
	 */
	csd->structure = UNSTUFF_BITS(resp, 126, 2);
	if (csd->structure == 0) {
		pr_err("%s: unrecognised CSD structure version %d\n",
			mmc_hostname(card->host), csd->structure);
		return -EINVAL;
	}

	csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
	m = UNSTUFF_BITS(resp, 115, 4);
	e = UNSTUFF_BITS(resp, 112, 3);
	csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
	csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;

	m = UNSTUFF_BITS(resp, 99, 4);
	e = UNSTUFF_BITS(resp, 96, 3);
	csd->max_dtr = tran_exp[e] * tran_mant[m];
	csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);

	e = UNSTUFF_BITS(resp, 47, 3);
	m = UNSTUFF_BITS(resp, 62, 12);
	csd->capacity = (1 + m) << (e + 2);

	csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
	csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
	csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
	csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
	csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
	csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
	csd->write_partial = UNSTUFF_BITS(resp, 21, 1);

	if (csd->write_blkbits >= 9) {
		a = UNSTUFF_BITS(resp, 42, 5);
		b = UNSTUFF_BITS(resp, 37, 5);
		csd->erase_size = (a + 1) * (b + 1);
		csd->erase_size <<= csd->write_blkbits - 9;
	}

	return 0;
}
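/*
 * Worked example of the capacity decode above: a high-capacity eMMC sets
 * C_SIZE = 0xFFF and C_SIZE_MULT = 0x7, so csd->capacity becomes
 * (1 + 4095) << (7 + 2) = 4096 * 512 blocks.  mmc_get_ext_csd() below uses
 * exactly this "magic" value to spot a card whose real size must instead
 * be taken from SEC_COUNT in the EXT_CSD.
 */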
static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);
	BUG_ON(!new_ext_csd);

	*new_ext_csd = NULL;

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		pr_err("%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		kfree(ext_csd);

		/* If the host or the card can't do the switch,
		 * fail more gracefully. */
		if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
			return err;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			pr_err("%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			pr_warning("%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = 0;
		}
	} else
		*new_ext_csd = ext_csd;

	return err;
}
/*
 * Decode extended CSD.
 */
static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	int err = 0, idx;
	unsigned int part_size;
	u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0;

	BUG_ON(!card);

	if (!ext_csd)
		return 0;

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
	if (card->csd.structure == 3) {
		if (card->ext_csd.raw_ext_csd_structure > 2) {
			pr_err("%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
				card->ext_csd.raw_ext_csd_structure);
			err = -EINVAL;
			goto out;
		}
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	if (card->ext_csd.rev > 6) {
		pr_err("%s: unrecognised EXT_CSD revision %d\n",
			mmc_hostname(card->host), card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		/* Cards with density > 2GiB are sector addressed */
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
			mmc_card_set_blockaddr(card);
	}
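	/*
	 * Example of the density check above: a 4 GiB device reports
	 * SEC_COUNT = 8388608 sectors, which exceeds the 4194304-sector
	 * (2 GiB) threshold, so it is switched to sector (block) addressing.
	 */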
	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
	switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
	case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
	     EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
		break;
	case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
	     EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
		break;
	case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
	     EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
		break;
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		break;
	case EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 26000000;
		break;
	default:
		/* MMC v4 spec says this cannot happen */
		pr_warning("%s: card is mmc v4 but doesn't "
			"support any high-speed modes.\n",
			mmc_hostname(card->host));
	}
	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
	card->ext_csd.raw_erase_timeout_mult =
		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
	card->ext_csd.raw_hc_erase_grp_size =
		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];

		/* EXT_CSD value is in units of 10ms, but we store in ms */
		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
				1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		/*
		 * There are two boot regions of equal size, defined in
		 * multiples of 128K.
		 */
		if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
			for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
				part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
				mmc_part_add(card, part_size,
					EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
					"boot%d", idx, true);
			}
		}
	}
	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	if (card->ext_csd.rev >= 4) {
		/*
		 * Enhanced area feature support -- check whether the eMMC
		 * card has the Enhanced area enabled.  If so, export enhanced
		 * area offset and size to user by adding sysfs interface.
		 */
		card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
		if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
		    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
			hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			card->ext_csd.enhanced_area_en = 1;
			/*
			 * calculate the enhanced data area offset, in bytes
			 */
			card->ext_csd.enhanced_area_offset =
				(ext_csd[139] << 24) + (ext_csd[138] << 16) +
				(ext_csd[137] << 8) + ext_csd[136];
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			/*
			 * calculate the enhanced data area size, in kilobytes
			 */
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
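			/*
			 * Units above: ENH_SIZE_MULT (bytes 140-142) counts
			 * blocks of hc_erase_grp_sz * hc_wp_grp_sz 512 KiB
			 * units, so the final << 9 converts those 512 KiB
			 * units into KiB.
			 */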
		} else {
			/*
			 * If the enhanced area is not enabled, disable these
			 * device attributes.
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		}

		/*
		 * General purpose partition feature support --
		 * If ext_csd has the size of general purpose partitions,
		 * set size, part_cfg, partition name in mmc_part.
		 */
		if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
		    EXT_CSD_PART_SUPPORT_PART_EN) {
			if (card->ext_csd.enhanced_area_en != 1) {
				hc_erase_grp_sz =
					ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
				hc_wp_grp_sz =
					ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

				card->ext_csd.enhanced_area_en = 1;
			}

			for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
				if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
				    !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
				    !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
					continue;
				part_size =
					(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
					<< 16) +
					(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
					<< 8) +
					ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
				part_size *= (size_t)(hc_erase_grp_sz *
					hc_wp_grp_sz);
				mmc_part_add(card, part_size << 19,
					EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
					"gp%d", idx, false);
			}
		}
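		/*
		 * As with the boot areas, the GP partition size is
		 * accumulated in 512 KiB units (GP_SIZE_MULT x erase-group
		 * size x WP-group size), so the << 19 above converts it to
		 * bytes for mmc_part_add().
		 */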
		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];
	}
	if (card->ext_csd.rev >= 5) {
		/* check whether the eMMC card supports HPI */
		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
			card->ext_csd.hpi = 1;
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			/*
			 * Indicate the maximum timeout to close
			 * a command interrupted by HPI
			 */
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}

		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
	}

	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;
	/* eMMC v4.5 or later */
	if (card->ext_csd.rev >= 6) {
		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

		card->ext_csd.generic_cmd6_time = 10 *
			ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
		card->ext_csd.power_off_longtime = 10 *
			ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];

		card->ext_csd.cache_size =
			ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
			ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
			ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
			ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
	}

out:
	return err;
}
static inline void mmc_free_ext_csd(u8 *ext_csd)
{
	kfree(ext_csd);
}
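/*
 * Re-read the EXT_CSD after a bus width change and compare it with the
 * copy read in 1-bit mode.  This is the fallback sanity check used when
 * the host cannot run the dedicated bus width test (see mmc_init_card());
 * only read-only fields, which must not change, are compared.
 */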
static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
{
	u8 *bw_ext_csd;
	int err;

	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	err = mmc_get_ext_csd(card, &bw_ext_csd);

	if (err || bw_ext_csd == NULL) {
		if (bus_width != MMC_BUS_WIDTH_1)
			err = -EINVAL;
		goto out;
	}

	if (bus_width == MMC_BUS_WIDTH_1)
		goto out;

	/* only compare read only fields */
	err = !((card->ext_csd.raw_partition_support ==
			bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
		(card->ext_csd.raw_erased_mem_count ==
			bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
		(card->ext_csd.rev ==
			bw_ext_csd[EXT_CSD_REV]) &&
		(card->ext_csd.raw_ext_csd_structure ==
			bw_ext_csd[EXT_CSD_STRUCTURE]) &&
		(card->ext_csd.raw_card_type ==
			bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
		(card->ext_csd.raw_s_a_timeout ==
			bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
		(card->ext_csd.raw_hc_erase_gap_size ==
			bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		(card->ext_csd.raw_erase_timeout_mult ==
			bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
		(card->ext_csd.raw_hc_erase_grp_size ==
			bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		(card->ext_csd.raw_sec_trim_mult ==
			bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
		(card->ext_csd.raw_sec_erase_mult ==
			bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
		(card->ext_csd.raw_sec_feature_support ==
			bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
		(card->ext_csd.raw_trim_mult ==
			bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
		(card->ext_csd.raw_sectors[0] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
		(card->ext_csd.raw_sectors[1] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
		(card->ext_csd.raw_sectors[2] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
		(card->ext_csd.raw_sectors[3] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 3]));

	if (err)
		err = -EINVAL;

out:
	mmc_free_ext_csd(bw_ext_csd);

	return err;
}
MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
	card->raw_cid[2], card->raw_cid[3]);
MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
	card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
		card->ext_csd.enhanced_area_offset);
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
static struct attribute *mmc_std_attrs[] = {
	&dev_attr_cid.attr,
	&dev_attr_csd.attr,
	&dev_attr_date.attr,
	&dev_attr_erase_size.attr,
	&dev_attr_preferred_erase_size.attr,
	&dev_attr_fwrev.attr,
	&dev_attr_hwrev.attr,
	&dev_attr_manfid.attr,
	&dev_attr_name.attr,
	&dev_attr_oemid.attr,
	&dev_attr_serial.attr,
	&dev_attr_enhanced_area_offset.attr,
	&dev_attr_enhanced_area_size.attr,
	NULL,
};

static struct attribute_group mmc_std_attr_group = {
	.attrs = mmc_std_attrs,
};

static const struct attribute_group *mmc_attr_groups[] = {
	&mmc_std_attr_group,
	NULL,
};

static struct device_type mmc_type = {
	.groups = mmc_attr_groups,
};
/*
 * Select the PowerClass for the current bus width
 * If power class is defined for 4/8 bit bus in the
 * extended CSD register, select it by executing the
 * mmc_switch command.
 */
static int mmc_select_powerclass(struct mmc_card *card,
		unsigned int bus_width, u8 *ext_csd)
{
	int err = 0;
	unsigned int pwrclass_val;
	unsigned int index = 0;
	struct mmc_host *host;

	BUG_ON(!card);

	host = card->host;
	BUG_ON(!host);

	if (ext_csd == NULL)
		return 0;

	/* Power class selection is supported for versions >= 4.0 */
	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/* Power class values are defined only for 4/8 bit bus */
	if (bus_width == EXT_CSD_BUS_WIDTH_1)
		return 0;
	switch (1 << host->ios.vdd) {
	case MMC_VDD_165_195:
		if (host->ios.clock <= 26000000)
			index = EXT_CSD_PWR_CL_26_195;
		else if (host->ios.clock <= 52000000)
			index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				EXT_CSD_PWR_CL_52_195 :
				EXT_CSD_PWR_CL_DDR_52_195;
		else if (host->ios.clock <= 200000000)
			index = EXT_CSD_PWR_CL_200_195;
		break;
	case MMC_VDD_27_28:
	case MMC_VDD_28_29:
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
	case MMC_VDD_31_32:
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
	case MMC_VDD_34_35:
	case MMC_VDD_35_36:
		if (host->ios.clock <= 26000000)
			index = EXT_CSD_PWR_CL_26_360;
		else if (host->ios.clock <= 52000000)
			index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				EXT_CSD_PWR_CL_52_360 :
				EXT_CSD_PWR_CL_DDR_52_360;
		else if (host->ios.clock <= 200000000)
			index = EXT_CSD_PWR_CL_200_360;
		break;
	default:
		pr_warning("%s: Voltage range not supported "
			"for power class.\n", mmc_hostname(host));
		return -EINVAL;
	}
	pwrclass_val = ext_csd[index];

	if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
				EXT_CSD_PWR_CL_8BIT_SHIFT;
	else
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
				EXT_CSD_PWR_CL_4BIT_SHIFT;
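	/*
	 * Each power class byte in the EXT_CSD packs two 4-bit class codes:
	 * the high nibble applies to an 8-bit bus and the low nibble to a
	 * 4-bit bus, which is what the mask/shift pairs above select.
	 */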
	/* If the power class is different from the default value */
	if (pwrclass_val > 0) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_CLASS,
				 pwrclass_val,
				 card->ext_csd.generic_cmd6_time);
	}

	return err;
}
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err, ddr = 0;
	u32 cid[4];
	unsigned int max_dtr;
	u32 rocr;
	u8 *ext_csd = NULL;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting init */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 * mmc_go_idle is needed for eMMC that are asleep
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;
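	/*
	 * Bit 30 of the OCR argument advertises that the host supports
	 * sector-mode (high capacity) access; a card that uses sector
	 * addressing reflects this back in the OCR it returns, which is
	 * checked against rocr further down.
	 */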
	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}
	/*
	 * For native busses:  set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */
		err = mmc_get_ext_csd(card, &ext_csd);
		if (err)
			goto free_card;
		err = mmc_read_ext_csd(card, ext_csd);
		if (err)
			goto free_card;

		/* If doing byte addressing, check if required to do sector
		 * addressing.  Handle the case of <2GB cards needing sector
		 * addressing.  See section 8.1 JEDEC Standard JED84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);
	}
	/*
	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
	 * bit.  This bit will be lost every time after a reset or power off.
	 */
	if (card->ext_csd.enhanced_area_en) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1,
				 card->ext_csd.generic_cmd6_time);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable enhanced area off & sz
			 * will try to enable ERASE_GROUP_DEF
			 * during next time reinit
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * enable ERASE_GRP_DEF successfully.
			 * This will affect the erase size, so
			 * here need to reset erase size
			 */
			mmc_set_erase_size(card);
		}
	}
	/*
	 * Ensure eMMC user default partition is enabled
	 */
	if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
				 card->ext_csd.part_config,
				 card->ext_csd.part_time);
		if (err && err != -EBADMSG)
			goto free_card;
	}

	/*
	 * If the host supports the power_off_notify capability then
	 * set the notification byte in the ext_csd register of device
	 */
	if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
	    (card->poweroff_notify_state == MMC_NO_POWER_NOTIFICATION)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_OFF_NOTIFICATION,
				 EXT_CSD_POWER_ON,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;

		if (!err)
			card->poweroff_notify_state = MMC_POWERED_ON;
	}
	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HS_TIMING, 1,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			pr_warning("%s: switch to highspeed failed\n",
				mmc_hostname(card->host));
			err = 0;
		} else {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
	}

	/*
	 * Enable HPI feature (if supported)
	 */
	if (card->ext_csd.hpi) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HPI_MGMT, 1, 0);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warning("%s: Enabling HPI failed\n",
				mmc_hostname(card->host));
			err = 0;
		} else
			card->ext_csd.hpi_en = 1;
	}
	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);
	/*
	 * Indicate DDR mode (if supported).
	 */
	if (mmc_card_highspeed(card)) {
		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
			&& ((host->caps & (MMC_CAP_1_8V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
				ddr = MMC_1_8V_DDR_MODE;
		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
			&& ((host->caps & (MMC_CAP_1_2V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
				ddr = MMC_1_2V_DDR_MODE;
	}
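	/*
	 * DDR clocks data on both edges, so 52 MHz DDR on an 8-bit bus
	 * gives a theoretical ceiling of 52 * 2 * 8 / 8 = 104 MB/s,
	 * versus 52 MB/s for single data rate.
	 */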
	/*
	 * Activate wide bus and DDR (if supported).
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		static unsigned ext_csd_bits[][2] = {
			{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
			{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
			{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
		};
		static unsigned bus_widths[] = {
			MMC_BUS_WIDTH_8,
			MMC_BUS_WIDTH_4,
			MMC_BUS_WIDTH_1
		};
		unsigned idx, bus_width = 0;

		if (host->caps & MMC_CAP_8_BIT_DATA)
			idx = 0;
		else
			idx = 1;
		for (; idx < ARRAY_SIZE(bus_widths); idx++) {
			bus_width = bus_widths[idx];
			if (bus_width == MMC_BUS_WIDTH_1)
				ddr = 0; /* no DDR for 1-bit width */
			err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
						    ext_csd);
			if (err)
				pr_err("%s: power class selection to "
					"bus width %d failed\n",
					mmc_hostname(card->host),
					1 << bus_width);

			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][0],
					 card->ext_csd.generic_cmd6_time);
			if (!err) {
				mmc_set_bus_width(card->host, bus_width);

				/*
				 * If controller can't handle bus width test,
				 * compare ext_csd previously read in 1 bit mode
				 * against ext_csd at new bus width
				 */
				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
					err = mmc_compare_ext_csds(card,
						bus_width);
				else
					err = mmc_bus_test(card, bus_width);
				if (!err)
					break;
			}
		}
		if (!err && ddr) {
			err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
						    ext_csd);
			if (err)
				pr_err("%s: power class selection to "
					"bus width %d ddr %d failed\n",
					mmc_hostname(card->host),
					1 << bus_width, ddr);

			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][1],
					 card->ext_csd.generic_cmd6_time);
		}
		if (err) {
			pr_warning("%s: switch to bus width %d ddr %d "
				"failed\n", mmc_hostname(card->host),
				1 << bus_width, ddr);
			goto free_card;
		} else if (ddr) {
			/*
			 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
			 * signaling.
			 *
			 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
			 *
			 * 1.8V vccq at 3.3V core voltage (vcc) is not required
			 * in the JEDEC spec for DDR.
			 *
			 * Do not force change in vccq since we are obviously
			 * working and no change to vccq is needed.
			 *
			 * WARNING: eMMC rules are NOT the same as SD DDR
			 */
			if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
				err = mmc_set_signal_voltage(host,
					MMC_SIGNAL_VOLTAGE_120, 0);
				if (err)
					goto err;
			}
			mmc_card_set_ddr_mode(card);
			mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
			mmc_set_bus_width(card->host, bus_width);
		}
	}
	/*
	 * If cache size is higher than 0, this indicates
	 * the existence of cache and it can be turned on.
	 */
	if ((host->caps2 & MMC_CAP2_CACHE_CTRL) &&
	    card->ext_csd.cache_size > 0) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_CACHE_CTRL, 1, 0);
		if (err && err != -EBADMSG)
			goto free_card;

		/*
		 * Only if no error, cache is turned on successfully.
		 */
		card->ext_csd.cache_ctrl = err ? 0 : 1;
	}

	if (!oldcard)
		host->card = card;

	mmc_free_ext_csd(ext_csd);
	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:
	mmc_free_ext_csd(ext_csd);

	return err;
}
/*
 * Host is being removed. Free up the current card.
 */
static void mmc_remove(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_remove_card(host->card);
	host->card = NULL;
}
/*
 * Card detection callback from host.
 */
static void mmc_detect(struct mmc_host *host)
{
	int err;

	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_claim_host(host);

	/*
	 * Just check if our card has been removed.
	 */
	err = mmc_send_status(host->card, NULL);

	mmc_release_host(host);

	if (err) {
		mmc_remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
	}
}
/*
 * Suspend callback from host.
 */
static int mmc_suspend(struct mmc_host *host)
{
	int err = 0;

	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_claim_host(host);
	if (mmc_card_can_sleep(host))
		err = mmc_card_sleep(host);
	else if (!mmc_host_is_spi(host))
		mmc_deselect_cards(host);
	host->card->state &= ~MMC_STATE_HIGHSPEED;
	mmc_release_host(host);

	return err;
}
/*
 * Resume callback from host.
 *
 * This function tries to determine if the same card is still present
 * and, if so, restore all state to it.
 */
static int mmc_resume(struct mmc_host *host)
{
	int err;

	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_claim_host(host);
	err = mmc_init_card(host, host->ocr, host->card);
	mmc_release_host(host);

	return err;
}
static int mmc_power_restore(struct mmc_host *host)
{
	int ret;

	host->card->state &= ~MMC_STATE_HIGHSPEED;
	mmc_claim_host(host);
	ret = mmc_init_card(host, host->ocr, host->card);
	mmc_release_host(host);

	return ret;
}
static int mmc_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int err = -ENOSYS;

	if (card && card->ext_csd.rev >= 3) {
		err = mmc_card_sleepawake(host, 1);
		if (err < 0)
			pr_debug("%s: Error %d while putting card into sleep",
				 mmc_hostname(host), err);
	}

	return err;
}
static int mmc_awake(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int err = -ENOSYS;

	if (card && card->ext_csd.rev >= 3) {
		err = mmc_card_sleepawake(host, 0);
		if (err < 0)
			pr_debug("%s: Error %d while awaking sleeping card",
				 mmc_hostname(host), err);
	}

	return err;
}
static const struct mmc_bus_ops mmc_ops = {
	.awake = mmc_awake,
	.sleep = mmc_sleep,
	.remove = mmc_remove,
	.detect = mmc_detect,
	.suspend = NULL,
	.resume = NULL,
	.power_restore = mmc_power_restore,
};

static const struct mmc_bus_ops mmc_ops_unsafe = {
	.awake = mmc_awake,
	.sleep = mmc_sleep,
	.remove = mmc_remove,
	.detect = mmc_detect,
	.suspend = mmc_suspend,
	.resume = mmc_resume,
	.power_restore = mmc_power_restore,
};
static void mmc_attach_bus_ops(struct mmc_host *host)
{
	const struct mmc_bus_ops *bus_ops;

	if (!mmc_card_is_removable(host))
		bus_ops = &mmc_ops_unsafe;
	else
		bus_ops = &mmc_ops;
	mmc_attach_bus(host, bus_ops);
}
/*
 * Starting point for MMC card init.
 */
int mmc_attach_mmc(struct mmc_host *host)
{
	int err;
	u32 ocr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting attach */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);

	err = mmc_send_op_cond(host, 0, &ocr);
	if (err)
		return err;

	mmc_attach_bus_ops(host);
	if (host->ocr_avail_mmc)
		host->ocr_avail = host->ocr_avail_mmc;
	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_read_ocr(host, 1, &ocr);
		if (err)
			goto err;
	}

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		pr_warning("%s: card claims to support voltages "
			"below the defined range. These will be ignored.\n",
			mmc_hostname(host));
		ocr &= ~0x7F;
	}

	host->ocr = mmc_select_voltage(host, ocr);
	/*
	 * Can we support the voltage of the card?
	 */
	if (!host->ocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
	err = mmc_init_card(host, host->ocr, NULL);
	if (err)
		goto err;

	mmc_release_host(host);
	err = mmc_add_card(host->card);
	mmc_claim_host(host);
	if (err)
		goto remove_card;

	return 0;

remove_card:
	mmc_release_host(host);
	mmc_remove_card(host->card);
	mmc_claim_host(host);
	host->card = NULL;
err:
	mmc_detach_bus(host);

	pr_err("%s: error %d whilst initialising MMC card\n",
		mmc_hostname(host), err);

	return err;
}