/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author: Andrew Christian
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
#define REL_WRITES_SUPPORTED(card) (mmc_card_mmc((card)) &&	\
	(((card)->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||	\
	 ((card)->ext_csd.rel_sectors)))
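/*
 * Reliable writes come in two flavours gated by this macro: cards that
 * advertise EXT_CSD_WR_REL_PARAM_EN handle reliable writes of any length,
 * while legacy cards only guarantee windows of rel_sectors sectors. See
 * mmc_apply_rel_rw() below for how each case is constrained.
 */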
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;
/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
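/*
 * Example (illustrative numbers): with the usual CONFIG_MMC_BLOCK_MINORS
 * default of 8, max_devices becomes 256 / 8 = 32 cards, each getting one
 * minor for the whole device plus minors for its partitions.
 */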
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
        spinlock_t      lock;
        struct gendisk  *disk;
        struct mmc_queue queue;
        struct list_head part;

        unsigned int    usage;
        unsigned int    read_only;
        unsigned int    part_type;

        /*
         * Only set in main mmc_blk_data associated
         * with mmc_card with mmc_set_drvdata, and keeps
         * track of the currently selected device partition.
         */
        unsigned int    part_curr;
        struct device_attribute force_ro;
};
static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");
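/*
 * Because MODULE_PARAM_PREFIX is redefined to "mmcblk." above, this
 * parameter is typically set on the kernel command line as, for example,
 * mmcblk.perdev_minors=16 (or passed directly when loading the module).
 * Mode 0444 makes it read-only at runtime.
 */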
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
        struct mmc_blk_data *md;

        mutex_lock(&open_lock);
        md = disk->private_data;
        if (md && md->usage == 0)
                md = NULL;
        if (md)
                md->usage++;
        mutex_unlock(&open_lock);

        return md;
}
static inline int mmc_get_devidx(struct gendisk *disk)
{
        int devmaj = MAJOR(disk_devt(disk));
        int devidx = MINOR(disk_devt(disk)) / perdev_minors;

        if (!devmaj)
                devidx = disk->first_minor / perdev_minors;

        return devidx;
}
static void mmc_blk_put(struct mmc_blk_data *md)
{
        mutex_lock(&open_lock);
        md->usage--;
        if (md->usage == 0) {
                int devidx = mmc_get_devidx(md->disk);
                blk_cleanup_queue(md->queue.queue);

                __clear_bit(devidx, dev_use);

                put_disk(md->disk);
                kfree(md);
        }
        mutex_unlock(&open_lock);
}
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        int ret;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

        ret = snprintf(buf, PAGE_SIZE, "%d",
                       get_disk_ro(dev_to_disk(dev)) ^
                       md->read_only);
        mmc_blk_put(md);
        return ret;
}
static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        int ret;
        char *end;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        unsigned long set = simple_strtoul(buf, &end, 0);
        if (end == buf) {
                ret = -EINVAL;
                goto out;
        }

        set_disk_ro(dev_to_disk(dev), set || md->read_only);
        ret = count;
out:
        mmc_blk_put(md);
        return ret;
}
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
        int ret = -ENXIO;

        mutex_lock(&block_mutex);
        if (md) {
                if (md->usage == 2)
                        check_disk_change(bdev);
                ret = 0;

                if ((mode & FMODE_WRITE) && md->read_only) {
                        mmc_blk_put(md);
                        ret = -EROFS;
                }
        }
        mutex_unlock(&block_mutex);

        return ret;
}
static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
        struct mmc_blk_data *md = disk->private_data;

        mutex_lock(&block_mutex);
        mmc_blk_put(md);
        mutex_unlock(&block_mutex);
        return 0;
}
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
        geo->heads = 4;
        geo->sectors = 16;
        return 0;
}
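/*
 * The geometry reported above is synthetic: 4 heads x 16 sectors gives
 * 64 sectors per cylinder, so, for example, a card with 4194304 512-byte
 * sectors (~2 GiB) reports 65536 cylinders. Only tools that still ask for
 * CHS geometry care about these values.
 */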
static const struct block_device_operations mmc_bdops = {
        .open                   = mmc_blk_open,
        .release                = mmc_blk_release,
        .getgeo                 = mmc_blk_getgeo,
        .owner                  = THIS_MODULE,
};
struct mmc_blk_request {
        struct mmc_request      mrq;
        struct mmc_command      cmd;
        struct mmc_command      stop;
        struct mmc_data         data;
};
static inline int mmc_blk_part_switch(struct mmc_card *card,
                                      struct mmc_blk_data *md)
{
        int ret;
        struct mmc_blk_data *main_md = mmc_get_drvdata(card);
        if (main_md->part_curr == md->part_type)
                return 0;

        if (mmc_card_mmc(card)) {
                card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
                card->ext_csd.part_config |= md->part_type;

                ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
                                 card->ext_csd.part_time);
                if (ret)
                        return ret;
        }

        main_md->part_curr = md->part_type;
        return 0;
}
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
        int err;
        u32 result;
        __be32 *blocks;
        struct mmc_request mrq;
        struct mmc_command cmd;
        struct mmc_data data;
        unsigned int timeout_us;
        struct scatterlist sg;

        memset(&cmd, 0, sizeof(struct mmc_command));
        cmd.opcode = MMC_APP_CMD;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                return (u32)-1;
        if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
                return (u32)-1;

        memset(&cmd, 0, sizeof(struct mmc_command));
        cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        memset(&data, 0, sizeof(struct mmc_data));
        data.timeout_ns = card->csd.tacc_ns * 100;
        data.timeout_clks = card->csd.tacc_clks * 100;

        timeout_us = data.timeout_ns / 1000;
        timeout_us += data.timeout_clks * 1000 /
                (card->host->ios.clock / 1000);

        if (timeout_us > 100000) {
                data.timeout_ns = 100000000;
                data.timeout_clks = 0;
        }

        data.blksz = 4;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        memset(&mrq, 0, sizeof(struct mmc_request));
        mrq.cmd = &cmd;
        mrq.data = &data;

        blocks = kmalloc(4, GFP_KERNEL);
        if (!blocks)
                return (u32)-1;

        sg_init_one(&sg, blocks, 4);
        mmc_wait_for_req(card->host, &mrq);

        result = ntohl(*blocks);
        kfree(blocks);

        if (cmd.error || data.error)
                result = (u32)-1;

        return result;
}
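/*
 * Note on the helper above: ACMD22 (SEND_NUM_WR_BLKS) returns the number
 * of blocks the card wrote without error as a single big-endian 32-bit
 * word, which is why a 4-byte data read and ntohl() are used. The write
 * error path in mmc_blk_issue_rw_rq() uses this count to acknowledge the
 * sectors known to have reached the card.
 */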
static u32 get_card_status(struct mmc_card *card, struct request *req)
{
        struct mmc_command cmd;
        int err;

        memset(&cmd, 0, sizeof(struct mmc_command));
        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                printk(KERN_ERR "%s: error %d sending status command\n",
                       req->rq_disk->disk_name, err);
        return cmd.resp[0];
}
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0;

        if (!mmc_can_erase(card)) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_trim(card))
                arg = MMC_TRIM_ARG;
        else
                arg = MMC_ERASE_ARG;

        err = mmc_erase(card, from, nr, arg);
out:
        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);

        return err ? 0 : 1;
}
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
                                       struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0;

        if (!mmc_can_secure_erase_trim(card)) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
                arg = MMC_SECURE_TRIM1_ARG;
        else
                arg = MMC_SECURE_ERASE_ARG;

        err = mmc_erase(card, from, nr, arg);
        if (!err && arg == MMC_SECURE_TRIM1_ARG)
                err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
out:
        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);

        return err ? 0 : 1;
}
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;

        /*
         * No-op, only service this because we need REQ_FUA for reliable
         * writes.
         */
        spin_lock_irq(&md->lock);
        __blk_end_request_all(req, 0);
        spin_unlock_irq(&md->lock);

        return 1;
}
/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
                                   struct mmc_card *card,
                                   struct request *req)
{
        int err;
        struct mmc_command set_count;

        if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
                /* Legacy mode imposes restrictions on transfers. */
                if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
                        brq->data.blocks = 1;

                if (brq->data.blocks > card->ext_csd.rel_sectors)
                        brq->data.blocks = card->ext_csd.rel_sectors;
                else if (brq->data.blocks < card->ext_csd.rel_sectors)
                        brq->data.blocks = 1;
        }

        memset(&set_count, 0, sizeof(struct mmc_command));
        set_count.opcode = MMC_SET_BLOCK_COUNT;
        set_count.arg = brq->data.blocks | (1 << 31);
        set_count.flags = MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &set_count, 0);
        if (err)
                printk(KERN_ERR "%s: error %d SET_BLOCK_COUNT\n",
                       req->rq_disk->disk_name, err);
        return err;
}
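/*
 * Setting bit 31 of the CMD23 (SET_BLOCK_COUNT) argument above requests a
 * reliable write for the pre-defined number of blocks. Because the length
 * of the transfer is fixed up front, no STOP_TRANSMISSION is sent for such
 * writes (see the stop-command handling in mmc_blk_issue_rw_rq()).
 */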
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        struct mmc_blk_request brq;
        int ret = 1, disable_multi = 0;

        /*
         * Reliable writes are used to implement Forced Unit Access and
         * REQ_META accesses, and are supported only on MMCs.
         */
        bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
                          (req->cmd_flags & REQ_META)) &&
                         (rq_data_dir(req) == WRITE) &&
                         REL_WRITES_SUPPORTED(card);

        do {
                struct mmc_command cmd;
                u32 readcmd, writecmd, status = 0;
                memset(&brq, 0, sizeof(struct mmc_blk_request));
                brq.mrq.cmd = &brq.cmd;
                brq.mrq.data = &brq.data;

                brq.cmd.arg = blk_rq_pos(req);
                if (!mmc_card_blockaddr(card))
                        brq.cmd.arg <<= 9;
                brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
                brq.data.blksz = 512;
                brq.stop.opcode = MMC_STOP_TRANSMISSION;
                brq.stop.arg = 0;
                brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
                brq.data.blocks = blk_rq_sectors(req);

                /*
                 * The block layer doesn't support all sector count
                 * restrictions, so we need to be prepared for too big
                 * requests.
                 */
                if (brq.data.blocks > card->host->max_blk_count)
                        brq.data.blocks = card->host->max_blk_count;

                /*
                 * After a read error, we redo the request one sector at a time
                 * in order to accurately determine which sectors can be read
                 * successfully.
                 */
                if (disable_multi && brq.data.blocks > 1)
                        brq.data.blocks = 1;
                if (brq.data.blocks > 1 || do_rel_wr) {
                        /* SPI multiblock writes terminate using a special
                         * token, not a STOP_TRANSMISSION request. Reliable
                         * writes use SET_BLOCK_COUNT and do not use a
                         * STOP_TRANSMISSION request either.
                         */
                        if ((!mmc_host_is_spi(card->host) && !do_rel_wr) ||
                            rq_data_dir(req) == READ)
                                brq.mrq.stop = &brq.stop;
                        readcmd = MMC_READ_MULTIPLE_BLOCK;
                        writecmd = MMC_WRITE_MULTIPLE_BLOCK;
                } else {
                        brq.mrq.stop = NULL;
                        readcmd = MMC_READ_SINGLE_BLOCK;
                        writecmd = MMC_WRITE_BLOCK;
                }
                if (rq_data_dir(req) == READ) {
                        brq.cmd.opcode = readcmd;
                        brq.data.flags |= MMC_DATA_READ;
                } else {
                        brq.cmd.opcode = writecmd;
                        brq.data.flags |= MMC_DATA_WRITE;
                }

                if (do_rel_wr && mmc_apply_rel_rw(&brq, card, req))
                        goto cmd_err;
                mmc_set_data_timeout(&brq.data, card);

                brq.data.sg = mq->sg;
                brq.data.sg_len = mmc_queue_map_sg(mq);

                /*
                 * Adjust the sg list so it is the same size as the
                 * request.
                 */
                if (brq.data.blocks != blk_rq_sectors(req)) {
                        int i, data_size = brq.data.blocks << 9;
                        struct scatterlist *sg;

                        for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
                                data_size -= sg->length;
                                if (data_size <= 0) {
                                        sg->length += data_size;
                                        i++;
                                        break;
                                }
                        }
                        brq.data.sg_len = i;
                }

                mmc_queue_bounce_pre(mq);

                mmc_wait_for_req(card->host, &brq.mrq);

                mmc_queue_bounce_post(mq);
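                /*
                 * Illustration of the sg trim above: if the request covers
                 * 1024 sectors but the host caps max_blk_count at 512,
                 * data_size starts at 512 * 512 bytes and the loop shortens
                 * the final segment and sg_len so exactly that many bytes are
                 * mapped this round; the remainder is handled by the next
                 * pass of the surrounding do/while loop.
                 */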
                /*
                 * Check for errors here, but don't jump to cmd_err
                 * until later as we need to wait for the card to leave
                 * programming mode even when things go wrong.
                 */
                if (brq.cmd.error || brq.data.error || brq.stop.error) {
                        if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
                                /* Redo read one sector at a time */
                                printk(KERN_WARNING "%s: retrying using single "
                                       "block read\n", req->rq_disk->disk_name);
                                disable_multi = 1;
                                continue;
                        }
                        status = get_card_status(card, req);
                }

                if (brq.cmd.error) {
                        printk(KERN_ERR "%s: error %d sending read/write "
                               "command, response %#x, card status %#x\n",
                               req->rq_disk->disk_name, brq.cmd.error,
                               brq.cmd.resp[0], status);
                }

                if (brq.data.error) {
                        if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
                                /* 'Stop' response contains card status */
                                status = brq.mrq.stop->resp[0];
                        printk(KERN_ERR "%s: error %d transferring data,"
                               " sector %u, nr %u, card status %#x\n",
                               req->rq_disk->disk_name, brq.data.error,
                               (unsigned)blk_rq_pos(req),
                               (unsigned)blk_rq_sectors(req), status);
                }

                if (brq.stop.error) {
                        printk(KERN_ERR "%s: error %d sending stop command, "
                               "response %#x, card status %#x\n",
                               req->rq_disk->disk_name, brq.stop.error,
                               brq.stop.resp[0], status);
                }
                if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
                        do {
                                int err;

                                cmd.opcode = MMC_SEND_STATUS;
                                cmd.arg = card->rca << 16;
                                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
                                err = mmc_wait_for_cmd(card->host, &cmd, 5);
                                if (err) {
                                        printk(KERN_ERR "%s: error %d requesting status\n",
                                               req->rq_disk->disk_name, err);
                                        goto cmd_err;
                                }
                                /*
                                 * Some cards mishandle the status bits,
                                 * so make sure to check both the busy
                                 * indication and the card state.
                                 */
                        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
                                 (R1_CURRENT_STATE(cmd.resp[0]) == 7));

#if 0
                        if (cmd.resp[0] & ~0x00000900)
                                printk(KERN_ERR "%s: status = %08x\n",
                                       req->rq_disk->disk_name, cmd.resp[0]);
                        if (mmc_decode_status(cmd.resp))
                                goto cmd_err;
#endif
                }
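                /*
                 * In the R1 status polled above, bits 12:9 encode the card
                 * state; state 7 is "prg" (programming), so the loop spins
                 * until the card reports ready-for-data and has left the
                 * programming state. The 0x900 mask in the disabled debug
                 * check corresponds to READY_FOR_DATA with state 4 ("tran").
                 */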
                if (brq.cmd.error || brq.stop.error || brq.data.error) {
                        if (rq_data_dir(req) == READ) {
                                /*
                                 * After an error, we redo I/O one sector at a
                                 * time, so we only reach here after trying to
                                 * read a single sector.
                                 */
                                spin_lock_irq(&md->lock);
                                ret = __blk_end_request(req, -EIO, brq.data.blksz);
                                spin_unlock_irq(&md->lock);
                                continue;
                        }
                        goto cmd_err;
                }

                /*
                 * A block was successfully transferred.
                 */
                spin_lock_irq(&md->lock);
                ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
                spin_unlock_irq(&md->lock);
        } while (ret);

        return 1;
 cmd_err:
        /*
         * If this is an SD card and we're writing, we can first
         * mark the known good sectors as ok.
         *
         * If the card is not SD, we can still ok written sectors
         * as reported by the controller (which might be less than
         * the real number of written sectors, but never more).
         */
        if (mmc_card_sd(card)) {
                u32 blocks;

                blocks = mmc_sd_num_wr_blocks(card);
                if (blocks != (u32)-1) {
                        spin_lock_irq(&md->lock);
                        ret = __blk_end_request(req, 0, blocks << 9);
                        spin_unlock_irq(&md->lock);
                }
        } else {
                spin_lock_irq(&md->lock);
                ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
                spin_unlock_irq(&md->lock);
        }

        spin_lock_irq(&md->lock);
        while (ret)
                ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
        spin_unlock_irq(&md->lock);

        return 0;
}
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
        int ret;
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;

        mmc_claim_host(card->host);
        ret = mmc_blk_part_switch(card, md);
        if (ret) {
                ret = 0;
                goto out;
        }

        if (req->cmd_flags & REQ_DISCARD) {
                if (req->cmd_flags & REQ_SECURE)
                        ret = mmc_blk_issue_secdiscard_rq(mq, req);
                else
                        ret = mmc_blk_issue_discard_rq(mq, req);
        } else if (req->cmd_flags & REQ_FLUSH) {
                ret = mmc_blk_issue_flush(mq, req);
        } else {
                ret = mmc_blk_issue_rw_rq(mq, req);
        }

out:
        mmc_release_host(card->host);
        return ret;
}
static inline int mmc_blk_readonly(struct mmc_card *card)
{
        return mmc_card_readonly(card) ||
               !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
                                              struct device *parent,
                                              sector_t size,
                                              bool default_ro,
                                              const char *subname)
{
        struct mmc_blk_data *md;
        int devidx, ret;

        devidx = find_first_zero_bit(dev_use, max_devices);
        if (devidx >= max_devices)
                return ERR_PTR(-ENOSPC);
        __set_bit(devidx, dev_use);

        md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
        if (!md) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * Set the read-only status based on the supported commands
         * and the write protect switch.
         */
        md->read_only = mmc_blk_readonly(card);

        md->disk = alloc_disk(perdev_minors);
        if (md->disk == NULL) {
                ret = -ENOMEM;
                goto err_kfree;
        }

        spin_lock_init(&md->lock);
        INIT_LIST_HEAD(&md->part);
        md->usage = 1;

        ret = mmc_init_queue(&md->queue, card, &md->lock);
        if (ret)
                goto err_putdisk;
        md->queue.issue_fn = mmc_blk_issue_rq;
        md->queue.data = md;

        md->disk->major = MMC_BLOCK_MAJOR;
        md->disk->first_minor = devidx * perdev_minors;
        md->disk->fops = &mmc_bdops;
        md->disk->private_data = md;
        md->disk->queue = md->queue.queue;
        md->disk->driverfs_dev = parent;
        set_disk_ro(md->disk, md->read_only || default_ro);
        if (REL_WRITES_SUPPORTED(card))
                blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);

        /*
         * As discussed on lkml, GENHD_FL_REMOVABLE should:
         *
         * - be set for removable media with permanent block devices
         * - be unset for removable block devices with permanent media
         *
         * Since MMC block devices clearly fall under the second
         * case, we do not set GENHD_FL_REMOVABLE.  Userspace
         * should use the block device creation/destruction hotplug
         * messages to tell when the card is present.
         */

        if (subname)
                snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
                         "mmcblk%d%s",
                         mmc_get_devidx(dev_to_disk(parent)), subname);
        else
                snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
                         "mmcblk%d", devidx);

        blk_queue_logical_block_size(md->queue.queue, 512);
        set_capacity(md->disk, size);
        return md;

 err_putdisk:
        put_disk(md->disk);
 err_kfree:
        kfree(md);
 out:
        return ERR_PTR(ret);
}
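/*
 * Naming follows from the snprintf calls above: the main device allocated
 * by mmc_blk_probe() becomes mmcblk<devidx> (e.g. mmcblk0), while a
 * partition created with a subname inherits the parent's index, e.g.
 * mmcblk0boot0 for the first eMMC boot partition.
 */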
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
        sector_t size;
        struct mmc_blk_data *md;

        if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
                /*
                 * The EXT_CSD sector count is in number of 512 byte
                 * sectors.
                 */
                size = card->ext_csd.sectors;
        } else {
                /*
                 * The CSD capacity field is in units of read_blkbits.
                 * set_capacity takes units of 512 bytes.
                 */
                size = card->csd.capacity << (card->csd.read_blkbits - 9);
        }

        md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
        return md;
}
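/*
 * Example of the CSD conversion above (illustrative numbers): a standard-
 * capacity card with read_blkbits = 10 reports csd.capacity in 1 KiB
 * units, so the << (10 - 9) shift doubles it into the 512-byte sectors
 * that set_capacity() expects. Block-addressed (high capacity) MMCs skip
 * this and use ext_csd.sectors directly.
 */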
static int mmc_blk_alloc_part(struct mmc_card *card,
                              struct mmc_blk_data *md,
                              unsigned int part_type,
                              sector_t size,
                              bool default_ro,
                              const char *subname)
{
        char cap_str[10];
        struct mmc_blk_data *part_md;

        part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
                                    subname);
        if (IS_ERR(part_md))
                return PTR_ERR(part_md);
        part_md->part_type = part_type;
        list_add(&part_md->part, &md->part);

        string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
        printk(KERN_INFO "%s: %s %s partition %u %s\n",
               part_md->disk->disk_name, mmc_card_id(card),
               mmc_card_name(card), part_md->part_type, cap_str);
        return 0;
}
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
        int ret = 0;

        if (!mmc_card_mmc(card))
                return 0;

        if (card->ext_csd.boot_size) {
                ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
                                         card->ext_csd.boot_size >> 9,
                                         true,
                                         "boot0");
                if (ret)
                        return ret;
                ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
                                         card->ext_csd.boot_size >> 9,
                                         true,
                                         "boot1");
                if (ret)
                        return ret;
        }

        return ret;
}
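/*
 * ext_csd.boot_size is in bytes, so the >> 9 above converts it to the
 * 512-byte sectors mmc_blk_alloc_part() expects. Both boot partitions are
 * created read-only by default (default_ro = true); they can be made
 * writable again through the force_ro sysfs attribute.
 */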
static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
        int err;

        mmc_claim_host(card->host);
        err = mmc_set_blocklen(card, 512);
        mmc_release_host(card->host);

        if (err) {
                printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
                       md->disk->disk_name, err);
                return -EINVAL;
        }

        return 0;
}
static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
        if (md) {
                if (md->disk->flags & GENHD_FL_UP) {
                        device_remove_file(disk_to_dev(md->disk), &md->force_ro);

                        /* Stop new requests from getting into the queue */
                        del_gendisk(md->disk);
                }

                /* Then flush out any already in there */
                mmc_cleanup_queue(&md->queue);
                mmc_blk_put(md);
        }
}
static void mmc_blk_remove_parts(struct mmc_card *card,
                                 struct mmc_blk_data *md)
{
        struct list_head *pos, *q;
        struct mmc_blk_data *part_md;

        list_for_each_safe(pos, q, &md->part) {
                part_md = list_entry(pos, struct mmc_blk_data, part);
                list_del(pos);
                mmc_blk_remove_req(part_md);
        }
}
static int mmc_add_disk(struct mmc_blk_data *md)
{
        int ret;

        add_disk(md->disk);
        md->force_ro.show = force_ro_show;
        md->force_ro.store = force_ro_store;
        md->force_ro.attr.name = "force_ro";
        md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
        ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
        if (ret)
                del_gendisk(md->disk);

        return ret;
}
static int mmc_blk_probe(struct mmc_card *card)
{
        struct mmc_blk_data *md, *part_md;
        int err;
        char cap_str[10];

        /*
         * Check that the card supports the command class(es) we need.
         */
        if (!(card->csd.cmdclass & CCC_BLOCK_READ))
                return -ENODEV;

        md = mmc_blk_alloc(card);
        if (IS_ERR(md))
                return PTR_ERR(md);

        err = mmc_blk_set_blksize(md, card);
        if (err)
                goto out;

        string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
        printk(KERN_INFO "%s: %s %s %s %s\n",
               md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
               cap_str, md->read_only ? "(ro)" : "");

        if (mmc_blk_alloc_parts(card, md))
                goto out;

        mmc_set_drvdata(card, md);
        if (mmc_add_disk(md))
                goto out;

        list_for_each_entry(part_md, &md->part, part) {
                if (mmc_add_disk(part_md))
                        goto out;
        }
        return 0;

 out:
        mmc_blk_remove_parts(card, md);
        mmc_blk_remove_req(md);
        return err;
}
static void mmc_blk_remove(struct mmc_card *card)
{
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        mmc_blk_remove_parts(card, md);
        mmc_blk_remove_req(md);
        mmc_set_drvdata(card, NULL);
}
#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
        struct mmc_blk_data *part_md;
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                mmc_queue_suspend(&md->queue);
                list_for_each_entry(part_md, &md->part, part) {
                        mmc_queue_suspend(&part_md->queue);
                }
        }
        return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
        struct mmc_blk_data *part_md;
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                mmc_blk_set_blksize(md, card);

                /*
                 * Resume involves the card going into idle state,
                 * so current partition is always the main one.
                 */
                md->part_curr = md->part_type;
                mmc_queue_resume(&md->queue);
                list_for_each_entry(part_md, &md->part, part) {
                        mmc_queue_resume(&part_md->queue);
                }
        }
        return 0;
}
#else
#define mmc_blk_suspend NULL
#define mmc_blk_resume  NULL
#endif
static struct mmc_driver mmc_driver = {
        .drv            = {
                .name   = "mmcblk",
        },
        .probe          = mmc_blk_probe,
        .remove         = mmc_blk_remove,
        .suspend        = mmc_blk_suspend,
        .resume         = mmc_blk_resume,
};
static int __init mmc_blk_init(void)
{
        int res;

        if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
                pr_info("mmcblk: using %d minors per device\n", perdev_minors);

        max_devices = 256 / perdev_minors;

        res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
        if (res)
                goto out;

        res = mmc_register_driver(&mmc_driver);
        if (res)
                goto out2;

        return 0;
 out2:
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
        return res;
}
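/*
 * register_blkdev() above claims MMC_BLOCK_MAJOR (179), so a successfully
 * probed card shows up as block devices with major 179 and minors carved
 * out in perdev_minors-sized chunks per card.
 */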
static void __exit mmc_blk_exit(void)
{
        mmc_unregister_driver(&mmc_driver);
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}
module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");