mmc: MMC boot partitions support.
drivers/mmc/card/block.c
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
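/*
 * The prefix below makes perdev_minors settable on the kernel command
 * line as "mmcblk.perdev_minors=<n>" instead of under the module's
 * default "mmc_block." prefix.
 */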
#define MODULE_PARAM_PREFIX "mmcblk."

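/*
 * Reliable writes are only usable on MMC (not SD) cards that either
 * advertise the enhanced reliable write mode in WR_REL_PARAM or report
 * a legacy reliable write sector count.
 */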
#define REL_WRITES_SUPPORTED(card) (mmc_card_mmc((card)) &&     \
    (((card)->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||   \
     ((card)->ext_csd.rel_sectors)))

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
        spinlock_t      lock;
        struct gendisk  *disk;
        struct mmc_queue queue;
        struct list_head part;

        unsigned int    usage;
        unsigned int    read_only;
        unsigned int    part_type;

        /*
         * Only set in the main mmc_blk_data associated
         * with the mmc_card via mmc_set_drvdata; keeps
         * track of the currently selected device partition.
         */
        unsigned int    part_curr;
        struct device_attribute force_ro;
};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
        struct mmc_blk_data *md;

        mutex_lock(&open_lock);
        md = disk->private_data;
        if (md && md->usage == 0)
                md = NULL;
        if (md)
                md->usage++;
        mutex_unlock(&open_lock);

        return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
        int devmaj = MAJOR(disk_devt(disk));
        int devidx = MINOR(disk_devt(disk)) / perdev_minors;

        if (!devmaj)
                devidx = disk->first_minor / perdev_minors;
        return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
        mutex_lock(&open_lock);
        md->usage--;
        if (md->usage == 0) {
                int devidx = mmc_get_devidx(md->disk);
                blk_cleanup_queue(md->queue.queue);

                __clear_bit(devidx, dev_use);

                put_disk(md->disk);
                kfree(md);
        }
        mutex_unlock(&open_lock);
}

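/*
 * "force_ro" exposes an extra read-only bit on top of the card's own
 * write-protect state: reading reports only the forced bit (disk ro
 * XOR the native read_only), while writing ORs the user's value with
 * the native state before applying it to the disk.
 */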
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        int ret;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

        ret = snprintf(buf, PAGE_SIZE, "%d\n",
                       get_disk_ro(dev_to_disk(dev)) ^
                       md->read_only);
        mmc_blk_put(md);
        return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        int ret;
        char *end;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        unsigned long set = simple_strtoul(buf, &end, 0);
        if (end == buf) {
                ret = -EINVAL;
                goto out;
        }

        set_disk_ro(dev_to_disk(dev), set || md->read_only);
        ret = count;
out:
        mmc_blk_put(md);
        return ret;
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
        int ret = -ENXIO;

        mutex_lock(&block_mutex);
        if (md) {
                if (md->usage == 2)
                        check_disk_change(bdev);
                ret = 0;

                if ((mode & FMODE_WRITE) && md->read_only) {
                        mmc_blk_put(md);
                        ret = -EROFS;
                }
        }
        mutex_unlock(&block_mutex);

        return ret;
}

static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
        struct mmc_blk_data *md = disk->private_data;

        mutex_lock(&block_mutex);
        mmc_blk_put(md);
        mutex_unlock(&block_mutex);
        return 0;
}

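/*
 * MMC/SD media has no native CHS geometry, so report a fixed 4-head,
 * 16-sector translation to keep legacy partitioning tools happy.
 */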
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
        geo->heads = 4;
        geo->sectors = 16;
        return 0;
}

static const struct block_device_operations mmc_bdops = {
        .open                   = mmc_blk_open,
        .release                = mmc_blk_release,
        .getgeo                 = mmc_blk_getgeo,
        .owner                  = THIS_MODULE,
};

struct mmc_blk_request {
        struct mmc_request      mrq;
        struct mmc_command      cmd;
        struct mmc_command      stop;
        struct mmc_data         data;
};

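/*
 * Select the eMMC hardware partition (user area, boot0 or boot1) backing
 * md by rewriting the partition access bits of the PARTITION_CONFIG
 * EXT_CSD byte.  part_curr in the main mmc_blk_data caches the current
 * selection so redundant CMD6 switches are skipped.
 */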
static inline int mmc_blk_part_switch(struct mmc_card *card,
                                      struct mmc_blk_data *md)
{
        int ret;
        struct mmc_blk_data *main_md = mmc_get_drvdata(card);
        if (main_md->part_curr == md->part_type)
                return 0;

        if (mmc_card_mmc(card)) {
                card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
                card->ext_csd.part_config |= md->part_type;

                ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
                                 card->ext_csd.part_time);
                if (ret)
                        return ret;
        }

        main_md->part_curr = md->part_type;
        return 0;
}

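/*
 * After a write error on an SD card, ACMD22 (SEND_NUM_WR_BLOCKS) reports
 * how many blocks were successfully written, letting the caller complete
 * the known-good part of a failed request.
 */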
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
        int err;
        u32 result;
        __be32 *blocks;

        struct mmc_request mrq;
        struct mmc_command cmd;
        struct mmc_data data;
        unsigned int timeout_us;

        struct scatterlist sg;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_APP_CMD;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                return (u32)-1;
        if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
                return (u32)-1;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        memset(&data, 0, sizeof(struct mmc_data));

        data.timeout_ns = card->csd.tacc_ns * 100;
        data.timeout_clks = card->csd.tacc_clks * 100;

        timeout_us = data.timeout_ns / 1000;
        timeout_us += data.timeout_clks * 1000 /
                (card->host->ios.clock / 1000);

        if (timeout_us > 100000) {
                data.timeout_ns = 100000000;
                data.timeout_clks = 0;
        }

        data.blksz = 4;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        memset(&mrq, 0, sizeof(struct mmc_request));

        mrq.cmd = &cmd;
        mrq.data = &data;

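        /*
         * The result buffer must be heap-allocated: it is mapped into a
         * scatterlist for the host controller, and an on-stack buffer is
         * not guaranteed to be DMA-able.
         */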
        blocks = kmalloc(4, GFP_KERNEL);
        if (!blocks)
                return (u32)-1;

        sg_init_one(&sg, blocks, 4);

        mmc_wait_for_req(card->host, &mrq);

        result = ntohl(*blocks);
        kfree(blocks);

        if (cmd.error || data.error)
                result = (u32)-1;

        return result;
}

static u32 get_card_status(struct mmc_card *card, struct request *req)
{
        struct mmc_command cmd;
        int err;

        memset(&cmd, 0, sizeof(struct mmc_command));
        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                printk(KERN_ERR "%s: error %d sending status command\n",
                       req->rq_disk->disk_name, err);
        return cmd.resp[0];
}

static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0;

        if (!mmc_can_erase(card)) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_trim(card))
                arg = MMC_TRIM_ARG;
        else
                arg = MMC_ERASE_ARG;

        err = mmc_erase(card, from, nr, arg);
out:
        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);

        return err ? 0 : 1;
}

static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
                                       struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0;

        if (!mmc_can_secure_erase_trim(card)) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
                arg = MMC_SECURE_TRIM1_ARG;
        else
                arg = MMC_SECURE_ERASE_ARG;

        err = mmc_erase(card, from, nr, arg);
        if (!err && arg == MMC_SECURE_TRIM1_ARG)
                err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
out:
        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);

        return err ? 0 : 1;
}

static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;

        /*
         * No-op, only service this because we need REQ_FUA for reliable
         * writes.
         */
        spin_lock_irq(&md->lock);
        __blk_end_request_all(req, 0);
        spin_unlock_irq(&md->lock);

        return 1;
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
                                   struct mmc_card *card,
                                   struct request *req)
{
        int err;
        struct mmc_command set_count;

        if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
                /* Legacy mode imposes restrictions on transfers. */
                if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
                        brq->data.blocks = 1;

                if (brq->data.blocks > card->ext_csd.rel_sectors)
                        brq->data.blocks = card->ext_csd.rel_sectors;
                else if (brq->data.blocks < card->ext_csd.rel_sectors)
                        brq->data.blocks = 1;
        }

        memset(&set_count, 0, sizeof(struct mmc_command));
        set_count.opcode = MMC_SET_BLOCK_COUNT;
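        /* Bit 31 of the SET_BLOCK_COUNT (CMD23) argument requests a reliable write. */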
        set_count.arg = brq->data.blocks | (1 << 31);
        set_count.flags = MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &set_count, 0);
        if (err)
                printk(KERN_ERR "%s: error %d SET_BLOCK_COUNT\n",
                       req->rq_disk->disk_name, err);
        return err;
}

static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        struct mmc_blk_request brq;
        int ret = 1, disable_multi = 0;

        /*
         * Reliable writes are used to implement Forced Unit Access and
         * REQ_META accesses, and are supported only on MMCs.
         */
        bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
                          (req->cmd_flags & REQ_META)) &&
                (rq_data_dir(req) == WRITE) &&
                REL_WRITES_SUPPORTED(card);

        do {
                struct mmc_command cmd;
                u32 readcmd, writecmd, status = 0;

                memset(&brq, 0, sizeof(struct mmc_blk_request));
                brq.mrq.cmd = &brq.cmd;
                brq.mrq.data = &brq.data;

                brq.cmd.arg = blk_rq_pos(req);
                if (!mmc_card_blockaddr(card))
                        brq.cmd.arg <<= 9;
                brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
                brq.data.blksz = 512;
                brq.stop.opcode = MMC_STOP_TRANSMISSION;
                brq.stop.arg = 0;
                brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
                brq.data.blocks = blk_rq_sectors(req);

                /*
                 * The block layer doesn't support all sector count
                 * restrictions, so we need to be prepared for too big
                 * requests.
                 */
                if (brq.data.blocks > card->host->max_blk_count)
                        brq.data.blocks = card->host->max_blk_count;

                /*
                 * After a read error, we redo the request one sector at a time
                 * in order to accurately determine which sectors can be read
                 * successfully.
                 */
                if (disable_multi && brq.data.blocks > 1)
                        brq.data.blocks = 1;

                if (brq.data.blocks > 1 || do_rel_wr) {
                        /* SPI multiblock writes terminate using a special
                         * token, not a STOP_TRANSMISSION request. Reliable
                         * writes use SET_BLOCK_COUNT and do not use a
                         * STOP_TRANSMISSION request either.
                         */
                        if ((!mmc_host_is_spi(card->host) && !do_rel_wr) ||
                            rq_data_dir(req) == READ)
                                brq.mrq.stop = &brq.stop;
                        readcmd = MMC_READ_MULTIPLE_BLOCK;
                        writecmd = MMC_WRITE_MULTIPLE_BLOCK;
                } else {
                        brq.mrq.stop = NULL;
                        readcmd = MMC_READ_SINGLE_BLOCK;
                        writecmd = MMC_WRITE_BLOCK;
                }
                if (rq_data_dir(req) == READ) {
                        brq.cmd.opcode = readcmd;
                        brq.data.flags |= MMC_DATA_READ;
                } else {
                        brq.cmd.opcode = writecmd;
                        brq.data.flags |= MMC_DATA_WRITE;
                }

                if (do_rel_wr && mmc_apply_rel_rw(&brq, card, req))
                        goto cmd_err;

                mmc_set_data_timeout(&brq.data, card);

                brq.data.sg = mq->sg;
                brq.data.sg_len = mmc_queue_map_sg(mq);

                /*
                 * Adjust the sg list so it is the same size as the
                 * request.
                 */
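                /*
                 * E.g. if the request spans 8 sectors but blocks was
                 * clamped to 1 above, walk the list until 512 bytes are
                 * covered, shorten the final entry, and drop the rest.
                 */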
                if (brq.data.blocks != blk_rq_sectors(req)) {
                        int i, data_size = brq.data.blocks << 9;
                        struct scatterlist *sg;

                        for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
                                data_size -= sg->length;
                                if (data_size <= 0) {
                                        sg->length += data_size;
                                        i++;
                                        break;
                                }
                        }
                        brq.data.sg_len = i;
                }

                mmc_queue_bounce_pre(mq);

                mmc_wait_for_req(card->host, &brq.mrq);

                mmc_queue_bounce_post(mq);

                /*
                 * Check for errors here, but don't jump to cmd_err
                 * until later as we need to wait for the card to leave
                 * programming mode even when things go wrong.
                 */
                if (brq.cmd.error || brq.data.error || brq.stop.error) {
                        if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
                                /* Redo read one sector at a time */
                                printk(KERN_WARNING "%s: retrying using single "
                                       "block read\n", req->rq_disk->disk_name);
                                disable_multi = 1;
                                continue;
                        }
                        status = get_card_status(card, req);
                }

                if (brq.cmd.error) {
                        printk(KERN_ERR "%s: error %d sending read/write "
                               "command, response %#x, card status %#x\n",
                               req->rq_disk->disk_name, brq.cmd.error,
                               brq.cmd.resp[0], status);
                }

                if (brq.data.error) {
                        if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
                                /* 'Stop' response contains card status */
                                status = brq.mrq.stop->resp[0];
                        printk(KERN_ERR "%s: error %d transferring data,"
                               " sector %u, nr %u, card status %#x\n",
                               req->rq_disk->disk_name, brq.data.error,
                               (unsigned)blk_rq_pos(req),
                               (unsigned)blk_rq_sectors(req), status);
                }

                if (brq.stop.error) {
                        printk(KERN_ERR "%s: error %d sending stop command, "
                               "response %#x, card status %#x\n",
                               req->rq_disk->disk_name, brq.stop.error,
                               brq.stop.resp[0], status);
                }

                if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
                        do {
                                int err;

                                cmd.opcode = MMC_SEND_STATUS;
                                cmd.arg = card->rca << 16;
                                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
                                err = mmc_wait_for_cmd(card->host, &cmd, 5);
                                if (err) {
                                        printk(KERN_ERR "%s: error %d requesting status\n",
                                               req->rq_disk->disk_name, err);
                                        goto cmd_err;
                                }
                                /*
                                 * Some cards mishandle the status bits,
                                 * so make sure to check both the busy
                                 * indication and the card state.
                                 */
                        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
                                (R1_CURRENT_STATE(cmd.resp[0]) == 7)); /* 7: prg state */

#if 0
                        if (cmd.resp[0] & ~0x00000900)
                                printk(KERN_ERR "%s: status = %08x\n",
                                       req->rq_disk->disk_name, cmd.resp[0]);
                        if (mmc_decode_status(cmd.resp))
                                goto cmd_err;
#endif
                }

                if (brq.cmd.error || brq.stop.error || brq.data.error) {
                        if (rq_data_dir(req) == READ) {
                                /*
                                 * After an error, we redo I/O one sector at a
                                 * time, so we only reach here after trying to
                                 * read a single sector.
                                 */
                                spin_lock_irq(&md->lock);
                                ret = __blk_end_request(req, -EIO, brq.data.blksz);
                                spin_unlock_irq(&md->lock);
                                continue;
                        }
                        goto cmd_err;
                }

                /*
                 * A block was successfully transferred.
                 */
                spin_lock_irq(&md->lock);
                ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
                spin_unlock_irq(&md->lock);
        } while (ret);

        return 1;

 cmd_err:
        /*
         * If this is an SD card and we're writing, we can first
         * mark the known good sectors as ok.
         *
         * If the card is not SD, we can still ok written sectors
         * as reported by the controller (which might be less than
         * the real number of written sectors, but never more).
         */
        if (mmc_card_sd(card)) {
                u32 blocks;

                blocks = mmc_sd_num_wr_blocks(card);
                if (blocks != (u32)-1) {
                        spin_lock_irq(&md->lock);
                        ret = __blk_end_request(req, 0, blocks << 9);
                        spin_unlock_irq(&md->lock);
                }
        } else {
                spin_lock_irq(&md->lock);
                ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
                spin_unlock_irq(&md->lock);
        }

        spin_lock_irq(&md->lock);
        while (ret)
                ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
        spin_unlock_irq(&md->lock);

        return 0;
}

static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
        int ret;
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;

        mmc_claim_host(card->host);
        ret = mmc_blk_part_switch(card, md);
        if (ret) {
                ret = 0;
                goto out;
        }

        if (req->cmd_flags & REQ_DISCARD) {
                if (req->cmd_flags & REQ_SECURE)
                        ret = mmc_blk_issue_secdiscard_rq(mq, req);
                else
                        ret = mmc_blk_issue_discard_rq(mq, req);
        } else if (req->cmd_flags & REQ_FLUSH) {
                ret = mmc_blk_issue_flush(mq, req);
        } else {
                ret = mmc_blk_issue_rw_rq(mq, req);
        }

out:
        mmc_release_host(card->host);
        return ret;
}

static inline int mmc_blk_readonly(struct mmc_card *card)
{
        return mmc_card_readonly(card) ||
               !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}

static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
                                              struct device *parent,
                                              sector_t size,
                                              bool default_ro,
                                              const char *subname)
{
        struct mmc_blk_data *md;
        int devidx, ret;

        devidx = find_first_zero_bit(dev_use, max_devices);
        if (devidx >= max_devices)
                return ERR_PTR(-ENOSPC);
        __set_bit(devidx, dev_use);

        md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
        if (!md) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * Set the read-only status based on the supported commands
         * and the write protect switch.
         */
        md->read_only = mmc_blk_readonly(card);

        md->disk = alloc_disk(perdev_minors);
        if (md->disk == NULL) {
                ret = -ENOMEM;
                goto err_kfree;
        }

        spin_lock_init(&md->lock);
        INIT_LIST_HEAD(&md->part);
        md->usage = 1;

        ret = mmc_init_queue(&md->queue, card, &md->lock);
        if (ret)
                goto err_putdisk;

        md->queue.issue_fn = mmc_blk_issue_rq;
        md->queue.data = md;

        md->disk->major = MMC_BLOCK_MAJOR;
        md->disk->first_minor = devidx * perdev_minors;
        md->disk->fops = &mmc_bdops;
        md->disk->private_data = md;
        md->disk->queue = md->queue.queue;
        md->disk->driverfs_dev = parent;
        set_disk_ro(md->disk, md->read_only || default_ro);
        if (REL_WRITES_SUPPORTED(card))
                blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);

        /*
         * As discussed on lkml, GENHD_FL_REMOVABLE should:
         *
         * - be set for removable media with permanent block devices
         * - be unset for removable block devices with permanent media
         *
         * Since MMC block devices clearly fall under the second
         * case, we do not set GENHD_FL_REMOVABLE.  Userspace
         * should use the block device creation/destruction hotplug
         * messages to tell when the card is present.
         */

        if (subname)
                snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
                         "mmcblk%d%s",
                         mmc_get_devidx(dev_to_disk(parent)), subname);
        else
                snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
                         "mmcblk%d", devidx);

        blk_queue_logical_block_size(md->queue.queue, 512);
        set_capacity(md->disk, size);
        return md;

 err_putdisk:
        put_disk(md->disk);
 err_kfree:
        kfree(md);
 out:
        return ERR_PTR(ret);
}

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
        sector_t size;
        struct mmc_blk_data *md;

        if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
                /*
                 * The EXT_CSD sector count is in number of 512 byte
                 * sectors.
                 */
                size = card->ext_csd.sectors;
        } else {
                /*
                 * The CSD capacity field is in units of read_blkbits.
                 * set_capacity takes units of 512 bytes.
                 */
                size = card->csd.capacity << (card->csd.read_blkbits - 9);
        }

        md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
        return md;
}

static int mmc_blk_alloc_part(struct mmc_card *card,
                              struct mmc_blk_data *md,
                              unsigned int part_type,
                              sector_t size,
                              bool default_ro,
                              const char *subname)
{
        char cap_str[10];
        struct mmc_blk_data *part_md;

        part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
                                    subname);
        if (IS_ERR(part_md))
                return PTR_ERR(part_md);
        part_md->part_type = part_type;
        list_add(&part_md->part, &md->part);

        string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
        printk(KERN_INFO "%s: %s %s partition %u %s\n",
               part_md->disk->disk_name, mmc_card_id(card),
               mmc_card_name(card), part_md->part_type, cap_str);
        return 0;
}

static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
        int ret = 0;

        if (!mmc_card_mmc(card))
                return 0;

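        /*
         * ext_csd.boot_size is in bytes, while mmc_blk_alloc_part()
         * takes a size in 512-byte sectors, hence the >> 9 below.  Both
         * boot partitions are exported read-only by default since they
         * normally hold boot code.
         */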
        if (card->ext_csd.boot_size) {
                ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
                                         card->ext_csd.boot_size >> 9,
                                         true,
                                         "boot0");
                if (ret)
                        return ret;
                ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
                                         card->ext_csd.boot_size >> 9,
                                         true,
                                         "boot1");
                if (ret)
                        return ret;
        }

        return ret;
}

static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
        int err;

        mmc_claim_host(card->host);
        err = mmc_set_blocklen(card, 512);
        mmc_release_host(card->host);

        if (err) {
                printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
                        md->disk->disk_name, err);
                return -EINVAL;
        }

        return 0;
}

static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
        if (md) {
                if (md->disk->flags & GENHD_FL_UP) {
                        device_remove_file(disk_to_dev(md->disk), &md->force_ro);

                        /* Stop new requests from getting into the queue */
                        del_gendisk(md->disk);
                }

                /* Then flush out any already in there */
                mmc_cleanup_queue(&md->queue);
                mmc_blk_put(md);
        }
}

static void mmc_blk_remove_parts(struct mmc_card *card,
                                 struct mmc_blk_data *md)
{
        struct list_head *pos, *q;
        struct mmc_blk_data *part_md;

        list_for_each_safe(pos, q, &md->part) {
                part_md = list_entry(pos, struct mmc_blk_data, part);
                list_del(pos);
                mmc_blk_remove_req(part_md);
        }
}

static int mmc_add_disk(struct mmc_blk_data *md)
{
        int ret;

        add_disk(md->disk);
        md->force_ro.show = force_ro_show;
        md->force_ro.store = force_ro_store;
        md->force_ro.attr.name = "force_ro";
        md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
        ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
        if (ret)
                del_gendisk(md->disk);

        return ret;
}

static int mmc_blk_probe(struct mmc_card *card)
{
        struct mmc_blk_data *md, *part_md;
        int err;
        char cap_str[10];

        /*
         * Check that the card supports the command class(es) we need.
         */
        if (!(card->csd.cmdclass & CCC_BLOCK_READ))
                return -ENODEV;

        md = mmc_blk_alloc(card);
        if (IS_ERR(md))
                return PTR_ERR(md);

        err = mmc_blk_set_blksize(md, card);
        if (err)
                goto out;

        string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
        printk(KERN_INFO "%s: %s %s %s %s\n",
                md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
                cap_str, md->read_only ? "(ro)" : "");

        if (mmc_blk_alloc_parts(card, md))
                goto out;

        mmc_set_drvdata(card, md);
        if (mmc_add_disk(md))
                goto out;

        list_for_each_entry(part_md, &md->part, part) {
                if (mmc_add_disk(part_md))
                        goto out;
        }
        return 0;

 out:
        mmc_blk_remove_parts(card, md);
        mmc_blk_remove_req(md);
        return err;
}

static void mmc_blk_remove(struct mmc_card *card)
{
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        mmc_blk_remove_parts(card, md);
        mmc_blk_remove_req(md);
        mmc_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
        struct mmc_blk_data *part_md;
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                mmc_queue_suspend(&md->queue);
                list_for_each_entry(part_md, &md->part, part) {
                        mmc_queue_suspend(&part_md->queue);
                }
        }
        return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
        struct mmc_blk_data *part_md;
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                mmc_blk_set_blksize(md, card);

                /*
                 * Resume involves the card going into idle state,
                 * so current partition is always the main one.
                 */
                md->part_curr = md->part_type;
                mmc_queue_resume(&md->queue);
                list_for_each_entry(part_md, &md->part, part) {
                        mmc_queue_resume(&part_md->queue);
                }
        }
        return 0;
}
#else
#define mmc_blk_suspend NULL
#define mmc_blk_resume  NULL
#endif

static struct mmc_driver mmc_driver = {
        .drv            = {
                .name   = "mmcblk",
        },
        .probe          = mmc_blk_probe,
        .remove         = mmc_blk_remove,
        .suspend        = mmc_blk_suspend,
        .resume         = mmc_blk_resume,
};

static int __init mmc_blk_init(void)
{
        int res;

        if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
                pr_info("mmcblk: using %d minors per device\n", perdev_minors);

        max_devices = 256 / perdev_minors;

        res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
        if (res)
                goto out;

        res = mmc_register_driver(&mmc_driver);
        if (res)
                goto out2;

        return 0;
 out2:
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
        return res;
}

static void __exit mmc_blk_exit(void)
{
        mmc_unregister_driver(&mmc_driver);
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");