[pandora-kernel.git] drivers/mmc/card/block.c

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index e5bf2bf..71da564 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
 #include <linux/mutex.h>
 #include <linux/scatterlist.h>
 #include <linux/string_helpers.h>
+#include <linux/delay.h>
+#include <linux/capability.h>
+#include <linux/compat.h>
 
+#include <linux/mmc/ioctl.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
@@ -55,10 +59,6 @@ MODULE_ALIAS("mmc:block");
 #define INAND_CMD38_ARG_SECTRIM1 0x81
 #define INAND_CMD38_ARG_SECTRIM2 0x88
 
-#define REL_WRITES_SUPPORTED(card) (mmc_card_mmc((card)) &&    \
-    (((card)->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||  \
-     ((card)->ext_csd.rel_sectors)))
-
 static DEFINE_MUTEX(block_mutex);
 
 /*
@@ -86,6 +86,10 @@ struct mmc_blk_data {
        struct mmc_queue queue;
        struct list_head part;
 
+       unsigned int    flags;
+#define MMC_BLK_CMD23  (1 << 0)        /* Can do SET_BLOCK_COUNT for multiblock */
+#define MMC_BLK_REL_WR (1 << 1)        /* MMC Reliable write support */
+
        unsigned int    usage;
        unsigned int    read_only;
        unsigned int    part_type;
@@ -218,15 +222,214 @@ mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
        return 0;
 }
 
+struct mmc_blk_ioc_data {
+       struct mmc_ioc_cmd ic;
+       unsigned char *buf;
+       u64 buf_bytes;
+};
+
+static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
+       struct mmc_ioc_cmd __user *user)
+{
+       struct mmc_blk_ioc_data *idata;
+       int err;
+
+       idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+       if (!idata) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
+               err = -EFAULT;
+               goto idata_err;
+       }
+
+       idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
+       if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
+               err = -EOVERFLOW;
+               goto idata_err;
+       }
+
+       idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
+       if (!idata->buf) {
+               err = -ENOMEM;
+               goto idata_err;
+       }
+
+       if (copy_from_user(idata->buf, (void __user *)(unsigned long)
+                                       idata->ic.data_ptr, idata->buf_bytes)) {
+               err = -EFAULT;
+               goto copy_err;
+       }
+
+       return idata;
+
+copy_err:
+       kfree(idata->buf);
+idata_err:
+       kfree(idata);
+out:
+       return ERR_PTR(err);
+}
+
+static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+       struct mmc_ioc_cmd __user *ic_ptr)
+{
+       struct mmc_blk_ioc_data *idata;
+       struct mmc_blk_data *md;
+       struct mmc_card *card;
+       struct mmc_command cmd = {0};
+       struct mmc_data data = {0};
+       struct mmc_request mrq = {0};
+       struct scatterlist sg;
+       int err;
+
+       /*
+        * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+        * whole block device, not on a partition.  This prevents overspray
+        * between sibling partitions.
+        */
+       if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+               return -EPERM;
+
+       idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
+       if (IS_ERR(idata))
+               return PTR_ERR(idata);
+
+       cmd.opcode = idata->ic.opcode;
+       cmd.arg = idata->ic.arg;
+       cmd.flags = idata->ic.flags;
+
+       data.sg = &sg;
+       data.sg_len = 1;
+       data.blksz = idata->ic.blksz;
+       data.blocks = idata->ic.blocks;
+
+       sg_init_one(data.sg, idata->buf, idata->buf_bytes);
+
+       if (idata->ic.write_flag)
+               data.flags = MMC_DATA_WRITE;
+       else
+               data.flags = MMC_DATA_READ;
+
+       mrq.cmd = &cmd;
+       mrq.data = &data;
+
+       md = mmc_blk_get(bdev->bd_disk);
+       if (!md) {
+               err = -EINVAL;
+               goto cmd_done;
+       }
+
+       card = md->queue.card;
+       if (IS_ERR(card)) {
+               err = PTR_ERR(card);
+               goto cmd_done;
+       }
+
+       mmc_claim_host(card->host);
+
+       if (idata->ic.is_acmd) {
+               err = mmc_app_cmd(card->host, card);
+               if (err)
+                       goto cmd_rel_host;
+       }
+
+       /* data.flags must already be set before doing this. */
+       mmc_set_data_timeout(&data, card);
+       /* Allow overriding the timeout_ns for empirical tuning. */
+       if (idata->ic.data_timeout_ns)
+               data.timeout_ns = idata->ic.data_timeout_ns;
+
+       if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+               /*
+                * Pretend this is a data transfer and rely on the host driver
+                * to compute timeout.  When all host drivers support
+                * cmd.cmd_timeout for R1B, this can be changed to:
+                *
+                *     mrq.data = NULL;
+                *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
+                */
+               data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
+       }
+
+       mmc_wait_for_req(card->host, &mrq);
+
+       if (cmd.error) {
+               dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+                                               __func__, cmd.error);
+               err = cmd.error;
+               goto cmd_rel_host;
+       }
+       if (data.error) {
+               dev_err(mmc_dev(card->host), "%s: data error %d\n",
+                                               __func__, data.error);
+               err = data.error;
+               goto cmd_rel_host;
+       }
+
+       /*
+        * According to the SD specs, some commands require a delay after
+        * issuing the command.
+        */
+       if (idata->ic.postsleep_min_us)
+               usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
+
+       if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
+               err = -EFAULT;
+               goto cmd_rel_host;
+       }
+
+       if (!idata->ic.write_flag) {
+               if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
+                                               idata->buf, idata->buf_bytes)) {
+                       err = -EFAULT;
+                       goto cmd_rel_host;
+               }
+       }
+
+cmd_rel_host:
+       mmc_release_host(card->host);
+
+cmd_done:
+       mmc_blk_put(md);
+       kfree(idata->buf);
+       kfree(idata);
+       return err;
+}
+
+static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
+       unsigned int cmd, unsigned long arg)
+{
+       int ret = -EINVAL;
+       if (cmd == MMC_IOC_CMD)
+               ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
+       return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
+       unsigned int cmd, unsigned long arg)
+{
+       return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
 static const struct block_device_operations mmc_bdops = {
        .open                   = mmc_blk_open,
        .release                = mmc_blk_release,
        .getgeo                 = mmc_blk_getgeo,
        .owner                  = THIS_MODULE,
+       .ioctl                  = mmc_blk_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl           = mmc_blk_compat_ioctl,
+#endif
 };
 
 struct mmc_blk_request {
        struct mmc_request      mrq;
+       struct mmc_command      sbc;
        struct mmc_command      cmd;
        struct mmc_command      stop;
        struct mmc_data         data;
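
The new MMC_IOC_CMD path above is easiest to see from user space. Below is a
minimal sketch, assuming an eMMC exposed as /dev/mmcblk0; the MMC_RSP_* and
MMC_CMD_* flag values are mirrored locally because they live in
linux/mmc/core.h, which is not exported to user space. It reads the 512-byte
EXT_CSD register via CMD8 (SEND_EXT_CSD):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/mmc/ioctl.h>

    /* Local mirrors of kernel-internal flag values (assumption: kept in
     * sync with linux/mmc/core.h). */
    #define MMC_RSP_PRESENT (1 << 0)
    #define MMC_RSP_CRC     (1 << 2)
    #define MMC_RSP_OPCODE  (1 << 4)
    #define MMC_RSP_R1      (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
    #define MMC_CMD_ADTC    (1 << 5)   /* addressed data transfer command */

    int main(void)
    {
            __u8 ext_csd[512];
            struct mmc_ioc_cmd ic;
            /* Whole device, not a partition; the ioctl also requires
             * CAP_SYS_RAWIO (see mmc_blk_ioctl_cmd above). */
            int fd = open("/dev/mmcblk0", O_RDWR);

            if (fd < 0)
                    return 1;

            memset(&ic, 0, sizeof(ic));
            ic.opcode = 8;                  /* MMC_SEND_EXT_CSD */
            ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
            ic.blksz = 512;
            ic.blocks = 1;
            ic.write_flag = 0;              /* read: kernel copies data back */
            mmc_ioc_cmd_set_data(ic, ext_csd);

            if (ioctl(fd, MMC_IOC_CMD, &ic) == 0)
                    printf("EXT_CSD_REV: %u\n", ext_csd[192]);

            close(fd);
            return 0;
    }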
@@ -450,13 +653,10 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
  * reliable write can handle, thus finish the request in
  * partial completions.
  */
-static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
-                                  struct mmc_card *card,
-                                  struct request *req)
+static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
+                                   struct mmc_card *card,
+                                   struct request *req)
 {
-       int err;
-       struct mmc_command set_count = {0};
-
        if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
                /* Legacy mode imposes restrictions on transfers. */
                if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
@@ -467,15 +667,6 @@ static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
                else if (brq->data.blocks < card->ext_csd.rel_sectors)
                        brq->data.blocks = 1;
        }
-
-       set_count.opcode = MMC_SET_BLOCK_COUNT;
-       set_count.arg = brq->data.blocks | (1 << 31);
-       set_count.flags = MMC_RSP_R1 | MMC_CMD_AC;
-       err = mmc_wait_for_cmd(card->host, &set_count, 0);
-       if (err)
-               printk(KERN_ERR "%s: error %d SET_BLOCK_COUNT\n",
-                      req->rq_disk->disk_name, err);
-       return err;
 }
 
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
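
A quick worked example of the legacy clamp above, assuming rel_sectors == 8:
a reliable write whose start sector is not 8-aligned is cut down to a single
block, and so is an aligned write of fewer than 8 blocks; the remainder of
the request is then finished through partial completions, as the comment
above the function describes.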
@@ -492,7 +683,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
        bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
                          (req->cmd_flags & REQ_META)) &&
                (rq_data_dir(req) == WRITE) &&
-               REL_WRITES_SUPPORTED(card);
+               (md->flags & MMC_BLK_REL_WR);
 
        do {
                struct mmc_command cmd = {0};
@@ -530,11 +721,9 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 
                if (brq.data.blocks > 1 || do_rel_wr) {
                        /* SPI multiblock writes terminate using a special
-                        * token, not a STOP_TRANSMISSION request. Reliable
-                        * writes use SET_BLOCK_COUNT and do not use a
-                        * STOP_TRANSMISSION request either.
+                        * token, not a STOP_TRANSMISSION request.
                         */
-                       if ((!mmc_host_is_spi(card->host) && !do_rel_wr) ||
+                       if (!mmc_host_is_spi(card->host) ||
                            rq_data_dir(req) == READ)
                                brq.mrq.stop = &brq.stop;
                        readcmd = MMC_READ_MULTIPLE_BLOCK;
@@ -552,8 +741,37 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
                        brq.data.flags |= MMC_DATA_WRITE;
                }
 
-               if (do_rel_wr && mmc_apply_rel_rw(&brq, card, req))
-                       goto cmd_err;
+               if (do_rel_wr)
+                       mmc_apply_rel_rw(&brq, card, req);
+
+               /*
+                * Pre-defined multi-block transfers are preferable to
+                * open-ended ones (and necessary for reliable writes).
+                * However, it is not sufficient to just send CMD23,
+                * and avoid the final CMD12, as on an error condition
+                * CMD12 (stop) needs to be sent anyway. This, coupled
+                * with Auto-CMD23 enhancements provided by some
+                * hosts, means that the complexity of dealing
+                * with this is best left to the host. If CMD23 is
+                * supported by card and host, we'll fill sbc in and let
+                * the host deal with handling it correctly. This means
+                * that for hosts that don't expose MMC_CAP_CMD23, no
+                * change of behavior will be observed.
+                *
+                * N.B.: Some MMC cards experience performance degradation
+                * with CMD23. We'll avoid CMD23-bounded multiblock writes
+                * for these, while retaining features like reliable writes.
+                */
+
+               if ((md->flags & MMC_BLK_CMD23) &&
+                   mmc_op_multi(brq.cmd.opcode) &&
+                   (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
+                       brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
+                       brq.sbc.arg = brq.data.blocks |
+                               (do_rel_wr ? (1 << 31) : 0);
+                       brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+                       brq.mrq.sbc = &brq.sbc;
+               }
 
                mmc_set_data_timeout(&brq.data, card);
 
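
For hosts that do advertise MMC_CAP_CMD23, the effect of filling in
brq.mrq.sbc is that an open-ended multi-block write bounded by CMD12 becomes
a pre-sized sequence. Sketching the reliable-write case for 16 blocks
(argument values illustrative; bit 31 of the CMD23 argument is the reliable
write request bit, bits 15:0 carry the block count):

    CMD23  arg=0x80000010      /* reliable write, 16 blocks */
    CMD25  arg=<start address> /* WRITE_MULTIPLE_BLOCK */
           ... 16 blocks of data ...
    (no closing CMD12: the transfer length was pre-declared)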
@@ -590,7 +808,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
                 * until later as we need to wait for the card to leave
                 * programming mode even when things go wrong.
                 */
-               if (brq.cmd.error || brq.data.error || brq.stop.error) {
+               if (brq.sbc.error || brq.cmd.error ||
+                   brq.data.error || brq.stop.error) {
                        if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
                                /* Redo read one sector at a time */
                                printk(KERN_WARNING "%s: retrying using single "
@@ -601,6 +820,13 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
                        status = get_card_status(card, req);
                }
 
+               if (brq.sbc.error) {
+                       printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
+                              "command, response %#x, card status %#x\n",
+                              req->rq_disk->disk_name, brq.sbc.error,
+                              brq.sbc.resp[0], status);
+               }
+
                if (brq.cmd.error) {
                        printk(KERN_ERR "%s: error %d sending read/write "
                               "command, response %#x, card status %#x\n",
@@ -812,8 +1038,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
        md->disk->queue = md->queue.queue;
        md->disk->driverfs_dev = parent;
        set_disk_ro(md->disk, md->read_only || default_ro);
-       if (REL_WRITES_SUPPORTED(card))
-               blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
 
        /*
         * As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -832,6 +1056,22 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
 
        blk_queue_logical_block_size(md->queue.queue, 512);
        set_capacity(md->disk, size);
+
+       if (mmc_host_cmd23(card->host)) {
+               if (mmc_card_mmc(card) ||
+                   (mmc_card_sd(card) &&
+                    card->scr.cmds & SD_SCR_CMD23_SUPPORT))
+                       md->flags |= MMC_BLK_CMD23;
+       }
+
+       if (mmc_card_mmc(card) &&
+           md->flags & MMC_BLK_CMD23 &&
+           ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
+            card->ext_csd.rel_sectors)) {
+               md->flags |= MMC_BLK_REL_WR;
+               blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
+       }
+
        return md;
 
  err_putdisk:
@@ -970,6 +1210,7 @@ static int mmc_add_disk(struct mmc_blk_data *md)
        add_disk(md->disk);
        md->force_ro.show = force_ro_show;
        md->force_ro.store = force_ro_store;
+       sysfs_attr_init(&md->force_ro.attr);
        md->force_ro.attr.name = "force_ro";
        md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
        ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
@@ -986,6 +1227,21 @@ static const struct mmc_fixup blk_fixups[] =
        MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+
+       /*
+        * Some MMC cards experience performance degradation with CMD23
+        * instead of CMD12-bounded multiblock transfers. For now we
+        * blacklist what is known to be bad:
+        * - Certain Toshiba cards.
+        *
+        * N.B. This doesn't affect SD cards.
+        */
+       MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_BLK_NO_CMD23),
+       MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_BLK_NO_CMD23),
+       MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_BLK_NO_CMD23),
        END_FIXUP
 };