mmc: block: fix boot partition switch error path
[pandora-kernel.git] / drivers / mmc / card / block.c
index f85e422..d7eb2c0 100644 (file)
@@ -94,6 +94,11 @@ struct mmc_blk_data {
        unsigned int    read_only;
        unsigned int    part_type;
        unsigned int    name_idx;
+       unsigned int    reset_done;
+#define MMC_BLK_READ           BIT(0)
+#define MMC_BLK_WRITE          BIT(1)
+#define MMC_BLK_DISCARD        BIT(2)
+#define MMC_BLK_SECDISCARD     BIT(3)
 
        /*
         * Only set in main mmc_blk_data associated
@@ -106,6 +111,16 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
+enum mmc_blk_status {
+       MMC_BLK_SUCCESS = 0,
+       MMC_BLK_PARTIAL,
+       MMC_BLK_CMD_ERR,
+       MMC_BLK_RETRY,
+       MMC_BLK_ABORT,
+       MMC_BLK_DATA_ERR,
+       MMC_BLK_ECC_ERR,
+};
+
 module_param(perdev_minors, int, 0444);
 MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
 
@@ -281,7 +296,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
        struct mmc_card *card;
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
-       struct mmc_request mrq = {0};
+       struct mmc_request mrq = {NULL};
        struct scatterlist sg;
        int err;
 
@@ -427,32 +442,30 @@ static const struct block_device_operations mmc_bdops = {
 #endif
 };
 
-struct mmc_blk_request {
-       struct mmc_request      mrq;
-       struct mmc_command      sbc;
-       struct mmc_command      cmd;
-       struct mmc_command      stop;
-       struct mmc_data         data;
-};
-
 static inline int mmc_blk_part_switch(struct mmc_card *card,
                                      struct mmc_blk_data *md)
 {
        int ret;
        struct mmc_blk_data *main_md = mmc_get_drvdata(card);
+
        if (main_md->part_curr == md->part_type)
                return 0;
 
        if (mmc_card_mmc(card)) {
-               card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
-               card->ext_csd.part_config |= md->part_type;
+               u8 part_config = card->ext_csd.part_config;
+
+               part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+               part_config |= md->part_type;
 
                ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-                                EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
+                                EXT_CSD_PART_CONFIG, part_config,
                                 card->ext_csd.part_time);
                if (ret)
                        return ret;
-}
+
+               card->ext_csd.part_config = part_config;
+       }
 
        main_md->part_curr = md->part_type;
        return 0;
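
The hunk above is the core of the boot partition switch fix: the new EXT_CSD_PART_CONFIG value is built in a local part_config, and the cached card->ext_csd.part_config is written back only once mmc_switch() has succeeded, so a failed switch no longer leaves the cache claiming a partition the card never entered. The same commit-on-success pattern in isolation, as a sketch with made-up names (fake_card, fake_switch), not the driver's real API:

    #define FAKE_PART_ACC_MASK 0x7          /* stand-in for the EXT_CSD mask */

    struct fake_card {
            u8 cached_part_config;
    };

    static int fake_switch(struct fake_card *c, u8 cfg); /* stand-in for mmc_switch() */

    static int set_active_partition(struct fake_card *c, u8 part_type)
    {
            u8 cfg = c->cached_part_config;

            cfg &= ~FAKE_PART_ACC_MASK;
            cfg |= part_type;

            if (fake_switch(c, cfg))        /* may fail; cache untouched */
                    return -EIO;

            c->cached_part_config = cfg;    /* commit only on success */
            return 0;
    }
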
@@ -464,7 +476,7 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
        u32 result;
        __be32 *blocks;
 
-       struct mmc_request mrq = {0};
+       struct mmc_request mrq = {NULL};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        unsigned int timeout_us;
@@ -525,7 +537,20 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
        return result;
 }
 
-static u32 get_card_status(struct mmc_card *card, struct request *req)
+static int send_stop(struct mmc_card *card, u32 *status)
+{
+       struct mmc_command cmd = {0};
+       int err;
+
+       cmd.opcode = MMC_STOP_TRANSMISSION;
+       cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+       err = mmc_wait_for_cmd(card->host, &cmd, 5);
+       if (err == 0)
+               *status = cmd.resp[0];
+       return err;
+}
+
+static int get_card_status(struct mmc_card *card, u32 *status, int retries)
 {
        struct mmc_command cmd = {0};
        int err;
@@ -534,11 +559,186 @@ static u32 get_card_status(struct mmc_card *card, struct request *req)
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
-       err = mmc_wait_for_cmd(card->host, &cmd, 0);
+       err = mmc_wait_for_cmd(card->host, &cmd, retries);
+       if (err == 0)
+               *status = cmd.resp[0];
+       return err;
+}
+
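
Both helpers above follow one convention: resp[0] is copied to *status only when mmc_wait_for_cmd() returns 0, so a caller can never consume a stale response. A sketch of the typical caller shape, mirroring the busy-poll that mmc_blk_err_check() uses further down in this patch (wait_for_tran is a hypothetical name, not a function this patch adds):

    /* Sketch: wait until the card is back in TRAN and ready for data. */
    static int wait_for_tran(struct mmc_card *card)
    {
            u32 status;
            int err;

            do {
                    err = get_card_status(card, &status, 5); /* up to 5 retries */
                    if (err)
                            return err;     /* card is not answering at all */
            } while (!(status & R1_READY_FOR_DATA) ||
                     R1_CURRENT_STATE(status) == R1_STATE_PRG);

            return 0;
    }
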
+#define ERR_RETRY      2
+#define ERR_ABORT      1
+#define ERR_CONTINUE   0
+
+static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
+       bool status_valid, u32 status)
+{
+       switch (error) {
+       case -EILSEQ:
+               /* response crc error, retry the r/w cmd */
+               pr_err("%s: %s sending %s command, card status %#x\n",
+                       req->rq_disk->disk_name, "response CRC error",
+                       name, status);
+               return ERR_RETRY;
+
+       case -ETIMEDOUT:
+               pr_err("%s: %s sending %s command, card status %#x\n",
+                       req->rq_disk->disk_name, "timed out", name, status);
+
+               /* If the status cmd initially failed, retry the r/w cmd */
+               if (!status_valid)
+                       return ERR_RETRY;
+
+               /*
+                * If it was an r/w cmd CRC error, or an illegal command
+                * (e.g. issued in the wrong state), then retry - we should
+                * have corrected the state problem above.
+                */
+               if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+                       return ERR_RETRY;
+
+               /* Otherwise abort the command */
+               return ERR_ABORT;
+
+       default:
+               /* We don't understand the error code the driver gave us */
+               pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
+                      req->rq_disk->disk_name, error, status);
+               return ERR_ABORT;
+       }
+}
+
+/*
+ * Initial r/w and stop cmd error recovery.
+ * We don't know whether the card received the r/w cmd or not, so try to
+ * restore things back to a sane state.  Essentially, we do this as follows:
+ * - Obtain card status.  If the first attempt to obtain card status fails,
+ *   the status word will reflect the failed status cmd, not the failed
+ *   r/w cmd.  If we fail to obtain card status, it suggests we can no
+ *   longer communicate with the card.
+ * - Check the card state.  If the card received the cmd but there was a
+ *   transient problem with the response, it might still be in a data transfer
+ *   mode.  Try to send it a stop command.  If this fails, we can't recover.
+ * - If the r/w cmd failed due to a response CRC error, it was probably
+ *   transient, so retry the cmd.
+ * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
+ * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
+ *   illegal cmd, retry.
+ * Otherwise we don't understand what happened, so abort.
+ */
+static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+       struct mmc_blk_request *brq, int *ecc_err)
+{
+       bool prev_cmd_status_valid = true;
+       u32 status, stop_status = 0;
+       int err, retry;
+
+       /*
+        * Try to get card status which indicates both the card state
+        * and why there was no response.  If the first attempt fails,
+        * we can't be sure the returned status is for the r/w command.
+        */
+       for (retry = 2; retry >= 0; retry--) {
+               err = get_card_status(card, &status, 0);
+               if (!err)
+                       break;
+
+               prev_cmd_status_valid = false;
+               pr_err("%s: error %d sending status command, %sing\n",
+                      req->rq_disk->disk_name, err, retry ? "retry" : "abort");
+       }
+
+       /* We couldn't get a response from the card.  Give up. */
        if (err)
-               printk(KERN_ERR "%s: error %d sending status command",
-                      req->rq_disk->disk_name, err);
-       return cmd.resp[0];
+               return ERR_ABORT;
+
+       /* Flag ECC errors */
+       if ((status & R1_CARD_ECC_FAILED) ||
+           (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
+           (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
+               *ecc_err = 1;
+
+       /*
+        * Check the current card state.  If it is in some data transfer
+        * mode, tell it to stop (and hopefully transition back to TRAN).
+        */
+       if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
+           R1_CURRENT_STATE(status) == R1_STATE_RCV) {
+               err = send_stop(card, &stop_status);
+               if (err)
+                       pr_err("%s: error %d sending stop command\n",
+                              req->rq_disk->disk_name, err);
+
+               /*
+                * If the stop cmd also timed out, the card is probably
+                * not present, so abort.  Other errors are bad news too.
+                */
+               if (err)
+                       return ERR_ABORT;
+               if (stop_status & R1_CARD_ECC_FAILED)
+                       *ecc_err = 1;
+       }
+
+       /* Check for set block count errors */
+       if (brq->sbc.error)
+               return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
+                               prev_cmd_status_valid, status);
+
+       /* Check for r/w command errors */
+       if (brq->cmd.error)
+               return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
+                               prev_cmd_status_valid, status);
+
+       /* Data errors */
+       if (!brq->stop.error)
+               return ERR_CONTINUE;
+
+       /* Now for stop errors.  These aren't fatal to the transfer. */
+       pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
+              req->rq_disk->disk_name, brq->stop.error,
+              brq->cmd.resp[0], status);
+
+       /*
+        * Substitute in our own stop status as this will give the error
+        * state which happened during the execution of the r/w command.
+        */
+       if (stop_status) {
+               brq->stop.resp[0] = stop_status;
+               brq->stop.error = 0;
+       }
+       return ERR_CONTINUE;
+}
+
+static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
+                        int type)
+{
+       int err;
+
+       if (md->reset_done & type)
+               return -EEXIST;
+
+       md->reset_done |= type;
+       err = mmc_hw_reset(host);
+       /* Ensure we switch back to the correct partition */
+       if (err != -EOPNOTSUPP) {
+               struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
+               int part_err;
+
+               main_md->part_curr = main_md->part_type;
+               part_err = mmc_blk_part_switch(host->card, md);
+               if (part_err) {
+                       /*
+                        * We have failed to get back into the correct
+                        * partition, so we need to abort the whole request.
+                        */
+                       return -ENODEV;
+               }
+       }
+       return err;
+}
+
+static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+{
+       md->reset_done &= ~type;
 }
 
 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
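
mmc_blk_reset() and mmc_blk_reset_success() implement a one-shot-per-type throttle: a bit in md->reset_done records that a reset has already been spent on that request type (read, write, discard, secdiscard), -EEXIST refuses a second attempt, and only a later success re-arms the bit, so a terminally broken card cannot trigger an endless reset loop. Every caller follows the same shape; this is condensed from the discard path added in the next hunk, not new code:

    int type = MMC_BLK_DISCARD;
    int err;

    retry:
            err = mmc_erase(card, from, nr, arg);
            if (err == -EIO && !mmc_blk_reset(md, card->host, type))
                    goto retry;                      /* at most one reset per type */
            if (!err)
                    mmc_blk_reset_success(md, type); /* re-arm the throttle */
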
@@ -546,7 +746,7 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
-       int err = 0;
+       int err = 0, type = MMC_BLK_DISCARD;
 
        if (!mmc_can_erase(card)) {
                err = -EOPNOTSUPP;
@@ -560,7 +760,7 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
                arg = MMC_TRIM_ARG;
        else
                arg = MMC_ERASE_ARG;
-
+retry:
        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 INAND_CMD38_ARG_EXT_CSD,
@@ -573,6 +773,10 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
        }
        err = mmc_erase(card, from, nr, arg);
 out:
+       if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+               goto retry;
+       if (!err)
+               mmc_blk_reset_success(md, type);
        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);
@@ -586,7 +790,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
-       int err = 0;
+       int err = 0, type = MMC_BLK_SECDISCARD;
 
        if (!mmc_can_secure_erase_trim(card)) {
                err = -EOPNOTSUPP;
@@ -600,7 +804,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
                arg = MMC_SECURE_TRIM1_ARG;
        else
                arg = MMC_SECURE_ERASE_ARG;
-
+retry:
        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 INAND_CMD38_ARG_EXT_CSD,
@@ -624,6 +828,10 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
                err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
        }
 out:
+       if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+               goto retry;
+       if (!err)
+               mmc_blk_reset_success(md, type);
        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);
@@ -669,249 +877,248 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
        }
 }
 
-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+#define CMD_ERRORS                                                     \
+       (R1_OUT_OF_RANGE |      /* Command argument out of range */     \
+        R1_ADDRESS_ERROR |     /* Misaligned address */                \
+        R1_BLOCK_LEN_ERROR |   /* Transferred block length incorrect */\
+        R1_WP_VIOLATION |      /* Tried to write to protected block */ \
+        R1_CC_ERROR |          /* Card controller error */             \
+        R1_ERROR)              /* General/unknown error */
+
+static int mmc_blk_err_check(struct mmc_card *card,
+                            struct mmc_async_req *areq)
 {
-       struct mmc_blk_data *md = mq->data;
-       struct mmc_card *card = md->queue.card;
-       struct mmc_blk_request brq;
-       int ret = 1, disable_multi = 0;
+       struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
+                                                   mmc_active);
+       struct mmc_blk_request *brq = &mq_mrq->brq;
+       struct request *req = mq_mrq->req;
+       int ecc_err = 0;
 
        /*
-        * Reliable writes are used to implement Forced Unit Access and
-        * REQ_META accesses, and are supported only on MMCs.
+        * sbc.error indicates a problem with the set block count
+        * command.  No data will have been transferred.
+        *
+        * cmd.error indicates a problem with the r/w command.  No
+        * data will have been transferred.
+        *
+        * stop.error indicates a problem with the stop command.  Data
+        * may have been transferred, or may still be transferring.
         */
-       bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
-                         (req->cmd_flags & REQ_META)) &&
-               (rq_data_dir(req) == WRITE) &&
-               (md->flags & MMC_BLK_REL_WR);
+       if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
+           brq->data.error) {
+               switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
+               case ERR_RETRY:
+                       return MMC_BLK_RETRY;
+               case ERR_ABORT:
+                       return MMC_BLK_ABORT;
+               case ERR_CONTINUE:
+                       break;
+               }
+       }
 
-       do {
-               struct mmc_command cmd = {0};
-               u32 readcmd, writecmd, status = 0;
-
-               memset(&brq, 0, sizeof(struct mmc_blk_request));
-               brq.mrq.cmd = &brq.cmd;
-               brq.mrq.data = &brq.data;
-
-               brq.cmd.arg = blk_rq_pos(req);
-               if (!mmc_card_blockaddr(card))
-                       brq.cmd.arg <<= 9;
-               brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-               brq.data.blksz = 512;
-               brq.stop.opcode = MMC_STOP_TRANSMISSION;
-               brq.stop.arg = 0;
-               brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-               brq.data.blocks = blk_rq_sectors(req);
+       /*
+        * Check for errors relating to the execution of the
+        * initial command - such as address errors.  No data
+        * has been transferred.
+        */
+       if (brq->cmd.resp[0] & CMD_ERRORS) {
+               pr_err("%s: r/w command failed, status = %#x\n",
+                      req->rq_disk->disk_name, brq->cmd.resp[0]);
+               return MMC_BLK_ABORT;
+       }
 
-               /*
-                * The block layer doesn't support all sector count
-                * restrictions, so we need to be prepared for too big
-                * requests.
-                */
-               if (brq.data.blocks > card->host->max_blk_count)
-                       brq.data.blocks = card->host->max_blk_count;
+       /*
+        * Everything else is either success, or a data error of some
+        * kind.  If it was a write, we may have transitioned to
+        * program mode, and we have to wait for that to complete.
+        */
+       if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+               u32 status;
+               do {
+                       int err = get_card_status(card, &status, 5);
+                       if (err) {
+                               printk(KERN_ERR "%s: error %d requesting status\n",
+                                      req->rq_disk->disk_name, err);
+                               return MMC_BLK_CMD_ERR;
+                       }
+                       /*
+                        * Some cards mishandle the status bits,
+                        * so make sure to check both the busy
+                        * indication and the card state.
+                        */
+               } while (!(status & R1_READY_FOR_DATA) ||
+                        (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+       }
 
-               /*
-                * After a read error, we redo the request one sector at a time
-                * in order to accurately determine which sectors can be read
-                * successfully.
-                */
-               if (disable_multi && brq.data.blocks > 1)
-                       brq.data.blocks = 1;
+       if (brq->data.error) {
+               pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
+                      req->rq_disk->disk_name, brq->data.error,
+                      (unsigned)blk_rq_pos(req),
+                      (unsigned)blk_rq_sectors(req),
+                      brq->cmd.resp[0], brq->stop.resp[0]);
 
-               if (brq.data.blocks > 1 || do_rel_wr) {
-                       /* SPI multiblock writes terminate using a special
-                        * token, not a STOP_TRANSMISSION request.
-                        */
-                       if (!mmc_host_is_spi(card->host) ||
-                           rq_data_dir(req) == READ)
-                               brq.mrq.stop = &brq.stop;
-                       readcmd = MMC_READ_MULTIPLE_BLOCK;
-                       writecmd = MMC_WRITE_MULTIPLE_BLOCK;
-               } else {
-                       brq.mrq.stop = NULL;
-                       readcmd = MMC_READ_SINGLE_BLOCK;
-                       writecmd = MMC_WRITE_BLOCK;
-               }
                if (rq_data_dir(req) == READ) {
-                       brq.cmd.opcode = readcmd;
-                       brq.data.flags |= MMC_DATA_READ;
+                       if (ecc_err)
+                               return MMC_BLK_ECC_ERR;
+                       return MMC_BLK_DATA_ERR;
                } else {
-                       brq.cmd.opcode = writecmd;
-                       brq.data.flags |= MMC_DATA_WRITE;
+                       return MMC_BLK_CMD_ERR;
                }
+       }
 
-               if (do_rel_wr)
-                       mmc_apply_rel_rw(&brq, card, req);
+       if (!brq->data.bytes_xfered)
+               return MMC_BLK_RETRY;
 
-               /*
-                * Pre-defined multi-block transfers are preferable to
-                * open ended-ones (and necessary for reliable writes).
-                * However, it is not sufficient to just send CMD23,
-                * and avoid the final CMD12, as on an error condition
-                * CMD12 (stop) needs to be sent anyway. This, coupled
-                * with Auto-CMD23 enhancements provided by some
-                * hosts, means that the complexity of dealing
-                * with this is best left to the host. If CMD23 is
-                * supported by card and host, we'll fill sbc in and let
-                * the host deal with handling it correctly. This means
-                * that for hosts that don't expose MMC_CAP_CMD23, no
-                * change of behavior will be observed.
-                *
-                * N.B: Some MMC cards experience perf degradation.
-                * We'll avoid using CMD23-bounded multiblock writes for
-                * these, while retaining features like reliable writes.
-                */
+       if (blk_rq_bytes(req) != brq->data.bytes_xfered)
+               return MMC_BLK_PARTIAL;
 
-               if ((md->flags & MMC_BLK_CMD23) &&
-                   mmc_op_multi(brq.cmd.opcode) &&
-                   (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
-                       brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
-                       brq.sbc.arg = brq.data.blocks |
-                               (do_rel_wr ? (1 << 31) : 0);
-                       brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
-                       brq.mrq.sbc = &brq.sbc;
-               }
+       return MMC_BLK_SUCCESS;
+}
 
-               mmc_set_data_timeout(&brq.data, card);
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+                              struct mmc_card *card,
+                              int disable_multi,
+                              struct mmc_queue *mq)
+{
+       u32 readcmd, writecmd;
+       struct mmc_blk_request *brq = &mqrq->brq;
+       struct request *req = mqrq->req;
+       struct mmc_blk_data *md = mq->data;
 
-               brq.data.sg = mq->sg;
-               brq.data.sg_len = mmc_queue_map_sg(mq);
+       /*
+        * Reliable writes are used to implement Forced Unit Access and
+        * REQ_META accesses, and are supported only on MMCs.
+        *
+        * XXX: this really needs a good explanation of why REQ_META
+        * is treated specially.
+        */
+       bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+                         (req->cmd_flags & REQ_META)) &&
+               (rq_data_dir(req) == WRITE) &&
+               (md->flags & MMC_BLK_REL_WR);
 
-               /*
-                * Adjust the sg list so it is the same size as the
-                * request.
-                */
-               if (brq.data.blocks != blk_rq_sectors(req)) {
-                       int i, data_size = brq.data.blocks << 9;
-                       struct scatterlist *sg;
-
-                       for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
-                               data_size -= sg->length;
-                               if (data_size <= 0) {
-                                       sg->length += data_size;
-                                       i++;
-                                       break;
-                               }
-                       }
-                       brq.data.sg_len = i;
-               }
+       memset(brq, 0, sizeof(struct mmc_blk_request));
+       brq->mrq.cmd = &brq->cmd;
+       brq->mrq.data = &brq->data;
 
-               mmc_queue_bounce_pre(mq);
+       brq->cmd.arg = blk_rq_pos(req);
+       if (!mmc_card_blockaddr(card))
+               brq->cmd.arg <<= 9;
+       brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+       brq->data.blksz = 512;
+       brq->stop.opcode = MMC_STOP_TRANSMISSION;
+       brq->stop.arg = 0;
+       brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+       brq->data.blocks = blk_rq_sectors(req);
 
-               mmc_wait_for_req(card->host, &brq.mrq);
+       /*
+        * The block layer doesn't support all sector count
+        * restrictions, so we need to be prepared for too big
+        * requests.
+        */
+       if (brq->data.blocks > card->host->max_blk_count)
+               brq->data.blocks = card->host->max_blk_count;
 
-               mmc_queue_bounce_post(mq);
+       /*
+        * After a read error, we redo the request one sector at a time
+        * in order to accurately determine which sectors can be read
+        * successfully.
+        */
+       if (disable_multi && brq->data.blocks > 1)
+               brq->data.blocks = 1;
 
-               /*
-                * Check for errors here, but don't jump to cmd_err
-                * until later as we need to wait for the card to leave
-                * programming mode even when things go wrong.
+       if (brq->data.blocks > 1 || do_rel_wr) {
+               /* SPI multiblock writes terminate using a special
+                * token, not a STOP_TRANSMISSION request.
                 */
-               if (brq.sbc.error || brq.cmd.error ||
-                   brq.data.error || brq.stop.error) {
-                       if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
-                               /* Redo read one sector at a time */
-                               printk(KERN_WARNING "%s: retrying using single "
-                                      "block read\n", req->rq_disk->disk_name);
-                               disable_multi = 1;
-                               continue;
-                       }
-                       status = get_card_status(card, req);
-               }
+               if (!mmc_host_is_spi(card->host) ||
+                   rq_data_dir(req) == READ)
+                       brq->mrq.stop = &brq->stop;
+               readcmd = MMC_READ_MULTIPLE_BLOCK;
+               writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+       } else {
+               brq->mrq.stop = NULL;
+               readcmd = MMC_READ_SINGLE_BLOCK;
+               writecmd = MMC_WRITE_BLOCK;
+       }
+       if (rq_data_dir(req) == READ) {
+               brq->cmd.opcode = readcmd;
+               brq->data.flags |= MMC_DATA_READ;
+       } else {
+               brq->cmd.opcode = writecmd;
+               brq->data.flags |= MMC_DATA_WRITE;
+       }
 
-               if (brq.sbc.error) {
-                       printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
-                              "command, response %#x, card status %#x\n",
-                              req->rq_disk->disk_name, brq.sbc.error,
-                              brq.sbc.resp[0], status);
-               }
+       if (do_rel_wr)
+               mmc_apply_rel_rw(brq, card, req);
 
-               if (brq.cmd.error) {
-                       printk(KERN_ERR "%s: error %d sending read/write "
-                              "command, response %#x, card status %#x\n",
-                              req->rq_disk->disk_name, brq.cmd.error,
-                              brq.cmd.resp[0], status);
-               }
+       /*
+        * Pre-defined multi-block transfers are preferable to
+        * open-ended ones (and necessary for reliable writes).
+        * However, it is not sufficient to just send CMD23,
+        * and avoid the final CMD12, as on an error condition
+        * CMD12 (stop) needs to be sent anyway. This, coupled
+        * with Auto-CMD23 enhancements provided by some
+        * hosts, means that the complexity of dealing
+        * with this is best left to the host. If CMD23 is
+        * supported by card and host, we'll fill sbc in and let
+        * the host deal with handling it correctly. This means
+        * that for hosts that don't expose MMC_CAP_CMD23, no
+        * change of behavior will be observed.
+        *
+        * N.B.: Some MMC cards experience perf degradation.
+        * We'll avoid using CMD23-bounded multiblock writes for
+        * these, while retaining features like reliable writes.
+        */
 
-               if (brq.data.error) {
-                       if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
-                               /* 'Stop' response contains card status */
-                               status = brq.mrq.stop->resp[0];
-                       printk(KERN_ERR "%s: error %d transferring data,"
-                              " sector %u, nr %u, card status %#x\n",
-                              req->rq_disk->disk_name, brq.data.error,
-                              (unsigned)blk_rq_pos(req),
-                              (unsigned)blk_rq_sectors(req), status);
-               }
+       if ((md->flags & MMC_BLK_CMD23) &&
+           mmc_op_multi(brq->cmd.opcode) &&
+           (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
+               brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+               brq->sbc.arg = brq->data.blocks |
+                       (do_rel_wr ? (1 << 31) : 0);
+               brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+               brq->mrq.sbc = &brq->sbc;
+       }
 
-               if (brq.stop.error) {
-                       printk(KERN_ERR "%s: error %d sending stop command, "
-                              "response %#x, card status %#x\n",
-                              req->rq_disk->disk_name, brq.stop.error,
-                              brq.stop.resp[0], status);
-               }
+       mmc_set_data_timeout(&brq->data, card);
 
-               if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
-                       do {
-                               int err;
-
-                               cmd.opcode = MMC_SEND_STATUS;
-                               cmd.arg = card->rca << 16;
-                               cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
-                               err = mmc_wait_for_cmd(card->host, &cmd, 5);
-                               if (err) {
-                                       printk(KERN_ERR "%s: error %d requesting status\n",
-                                              req->rq_disk->disk_name, err);
-                                       goto cmd_err;
-                               }
-                               /*
-                                * Some cards mishandle the status bits,
-                                * so make sure to check both the busy
-                                * indication and the card state.
-                                */
-                       } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
-                               (R1_CURRENT_STATE(cmd.resp[0]) == 7));
-
-#if 0
-                       if (cmd.resp[0] & ~0x00000900)
-                               printk(KERN_ERR "%s: status = %08x\n",
-                                      req->rq_disk->disk_name, cmd.resp[0]);
-                       if (mmc_decode_status(cmd.resp))
-                               goto cmd_err;
-#endif
-               }
+       brq->data.sg = mqrq->sg;
+       brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
 
-               if (brq.cmd.error || brq.stop.error || brq.data.error) {
-                       if (rq_data_dir(req) == READ) {
-                               /*
-                                * After an error, we redo I/O one sector at a
-                                * time, so we only reach here after trying to
-                                * read a single sector.
-                                */
-                               spin_lock_irq(&md->lock);
-                               ret = __blk_end_request(req, -EIO, brq.data.blksz);
-                               spin_unlock_irq(&md->lock);
-                               continue;
+       /*
+        * Adjust the sg list so it is the same size as the
+        * request.
+        */
+       if (brq->data.blocks != blk_rq_sectors(req)) {
+               int i, data_size = brq->data.blocks << 9;
+               struct scatterlist *sg;
+
+               for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+                       data_size -= sg->length;
+                       if (data_size <= 0) {
+                               sg->length += data_size;
+                               i++;
+                               break;
                        }
-                       goto cmd_err;
                }
+               brq->data.sg_len = i;
+       }
 
-               /*
-                * A block was successfully transferred.
-                */
-               spin_lock_irq(&md->lock);
-               ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
-               spin_unlock_irq(&md->lock);
-       } while (ret);
+       mqrq->mmc_active.mrq = &brq->mrq;
+       mqrq->mmc_active.err_check = mmc_blk_err_check;
 
-       return 1;
+       mmc_queue_bounce_pre(mqrq);
+}
 
- cmd_err:
-       /*
-        * If this is an SD card and we're writing, we can first
-        * mark the known good sectors as ok.
-        *
+static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
+                          struct mmc_blk_request *brq, struct request *req,
+                          int ret)
+{
+       /*
+        * If this is an SD card and we're writing, we can first
+        * mark the known good sectors as ok.
+        *
         * If the card is not SD, we can still ok written sectors
         * as reported by the controller (which might be less than
         * the real number of written sectors, but never more).
@@ -927,15 +1134,135 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
                }
        } else {
                spin_lock_irq(&md->lock);
-               ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+               ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
                spin_unlock_irq(&md->lock);
        }
+       return ret;
+}
+
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+{
+       struct mmc_blk_data *md = mq->data;
+       struct mmc_card *card = md->queue.card;
+       struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
+       int ret = 1, disable_multi = 0, retry = 0, type;
+       enum mmc_blk_status status;
+       struct mmc_queue_req *mq_rq;
+       struct request *req;
+       struct mmc_async_req *areq;
+
+       if (!rqc && !mq->mqrq_prev->req)
+               return 0;
+
+       do {
+               if (rqc) {
+                       mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+                       areq = &mq->mqrq_cur->mmc_active;
+               } else
+                       areq = NULL;
+               areq = mmc_start_req(card->host, areq, (int *) &status);
+               if (!areq)
+                       return 0;
+
+               mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+               brq = &mq_rq->brq;
+               req = mq_rq->req;
+               type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+               mmc_queue_bounce_post(mq_rq);
+
+               switch (status) {
+               case MMC_BLK_SUCCESS:
+               case MMC_BLK_PARTIAL:
+                       /*
+                        * A block was successfully transferred.
+                        */
+                       mmc_blk_reset_success(md, type);
+                       spin_lock_irq(&md->lock);
+                       ret = __blk_end_request(req, 0,
+                                               brq->data.bytes_xfered);
+                       spin_unlock_irq(&md->lock);
+                       /*
+                        * If the blk_end_request function returns non-zero even
+                        * though all data has been transferred and no errors
+                        * were returned by the host controller, it's a bug.
+                        */
+                       if (status == MMC_BLK_SUCCESS && ret) {
+                               printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n",
+                                      __func__, blk_rq_bytes(req),
+                                      brq->data.bytes_xfered);
+                               rqc = NULL;
+                               goto cmd_abort;
+                       }
+                       break;
+               case MMC_BLK_CMD_ERR:
+                       ret = mmc_blk_cmd_err(md, card, brq, req, ret);
+                       if (!mmc_blk_reset(md, card->host, type))
+                               break;
+                       goto cmd_abort;
+               case MMC_BLK_RETRY:
+                       if (retry++ < 5)
+                               break;
+                       /* Fall through */
+               case MMC_BLK_ABORT:
+                       if (!mmc_blk_reset(md, card->host, type))
+                               break;
+                       goto cmd_abort;
+               case MMC_BLK_DATA_ERR: {
+                       int err;
+
+                       err = mmc_blk_reset(md, card->host, type);
+                       if (!err)
+                               break;
+                       if (err == -ENODEV)
+                               goto cmd_abort;
+                       /* Fall through */
+               }
+               case MMC_BLK_ECC_ERR:
+                       if (brq->data.blocks > 1) {
+                               /* Redo read one sector at a time */
+                               pr_warning("%s: retrying using single block read\n",
+                                          req->rq_disk->disk_name);
+                               disable_multi = 1;
+                               break;
+                       }
+                       /*
+                        * After an error, we redo I/O one sector at a
+                        * time, so we only reach here after trying to
+                        * read a single sector.
+                        */
+                       spin_lock_irq(&md->lock);
+                       ret = __blk_end_request(req, -EIO,
+                                               brq->data.blksz);
+                       spin_unlock_irq(&md->lock);
+                       if (!ret)
+                               goto start_new_req;
+                       break;
+               }
+
+               if (ret) {
+                       /*
+                        * In case of an incomplete request,
+                        * prepare it again and resend.
+                        */
+                       mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
+                       mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+               }
+       } while (ret);
+
+       return 1;
 
+ cmd_abort:
        spin_lock_irq(&md->lock);
        while (ret)
                ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
        spin_unlock_irq(&md->lock);
 
+ start_new_req:
+       if (rqc) {
+               mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+               mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
+       }
+
        return 0;
 }
 
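The rewritten mmc_blk_issue_rw_rq() above is shaped entirely by mmc_start_req(): each pass through the loop submits the freshly prepared request (if any) and gets back the previously issued one for status handling, so preparation of request N+1 overlaps the data transfer of request N. The skeleton of the loop with the error handling stripped away, as a structural sketch only:

    do {
            if (rqc)        /* prepare the incoming request, if any */
                    mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);

            /* Submits the new request and returns the *previous* one
             * once it completes; NULL means nothing was left in flight. */
            areq = mmc_start_req(card->host,
                                 rqc ? &mq->mqrq_cur->mmc_active : NULL,
                                 (int *)&status);
            if (!areq)
                    return 0;

            mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
            /* ...act on 'status'; __blk_end_request() the finished one... */
    } while (ret);
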
@@ -945,26 +1272,41 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
 
-       mmc_claim_host(card->host);
+       if (req && !mq->mqrq_prev->req)
+               /* claim host only for the first request */
+               mmc_claim_host(card->host);
+
        ret = mmc_blk_part_switch(card, md);
        if (ret) {
+               if (req) {
+                       spin_lock_irq(&md->lock);
+                       __blk_end_request_all(req, -EIO);
+                       spin_unlock_irq(&md->lock);
+               }
                ret = 0;
                goto out;
        }
 
-       if (req->cmd_flags & REQ_DISCARD) {
+       if (req && req->cmd_flags & REQ_DISCARD) {
+               /* complete ongoing async transfer before issuing discard */
+               if (card->host->areq)
+                       mmc_blk_issue_rw_rq(mq, NULL);
                if (req->cmd_flags & REQ_SECURE)
                        ret = mmc_blk_issue_secdiscard_rq(mq, req);
                else
                        ret = mmc_blk_issue_discard_rq(mq, req);
-       } else if (req->cmd_flags & REQ_FLUSH) {
+       } else if (req && req->cmd_flags & REQ_FLUSH) {
+               /* complete ongoing async transfer before issuing flush */
+               if (card->host->areq)
+                       mmc_blk_issue_rw_rq(mq, NULL);
                ret = mmc_blk_issue_flush(mq, req);
        } else {
                ret = mmc_blk_issue_rw_rq(mq, req);
        }
 
 out:
-       mmc_release_host(card->host);
+       if (!req)
+               /* release host only when there are no more requests */
+               mmc_release_host(card->host);
        return ret;
 }