#include <linux/mmc/card.h>
#include <linux/mmc/protocol.h>
+#include <linux/mmc/host.h>
#include <asm/system.h>
#include <asm/uaccess.h>
unsigned int usage;
unsigned int block_bits;
+ unsigned int suspended;
};
static DECLARE_MUTEX(open_lock);
stat = BLKPREP_KILL;
}
+ if (md->suspended) {
+ blk_plug_device(md->queue.queue);
+ stat = BLKPREP_DEFER;
+ }
+
+ /*
+ * Check for excessive requests.
+ */
+ if (req->sector + req->nr_sectors > get_capacity(req->rq_disk)) {
+ printk("bad request size\n");
+ stat = BLKPREP_KILL;
+ }
+
return stat;
}
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
int ret;
+
+#ifdef CONFIG_MMC_BULKTRANSFER
+ int failsafe;
+#endif
if (mmc_card_claim_host(card))
goto cmd_err;
+
+#ifdef CONFIG_MMC_BULKTRANSFER
+ /*
+ * We first try transferring multiple blocks. If this fails
+ * we fall back to single block transfers.
+ *
+ * This gives us good performance when all is well and the
+ * possibility to determine which sector fails when all
+ * is not well.
+ */
+ failsafe = 0;
+#endif
do {
struct mmc_blk_request brq;
brq.stop.arg = 0;
brq.stop.flags = MMC_RSP_R1B;
+#ifdef CONFIG_MMC_BULKTRANSFER
+ /*
+ * A multi-block transfer failed. Falling back to single
+ * blocks.
+ */
+ if (failsafe)
+ brq.data.blocks = 1;
+
+#else
+ /*
+ * Writes are done one sector at a time.
+ */
+ if (rq_data_dir(req) != READ)
+ brq.data.blocks = 1;
+#endif
+
+ ret = 1;
+
if (rq_data_dir(req) == READ) {
brq.cmd.opcode = brq.data.blocks > 1 ? MMC_READ_MULTIPLE_BLOCK : MMC_READ_SINGLE_BLOCK;
brq.data.flags |= MMC_DATA_READ;
} else {
- brq.cmd.opcode = MMC_WRITE_BLOCK;
+ brq.cmd.opcode = brq.data.blocks > 1 ? MMC_WRITE_MULTIPLE_BLOCK :
+ MMC_WRITE_BLOCK;
brq.cmd.flags = MMC_RSP_R1B;
brq.data.flags |= MMC_DATA_WRITE;
- brq.data.blocks = 1;
}
brq.mrq.stop = brq.data.blocks > 1 ? &brq.stop : NULL;
if (brq.cmd.error) {
printk(KERN_ERR "%s: error %d sending read/write command\n",
req->rq_disk->disk_name, brq.cmd.error);
- goto cmd_err;
+ goto cmd_fail;
}
if (brq.data.error) {
printk(KERN_ERR "%s: error %d transferring data\n",
req->rq_disk->disk_name, brq.data.error);
- goto cmd_err;
+ goto cmd_fail;
}
if (brq.stop.error) {
printk(KERN_ERR "%s: error %d sending stop command\n",
req->rq_disk->disk_name, brq.stop.error);
- goto cmd_err;
+ goto cmd_fail;
}
+ /* No need to check card status after a read */
+ if (rq_data_dir(req) == READ)
+ goto card_ready;
+
do {
int err;
if (err) {
printk(KERN_ERR "%s: error %d requesting status\n",
req->rq_disk->disk_name, err);
- goto cmd_err;
+ goto cmd_fail;
}
+#ifdef CONFIG_MMC_BLOCK_BROKEN_RFD
+ /* Work-around for broken cards setting READY_FOR_DATA
+ * when not actually ready.
+ */
+ if (R1_CURRENT_STATE(cmd.resp[0]) == 7)
+ cmd.resp[0] &= ~R1_READY_FOR_DATA;
+#endif
} while (!(cmd.resp[0] & R1_READY_FOR_DATA));
#if 0
goto cmd_err;
#endif
+ card_ready:
+
/*
* A block was successfully transferred.
*/
end_that_request_last(req);
}
spin_unlock_irq(&md->lock);
+
+#ifdef CONFIG_MMC_BULKTRANSFER
+ /*
+ * Go back to bulk mode if in failsafe mode.
+ */
+ failsafe = 0;
+#endif
+
+ continue;
+
+ cmd_fail:
+
+#ifdef CONFIG_MMC_BULKTRANSFER
+ if (failsafe)
+ goto cmd_err;
+ else
+ failsafe = 1;
+#else
+ goto cmd_err;
+#endif
+
} while (ret);
mmc_card_release_host(card);
end_that_request_last(req);
spin_unlock_irq(&md->lock);
+ /* If a command fails, the card might be removed. */
+ mmc_detect_change(card->host);
+
return 0;
}