/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"
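/*
 * Default bounce buffer size.  64 KiB is presumably a compromise: big
 * enough that single-segment hosts still see large, efficient transfers,
 * yet small enough to remain a reasonable kmalloc() allocation.  It is
 * clamped against the host's real limits in mmc_init_queue() below.
 */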
#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)
/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
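/*
 * Worker thread, one per queue.  It pulls requests off the block layer
 * queue and feeds them to issue_fn() one at a time; thread_sem both
 * serialises issuing and lets suspend/resume park the thread.
 */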
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	/* May be needed to make writeback progress; allow reserves. */
	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
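/*
 * Note on the loop above: TASK_INTERRUPTIBLE is set under queue_lock,
 * *before* the queue is checked, so a concurrent wake_up_process() from
 * mmc_request() cannot be lost - at worst it flips the thread back to
 * TASK_RUNNING and the schedule() returns immediately.
 */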
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		/* The queue is being torn down; fail everything quietly. */
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}
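/*
 * Tell the block layer about the card's discard capabilities.
 * pref_erase is in 512-byte sectors, hence the << 9 when expressing the
 * granularity in bytes.
 */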
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0)
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);
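	/*
	 * Bounce buffering: a host limited to a single segment would
	 * otherwise only ever see one-segment requests.  Instead we
	 * allocate one contiguous buffer and copy every request through
	 * it, so the host still gets large transfers.
	 */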
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif
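	/*
	 * Regular path, also the fallback if the bounce buffer could not
	 * be allocated: let the block layer bounce highmem pages as
	 * needed (via "limit") and expose the host's real segment limits.
	 */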
	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_segs);
	}
	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;

free_bounce_sg:
	kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
cleanup_queue:
	kfree(mq->sg);
	mq->sg = NULL;
	kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
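/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * the MMC block driver pairs these calls roughly as
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 *	if (ret)
 *		goto out;
 *	...
 *	mmc_cleanup_queue(&md->queue);
 *
 * where "md" stands in for the caller's per-device data.
 */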
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}
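/*
 * Note: once the down() above returns, the worker thread has completed
 * its current issue_fn() pass and is parked in schedule(); it cannot
 * retake thread_sem until mmc_queue_resume() releases it.
 */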
/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
	mq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}
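/*
 * So in the bounce case the host driver always sees exactly one segment:
 * mq->sg[0], spanning the bounce buffer.  The request's original
 * scatterlist stays in bounce_sg for the copy helpers below.
 */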
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
}
/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
}
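/*
 * Expected per-request call order (sketch, as used by the issue path):
 * mmc_queue_map_sg(), then mmc_queue_bounce_pre(), then the actual host
 * transfer, then mmc_queue_bounce_post().
 */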