/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
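/*
 * MMC_QUEUE_BOUNCESZ caps the bounce buffer (see mmc_init_queue) at
 * 64 KiB; MMC_QUEUE_SUSPENDED is the only bit kept in mmc_queue.flags.
 */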
#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)
/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
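/*
 * Worker thread, one per queue.  It sleeps until mmc_request() wakes it,
 * pulls requests off the block queue and feeds them to issue_fn.
 * thread_sem serialises it against mmc_queue_suspend()/mmc_queue_resume().
 */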
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		/* The queue is being torn down: fail everything pending. */
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->mqrq_cur->req)
		wake_up_process(mq->thread);
}
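/*
 * Allocate and initialise a scatterlist with sg_len entries.  On failure
 * NULL is returned and *err is set to -ENOMEM; on success *err is 0.
 */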
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
	if (!sg) {
		*err = -ENOMEM;
	} else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}
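/*
 * Tell the block layer about the card's erase capabilities so that
 * discard requests are sized and aligned to what the card can erase.
 */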
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0)
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}
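/*
 * Example: a card with pref_erase = 1024 (512-byte sectors) advertises
 * a discard granularity of 1024 << 9 = 512 KiB; if pref_erase exceeded
 * the computed maximum discard size, the granularity is dropped to 0
 * (no alignment hint).
 */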
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	memset(mqrq_cur, 0, sizeof(*mqrq_cur));
	mq->mqrq_cur = mqrq_cur;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);
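	/*
	 * Hosts limited to a single segment get a contiguous bounce
	 * buffer: a scattered request is copied through bounce_buf so
	 * the controller sees one large transfer instead of many small
	 * ones.
	 */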
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce cur buffer\n",
					mmc_card_name(card));
			}
		}

		if (mqrq_cur->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif
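	/*
	 * No bounce buffer: expose the host's real segment and size
	 * limits directly to the block layer.
	 */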
	if (!mqrq_cur->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}
	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}
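/*
 * Usage sketch: the MMC block driver calls mmc_init_queue() once per
 * disk and then wires up its issue function, roughly as below (names
 * outside this file, such as md and mmc_blk_issue_rq, belong to the
 * block driver and are shown only for illustration):
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 *	if (ret)
 *		goto err_putdisk;
 *	md->queue.issue_fn = mmc_blk_issue_rq;
 *	md->queue.data = md;
 */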
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
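/*
 * The suspend/resume pair below is driven by the block driver's power
 * management hooks (mmc_blk_suspend()/mmc_blk_resume() in block.c).
 */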
/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}
/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
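/*
 * The three helpers below are called by the block driver around each
 * data transfer: mmc_queue_map_sg() builds the scatterlist (collapsing
 * it onto the bounce buffer when one is in use), mmc_queue_bounce_pre()
 * copies write data into the buffer before the transfer starts, and
 * mmc_queue_bounce_post() copies read data out once it completes.
 */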
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	BUG_ON(!mqrq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	/* Hand the host a single segment covering the bounced data. */
	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
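/*
 * Typical call order in the issuing path, as a sketch (brq and mrq are
 * the block driver's request wrappers, not defined in this file):
 *
 *	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
 *	mmc_queue_bounce_pre(mqrq);
 *	mmc_wait_for_req(card->host, &brq->mrq);
 *	mmc_queue_bounce_post(mqrq);
 */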