/*
 *  linux/drivers/mmc/mmc_queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "mmc_queue.h"

#define MMC_QUEUE_EXIT          (1 << 0)
#define MMC_QUEUE_SUSPENDED     (1 << 1)

/*
 * Prepare an MMC request.  Essentially, this means passing the
 * preparation off to the media driver.  The media driver will
 * create a mmc_io_request in req->special.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
        struct mmc_queue *mq = q->queuedata;
        int ret = BLKPREP_KILL;

        if (req->flags & REQ_SPECIAL) {
                /*
                 * Special commands already have their command
                 * blocks set up in req->special.
                 */
                BUG_ON(!req->special);

                ret = BLKPREP_OK;
        } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
                /*
                 * Block I/O requests need translating according
                 * to the protocol.
                 */
                ret = mq->prep_fn(mq, req);
        } else {
                /*
                 * Everything else is invalid.
                 */
                blk_dump_rq_flags(req, "MMC bad request");
        }

        if (ret == BLKPREP_OK)
                req->flags |= REQ_DONTPREP;

        return ret;
}

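/*
 * A minimal sketch of the prep_fn hook a media driver installs
 * (hypothetical names; modelled on how mmc_block binds its per-device
 * state to the queue via mq->data):
 *
 *      static int example_prep_rq(struct mmc_queue *mq, struct request *req)
 *      {
 *              struct example_dev *dev = mq->data;
 *
 *              if (!dev)
 *                      return BLKPREP_KILL;
 *
 *              return BLKPREP_OK;
 *      }
 */
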
static int mmc_queue_thread(void *d)
{
        struct mmc_queue *mq = d;
        struct request_queue *q = mq->queue;
        DECLARE_WAITQUEUE(wait, current);

        /*
         * Set PF_NOFREEZE so the freezer will not put us to sleep
         * across a system suspend; we handle suspension ourselves.
         * PF_MEMALLOC lets the thread dip into memory reserves so
         * that writeback through this queue can make progress under
         * memory pressure.
         */
        current->flags |= PF_MEMALLOC|PF_NOFREEZE;

        daemonize("mmcqd");

        complete(&mq->thread_complete);

        down(&mq->thread_sem);
        add_wait_queue(&mq->thread_wq, &wait);
        do {
                struct request *req = NULL;

                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                if (!blk_queue_plugged(q))
                        req = elv_next_request(q);
                mq->req = req;
                spin_unlock_irq(q->queue_lock);

                if (!req) {
                        if (mq->flags & MMC_QUEUE_EXIT)
                                break;
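                        /*
                         * Idle: release thread_sem while we sleep so
                         * that mmc_queue_suspend() can take it and
                         * know that no request is being processed.
                         */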
                        up(&mq->thread_sem);
                        schedule();
                        down(&mq->thread_sem);
                        continue;
                }
                set_current_state(TASK_RUNNING);

                mq->issue_fn(mq, req);
        } while (1);
        remove_wait_queue(&mq->thread_wq, &wait);
        up(&mq->thread_sem);

        complete_and_exit(&mq->thread_complete, 0);
        return 0;
}

/*
 * Generic MMC request handler.  The block layer calls this whenever
 * it adds requests to the queue.  We wake our worker thread, but only
 * if it is idle (mq->req unset); a busy thread will pick up the new
 * work itself on its next pass over the queue.
 */
static void mmc_request(request_queue_t *q)
{
        struct mmc_queue *mq = q->queuedata;

        if (!mq->req)
                wake_up(&mq->thread_wq);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
        int ret;

        if (host->dev->dma_mask && *host->dev->dma_mask)
                limit = *host->dev->dma_mask;

        mq->card = card;
        mq->queue = blk_init_queue(mmc_request, lock);
        if (!mq->queue)
                return -ENOMEM;

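        /*
         * Export the host's bounce, segment and size limits to the
         * block layer so it never builds a request the controller
         * cannot handle.
         */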
        blk_queue_prep_rq(mq->queue, mmc_prep_request);
        blk_queue_bounce_limit(mq->queue, limit);
        blk_queue_max_sectors(mq->queue, host->max_sectors);
        blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
        blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
        blk_queue_max_segment_size(mq->queue, host->max_seg_size);

        mq->queue->queuedata = mq;
        mq->req = NULL;

        mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
                         GFP_KERNEL);
        if (!mq->sg) {
                ret = -ENOMEM;
                goto cleanup;
        }

        init_completion(&mq->thread_complete);
        init_waitqueue_head(&mq->thread_wq);
        init_MUTEX(&mq->thread_sem);

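        /*
         * The thread signals thread_complete once it has started up;
         * re-initialise the completion afterwards so that
         * mmc_cleanup_queue() can reuse it to wait for thread exit.
         */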
        ret = kernel_thread(mmc_queue_thread, mq, CLONE_KERNEL);
        if (ret >= 0) {
                wait_for_completion(&mq->thread_complete);
                init_completion(&mq->thread_complete);
                ret = 0;
                goto out;
        }

 cleanup:
        kfree(mq->sg);
        mq->sg = NULL;

        blk_cleanup_queue(mq->queue);
 out:
        return ret;
}
EXPORT_SYMBOL(mmc_init_queue);
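
/*
 * Typical use by a media driver (sketch; example_prep_rq,
 * example_issue_rq and md are hypothetical, modelled on mmc_block):
 *
 *      md->queue.prep_fn = example_prep_rq;
 *      md->queue.issue_fn = example_issue_rq;
 *      md->queue.data = md;
 *      ret = mmc_init_queue(&md->queue, card, &md->lock);
 *      if (ret)
 *              goto err_out;
 */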

void mmc_cleanup_queue(struct mmc_queue *mq)
{
        mq->flags |= MMC_QUEUE_EXIT;
        wake_up(&mq->thread_wq);
        wait_for_completion(&mq->thread_complete);

        kfree(mq->sg);
        mq->sg = NULL;

        blk_cleanup_queue(mq->queue);

        mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
        request_queue_t *q = mq->queue;
        unsigned long flags;

        if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
                mq->flags |= MMC_QUEUE_SUSPENDED;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);

                down(&mq->thread_sem);
        }
}
EXPORT_SYMBOL(mmc_queue_suspend);

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
        request_queue_t *q = mq->queue;
        unsigned long flags;

        if (mq->flags & MMC_QUEUE_SUSPENDED) {
                mq->flags &= ~MMC_QUEUE_SUSPENDED;

                up(&mq->thread_sem);

                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}
EXPORT_SYMBOL(mmc_queue_resume);
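
/*
 * A host or media driver pairs these around power transitions, e.g.
 * (sketch; md is hypothetical, modelled on mmc_block's PM handlers):
 *
 *      mmc_queue_suspend(&md->queue);
 *      ... power down the controller / card ...
 *      mmc_queue_resume(&md->queue);
 */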