/*
 * Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"
/**
 * blk_queue_ordered - does this queue support ordered writes
 * @q:        the request queue
 * @ordered:  one of QUEUE_ORDERED_*
 * @prepare_flush_fn: rq setup helper for cache flush ordered writes
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win. Block drivers supporting this
 *   feature should call this function and indicate so.
 *
 **/
int blk_queue_ordered(struct request_queue *q, unsigned ordered,
		      prepare_flush_fn *prepare_flush_fn)
{
	if (ordered != QUEUE_ORDERED_NONE &&
	    ordered != QUEUE_ORDERED_DRAIN &&
	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
	    ordered != QUEUE_ORDERED_TAG &&
	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
	    ordered != QUEUE_ORDERED_TAG_FUA) {
		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
		return -EINVAL;
	}

	q->ordered = ordered;
	q->next_ordered = ordered;
	q->prepare_flush_fn = prepare_flush_fn;

	return 0;
}
EXPORT_SYMBOL(blk_queue_ordered);
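
/*
 * Illustrative sketch (not part of the original file): a driver with a
 * volatile writeback cache would typically register a drain+flush ordered
 * mode from its queue setup path.  The names mydrv_prepare_flush and
 * mydrv_init_queue and the SCSI-style opcode (from <scsi/scsi.h>) are
 * hypothetical and only show the shape of a prepare_flush_fn.
 */
#if 0
static void mydrv_prepare_flush(struct request_queue *q, struct request *rq)
{
	/* turn the pre-allocated flush request into a device cache-flush cmd */
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = 60 * HZ;
	rq->cmd_len = 10;
	rq->cmd[0] = SYNCHRONIZE_CACHE;
}

static void mydrv_init_queue(struct request_queue *q)
{
	/* drain the queue, flush before the barrier and again after it */
	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydrv_prepare_flush);
}
#endif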

/*
 * Cache flushing for ordered writes handling
 */
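/*
 * Note: q->ordseq accumulates the single-bit QUEUE_ORDSEQ_* flags as the
 * stages of one barrier sequence complete, in ascending order STARTED,
 * DRAIN, PREFLUSH, BAR, POSTFLUSH, DONE.  ffz() returns the lowest clear
 * bit, so blk_ordered_cur_seq() below yields the first stage that has not
 * finished yet.
 */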
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
	if (!q->ordseq)
		return 0;
	return 1 << ffz(q->ordseq);
}

unsigned blk_ordered_req_seq(struct request *rq)
{
	struct request_queue *q = rq->q;

	BUG_ON(q->ordseq == 0);

	if (rq == &q->pre_flush_rq)
		return QUEUE_ORDSEQ_PREFLUSH;
	if (rq == &q->bar_rq)
		return QUEUE_ORDSEQ_BAR;
	if (rq == &q->post_flush_rq)
		return QUEUE_ORDSEQ_POSTFLUSH;

	/*
	 * !fs requests don't need to follow barrier ordering.  Always
	 * put them at the front.  This fixes the following deadlock.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/537473
	 */
	if (rq->cmd_type != REQ_TYPE_FS)
		return QUEUE_ORDSEQ_DRAIN;
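
	/*
	 * The elevator stamps every queued request with the current
	 * "ordered color" and flips that color each time a new barrier
	 * is queued.  A request that shares the original barrier's color
	 * was issued before the barrier and still has to drain; a
	 * differing color means it was issued after a later barrier and
	 * is already past this sequence.
	 */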
	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
		return QUEUE_ORDSEQ_DRAIN;
	else
		return QUEUE_ORDSEQ_DONE;
}

bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
	struct request *rq;

	if (error && !q->orderr)
		q->orderr = error;

	BUG_ON(q->ordseq & seq);
	q->ordseq |= seq;

	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
		return false;

	/*
	 * Okay, sequence complete.
	 */
	q->ordseq = 0;
	rq = q->orig_bar_rq;
	__blk_end_request_all(rq, q->orderr);
	return true;
}

static void pre_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}

static void queue_flush(struct request_queue *q, unsigned which)
{
	struct request *rq;
	rq_end_io_fn *end_io;

	if (which == QUEUE_ORDERED_DO_PREFLUSH) {
		rq = &q->pre_flush_rq;
		end_io = pre_flush_end_io;
	} else {
		rq = &q->post_flush_rq;
		end_io = post_flush_end_io;
	}

	blk_rq_init(q, rq);
	rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
	rq->rq_disk = q->bar_rq.rq_disk;
	rq->end_io = end_io;
	if (q->prepare_flush_fn)
		q->prepare_flush_fn(q, rq);

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}

static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	unsigned skip = 0;

	q->orderr = 0;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * For an empty barrier, there's no actual BAR request, which
	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
	 */
	if (!blk_rq_sectors(rq)) {
		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
				QUEUE_ORDERED_DO_POSTFLUSH);
		/*
		 * Empty barrier on a write-through device w/ ordered
		 * tag has no command to issue and without any command
		 * to issue, ordering by tag can't be used.  Drain
		 * instead.
		 */
		if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
		    !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
			q->ordered &= ~QUEUE_ORDERED_BY_TAG;
			q->ordered |= QUEUE_ORDERED_BY_DRAIN;
		}
	}

	/* stash away the original request */
	blk_dequeue_request(rq);
	q->orig_bar_rq = rq;
	rq = NULL;

	/*
	 * Queue ordered sequence.  As we stack them at the head, we
	 * need to queue in reverse order.  Note that we rely on that
	 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
	 * request gets in between the ordered sequence.
	 */
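	/*
	 * For example, with QUEUE_ORDERED_DRAIN_FLUSH the head of the
	 * dispatch queue ends up as pre_flush_rq -> bar_rq ->
	 * post_flush_rq, and the stages then complete in that order once
	 * the in-flight requests have drained.
	 */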
	if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
		rq = &q->post_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_POSTFLUSH;

	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
		rq = &q->bar_rq;

		/* initialize proxy request and queue it */
		blk_rq_init(q, rq);
		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
			rq->cmd_flags |= REQ_WRITE;
		if (q->ordered & QUEUE_ORDERED_DO_FUA)
			rq->cmd_flags |= REQ_FUA;
		init_request_from_bio(rq, q->orig_bar_rq->bio);
		rq->end_io = bar_end_io;

		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	} else
		skip |= QUEUE_ORDSEQ_BAR;

	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
		rq = &q->pre_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_PREFLUSH;

	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
		rq = NULL;
	else
		skip |= QUEUE_ORDSEQ_DRAIN;

	*rqp = rq;

	/*
	 * Complete skipped sequences.  If the whole sequence is complete,
	 * return false to tell the elevator that this request is gone.
	 */
	return !blk_ordered_complete_seq(q, skip, 0);
}

bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
				(rq->cmd_flags & REQ_HARDBARRIER);

	if (!q->ordseq) {
		if (!is_barrier)
			return true;

		if (q->next_ordered != QUEUE_ORDERED_NONE)
			return start_ordered(q, rqp);
		else {
			/*
			 * Queue ordering not supported.  Terminate
			 * with prejudice.
			 */
			blk_dequeue_request(rq);
			__blk_end_request_all(rq, -EOPNOTSUPP);
			*rqp = NULL;
			return false;
		}
	}

	/*
	 * Ordered sequence in progress
	 */

	/* Special requests are not subject to ordering rules. */
	if (rq->cmd_type != REQ_TYPE_FS &&
	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
		return true;

	if (q->ordered & QUEUE_ORDERED_BY_TAG) {
		/* Ordered by tag.  Blocking the next barrier is enough. */
		if (is_barrier && rq != &q->bar_rq)
			*rqp = NULL;
	} else {
		/* Ordered by draining.  Wait for turn. */
		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
			*rqp = NULL;
	}

	return true;
}
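
/*
 * Illustrative sketch (not part of the original file): this is roughly how
 * the request dispatch helper in block/blk.h consumes blk_do_ordered().
 * Whenever it returns true, *rqp may be handed to the driver (or is NULL
 * while the queue drains); when it returns false the request was consumed
 * by the ordered machinery and dispatch retries.  Details of the real
 * helper may differ; example_next_request is a hypothetical name.
 */
#if 0
static inline struct request *example_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;	/* may be NULL while draining */
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}
#endif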

static void bio_end_empty_barrier(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	if non-NULL, where to store the error offset reported
 *			by the driver
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a flush for the block device in question. The caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to. If the WAIT flag is not passed, the flush is only submitted;
 *    the caller must not assume it has completed by the time this returns.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_empty_barrier;
	bio->bi_bdev = bdev;
	if (test_bit(BLKDEV_WAIT, &flags))
		bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_BARRIER, bio);
	if (test_bit(BLKDEV_WAIT, &flags)) {
		wait_for_completion(&wait);
		/*
		 * The driver must store the error location in ->bi_sector, if
		 * it supports it. For non-stacked drivers, this should be
		 * copied from blk_rq_pos(rq).
		 */
		if (error_sector)
			*error_sector = bio->bi_sector;
	}

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	else if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
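
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * such as a filesystem's fsync path flushes the device's write cache and
 * ignores -EOPNOTSUPP, which only means the device has no cache to flush.
 * BLKDEV_IFL_WAIT sets the BLKDEV_WAIT bit tested above; myfs_flush_device
 * is a hypothetical name.
 */
#if 0
static int myfs_flush_device(struct block_device *bdev)
{
	int err = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);

	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
#endif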