/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)     ((sec) >> 3)
#define ELV_HASH_FN(sec)        (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES        (1 << elv_hash_shift)
#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
#define ELV_ON_HASH(rq)         (!hlist_unhashed(&(rq)->hash))

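/*
 * Worked example: a request with ->sector = 64 and ->nr_sectors = 8
 * covers sectors 64..71, so rq_hash_key() evaluates to 72, and a bio
 * starting at sector 72 can be looked up as a back merge candidate.
 */
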
/*
 * Query the io scheduler to see if bio, issued by the current process,
 * may be merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
        request_queue_t *q = rq->q;
        elevator_t *e = q->elevator;

        if (e->ops->elevator_allow_merge_fn)
                return e->ops->elevator_allow_merge_fn(q, rq, bio);

        return 1;
}

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq))
                return 0;

        /*
         * different data direction or already started, don't merge
         */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return 0;

        /*
         * must be same device and not a special request
         */
        if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
                return 0;

        if (!elv_iosched_allow_merge(rq, bio))
                return 0;

        return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
        int ret = ELEVATOR_NO_MERGE;

        /*
         * if merging is allowed at all, check whether bio lines up
         * behind or in front of __rq
         */
        if (elv_rq_merge_ok(__rq, bio)) {
                if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
                        ret = ELEVATOR_BACK_MERGE;
                else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
                        ret = ELEVATOR_FRONT_MERGE;
        }

        return ret;
}

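/*
 * Worked example: with __rq->sector = 100 and __rq->nr_sectors = 8, a
 * bio with bi_sector = 108 lines up with the tail of the request
 * (ELEVATOR_BACK_MERGE), while an 8-sector bio with bi_sector = 92
 * ends exactly where the request begins (ELEVATOR_FRONT_MERGE).
 */
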
static struct elevator_type *elevator_find(const char *name)
{
        struct elevator_type *e;
        struct list_head *entry;

        list_for_each(entry, &elv_list) {

                e = list_entry(entry, struct elevator_type, list);

                if (!strcmp(e->elevator_name, name))
                        return e;
        }

        return NULL;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
        struct elevator_type *e;

        spin_lock_irq(&elv_list_lock);

        e = elevator_find(name);
        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock_irq(&elv_list_lock);

        return e;
}

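/*
 * Callers that take a reference with elevator_get() must drop it again
 * with elevator_put() when they are done with the type, as
 * elv_iosched_store() below does when no switch is needed.
 */
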
static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
{
        return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
                           void *data)
{
        q->elevator = eq;
        eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
        if (!strcmp(str, "as"))
                strcpy(chosen_elevator, "anticipatory");
        else
                strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 1;
}

__setup("elevator=", elevator_setup);

static struct kobj_type elv_ktype;

static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
{
        elevator_t *eq;
        int i;

        eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
        if (unlikely(!eq))
                goto err;

        memset(eq, 0, sizeof(*eq));
        eq->ops = &e->ops;
        eq->elevator_type = e;
        kobject_init(&eq->kobj);
        snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
        eq->kobj.ktype = &elv_ktype;
        mutex_init(&eq->sysfs_lock);

        eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
                                        GFP_KERNEL, q->node);
        if (!eq->hash)
                goto err;

        for (i = 0; i < ELV_HASH_ENTRIES; i++)
                INIT_HLIST_HEAD(&eq->hash[i]);

        return eq;
err:
        kfree(eq);
        elevator_put(e);
        return NULL;
}

static void elevator_release(struct kobject *kobj)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);

        elevator_put(e->elevator_type);
        kfree(e->hash);
        kfree(e);
}

int elevator_init(request_queue_t *q, char *name)
{
        struct elevator_type *e = NULL;
        struct elevator_queue *eq;
        int ret = 0;
        void *data;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        if (name && !(e = elevator_get(name)))
                return -EINVAL;

        if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
                printk(KERN_ERR "I/O scheduler %s not found\n", chosen_elevator);

        if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
                printk(KERN_ERR "Default I/O scheduler not found, using no-op\n");
                e = elevator_get("noop");
        }

        eq = elevator_alloc(q, e);
        if (!eq)
                return -ENOMEM;

        data = elevator_init_queue(q, eq);
        if (!data) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
        }

        elevator_attach(q, eq, data);
        return ret;
}

EXPORT_SYMBOL(elevator_init);

void elevator_exit(elevator_t *e)
{
        mutex_lock(&e->sysfs_lock);
        if (e->ops->elevator_exit_fn)
                e->ops->elevator_exit_fn(e);
        e->ops = NULL;
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}

EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
        hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(request_queue_t *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}

static void elv_rqhash_add(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(request_queue_t *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
{
        elevator_t *e = q->elevator;
        struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
        struct hlist_node *entry, *next;
        struct request *rq;

        hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}

/*
 * RB-tree support functions for insertion/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (rq->sector < __rq->sector)
                        p = &(*p)->rb_left;
                else if (rq->sector > __rq->sector)
                        p = &(*p)->rb_right;
                else
                        return __rq;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
        return NULL;
}

EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}

EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < rq->sector)
                        n = n->rb_left;
                else if (sector > rq->sector)
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}

EXPORT_SYMBOL(elv_rb_find);

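/*
 * A minimal sketch (not from this file) of how an io scheduler might
 * keep its private sort tree with the helpers above; "sort_list" and
 * example_add_rq_rb() are hypothetical names.
 */
static void example_add_rq_rb(struct rb_root *sort_list, struct request *rq)
{
        struct request *alias;

        /*
         * elv_rb_add() refuses to insert a second request at the same
         * sector and hands back the existing one; a real scheduler
         * would dispatch the alias rather than just drop it from the
         * tree as done here.
         */
        while ((alias = elv_rb_add(sort_list, rq)) != NULL)
                elv_rb_del(sort_list, alias);
}
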
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;

        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        boundary = q->end_sector;

        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
                        break;
                if (rq->sector >= boundary) {
                        if (pos->sector < boundary)
                                continue;
                } else {
                        if (pos->sector >= boundary)
                                break;
                }
                if (rq->sector >= pos->sector)
                        break;
        }

        list_add(&rq->queuelist, entry);
}

EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        q->end_sector = rq_end_sector(rq);
        q->boundary_rq = rq;
        list_add_tail(&rq->queuelist, &q->queue_head);
}

EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        elevator_t *e = q->elevator;
        struct request *__rq;
        int ret;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge) {
                ret = elv_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->ops->elevator_merge_fn)
                return e->ops->elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}

void elv_merged_request(request_queue_t *q, struct request *rq, int type)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merged_fn)
                e->ops->elevator_merged_fn(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(request_queue_t *q, struct request *rq,
                             struct request *next)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merge_req_fn)
                e->ops->elevator_merge_req_fn(q, rq, next);

        elv_rqhash_reposition(q, rq);
        elv_rqhash_del(q, next);

        q->nr_sorted--;
        q->last_merge = rq;
}

void elv_requeue_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
                        e->ops->elevator_deactivate_req_fn(q, rq);
        }

        rq->cmd_flags &= ~REQ_STARTED;

        elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

static void elv_drain_elevator(request_queue_t *q)
{
        static int printed;
        while (q->elevator->ops->elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted == 0)
                return;
        if (printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->elevator_type->elevator_name, q->nr_sorted);
        }
}

void elv_insert(request_queue_t *q, struct request *rq, int where)
{
        struct list_head *pos;
        unsigned ordseq;
        int unplug_it = 1;

        blk_add_trace_rq(q, rq, BLK_TA_INSERT);

        rq->q = q;

        switch (where) {
        case ELEVATOR_INSERT_FRONT:
                rq->cmd_flags |= REQ_SOFTBARRIER;

                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->cmd_flags |= REQ_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have held back requests by
                 *   returning NULL earlier and is returning them now.
                 *   As the queue wasn't empty before this request,
                 *   ll_rw_blk won't run the queue on return, resulting
                 *   in a hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                blk_remove_plug(q);
                q->request_fn(q);
                break;

        case ELEVATOR_INSERT_SORT:
                BUG_ON(!blk_fs_request(rq));
                rq->cmd_flags |= REQ_SORTED;
                q->nr_sorted++;
                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->ops->elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_REQUEUE:
                /*
                 * If ordered flush isn't in progress, we do front
                 * insertion; otherwise, requests should be requeued
                 * in ordseq order.
                 */
                rq->cmd_flags |= REQ_SOFTBARRIER;

                if (q->ordseq == 0) {
                        list_add(&rq->queuelist, &q->queue_head);
                        break;
                }

                ordseq = blk_ordered_req_seq(rq);

                list_for_each(pos, &q->queue_head) {
                        struct request *pos_rq = list_entry_rq(pos);
                        if (ordseq <= blk_ordered_req_seq(pos_rq))
                                break;
                }

                list_add_tail(&rq->queuelist, pos);
                /*
                 * Most requeues happen because of a busy condition;
                 * don't force an unplug of the queue in that case.
                 */
                unplug_it = 0;
                break;

        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __FUNCTION__, where);
                BUG();
        }

        if (unplug_it && blk_queue_plugged(q)) {
                int nrq = q->rq.count[READ] + q->rq.count[WRITE]
                        - q->in_flight;

                if (nrq >= q->unplug_thresh)
                        __generic_unplug_device(q);
        }
}

void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                       int plug)
{
        if (q->ordcolor)
                rq->cmd_flags |= REQ_ORDERED_COLOR;

        if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
                /*
                 * toggle ordered color
                 */
                if (blk_barrier_rq(rq))
                        q->ordcolor ^= 1;

                /*
                 * barriers implicitly indicate back insertion
                 */
                if (where == ELEVATOR_INSERT_SORT)
                        where = ELEVATOR_INSERT_BACK;

                /*
                 * this request is a scheduling boundary, update
                 * end_sector
                 */
                if (blk_fs_request(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;

        if (plug)
                blk_plug_device(q);

        elv_insert(q, rq, where);
}

EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(request_queue_t *q, struct request *rq, int where,
                     int plug)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where, plug);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

EXPORT_SYMBOL(elv_add_request);

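/*
 * A minimal usage sketch (hypothetical caller): hand a filesystem
 * request to the io scheduler for sorting, plugging the device so the
 * queue is not run immediately.
 */
static void example_queue_fs_request(request_queue_t *q, struct request *rq)
{
        elv_add_request(q, rq, ELEVATOR_INSERT_SORT, 1);
}
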
static inline struct request *__elv_next_request(request_queue_t *q)
{
        struct request *rq;

        while (1) {
                while (!list_empty(&q->queue_head)) {
                        rq = list_entry_rq(q->queue_head.next);
                        if (blk_do_ordered(q, &rq))
                                return rq;
                }

                if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
                        return NULL;
        }
}

struct request *elv_next_request(request_queue_t *q)
{
        struct request *rq;
        int ret;

        while ((rq = __elv_next_request(q)) != NULL) {
                if (!(rq->cmd_flags & REQ_STARTED)) {
                        elevator_t *e = q->elevator;

                        /*
                         * This is the first time the device driver
                         * sees this request (possibly after
                         * requeueing).  Notify IO scheduler.
                         */
                        if (blk_sorted_rq(rq) &&
                            e->ops->elevator_activate_req_fn)
                                e->ops->elevator_activate_req_fn(q, rq);

                        /*
                         * Just mark it as started even if we don't
                         * start it: a request that has been delayed
                         * should not be passed by new incoming
                         * requests.
                         */
                        rq->cmd_flags |= REQ_STARTED;
                        blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
                }

                if (!q->boundary_rq || q->boundary_rq == rq) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = NULL;
                }

                if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
                        break;

                ret = q->prep_rq_fn(q, rq);
                if (ret == BLKPREP_OK) {
                        break;
                } else if (ret == BLKPREP_DEFER) {
                        /*
                         * the request may have been (partially) prepped.
                         * we need to keep this request in the front to
                         * avoid resource deadlock.  REQ_STARTED will
                         * prevent other fs requests from passing this one.
                         */
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL) {
                        int nr_bytes = rq->hard_nr_sectors << 9;

                        if (!nr_bytes)
                                nr_bytes = rq->data_len;

                        blkdev_dequeue_request(rq);
                        rq->cmd_flags |= REQ_QUIET;
                        end_that_request_chunk(rq, 0, nr_bytes);
                        end_that_request_last(rq, 0);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
                                                                ret);
                        break;
                }
        }

        return rq;
}

EXPORT_SYMBOL(elv_next_request);

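/*
 * A minimal sketch of the consumer side (hypothetical driver): the
 * request_fn loop of this era pulls requests with elv_next_request()
 * and dequeues each one before starting the hardware transfer.
 */
static void example_request_fn(request_queue_t *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                blkdev_dequeue_request(rq);
                /*
                 * start the transfer for rq here; completion would go
                 * through end_that_request_chunk()/end_that_request_last()
                 */
        }
}
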
void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
        BUG_ON(list_empty(&rq->queuelist));
        BUG_ON(ELV_ON_HASH(rq));

        list_del_init(&rq->queuelist);

        /*
         * the time frame between a request being removed from the
         * lists and when it is freed is accounted as io in progress
         * on the driver side.
         */
        if (blk_account_rq(rq))
                q->in_flight++;
}

EXPORT_SYMBOL(elv_dequeue_request);

int elv_queue_empty(request_queue_t *q)
{
        elevator_t *e = q->elevator;

        if (!list_empty(&q->queue_head))
                return 0;

        if (e->ops->elevator_queue_empty_fn)
                return e->ops->elevator_queue_empty_fn(q);

        return 1;
}

EXPORT_SYMBOL(elv_queue_empty);

struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_latter_req_fn)
                return e->ops->elevator_latter_req_fn(q, rq);
        return NULL;
}

struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_former_req_fn)
                return e->ops->elevator_former_req_fn(q, rq);
        return NULL;
}

int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_set_req_fn)
                return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

        rq->elevator_private = NULL;
        return 0;
}

void elv_put_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_put_req_fn)
                e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(request_queue_t *q, int rw)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_may_queue_fn)
                return e->ops->elevator_may_queue_fn(q, rw);

        return ELV_MQUEUE_MAY;
}

void elv_completed_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * the request has been released by the driver, so its io must
         * be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }

        /*
         * Check if the queue is waiting for fs requests to be
         * drained for the flush sequence.
         */
        if (unlikely(q->ordseq)) {
                struct request *first_rq = list_entry_rq(q->queue_head.next);
                if (q->in_flight == 0 &&
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
                        q->request_fn(q);
                }
        }
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct elv_fs_entry *entry = to_elv(attr);
        ssize_t error;

        if (!entry->show)
                return -EIO;

        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct elv_fs_entry *entry = to_elv(attr);
        ssize_t error;

        if (!entry->store)
                return -EIO;

        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};

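/*
 * A minimal sketch (hypothetical scheduler) of the elevator_attrs
 * array that elv_attr_show()/elv_attr_store() above dispatch to; the
 * "quantum" tunable and its handlers are made up for illustration.
 */
static ssize_t example_quantum_show(elevator_t *e, char *page)
{
        return sprintf(page, "%d\n", 8);        /* hypothetical value */
}

static ssize_t example_quantum_store(elevator_t *e, const char *page,
                                     size_t count)
{
        return count;                           /* accept and ignore */
}

static struct elv_fs_entry example_attrs[] = {
        __ATTR(quantum, S_IRUGO | S_IWUSR, example_quantum_show,
               example_quantum_store),
        __ATTR_NULL
};
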
int elv_register_queue(struct request_queue *q)
{
        elevator_t *e = q->elevator;
        int error;

        e->kobj.parent = &q->kobj;

        error = kobject_add(&e->kobj);
        if (!error) {
                struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                kobject_uevent(&e->kobj, KOBJ_ADD);
        }
        return error;
}

static void __elv_unregister_queue(elevator_t *e)
{
        kobject_uevent(&e->kobj, KOBJ_REMOVE);
        kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
        if (q)
                __elv_unregister_queue(q->elevator);
}

int elv_register(struct elevator_type *e)
{
        spin_lock_irq(&elv_list_lock);
        BUG_ON(elevator_find(e->elevator_name));
        list_add_tail(&e->list, &elv_list);
        spin_unlock_irq(&elv_list_lock);

        printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
        if (!strcmp(e->elevator_name, chosen_elevator) ||
                        (!*chosen_elevator &&
                         !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
                                printk(" (default)");
        printk("\n");
        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
        struct task_struct *g, *p;

        /*
         * Iterate over every thread in the system and remove its io
         * context.
         */
        if (e->ops.trim) {
                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
                        task_lock(p);
                        if (p->io_context)
                                e->ops.trim(p->io_context);
                        task_unlock(p);
                } while_each_thread(g, p);
                read_unlock(&tasklist_lock);
        }

        spin_lock_irq(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock_irq(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

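/*
 * A minimal module sketch (hypothetical "example" io scheduler)
 * showing how elv_register()/elv_unregister() pair up; the
 * elevator_ops are left empty here, a real scheduler fills them in.
 */
static struct elevator_type iosched_example = {
        .elevator_name  = "example",
        .elevator_owner = THIS_MODULE,
};

static int __init example_init(void)
{
        return elv_register(&iosched_example);
}

static void __exit example_exit(void)
{
        elv_unregister(&iosched_example);
}

module_init(example_init);
module_exit(example_exit);
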
/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
        elevator_t *old_elevator, *e;
        void *data;

        /*
         * Allocate new elevator
         */
        e = elevator_alloc(q, new_e);
        if (!e)
                return 0;

        data = elevator_init_queue(q, e);
        if (!data) {
                kobject_put(&e->kobj);
                return 0;
        }

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data
         */
        spin_lock_irq(q->queue_lock);

        set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

        elv_drain_elevator(q);

        while (q->rq.elvpriv) {
                blk_remove_plug(q);
                q->request_fn(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
                elv_drain_elevator(q);
        }

        /*
         * Remember old elevator.
         */
        old_elevator = q->elevator;

        /*
         * attach and start new elevator
         */
        elevator_attach(q, e, data);

        spin_unlock_irq(q->queue_lock);

        __elv_unregister_queue(old_elevator);

        if (elv_register_queue(q))
                goto fail_register;

        /*
         * finally exit old elevator and turn off BYPASS.
         */
        elevator_exit(old_elevator);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        return 1;

fail_register:
        /*
         * switch failed, exit the new io scheduler and reattach the old
         * one again (along with re-adding the sysfs dir)
         */
        elevator_exit(e);
        q->elevator = old_elevator;
        elv_register_queue(q);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        return 0;
}

ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
        char elevator_name[ELV_NAME_MAX];
        size_t len;
        struct elevator_type *e;

        elevator_name[sizeof(elevator_name) - 1] = '\0';
        strncpy(elevator_name, name, sizeof(elevator_name) - 1);
        len = strlen(elevator_name);

        if (len && elevator_name[len - 1] == '\n')
                elevator_name[len - 1] = '\0';

        e = elevator_get(elevator_name);
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
        }

        if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
                elevator_put(e);
                return count;
        }

        if (!elevator_switch(q, e))
                printk(KERN_ERR "elevator: switch to %s failed\n", elevator_name);
        return count;
}

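/*
 * Typical usage from userspace:
 *
 *      # echo deadline > /sys/block/hda/queue/scheduler
 *
 * lands here via the queue's sysfs store method; the name is resolved
 * with elevator_get() and the actual switch is done by
 * elevator_switch() above.
 */
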
ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
        elevator_t *e = q->elevator;
        struct elevator_type *elv = e->elevator_type;
        struct list_head *entry;
        int len = 0;

        spin_lock_irq(&elv_list_lock);
        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;

                __e = list_entry(entry, struct elevator_type, list);
                if (!strcmp(elv->elevator_name, __e->elevator_name))
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                else
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock_irq(&elv_list_lock);

        len += sprintf(name+len, "\n");
        return len;
}

struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}

EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}

EXPORT_SYMBOL(elv_rb_latter_request);