/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq))
                return 0;

        /*
         * don't merge if the data directions differ
         */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return 0;

        /*
         * same device and no special stuff set, merge is ok
         */
        if (rq->rq_disk == bio->bi_bdev->bd_disk &&
            !rq->waiting && !rq->special)
                return 1;

        return 0;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
        int ret = ELEVATOR_NO_MERGE;

        /*
         * the merge is allowed; check whether the bio is contiguous with
         * the request, at its tail (back merge) or its head (front merge)
         */
        if (elv_rq_merge_ok(__rq, bio)) {
                if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
                        ret = ELEVATOR_BACK_MERGE;
                else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
                        ret = ELEVATOR_FRONT_MERGE;
        }

        return ret;
}

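/*
 * look up a registered elevator type by name.  Returns the matching
 * elevator_type, or NULL if no elevator with that name is registered.
 */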
static struct elevator_type *elevator_find(const char *name)
{
        struct elevator_type *e = NULL;
        struct list_head *entry;

        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;

                __e = list_entry(entry, struct elevator_type, list);

                if (!strcmp(__e->elevator_name, name)) {
                        e = __e;
                        break;
                }
        }

        return e;
}

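/*
 * elevator_get()/elevator_put() pin and release the module that
 * provides an elevator type, so it cannot be unloaded while in use.
 * elevator_get() returns NULL if the type is unknown or its module
 * reference cannot be taken.
 */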
static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
        struct elevator_type *e;

        spin_lock_irq(&elv_list_lock);

        e = elevator_find(name);
        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock_irq(&elv_list_lock);

        return e;
}

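/*
 * wire an allocated elevator_queue up to the request queue and run the
 * io scheduler's own init function, if it has one.  Returns 0 on
 * success or the error from elevator_init_fn.
 */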
static int elevator_attach(request_queue_t *q, struct elevator_type *e,
                           struct elevator_queue *eq)
{
        int ret = 0;

        memset(eq, 0, sizeof(*eq));
        eq->ops = &e->ops;
        eq->elevator_type = e;

        q->elevator = eq;

        if (eq->ops->elevator_init_fn)
                ret = eq->ops->elevator_init_fn(q, eq);

        return ret;
}

static char chosen_elevator[16];

static void elevator_setup_default(void)
{
        struct elevator_type *e;

        /*
         * If default has not been set, use the compiled-in selection.
         */
        if (!chosen_elevator[0])
                strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);

        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
        if (!strcmp(chosen_elevator, "as"))
                strcpy(chosen_elevator, "anticipatory");

        /*
         * If the given scheduler is not available, fall back to the default
         */
        if ((e = elevator_find(chosen_elevator)))
                elevator_put(e);
        else
                strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
}

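/*
 * parse the "elevator=" boot parameter and remember the requested
 * default io scheduler.  Returns 1 so the option is consumed and not
 * passed on to init.
 */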
static int __init elevator_setup(char *str)
{
        strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 1;
}

__setup("elevator=", elevator_setup);

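/*
 * initialize the io scheduler for a request queue.  If name is NULL,
 * the boot-time/compiled-in default is used.  Returns -EINVAL for an
 * unknown scheduler, -ENOMEM on allocation failure, or the result of
 * attaching the elevator.
 */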
int elevator_init(request_queue_t *q, char *name)
{
        struct elevator_type *e = NULL;
        struct elevator_queue *eq;
        int ret = 0;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        elevator_setup_default();

        if (!name)
                name = chosen_elevator;

        e = elevator_get(name);
        if (!e)
                return -EINVAL;

        eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
        if (!eq) {
                elevator_put(e);
                return -ENOMEM;
        }

        ret = elevator_attach(q, e, eq);
        if (ret) {
                kfree(eq);
                elevator_put(e);
        }

        return ret;
}

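/*
 * tear down an elevator: run its exit function, drop the module
 * reference and free the elevator_queue itself.
 */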
void elevator_exit(elevator_t *e)
{
        if (e->ops->elevator_exit_fn)
                e->ops->elevator_exit_fn(e);

        elevator_put(e->elevator_type);
        e->elevator_type = NULL;
        kfree(e);
}

/*
 * Insert rq into the dispatch queue of q, sorted by sector but keeping
 * requests on the same side of the current scheduling boundary together.
 * Queue lock must be held on entry.  To be used by specific elevators.
 */
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;

        if (q->last_merge == rq)
                q->last_merge = NULL;
        q->nr_sorted--;

        boundary = q->end_sector;

        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
                        break;
                if (rq->sector >= boundary) {
                        if (pos->sector < boundary)
                                continue;
                } else {
                        if (pos->sector >= boundary)
                                break;
                }
                if (rq->sector >= pos->sector)
                        break;
        }

        list_add(&rq->queuelist, entry);
}

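/*
 * find a request that a bio may be merged with.  The cached last_merge
 * hint is tried first, then the io scheduler is asked.  Returns
 * ELEVATOR_BACK_MERGE, ELEVATOR_FRONT_MERGE or ELEVATOR_NO_MERGE, and
 * stores the merge candidate in *req when a merge is possible.
 */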
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        elevator_t *e = q->elevator;
        int ret;

        if (q->last_merge) {
                ret = elv_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (e->ops->elevator_merge_fn)
                return e->ops->elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}

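/*
 * notify the io scheduler that a bio was merged into rq, and cache rq
 * as the next merge candidate.
 */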
void elv_merged_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merged_fn)
                e->ops->elevator_merged_fn(q, rq);

        q->last_merge = rq;
}

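/*
 * two requests have been merged; let the io scheduler clean up its
 * state for the request that is going away.
 */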
void elv_merge_requests(request_queue_t *q, struct request *rq,
                        struct request *next)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merge_req_fn)
                e->ops->elevator_merge_req_fn(q, rq, next);
        q->nr_sorted--;

        q->last_merge = rq;
}

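/*
 * put a request that the driver has already seen back on the dispatch
 * queue, e.g. because the device was busy.
 */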
void elv_requeue_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
                        e->ops->elevator_deactivate_req_fn(q, rq);
        }

        rq->flags &= ~REQ_STARTED;

        __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
}

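/*
 * force the io scheduler to dispatch everything it is holding back onto
 * the dispatch queue.  Complains (a limited number of times) if the
 * scheduler claims to be done while nr_sorted says otherwise.
 */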
static void elv_drain_elevator(request_queue_t *q)
{
        static int printed;

        while (q->elevator->ops->elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted == 0)
                return;
        if (printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->elevator_type->elevator_name, q->nr_sorted);
        }
}

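/*
 * add rq to the queue at the position given by 'where'.  Must be called
 * with the queue lock held; elv_add_request() below is the locking
 * wrapper.
 */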
void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                       int plug)
{
        struct list_head *pos;
        unsigned ordseq;

        if (q->ordcolor)
                rq->flags |= REQ_ORDERED_COLOR;

        if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
                /*
                 * toggle ordered color
                 */
                q->ordcolor ^= 1;

                /*
                 * barriers implicitly indicate back insertion
                 */
                if (where == ELEVATOR_INSERT_SORT)
                        where = ELEVATOR_INSERT_BACK;

                /*
                 * this request is a scheduling boundary, update end_sector
                 */
                if (blk_fs_request(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;

        if (plug)
                blk_plug_device(q);

        rq->q = q;

        switch (where) {
        case ELEVATOR_INSERT_FRONT:
                rq->flags |= REQ_SOFTBARRIER;

                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->flags |= REQ_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in a hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                blk_remove_plug(q);
                q->request_fn(q);
                break;

        case ELEVATOR_INSERT_SORT:
                BUG_ON(!blk_fs_request(rq));
                rq->flags |= REQ_SORTED;
                q->nr_sorted++;
                if (q->last_merge == NULL && rq_mergeable(rq))
                        q->last_merge = rq;
                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->ops->elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_REQUEUE:
                /*
                 * If an ordered flush isn't in progress, we do front
                 * insertion; otherwise, requests should be requeued
                 * in ordseq order.
                 */
                rq->flags |= REQ_SOFTBARRIER;

                if (q->ordseq == 0) {
                        list_add(&rq->queuelist, &q->queue_head);
                        break;
                }

                ordseq = blk_ordered_req_seq(rq);

                list_for_each(pos, &q->queue_head) {
                        struct request *pos_rq = list_entry_rq(pos);
                        if (ordseq <= blk_ordered_req_seq(pos_rq))
                                break;
                }

                list_add_tail(&rq->queuelist, pos);
                break;

        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __FUNCTION__, where);
                BUG();
        }

        if (blk_queue_plugged(q)) {
                int nrq = q->rq.count[READ] + q->rq.count[WRITE]
                        - q->in_flight;

                if (nrq >= q->unplug_thresh)
                        __generic_unplug_device(q);
        }
}

void elv_add_request(request_queue_t *q, struct request *rq, int where,
                     int plug)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where, plug);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

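/*
 * return the request at the head of the dispatch queue, asking the io
 * scheduler to dispatch more when it runs dry.  Returns NULL when
 * neither the dispatch queue nor the scheduler has work.
 */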
static inline struct request *__elv_next_request(request_queue_t *q)
{
        struct request *rq;

        while (1) {
                while (!list_empty(&q->queue_head)) {
                        rq = list_entry_rq(q->queue_head.next);
                        if (blk_do_ordered(q, &rq))
                                return rq;
                }

                if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
                        return NULL;
        }
}

struct request *elv_next_request(request_queue_t *q)
{
        struct request *rq;
        int ret;

        while ((rq = __elv_next_request(q)) != NULL) {
                if (!(rq->flags & REQ_STARTED)) {
                        elevator_t *e = q->elevator;

                        /*
                         * This is the first time the device driver
                         * sees this request (possibly after
                         * requeueing).  Notify IO scheduler.
                         */
                        if (blk_sorted_rq(rq) &&
                            e->ops->elevator_activate_req_fn)
                                e->ops->elevator_activate_req_fn(q, rq);

                        /*
                         * just mark as started even if we don't start
                         * it, a request that has been delayed should
                         * not be passed by new incoming requests
                         */
                        rq->flags |= REQ_STARTED;
                }

                if (!q->boundary_rq || q->boundary_rq == rq) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = NULL;
                }

                if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
                        break;

                ret = q->prep_rq_fn(q, rq);
                if (ret == BLKPREP_OK) {
                        break;
                } else if (ret == BLKPREP_DEFER) {
                        /*
                         * the request may have been (partially) prepped.
                         * we need to keep this request in the front to
                         * avoid resource deadlock.  REQ_STARTED will
                         * prevent other fs requests from passing this one.
                         */
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL) {
                        int nr_bytes = rq->hard_nr_sectors << 9;

                        if (!nr_bytes)
                                nr_bytes = rq->data_len;

                        blkdev_dequeue_request(rq);
                        rq->flags |= REQ_QUIET;
                        end_that_request_chunk(rq, 0, nr_bytes);
                        end_that_request_last(rq, 0);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n",
                               __FUNCTION__, ret);
                        break;
                }
        }

        return rq;
}

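/*
 * remove rq from the dispatch queue when the driver takes ownership of
 * it.
 */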
void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
        BUG_ON(list_empty(&rq->queuelist));

        list_del_init(&rq->queuelist);

        /*
         * the time frame between a request being removed from the lists
         * and when it is freed is accounted as io that is in progress at
         * the driver side.
         */
        if (blk_account_rq(rq))
                q->in_flight++;
}

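/*
 * return non-zero if neither the dispatch queue nor the io scheduler
 * has pending requests.
 */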
int elv_queue_empty(request_queue_t *q)
{
        elevator_t *e = q->elevator;

        if (!list_empty(&q->queue_head))
                return 0;

        if (e->ops->elevator_queue_empty_fn)
                return e->ops->elevator_queue_empty_fn(q);

        return 1;
}

struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_latter_req_fn)
                return e->ops->elevator_latter_req_fn(q, rq);
        return NULL;
}

struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_former_req_fn)
                return e->ops->elevator_former_req_fn(q, rq);
        return NULL;
}

int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                    gfp_t gfp_mask)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_set_req_fn)
                return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);

        rq->elevator_private = NULL;
        return 0;
}

void elv_put_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_put_req_fn)
                e->ops->elevator_put_req_fn(q, rq);
}

int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_may_queue_fn)
                return e->ops->elevator_may_queue_fn(q, rw, bio);

        return ELV_MQUEUE_MAY;
}

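/*
 * called when the driver has finished with a request; updates the
 * in-flight accounting, notifies the io scheduler and, if an ordered
 * flush is draining, kicks the next sequence step.
 */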
void elv_completed_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }

        /*
         * Check if the queue is waiting for fs requests to be
         * drained for the flush sequence.
         */
        if (unlikely(q->ordseq)) {
                struct request *first_rq = list_entry_rq(q->queue_head.next);
                if (q->in_flight == 0 &&
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
                        q->request_fn(q);
                }
        }
}

int elv_register_queue(struct request_queue *q)
{
        elevator_t *e = q->elevator;

        e->kobj.parent = kobject_get(&q->kobj);
        if (!e->kobj.parent)
                return -EBUSY;

        snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
        e->kobj.ktype = e->elevator_type->elevator_ktype;

        return kobject_register(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
        if (q) {
                elevator_t *e = q->elevator;
                kobject_unregister(&e->kobj);
                kobject_put(&q->kobj);
        }
}

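/*
 * register a new io scheduler type so it can be selected per queue.
 */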
int elv_register(struct elevator_type *e)
{
        spin_lock_irq(&elv_list_lock);
        BUG_ON(elevator_find(e->elevator_name));
        list_add_tail(&e->list, &elv_list);
        spin_unlock_irq(&elv_list_lock);

        printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
        if (!strcmp(e->elevator_name, chosen_elevator))
                printk(" (default)");
        printk("\n");
        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
        struct task_struct *g, *p;

        /*
         * Iterate over every thread in the system to remove the io
         * contexts.
         */
        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                struct io_context *ioc = p->io_context;
                if (ioc && ioc->cic) {
                        ioc->cic->exit(ioc->cic);
                        ioc->cic->dtor(ioc->cic);
                        ioc->cic = NULL;
                }
                if (ioc && ioc->aic) {
                        ioc->aic->exit(ioc->aic);
                        ioc->aic->dtor(ioc->aic);
                        ioc->aic = NULL;
                }
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);

        spin_lock_irq(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock_irq(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to the new_e io scheduler.  Be careful not to introduce
 * deadlocks - we don't free the old io scheduler before we have
 * allocated what we need for the new one.  This way we have a chance of
 * going back to the old one if the new one fails init for some reason.
 */
static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
        elevator_t *old_elevator, *e;

        /*
         * Allocate new elevator
         */
        e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
        if (!e)
                goto error;

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data
         */
        spin_lock_irq(q->queue_lock);

        set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

        elv_drain_elevator(q);

        while (q->rq.elvpriv) {
                blk_remove_plug(q);
                q->request_fn(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
                elv_drain_elevator(q);
        }

        spin_unlock_irq(q->queue_lock);

        /*
         * unregister old elevator data
         */
        elv_unregister_queue(q);
        old_elevator = q->elevator;

        /*
         * attach and start new elevator
         */
        if (elevator_attach(q, new_e, e))
                goto fail;

        if (elv_register_queue(q))
                goto fail_register;

        /*
         * finally exit old elevator and turn off BYPASS.
         */
        elevator_exit(old_elevator);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        return;

fail_register:
        /*
         * switch failed, exit the new io scheduler and reattach the old
         * one again (along with re-adding the sysfs dir)
         */
        elevator_exit(e);
        e = NULL;
fail:
        q->elevator = old_elevator;
        elv_register_queue(q);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        kfree(e);
error:
        elevator_put(new_e);
        printk(KERN_ERR "elevator: switch to %s failed\n", new_e->elevator_name);
}

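/*
 * sysfs handler for writes to the 'scheduler' attribute: parse the
 * requested elevator name and switch the queue over to it.
 */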
ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
        char elevator_name[ELV_NAME_MAX];
        size_t len;
        struct elevator_type *e;

        elevator_name[sizeof(elevator_name) - 1] = '\0';
        strncpy(elevator_name, name, sizeof(elevator_name) - 1);
        len = strlen(elevator_name);

        if (len && elevator_name[len - 1] == '\n')
                elevator_name[len - 1] = '\0';

        e = elevator_get(elevator_name);
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
        }

        if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
                elevator_put(e);
                return count;
        }

        elevator_switch(q, e);
        return count;
}

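/*
 * sysfs handler for reads of the 'scheduler' attribute: list all
 * registered elevators, with the active one in square brackets.
 */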
ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
        elevator_t *e = q->elevator;
        struct elevator_type *elv = e->elevator_type;
        struct list_head *entry;
        int len = 0;

        spin_lock_irq(q->queue_lock);
        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;

                __e = list_entry(entry, struct elevator_type, list);
                if (!strcmp(elv->elevator_name, __e->elevator_name))
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                else
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock_irq(q->queue_lock);

        len += sprintf(name+len, "\n");
        return len;
}

EXPORT_SYMBOL(elv_dispatch_sort);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_requeue_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_dequeue_request);
EXPORT_SYMBOL(elv_queue_empty);
EXPORT_SYMBOL(elv_completed_request);
EXPORT_SYMBOL(elevator_exit);
EXPORT_SYMBOL(elevator_init);