[PATCH] cfq-iosched: move on_rr check into cfq_resort_rr_list()
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/blkdev.h>
11 #include <linux/elevator.h>
12 #include <linux/hash.h>
13 #include <linux/rbtree.h>
14 #include <linux/ioprio.h>
15
16 /*
17  * tunables
18  */
19 static const int cfq_quantum = 4;               /* max queue in one round of service */
20 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
21 static const int cfq_back_max = 16 * 1024;      /* maximum backwards seek, in KiB */
22 static const int cfq_back_penalty = 2;          /* penalty of a backwards seek */
23
24 static const int cfq_slice_sync = HZ / 10;
25 static int cfq_slice_async = HZ / 25;
26 static const int cfq_slice_async_rq = 2;
27 static int cfq_slice_idle = HZ / 125;
28
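/*
 * Worked example of the defaults above, assuming HZ=1000 (1 jiffy = 1ms):
 * cfq_fifo_expire[0] = HZ/4 = 250ms (async) and cfq_fifo_expire[1] =
 * HZ/8 = 125ms (sync); cfq_slice_sync = HZ/10 = 100ms, cfq_slice_async =
 * HZ/25 = 40ms, cfq_slice_idle = HZ/125 = 8ms. The ratios are the same
 * for any HZ, only the jiffy granularity changes.
 */
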
29 #define CFQ_IDLE_GRACE          (HZ / 10)
30 #define CFQ_SLICE_SCALE         (5)
31
32 #define CFQ_KEY_ASYNC           (0)
33
34 /*
35  * for the hash of cfqq inside the cfqd
36  */
37 #define CFQ_QHASH_SHIFT         6
38 #define CFQ_QHASH_ENTRIES       (1 << CFQ_QHASH_SHIFT)
39 #define list_entry_qhash(entry) hlist_entry((entry), struct cfq_queue, cfq_hash)
40
41 #define list_entry_cfqq(ptr)    list_entry((ptr), struct cfq_queue, cfq_list)
42
43 #define RQ_CIC(rq)              ((struct cfq_io_context*)(rq)->elevator_private)
44 #define RQ_CFQQ(rq)             ((rq)->elevator_private2)
45
46 static struct kmem_cache *cfq_pool;
47 static struct kmem_cache *cfq_ioc_pool;
48
49 static DEFINE_PER_CPU(unsigned long, ioc_count);
50 static struct completion *ioc_gone;
51
52 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
53 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
54 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
55
56 #define ASYNC                   (0)
57 #define SYNC                    (1)
58
59 #define cfq_cfqq_dispatched(cfqq)       \
60         ((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])
61
62 #define cfq_cfqq_class_sync(cfqq)       ((cfqq)->key != CFQ_KEY_ASYNC)
63
64 #define cfq_cfqq_sync(cfqq)             \
65         (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
66
67 #define sample_valid(samples)   ((samples) > 80)
68
69 /*
70  * Per block device queue structure
71  */
72 struct cfq_data {
73         request_queue_t *queue;
74
75         /*
76          * rr list of queues with requests and the count of them
77          */
78         struct list_head rr_list[CFQ_PRIO_LISTS];
79         struct list_head busy_rr;
80         struct list_head cur_rr;
81         struct list_head idle_rr;
82         unsigned int busy_queues;
83
84         /*
85          * cfqq lookup hash
86          */
87         struct hlist_head *cfq_hash;
88
89         int rq_in_driver;
90         int hw_tag;
91
92         /*
93          * idle window management
94          */
95         struct timer_list idle_slice_timer;
96         struct work_struct unplug_work;
97
98         struct cfq_queue *active_queue;
99         struct cfq_io_context *active_cic;
100         int cur_prio, cur_end_prio;
101         unsigned int dispatch_slice;
102
103         struct timer_list idle_class_timer;
104
105         sector_t last_sector;
106         unsigned long last_end_request;
107
108         /*
109          * tunables, see top of file
110          */
111         unsigned int cfq_quantum;
112         unsigned int cfq_fifo_expire[2];
113         unsigned int cfq_back_penalty;
114         unsigned int cfq_back_max;
115         unsigned int cfq_slice[2];
116         unsigned int cfq_slice_async_rq;
117         unsigned int cfq_slice_idle;
118
119         struct list_head cic_list;
120 };
121
122 /*
123  * Per process-grouping structure
124  */
125 struct cfq_queue {
126         /* reference count */
127         atomic_t ref;
128         /* parent cfq_data */
129         struct cfq_data *cfqd;
130         /* cfqq lookup hash */
131         struct hlist_node cfq_hash;
132         /* hash key */
133         unsigned int key;
134         /* member of the rr/busy/cur/idle cfqd list */
135         struct list_head cfq_list;
136         /* sorted list of pending requests */
137         struct rb_root sort_list;
138         /* if fifo isn't expired, next request to serve */
139         struct request *next_rq;
140         /* requests queued in sort_list */
141         int queued[2];
142         /* currently allocated requests */
143         int allocated[2];
144         /* pending metadata requests */
145         int meta_pending;
146         /* fifo list of requests in sort_list */
147         struct list_head fifo;
148
149         unsigned long slice_start;
150         unsigned long slice_end;
151         unsigned long slice_left;
152
153         /* number of requests that are on the dispatch list */
154         int on_dispatch[2];
155
156         /* io prio of this group */
157         unsigned short ioprio, org_ioprio;
158         unsigned short ioprio_class, org_ioprio_class;
159
160         /* various state flags, see below */
161         unsigned int flags;
162 };
163
164 enum cfqq_state_flags {
165         CFQ_CFQQ_FLAG_on_rr = 0,
166         CFQ_CFQQ_FLAG_wait_request,
167         CFQ_CFQQ_FLAG_must_alloc,
168         CFQ_CFQQ_FLAG_must_alloc_slice,
169         CFQ_CFQQ_FLAG_must_dispatch,
170         CFQ_CFQQ_FLAG_fifo_expire,
171         CFQ_CFQQ_FLAG_idle_window,
172         CFQ_CFQQ_FLAG_prio_changed,
173         CFQ_CFQQ_FLAG_queue_new,
174 };
175
176 #define CFQ_CFQQ_FNS(name)                                              \
177 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
178 {                                                                       \
179         cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);                     \
180 }                                                                       \
181 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
182 {                                                                       \
183         cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                    \
184 }                                                                       \
185 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
186 {                                                                       \
187         return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;        \
188 }
189
190 CFQ_CFQQ_FNS(on_rr);
191 CFQ_CFQQ_FNS(wait_request);
192 CFQ_CFQQ_FNS(must_alloc);
193 CFQ_CFQQ_FNS(must_alloc_slice);
194 CFQ_CFQQ_FNS(must_dispatch);
195 CFQ_CFQQ_FNS(fifo_expire);
196 CFQ_CFQQ_FNS(idle_window);
197 CFQ_CFQQ_FNS(prio_changed);
198 CFQ_CFQQ_FNS(queue_new);
199 #undef CFQ_CFQQ_FNS
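
/*
 * For illustration, CFQ_CFQQ_FNS(on_rr) above expands to
 *
 *     static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *     {
 *             cfqq->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *     }
 *
 * plus the matching cfq_clear_cfqq_on_rr() clear and cfq_cfqq_on_rr()
 * test, so every flag gets a set/clear/test triplet without hand-rolled
 * boilerplate.
 */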
200
201 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
202 static void cfq_dispatch_insert(request_queue_t *, struct request *);
203 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
204
205 /*
206  * scheduler run of queue, if there are requests pending and no one in the
207  * driver that will restart queueing
208  */
209 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
210 {
211         if (cfqd->busy_queues)
212                 kblockd_schedule_work(&cfqd->unplug_work);
213 }
214
215 static int cfq_queue_empty(request_queue_t *q)
216 {
217         struct cfq_data *cfqd = q->elevator->elevator_data;
218
219         return !cfqd->busy_queues;
220 }
221
222 static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
223 {
224         /*
225          * Use the per-process queue for read requests and synchronous writes
226          */
227         if (!(rw & REQ_RW) || is_sync)
228                 return task->pid;
229
230         return CFQ_KEY_ASYNC;
231 }
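
/*
 * Keying example (pid 4242 is an illustrative value): read requests and
 * synchronous writes issued by a process with pid 4242 all map to that
 * process' private queue under key 4242, while regular async writes
 * from any process share the single queue keyed CFQ_KEY_ASYNC (0).
 */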
232
233 /*
234  * Lifted from AS - choose which of rq1 and rq2 that is best served now.
235  * We choose the request that is closest to the head right now. Distance
236  * behind the head is penalized and only allowed to a certain extent.
237  */
238 static struct request *
239 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
240 {
241         sector_t last, s1, s2, d1 = 0, d2 = 0;
242         unsigned long back_max;
243 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
244 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
245         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
246
247         if (rq1 == NULL || rq1 == rq2)
248                 return rq2;
249         if (rq2 == NULL)
250                 return rq1;
251
252         if (rq_is_sync(rq1) && !rq_is_sync(rq2))
253                 return rq1;
254         else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
255                 return rq2;
256         if (rq_is_meta(rq1) && !rq_is_meta(rq2))
257                 return rq1;
258         else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
259                 return rq2;
260
261         s1 = rq1->sector;
262         s2 = rq2->sector;
263
264         last = cfqd->last_sector;
265
266         /*
267          * by definition, 1KiB is 2 sectors
268          */
269         back_max = cfqd->cfq_back_max * 2;
270
271         /*
272          * Strict one way elevator _except_ in the case where we allow
273          * short backward seeks which are biased as twice the cost of a
274          * similar forward seek.
275          */
276         if (s1 >= last)
277                 d1 = s1 - last;
278         else if (s1 + back_max >= last)
279                 d1 = (last - s1) * cfqd->cfq_back_penalty;
280         else
281                 wrap |= CFQ_RQ1_WRAP;
282
283         if (s2 >= last)
284                 d2 = s2 - last;
285         else if (s2 + back_max >= last)
286                 d2 = (last - s2) * cfqd->cfq_back_penalty;
287         else
288                 wrap |= CFQ_RQ2_WRAP;
289
290         /* Found required data */
291
292         /*
293          * By doing switch() on the bit mask "wrap" we avoid having to
294          * check two variables for all permutations: --> faster!
295          */
296         switch (wrap) {
297         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
298                 if (d1 < d2)
299                         return rq1;
300                 else if (d2 < d1)
301                         return rq2;
302                 else {
303                         if (s1 >= s2)
304                                 return rq1;
305                         else
306                                 return rq2;
307                 }
308
309         case CFQ_RQ2_WRAP:
310                 return rq1;
311         case CFQ_RQ1_WRAP:
312                 return rq2;
313         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
314         default:
315                 /*
316                  * Since both rqs are wrapped,
317                  * start with the one that's further behind head
318                  * (--> only *one* back seek required),
319                  * since back seek takes more time than forward.
320                  */
321                 if (s1 <= s2)
322                         return rq1;
323                 else
324                         return rq2;
325         }
326 }
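
/*
 * Worked example of the distance logic above, with illustrative numbers:
 * head at last = 1000, back_max = 16 * 1024KiB * 2 = 32768 sectors. rq1
 * at s1 = 1100 seeks forward, d1 = 100; rq2 at s2 = 900 seeks backward,
 * d2 = (1000 - 900) * cfq_back_penalty = 200. Neither wraps, so the
 * switch takes case 0 and rq1 wins on the smaller weighted distance.
 * Only an rq further than back_max behind the head gets the wrap bit
 * and loses to any non-wrapped candidate.
 */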
327
328 /*
329  * would be nice to take fifo expire time into account as well
330  */
331 static struct request *
332 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
333                   struct request *last)
334 {
335         struct rb_node *rbnext = rb_next(&last->rb_node);
336         struct rb_node *rbprev = rb_prev(&last->rb_node);
337         struct request *next = NULL, *prev = NULL;
338
339         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
340
341         if (rbprev)
342                 prev = rb_entry_rq(rbprev);
343
344         if (rbnext)
345                 next = rb_entry_rq(rbnext);
346         else {
347                 rbnext = rb_first(&cfqq->sort_list);
348                 if (rbnext && rbnext != &last->rb_node)
349                         next = rb_entry_rq(rbnext);
350         }
351
352         return cfq_choose_req(cfqd, next, prev);
353 }
354
355 static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
356 {
357         struct cfq_data *cfqd = cfqq->cfqd;
358         struct list_head *list;
359
360         /*
361          * Resorting requires the cfqq to be on the RR list already.
362          */
363         if (!cfq_cfqq_on_rr(cfqq))
364                 return;
365
366         list_del(&cfqq->cfq_list);
367
368         if (cfq_class_rt(cfqq))
369                 list = &cfqd->cur_rr;
370         else if (cfq_class_idle(cfqq))
371                 list = &cfqd->idle_rr;
372         else {
373                 /*
374                  * if cfqq has requests in flight, don't allow it to be
375                  * found in cfq_set_active_queue before it has finished them.
376                  * this is done to increase fairness between a process that
377                  * has lots of io pending vs one that only generates one
378                  * sporadically or synchronously
379                  */
380                 if (cfq_cfqq_dispatched(cfqq))
381                         list = &cfqd->busy_rr;
382                 else
383                         list = &cfqd->rr_list[cfqq->ioprio];
384         }
385
386         /*
387          * If this queue was preempted or is new (never been serviced), let
388          * it be added first for fairness but behind other new queues.
389          * Otherwise, just add to the back of the list.
390          */
391         if (preempted || cfq_cfqq_queue_new(cfqq)) {
392                 struct list_head *n = list;
393                 struct cfq_queue *__cfqq;
394
395                 while (n->next != list) {
396                         __cfqq = list_entry_cfqq(n->next);
397                         if (!cfq_cfqq_queue_new(__cfqq))
398                                 break;
399
400                         n = n->next;
401                 }
402
403                 list = n;
404         }
405
406         list_add_tail(&cfqq->cfq_list, list);
407 }
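
/*
 * List selection example for the resort above: an RT queue always lands
 * on cur_rr and an idle-class queue on idle_rr; a BE queue goes to
 * busy_rr while it still has requests in flight, and otherwise to
 * rr_list[ioprio]. Preempted or never-serviced queues are then placed
 * near the front, behind other new queues, instead of at the tail.
 */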
408
409 /*
410  * add to busy list of queues for service, trying to be fair in ordering
411  * the pending list according to last request service
412  */
413 static inline void
414 cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
415 {
416         BUG_ON(cfq_cfqq_on_rr(cfqq));
417         cfq_mark_cfqq_on_rr(cfqq);
418         cfqd->busy_queues++;
419
420         cfq_resort_rr_list(cfqq, 0);
421 }
422
423 static inline void
424 cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
425 {
426         BUG_ON(!cfq_cfqq_on_rr(cfqq));
427         cfq_clear_cfqq_on_rr(cfqq);
428         list_del_init(&cfqq->cfq_list);
429
430         BUG_ON(!cfqd->busy_queues);
431         cfqd->busy_queues--;
432 }
433
434 /*
435  * rb tree support functions
436  */
437 static inline void cfq_del_rq_rb(struct request *rq)
438 {
439         struct cfq_queue *cfqq = RQ_CFQQ(rq);
440         struct cfq_data *cfqd = cfqq->cfqd;
441         const int sync = rq_is_sync(rq);
442
443         BUG_ON(!cfqq->queued[sync]);
444         cfqq->queued[sync]--;
445
446         elv_rb_del(&cfqq->sort_list, rq);
447
448         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
449                 cfq_del_cfqq_rr(cfqd, cfqq);
450 }
451
452 static void cfq_add_rq_rb(struct request *rq)
453 {
454         struct cfq_queue *cfqq = RQ_CFQQ(rq);
455         struct cfq_data *cfqd = cfqq->cfqd;
456         struct request *__alias;
457
458         cfqq->queued[rq_is_sync(rq)]++;
459
460         /*
461          * looks a little odd, but the first insert might return an alias.
462          * if that happens, put the alias on the dispatch list
463          */
464         while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
465                 cfq_dispatch_insert(cfqd->queue, __alias);
466
467         if (!cfq_cfqq_on_rr(cfqq))
468                 cfq_add_cfqq_rr(cfqd, cfqq);
469 }
470
471 static inline void
472 cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
473 {
474         elv_rb_del(&cfqq->sort_list, rq);
475         cfqq->queued[rq_is_sync(rq)]--;
476         cfq_add_rq_rb(rq);
477 }
478
479 static struct request *
480 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
481 {
482         struct task_struct *tsk = current;
483         pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
484         struct cfq_queue *cfqq;
485
486         cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
487         if (cfqq) {
488                 sector_t sector = bio->bi_sector + bio_sectors(bio);
489
490                 return elv_rb_find(&cfqq->sort_list, sector);
491         }
492
493         return NULL;
494 }
495
496 static void cfq_activate_request(request_queue_t *q, struct request *rq)
497 {
498         struct cfq_data *cfqd = q->elevator->elevator_data;
499
500         cfqd->rq_in_driver++;
501
502         /*
503          * If the depth is larger than 1, it really could be queueing. But let's
504          * make the mark a little higher - idling could still be good for
505          * low queueing, and a low queueing number could also just indicate
506          * a SCSI-mid-layer-like behaviour where limit+1 is often seen.
507          */
508         if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
509                 cfqd->hw_tag = 1;
510 }
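
/*
 * Example: an NCQ/TCQ-capable device will happily accept a 5th request
 * while 4 are still outstanding, so rq_in_driver climbs past 4 and
 * hw_tag latches on for good. A non-queueing device rarely has more
 * than a request or two in flight and stays below the mark.
 */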
511
512 static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
513 {
514         struct cfq_data *cfqd = q->elevator->elevator_data;
515
516         WARN_ON(!cfqd->rq_in_driver);
517         cfqd->rq_in_driver--;
518 }
519
520 static void cfq_remove_request(struct request *rq)
521 {
522         struct cfq_queue *cfqq = RQ_CFQQ(rq);
523
524         if (cfqq->next_rq == rq)
525                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
526
527         list_del_init(&rq->queuelist);
528         cfq_del_rq_rb(rq);
529
530         if (rq_is_meta(rq)) {
531                 WARN_ON(!cfqq->meta_pending);
532                 cfqq->meta_pending--;
533         }
534 }
535
536 static int
537 cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
538 {
539         struct cfq_data *cfqd = q->elevator->elevator_data;
540         struct request *__rq;
541
542         __rq = cfq_find_rq_fmerge(cfqd, bio);
543         if (__rq && elv_rq_merge_ok(__rq, bio)) {
544                 *req = __rq;
545                 return ELEVATOR_FRONT_MERGE;
546         }
547
548         return ELEVATOR_NO_MERGE;
549 }
550
551 static void cfq_merged_request(request_queue_t *q, struct request *req,
552                                int type)
553 {
554         if (type == ELEVATOR_FRONT_MERGE) {
555                 struct cfq_queue *cfqq = RQ_CFQQ(req);
556
557                 cfq_reposition_rq_rb(cfqq, req);
558         }
559 }
560
561 static void
562 cfq_merged_requests(request_queue_t *q, struct request *rq,
563                     struct request *next)
564 {
565         /*
566          * reposition in fifo if next is older than rq
567          */
568         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
569             time_before(next->start_time, rq->start_time))
570                 list_move(&rq->queuelist, &next->queuelist);
571
572         cfq_remove_request(next);
573 }
574
575 static int cfq_allow_merge(request_queue_t *q, struct request *rq,
576                            struct bio *bio)
577 {
578         struct cfq_data *cfqd = q->elevator->elevator_data;
579         const int rw = bio_data_dir(bio);
580         struct cfq_queue *cfqq;
581         pid_t key;
582
583         /*
584          * Disallow merge of a sync bio into an async request.
585          */
586         if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
587                 return 0;
588
589         /*
590          * Lookup the cfqq that this bio will be queued with. Allow
591          * merge only if rq is queued there.
592          */
593         key = cfq_queue_pid(current, rw, bio_sync(bio));
594         cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
595
596         if (cfqq == RQ_CFQQ(rq))
597                 return 1;
598
599         return 0;
600 }
601
602 static inline void
603 __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
604 {
605         if (cfqq) {
606                 /*
607                  * stop potential idle class queues waiting service
608                  */
609                 del_timer(&cfqd->idle_class_timer);
610
611                 cfqq->slice_start = jiffies;
612                 cfqq->slice_end = 0;
613                 cfqq->slice_left = 0;
614                 cfq_clear_cfqq_must_alloc_slice(cfqq);
615                 cfq_clear_cfqq_fifo_expire(cfqq);
616         }
617
618         cfqd->active_queue = cfqq;
619 }
620
621 /*
622  * current cfqq expired its slice (or was too idle), select new one
623  */
624 static void
625 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
626                     int preempted)
627 {
628         unsigned long now = jiffies;
629
630         if (cfq_cfqq_wait_request(cfqq))
631                 del_timer(&cfqd->idle_slice_timer);
632
633         if (!preempted && !cfq_cfqq_dispatched(cfqq))
634                 cfq_schedule_dispatch(cfqd);
635
636         cfq_clear_cfqq_must_dispatch(cfqq);
637         cfq_clear_cfqq_wait_request(cfqq);
638         cfq_clear_cfqq_queue_new(cfqq);
639
640         /*
641          * store what was left of this slice, if the queue idled out
642          * or was preempted
643          */
644         if (time_after(cfqq->slice_end, now))
645                 cfqq->slice_left = cfqq->slice_end - now;
646         else
647                 cfqq->slice_left = 0;
648
649         cfq_resort_rr_list(cfqq, preempted);
650
651         if (cfqq == cfqd->active_queue)
652                 cfqd->active_queue = NULL;
653
654         if (cfqd->active_cic) {
655                 put_io_context(cfqd->active_cic->ioc);
656                 cfqd->active_cic = NULL;
657         }
658
659         cfqd->dispatch_slice = 0;
660 }
661
662 static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
663 {
664         struct cfq_queue *cfqq = cfqd->active_queue;
665
666         if (cfqq)
667                 __cfq_slice_expired(cfqd, cfqq, preempted);
668 }
669
670 /*
671  * 0
672  * 0,1
673  * 0,1,2
674  * 0,1,2,3
675  * 0,1,2,3,4
676  * 0,1,2,3,4,5
677  * 0,1,2,3,4,5,6
678  * 0,1,2,3,4,5,6,7
679  */
680 static int cfq_get_next_prio_level(struct cfq_data *cfqd)
681 {
682         int prio, wrap;
683
684         prio = -1;
685         wrap = 0;
686         do {
687                 int p;
688
689                 for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
690                         if (!list_empty(&cfqd->rr_list[p])) {
691                                 prio = p;
692                                 break;
693                         }
694                 }
695
696                 if (prio != -1)
697                         break;
698                 cfqd->cur_prio = 0;
699                 if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
700                         cfqd->cur_end_prio = 0;
701                         if (wrap)
702                                 break;
703                         wrap = 1;
704                 }
705         } while (1);
706
707         if (unlikely(prio == -1))
708                 return -1;
709
710         BUG_ON(prio >= CFQ_PRIO_LISTS);
711
712         list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);
713
714         cfqd->cur_prio = prio + 1;
715         if (cfqd->cur_prio > cfqd->cur_end_prio) {
716                 cfqd->cur_end_prio = cfqd->cur_prio;
717                 cfqd->cur_prio = 0;
718         }
719         if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
720                 cfqd->cur_prio = 0;
721                 cfqd->cur_end_prio = 0;
722         }
723
724         return prio;
725 }
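
/*
 * Example walk, assuming only rr_list[2] and rr_list[5] are non-empty
 * and cur_prio = cur_end_prio = 0: the scan window grows 0..0, 0..1,
 * 0..2 until it covers prio 2, rr_list[2] is spliced onto cur_rr, and
 * the window restarts as 0..3. Later calls keep widening (0..4, 0..5)
 * until rr_list[5] is reached - the 0 / 0,1 / 0,1,2 / ... expansion
 * sketched above, which admits lower prio levels while still visiting
 * the higher ones most often.
 */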
726
727 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
728 {
729         struct cfq_queue *cfqq = NULL;
730
731         if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) {
732                 /*
733                  * if current list is non-empty, grab first entry. if it is
734                  * empty, get next prio level and grab first entry then if any
735                  * are spliced
736                  */
737                 cfqq = list_entry_cfqq(cfqd->cur_rr.next);
738         } else if (!list_empty(&cfqd->busy_rr)) {
739                 /*
740                  * If no new queues are available, check if the busy list has
741                  * some before falling back to idle io.
742                  */
743                 cfqq = list_entry_cfqq(cfqd->busy_rr.next);
744         } else if (!list_empty(&cfqd->idle_rr)) {
745                 /*
746                  * if we have idle queues and no rt or be queues had pending
747                  * requests, either allow immediate service if the grace period
748                  * has passed or arm the idle grace timer
749                  */
750                 unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
751
752                 if (time_after_eq(jiffies, end))
753                         cfqq = list_entry_cfqq(cfqd->idle_rr.next);
754                 else
755                         mod_timer(&cfqd->idle_class_timer, end);
756         }
757
758         __cfq_set_active_queue(cfqd, cfqq);
759         return cfqq;
760 }
761
762 #define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024))
763
764 static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
766 {
767         struct cfq_io_context *cic;
768         unsigned long sl;
769
770         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
771         WARN_ON(cfqq != cfqd->active_queue);
772
773         /*
774          * idle is disabled, either manually or by past process history
775          */
776         if (!cfqd->cfq_slice_idle)
777                 return 0;
778         if (!cfq_cfqq_idle_window(cfqq))
779                 return 0;
780         /*
781          * task has exited, don't wait
782          */
783         cic = cfqd->active_cic;
784         if (!cic || !cic->ioc->task)
785                 return 0;
786
787         cfq_mark_cfqq_must_dispatch(cfqq);
788         cfq_mark_cfqq_wait_request(cfqq);
789
790         sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
791
792         /*
793          * we don't want to idle for seeks, but we do want to allow
794          * fair distribution of slice time for a process doing back-to-back
795          * seeks. so allow a little bit of time for him to submit a new rq
796          */
797         if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
798                 sl = min(sl, msecs_to_jiffies(2));
799
800         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
801         return 1;
802 }
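
/*
 * With the defaults and HZ=1000, the timer above arms for at most
 * cfq_slice_idle = 8ms, clipped to the remaining slice. A task whose
 * sampled seek mean exceeds the CIC_SEEKY() threshold still gets a
 * token min(sl, 2ms) window - enough to submit a back-to-back request,
 * not enough to leave the disk idling for a genuinely random workload.
 */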
803
804 static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
805 {
806         struct cfq_data *cfqd = q->elevator->elevator_data;
807         struct cfq_queue *cfqq = RQ_CFQQ(rq);
808
809         cfq_remove_request(rq);
810         cfqq->on_dispatch[rq_is_sync(rq)]++;
811         elv_dispatch_sort(q, rq);
812
813         rq = list_entry(q->queue_head.prev, struct request, queuelist);
814         cfqd->last_sector = rq->sector + rq->nr_sectors;
815 }
816
817 /*
818  * return expired entry, or NULL to just start from scratch in rbtree
819  */
820 static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
821 {
822         struct cfq_data *cfqd = cfqq->cfqd;
823         struct request *rq;
824         int fifo;
825
826         if (cfq_cfqq_fifo_expire(cfqq))
827                 return NULL;
828         if (list_empty(&cfqq->fifo))
829                 return NULL;
830
831         fifo = cfq_cfqq_class_sync(cfqq);
832         rq = rq_entry_fifo(cfqq->fifo.next);
833
834         if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
835                 cfq_mark_cfqq_fifo_expire(cfqq);
836                 return rq;
837         }
838
839         return NULL;
840 }
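
/*
 * Example, assuming HZ=1000: a sync queue uses cfq_fifo_expire[1] =
 * 125ms, so once the oldest fifo request has waited longer than that
 * since its start_time it is served ahead of the seek-optimal rbtree
 * pick, and fifo_expire suppresses further fifo checks for this slice.
 * Async queues get the laxer 250ms bound.
 */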
841
842 /*
843  * Scale schedule slice based on io priority. Use the sync time slice only
844  * if a queue is marked sync and has sync io queued. A sync queue with async
845  * io only, should not get full sync slice length.
846  * io only should not get full sync slice length.
847 static inline int
848 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
849 {
850         const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];
851
852         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
853
854         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
855 }
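
/*
 * Slice arithmetic example, assuming HZ=1000: for a sync queue,
 * base_slice = cfq_slice_sync = 100ms and base_slice/CFQ_SLICE_SCALE =
 * 20ms per prio step, so ioprio 4 gets 100ms, ioprio 0 gets
 * 100 + 20 * 4 = 180ms, and ioprio 7 gets 100 + 20 * (4 - 7) = 40ms.
 */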
856
857 static inline void
858 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
859 {
860         cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
861 }
862
863 static inline int
864 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
865 {
866         const int base_rq = cfqd->cfq_slice_async_rq;
867
868         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
869
870         return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
871 }
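
/*
 * Same idea for the async request budget: with cfq_slice_async_rq = 2,
 * ioprio 0 may dispatch 2 * (2 + 2 * 7) = 32 requests in one slice,
 * ioprio 4 gets 2 * (2 + 2 * 3) = 16, and ioprio 7 only 2 * 2 = 4.
 */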
872
873 /*
874  * get next queue for service
875  */
876 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
877 {
878         unsigned long now = jiffies;
879         struct cfq_queue *cfqq;
880
881         cfqq = cfqd->active_queue;
882         if (!cfqq)
883                 goto new_queue;
884
885         /*
886          * slice has expired
887          */
888         if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end))
889                 goto expire;
890
891         /*
892          * if queue has requests, dispatch one. if not, check if
893          * enough slice is left to wait for one
894          */
895         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
896                 goto keep_queue;
897         else if (cfq_cfqq_dispatched(cfqq)) {
898                 cfqq = NULL;
899                 goto keep_queue;
900         } else if (cfq_cfqq_class_sync(cfqq)) {
901                 if (cfq_arm_slice_timer(cfqd, cfqq))
902                         return NULL;
903         }
904
905 expire:
906         cfq_slice_expired(cfqd, 0);
907 new_queue:
908         cfqq = cfq_set_active_queue(cfqd);
909 keep_queue:
910         return cfqq;
911 }
912
913 static int
914 __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
915                         int max_dispatch)
916 {
917         int dispatched = 0;
918
919         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
920
921         do {
922                 struct request *rq;
923
924                 /*
925                  * follow expired path, else get first next available
926                  */
927                 if ((rq = cfq_check_fifo(cfqq)) == NULL)
928                         rq = cfqq->next_rq;
929
930                 /*
931                  * finally, insert request into driver dispatch list
932                  */
933                 cfq_dispatch_insert(cfqd->queue, rq);
934
935                 cfqd->dispatch_slice++;
936                 dispatched++;
937
938                 if (!cfqd->active_cic) {
939                         atomic_inc(&RQ_CIC(rq)->ioc->refcount);
940                         cfqd->active_cic = RQ_CIC(rq);
941                 }
942
943                 if (RB_EMPTY_ROOT(&cfqq->sort_list))
944                         break;
945
946         } while (dispatched < max_dispatch);
947
948         /*
949          * if slice end isn't set yet, set it.
950          */
951         if (!cfqq->slice_end)
952                 cfq_set_prio_slice(cfqd, cfqq);
953
954         /*
955          * expire an async queue immediately if it has used up its slice. idle
956          * queues always expire after 1 dispatch round.
957          */
958         if ((!cfq_cfqq_sync(cfqq) &&
959             cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
960             cfq_class_idle(cfqq) ||
961             !cfq_cfqq_idle_window(cfqq))
962                 cfq_slice_expired(cfqd, 0);
963
964         return dispatched;
965 }
966
967 static int
968 cfq_forced_dispatch_cfqqs(struct list_head *list)
969 {
970         struct cfq_queue *cfqq, *next;
971         int dispatched;
972
973         dispatched = 0;
974         list_for_each_entry_safe(cfqq, next, list, cfq_list) {
975                 while (cfqq->next_rq) {
976                         cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
977                         dispatched++;
978                 }
979                 BUG_ON(!list_empty(&cfqq->fifo));
980         }
981
982         return dispatched;
983 }
984
985 static int
986 cfq_forced_dispatch(struct cfq_data *cfqd)
987 {
988         int i, dispatched = 0;
989
990         for (i = 0; i < CFQ_PRIO_LISTS; i++)
991                 dispatched += cfq_forced_dispatch_cfqqs(&cfqd->rr_list[i]);
992
993         dispatched += cfq_forced_dispatch_cfqqs(&cfqd->busy_rr);
994         dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr);
995         dispatched += cfq_forced_dispatch_cfqqs(&cfqd->idle_rr);
996
997         cfq_slice_expired(cfqd, 0);
998
999         BUG_ON(cfqd->busy_queues);
1000
1001         return dispatched;
1002 }
1003
1004 static int
1005 cfq_dispatch_requests(request_queue_t *q, int force)
1006 {
1007         struct cfq_data *cfqd = q->elevator->elevator_data;
1008         struct cfq_queue *cfqq, *prev_cfqq;
1009         int dispatched;
1010
1011         if (!cfqd->busy_queues)
1012                 return 0;
1013
1014         if (unlikely(force))
1015                 return cfq_forced_dispatch(cfqd);
1016
1017         dispatched = 0;
1018         prev_cfqq = NULL;
1019         while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
1020                 int max_dispatch;
1021
1022                 /*
1023                  * Don't repeat dispatch from the previous queue.
1024                  */
1025                 if (prev_cfqq == cfqq)
1026                         break;
1027
1028                 cfq_clear_cfqq_must_dispatch(cfqq);
1029                 cfq_clear_cfqq_wait_request(cfqq);
1030                 del_timer(&cfqd->idle_slice_timer);
1031
1032                 max_dispatch = cfqd->cfq_quantum;
1033                 if (cfq_class_idle(cfqq))
1034                         max_dispatch = 1;
1035
1036                 dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
1037
1038                 /*
1039                  * If the dispatch cfqq has idling enabled and is still
1040                  * the active queue, break out.
1041                  */
1042                 if (cfq_cfqq_idle_window(cfqq) && cfqd->active_queue)
1043                         break;
1044
1045                 prev_cfqq = cfqq;
1046         }
1047
1048         return dispatched;
1049 }
1050
1051 /*
1052  * task holds one reference to the queue, dropped when task exits. each rq
1053  * in-flight on this queue also holds a reference, dropped when rq is freed.
1054  *
1055  * queue lock must be held here.
1056  */
1057 static void cfq_put_queue(struct cfq_queue *cfqq)
1058 {
1059         struct cfq_data *cfqd = cfqq->cfqd;
1060
1061         BUG_ON(atomic_read(&cfqq->ref) <= 0);
1062
1063         if (!atomic_dec_and_test(&cfqq->ref))
1064                 return;
1065
1066         BUG_ON(rb_first(&cfqq->sort_list));
1067         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
1068         BUG_ON(cfq_cfqq_on_rr(cfqq));
1069
1070         if (unlikely(cfqd->active_queue == cfqq))
1071                 __cfq_slice_expired(cfqd, cfqq, 0);
1072
1073         /*
1074          * it's on the empty list and still hashed
1075          */
1076         list_del(&cfqq->cfq_list);
1077         hlist_del(&cfqq->cfq_hash);
1078         kmem_cache_free(cfq_pool, cfqq);
1079 }
1080
1081 static struct cfq_queue *
1082 __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
1083                     const int hashval)
1084 {
1085         struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
1086         struct hlist_node *entry;
1087         struct cfq_queue *__cfqq;
1088
1089         hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
1090                 const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
1091
1092                 if (__cfqq->key == key && (__p == prio || !prio))
1093                         return __cfqq;
1094         }
1095
1096         return NULL;
1097 }
1098
1099 static struct cfq_queue *
1100 cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
1101 {
1102         return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
1103 }
1104
1105 static void cfq_free_io_context(struct io_context *ioc)
1106 {
1107         struct cfq_io_context *__cic;
1108         struct rb_node *n;
1109         int freed = 0;
1110
1111         while ((n = rb_first(&ioc->cic_root)) != NULL) {
1112                 __cic = rb_entry(n, struct cfq_io_context, rb_node);
1113                 rb_erase(&__cic->rb_node, &ioc->cic_root);
1114                 kmem_cache_free(cfq_ioc_pool, __cic);
1115                 freed++;
1116         }
1117
1118         elv_ioc_count_mod(ioc_count, -freed);
1119
1120         if (ioc_gone && !elv_ioc_count_read(ioc_count))
1121                 complete(ioc_gone);
1122 }
1123
1124 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1125 {
1126         if (unlikely(cfqq == cfqd->active_queue))
1127                 __cfq_slice_expired(cfqd, cfqq, 0);
1128
1129         cfq_put_queue(cfqq);
1130 }
1131
1132 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
1133                                          struct cfq_io_context *cic)
1134 {
1135         list_del_init(&cic->queue_list);
1136         smp_wmb();
1137         cic->key = NULL;
1138
1139         if (cic->cfqq[ASYNC]) {
1140                 cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
1141                 cic->cfqq[ASYNC] = NULL;
1142         }
1143
1144         if (cic->cfqq[SYNC]) {
1145                 cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
1146                 cic->cfqq[SYNC] = NULL;
1147         }
1148 }
1149
1150
1151 /*
1152  * Called with interrupts disabled
1153  */
1154 static void cfq_exit_single_io_context(struct cfq_io_context *cic)
1155 {
1156         struct cfq_data *cfqd = cic->key;
1157
1158         if (cfqd) {
1159                 request_queue_t *q = cfqd->queue;
1160
1161                 spin_lock_irq(q->queue_lock);
1162                 __cfq_exit_single_io_context(cfqd, cic);
1163                 spin_unlock_irq(q->queue_lock);
1164         }
1165 }
1166
1167 static void cfq_exit_io_context(struct io_context *ioc)
1168 {
1169         struct cfq_io_context *__cic;
1170         struct rb_node *n;
1171
1172         /*
1173          * put the reference this task is holding to the various queues
1174          */
1175
1176         n = rb_first(&ioc->cic_root);
1177         while (n != NULL) {
1178                 __cic = rb_entry(n, struct cfq_io_context, rb_node);
1179
1180                 cfq_exit_single_io_context(__cic);
1181                 n = rb_next(n);
1182         }
1183 }
1184
1185 static struct cfq_io_context *
1186 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1187 {
1188         struct cfq_io_context *cic;
1189
1190         cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
1191         if (cic) {
1192                 memset(cic, 0, sizeof(*cic));
1193                 cic->last_end_request = jiffies;
1194                 INIT_LIST_HEAD(&cic->queue_list);
1195                 cic->dtor = cfq_free_io_context;
1196                 cic->exit = cfq_exit_io_context;
1197                 elv_ioc_count_inc(ioc_count);
1198         }
1199
1200         return cic;
1201 }
1202
1203 static void cfq_init_prio_data(struct cfq_queue *cfqq)
1204 {
1205         struct task_struct *tsk = current;
1206         int ioprio_class;
1207
1208         if (!cfq_cfqq_prio_changed(cfqq))
1209                 return;
1210
1211         ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
1212         switch (ioprio_class) {
1213                 default:
1214                         printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
1215                 case IOPRIO_CLASS_NONE:
1216                         /*
1217                          * no prio set, place us in the middle of the BE classes
1218                          */
1219                         cfqq->ioprio = task_nice_ioprio(tsk);
1220                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1221                         break;
1222                 case IOPRIO_CLASS_RT:
1223                         cfqq->ioprio = task_ioprio(tsk);
1224                         cfqq->ioprio_class = IOPRIO_CLASS_RT;
1225                         break;
1226                 case IOPRIO_CLASS_BE:
1227                         cfqq->ioprio = task_ioprio(tsk);
1228                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1229                         break;
1230                 case IOPRIO_CLASS_IDLE:
1231                         cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
1232                         cfqq->ioprio = 7;
1233                         cfq_clear_cfqq_idle_window(cfqq);
1234                         break;
1235         }
1236
1237         /*
1238          * keep track of original prio settings in case we have to temporarily
1239          * elevate the priority of this queue
1240          */
1241         cfqq->org_ioprio = cfqq->ioprio;
1242         cfqq->org_ioprio_class = cfqq->ioprio_class;
1243
1244         cfq_resort_rr_list(cfqq, 0);
1245         cfq_clear_cfqq_prio_changed(cfqq);
1246 }
1247
1248 static inline void changed_ioprio(struct cfq_io_context *cic)
1249 {
1250         struct cfq_data *cfqd = cic->key;
1251         struct cfq_queue *cfqq;
1252         unsigned long flags;
1253
1254         if (unlikely(!cfqd))
1255                 return;
1256
1257         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1258
1259         cfqq = cic->cfqq[ASYNC];
1260         if (cfqq) {
1261                 struct cfq_queue *new_cfqq;
1262                 new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
1263                                          GFP_ATOMIC);
1264                 if (new_cfqq) {
1265                         cic->cfqq[ASYNC] = new_cfqq;
1266                         cfq_put_queue(cfqq);
1267                 }
1268         }
1269
1270         cfqq = cic->cfqq[SYNC];
1271         if (cfqq)
1272                 cfq_mark_cfqq_prio_changed(cfqq);
1273
1274         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1275 }
1276
1277 static void cfq_ioc_set_ioprio(struct io_context *ioc)
1278 {
1279         struct cfq_io_context *cic;
1280         struct rb_node *n;
1281
1282         ioc->ioprio_changed = 0;
1283
1284         n = rb_first(&ioc->cic_root);
1285         while (n != NULL) {
1286                 cic = rb_entry(n, struct cfq_io_context, rb_node);
1287
1288                 changed_ioprio(cic);
1289                 n = rb_next(n);
1290         }
1291 }
1292
1293 static struct cfq_queue *
1294 cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
1295               gfp_t gfp_mask)
1296 {
1297         const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
1298         struct cfq_queue *cfqq, *new_cfqq = NULL;
1299         unsigned short ioprio;
1300
1301 retry:
1302         ioprio = tsk->ioprio;
1303         cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
1304
1305         if (!cfqq) {
1306                 if (new_cfqq) {
1307                         cfqq = new_cfqq;
1308                         new_cfqq = NULL;
1309                 } else if (gfp_mask & __GFP_WAIT) {
1310                         /*
1311                          * Inform the allocator of the fact that we will
1312                          * just repeat this allocation if it fails, to allow
1313                          * the allocator to do whatever it needs to attempt to
1314                          * free memory.
1315                          */
1316                         spin_unlock_irq(cfqd->queue->queue_lock);
1317                         new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
1318                         spin_lock_irq(cfqd->queue->queue_lock);
1319                         goto retry;
1320                 } else {
1321                         cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
1322                         if (!cfqq)
1323                                 goto out;
1324                 }
1325
1326                 memset(cfqq, 0, sizeof(*cfqq));
1327
1328                 INIT_HLIST_NODE(&cfqq->cfq_hash);
1329                 INIT_LIST_HEAD(&cfqq->cfq_list);
1330                 INIT_LIST_HEAD(&cfqq->fifo);
1331
1332                 cfqq->key = key;
1333                 hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
1334                 atomic_set(&cfqq->ref, 0);
1335                 cfqq->cfqd = cfqd;
1336                 /*
1337                  * set ->slice_left to allow preemption for a new process
1338                  */
1339                 cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
1340                 cfq_mark_cfqq_idle_window(cfqq);
1341                 cfq_mark_cfqq_prio_changed(cfqq);
1342                 cfq_mark_cfqq_queue_new(cfqq);
1343                 cfq_init_prio_data(cfqq);
1344         }
1345
1346         if (new_cfqq)
1347                 kmem_cache_free(cfq_pool, new_cfqq);
1348
1349         atomic_inc(&cfqq->ref);
1350 out:
1351         WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
1352         return cfqq;
1353 }
1354
1355 static void
1356 cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
1357 {
1358         WARN_ON(!list_empty(&cic->queue_list));
1359         rb_erase(&cic->rb_node, &ioc->cic_root);
1360         kmem_cache_free(cfq_ioc_pool, cic);
1361         elv_ioc_count_dec(ioc_count);
1362 }
1363
1364 static struct cfq_io_context *
1365 cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
1366 {
1367         struct rb_node *n;
1368         struct cfq_io_context *cic;
1369         void *k, *key = cfqd;
1370
1371 restart:
1372         n = ioc->cic_root.rb_node;
1373         while (n) {
1374                 cic = rb_entry(n, struct cfq_io_context, rb_node);
1375                 /* ->key must be copied to avoid race with cfq_exit_queue() */
1376                 k = cic->key;
1377                 if (unlikely(!k)) {
1378                         cfq_drop_dead_cic(ioc, cic);
1379                         goto restart;
1380                 }
1381
1382                 if (key < k)
1383                         n = n->rb_left;
1384                 else if (key > k)
1385                         n = n->rb_right;
1386                 else
1387                         return cic;
1388         }
1389
1390         return NULL;
1391 }
1392
1393 static inline void
1394 cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
1395              struct cfq_io_context *cic)
1396 {
1397         struct rb_node **p;
1398         struct rb_node *parent;
1399         struct cfq_io_context *__cic;
1400         unsigned long flags;
1401         void *k;
1402
1403         cic->ioc = ioc;
1404         cic->key = cfqd;
1405
1406 restart:
1407         parent = NULL;
1408         p = &ioc->cic_root.rb_node;
1409         while (*p) {
1410                 parent = *p;
1411                 __cic = rb_entry(parent, struct cfq_io_context, rb_node);
1412                 /* ->key must be copied to avoid race with cfq_exit_queue() */
1413                 k = __cic->key;
1414                 if (unlikely(!k)) {
1415                         cfq_drop_dead_cic(ioc, __cic);
1416                         goto restart;
1417                 }
1418
1419                 if (cic->key < k)
1420                         p = &(*p)->rb_left;
1421                 else if (cic->key > k)
1422                         p = &(*p)->rb_right;
1423                 else
1424                         BUG();
1425         }
1426
1427         rb_link_node(&cic->rb_node, parent, p);
1428         rb_insert_color(&cic->rb_node, &ioc->cic_root);
1429
1430         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1431         list_add(&cic->queue_list, &cfqd->cic_list);
1432         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1433 }
1434
1435 /*
1436  * Setup general io context and cfq io context. There can be several cfq
1437  * io contexts per general io context, if this process is doing io to more
1438  * than one device managed by cfq.
1439  */
1440 static struct cfq_io_context *
1441 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1442 {
1443         struct io_context *ioc = NULL;
1444         struct cfq_io_context *cic;
1445
1446         might_sleep_if(gfp_mask & __GFP_WAIT);
1447
1448         ioc = get_io_context(gfp_mask, cfqd->queue->node);
1449         if (!ioc)
1450                 return NULL;
1451
1452         cic = cfq_cic_rb_lookup(cfqd, ioc);
1453         if (cic)
1454                 goto out;
1455
1456         cic = cfq_alloc_io_context(cfqd, gfp_mask);
1457         if (cic == NULL)
1458                 goto err;
1459
1460         cfq_cic_link(cfqd, ioc, cic);
1461 out:
1462         smp_read_barrier_depends();
1463         if (unlikely(ioc->ioprio_changed))
1464                 cfq_ioc_set_ioprio(ioc);
1465
1466         return cic;
1467 err:
1468         put_io_context(ioc);
1469         return NULL;
1470 }
1471
1472 static void
1473 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
1474 {
1475         unsigned long elapsed = jiffies - cic->last_end_request;
1476         unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
1477
1478         cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
1479         cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
1480         cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
1481 }
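
/*
 * The update above is a fixed-point exponential moving average: each
 * step keeps 7/8 of the history and blends in 1/8 of the new sample,
 * scaled by 256 so integer division loses little precision. In steady
 * state ttime_samples converges to 256 (s = (7s + 256) / 8), and
 * ttime_mean tracks the recent per-request think time in jiffies.
 */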
1482
1483 static void
1484 cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
1485 {
1486         sector_t sdist;
1487         u64 total;
1488
1489         if (cic->last_request_pos < rq->sector)
1490                 sdist = rq->sector - cic->last_request_pos;
1491         else
1492                 sdist = cic->last_request_pos - rq->sector;
1493
1494         /*
1495          * Don't allow the seek distance to get too large from the
1496          * odd fragment, pagein, etc
1497          */
1498         if (cic->seek_samples <= 60) /* second&third seek */
1499                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
1500         else
1501                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
1502
1503         cic->seek_samples = (7*cic->seek_samples + 256) / 8;
1504         cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
1505         total = cic->seek_total + (cic->seek_samples/2);
1506         do_div(total, cic->seek_samples);
1507         cic->seek_mean = (sector_t)total;
1508 }
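
/*
 * Seek distance uses the same 7/8-decay fixed-point scheme, on sector
 * offsets. The min() clamps bound how far one sample may exceed 4x the
 * current mean (loosely for the first few samples, tightly afterwards),
 * so a single odd pagein cannot blow up seek_mean and flip CIC_SEEKY()
 * on an otherwise sequential task.
 */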
1509
1510 /*
1511  * Disable idle window if the process thinks too long or seeks so much that
1512  * it doesn't matter
1513  */
1514 static void
1515 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1516                        struct cfq_io_context *cic)
1517 {
1518         int enable_idle = cfq_cfqq_idle_window(cfqq);
1519
1520         if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
1521             (cfqd->hw_tag && CIC_SEEKY(cic)))
1522                 enable_idle = 0;
1523         else if (sample_valid(cic->ttime_samples)) {
1524                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
1525                         enable_idle = 0;
1526                 else
1527                         enable_idle = 1;
1528         }
1529
1530         if (enable_idle)
1531                 cfq_mark_cfqq_idle_window(cfqq);
1532         else
1533                 cfq_clear_cfqq_idle_window(cfqq);
1534 }
1535
1536
1537 /*
1538  * Check if new_cfqq should preempt the currently active queue. Return 0 for
1539  * no; if we aren't sure, a 1 will cause a preempt.
1540  */
1541 static int
1542 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
1543                    struct request *rq)
1544 {
1545         struct cfq_queue *cfqq = cfqd->active_queue;
1546
1547         if (cfq_class_idle(new_cfqq))
1548                 return 0;
1549
1550         if (!cfqq)
1551                 return 0;
1552
1553         if (cfq_class_idle(cfqq))
1554                 return 1;
1555         if (!cfq_cfqq_wait_request(new_cfqq))
1556                 return 0;
1557         /*
1558          * if it doesn't have slice left, forget it
1559          */
1560         if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
1561                 return 0;
1562         /*
1563          * if the new request is sync, but the currently running queue is
1564          * not, let the sync request have priority.
1565          */
1566         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
1567                 return 1;
1568         /*
1569          * So both queues are sync. Let the new request get disk time if
1570          * it's a metadata request and the current queue is doing regular IO.
1571          */
1572         if (rq_is_meta(rq) && !cfqq->meta_pending)
1573                 return 1;
1574
1575         return 0;
1576 }
1577
1578 /*
1579  * cfqq preempts the active queue. if we allowed preempt with no slice left,
1580  * let it have half of its nominal slice.
1581  */
1582 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1583 {
1584         cfq_slice_expired(cfqd, 1);
1585
1586         if (!cfqq->slice_left)
1587                 cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
1588
1589         /*
1590          * Put the new queue at the front of the current list,
1591          * so we know that it will be selected next.
1592          */
1593         BUG_ON(!cfq_cfqq_on_rr(cfqq));
1594         list_move(&cfqq->cfq_list, &cfqd->cur_rr);
1595
1596         cfqq->slice_end = cfqq->slice_left + jiffies;
1597 }
1598
1599 /*
1600  * Called when a new fs request (rq) is added (to cfqq). Check if there's
1601  * something we should do about it
1602  */
1603 static void
1604 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1605                 struct request *rq)
1606 {
1607         struct cfq_io_context *cic = RQ_CIC(rq);
1608
1609         if (rq_is_meta(rq))
1610                 cfqq->meta_pending++;
1611
1612         /*
1613          * check if this request is a better next-serve candidate
1614          */
1615         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
1616         BUG_ON(!cfqq->next_rq);
1617
1618         /*
1619          * we never wait for an async request and we don't allow preemption
1620          * of an async request. so just return early
1621          */
1622         if (!rq_is_sync(rq)) {
1623                 /*
1624                  * sync process issued an async request, if it's waiting
1625                  * then expire it and kick rq handling.
1626                  */
1627                 if (cic == cfqd->active_cic &&
1628                     del_timer(&cfqd->idle_slice_timer)) {
1629                         cfq_slice_expired(cfqd, 0);
1630                         blk_start_queueing(cfqd->queue);
1631                 }
1632                 return;
1633         }
1634
1635         cfq_update_io_thinktime(cfqd, cic);
1636         cfq_update_io_seektime(cic, rq);
1637         cfq_update_idle_window(cfqd, cfqq, cic);
1638
1639         cic->last_request_pos = rq->sector + rq->nr_sectors;
1640
1641         if (cfqq == cfqd->active_queue) {
1642                 /*
1643                  * if we are waiting for a request for this queue, let it rip
1644                  * immediately and flag that we must not expire this queue
1645                  * just now
1646                  */
1647                 if (cfq_cfqq_wait_request(cfqq)) {
1648                         cfq_mark_cfqq_must_dispatch(cfqq);
1649                         del_timer(&cfqd->idle_slice_timer);
1650                         blk_start_queueing(cfqd->queue);
1651                 }
1652         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
1653                 /*
1654                  * not the active queue - expire current slice if it is
1655          * idle and has expired its mean thinktime or this new queue
1656                  * has some old slice time left and is of higher priority
1657                  */
1658                 cfq_preempt_queue(cfqd, cfqq);
1659                 cfq_mark_cfqq_must_dispatch(cfqq);
1660                 blk_start_queueing(cfqd->queue);
1661         }
1662 }
1663
1664 static void cfq_insert_request(request_queue_t *q, struct request *rq)
1665 {
1666         struct cfq_data *cfqd = q->elevator->elevator_data;
1667         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1668
1669         cfq_init_prio_data(cfqq);
1670
1671         cfq_add_rq_rb(rq);
1672
1673         list_add_tail(&rq->queuelist, &cfqq->fifo);
1674
1675         cfq_rq_enqueued(cfqd, cfqq, rq);
1676 }
1677
1678 static void cfq_completed_request(request_queue_t *q, struct request *rq)
1679 {
1680         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1681         struct cfq_data *cfqd = cfqq->cfqd;
1682         const int sync = rq_is_sync(rq);
1683         unsigned long now;
1684
1685         now = jiffies;
1686
1687         WARN_ON(!cfqd->rq_in_driver);
1688         WARN_ON(!cfqq->on_dispatch[sync]);
1689         cfqd->rq_in_driver--;
1690         cfqq->on_dispatch[sync]--;
1691
1692         if (!cfq_class_idle(cfqq))
1693                 cfqd->last_end_request = now;
1694
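             /*
              * safe without a cfq_cfqq_on_rr() check here: the resort
              * helper now performs that check itself and is a no-op for
              * queues that are not on the rr lists
              */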
1695         cfq_resort_rr_list(cfqq, 0);
1696
1697         if (sync)
1698                 RQ_CIC(rq)->last_end_request = now;
1699
1700         /*
1701          * If this is the active queue, check if it needs to be expired,
1702          * or if we want to idle in case it has no pending requests.
1703          */
1704         if (cfqd->active_queue == cfqq) {
1705                 if (time_after(now, cfqq->slice_end))
1706                         cfq_slice_expired(cfqd, 0);
1707                 else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1708                         if (!cfq_arm_slice_timer(cfqd, cfqq))
1709                                 cfq_schedule_dispatch(cfqd);
1710                 }
1711         }
1712 }
1713
1714 /*
1715  * we temporarily boost lower priority queues if they are holding fs exclusive
1716  * resources. they are boosted to normal prio (CLASS_BE/4)
1717  */
1718 static void cfq_prio_boost(struct cfq_queue *cfqq)
1719 {
1720         const int ioprio_class = cfqq->ioprio_class;
1721         const int ioprio = cfqq->ioprio;
1722
1723         if (has_fs_excl()) {
1724                 /*
1725                  * boost idle prio on transactions that would lock out other
1726                  * users of the filesystem
1727                  */
1728                 if (cfq_class_idle(cfqq))
1729                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1730                 if (cfqq->ioprio > IOPRIO_NORM)
1731                         cfqq->ioprio = IOPRIO_NORM;
1732         } else {
1733                 /*
1734                  * check if we need to unboost the queue
1735                  */
1736                 if (cfqq->ioprio_class != cfqq->org_ioprio_class)
1737                         cfqq->ioprio_class = cfqq->org_ioprio_class;
1738                 if (cfqq->ioprio != cfqq->org_ioprio)
1739                         cfqq->ioprio = cfqq->org_ioprio;
1740         }
1741
1742         /*
1743          * refile between round-robin lists if we moved the priority class
1744          */
1745         if (ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio)
1746                 cfq_resort_rr_list(cfqq, 0);
1747 }
1748
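     /*
      * A queue we are idling on (wait_request), or one flagged must_alloc,
      * is allowed one forced allocation per slice via ELV_MQUEUE_MUST,
      * presumably so that the request being waited for cannot be refused
      * by the normal allocation limits; all other cases get ELV_MQUEUE_MAY.
      */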
1749 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
1750 {
1751         if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
1752             !cfq_cfqq_must_alloc_slice(cfqq)) {
1753                 cfq_mark_cfqq_must_alloc_slice(cfqq);
1754                 return ELV_MQUEUE_MUST;
1755         }
1756
1757         return ELV_MQUEUE_MAY;
1758 }
1759
1760 static int cfq_may_queue(request_queue_t *q, int rw)
1761 {
1762         struct cfq_data *cfqd = q->elevator->elevator_data;
1763         struct task_struct *tsk = current;
1764         struct cfq_queue *cfqq;
1765         unsigned int key;
1766
1767         key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
1768
1769         /*
1770          * don't force setup of a queue from here, as a call to may_queue
1771          * does not necessarily imply that a request actually will be queued.
1772          * so just lookup a possibly existing queue, or return 'may queue'
1773          * if that fails
1774          */
1775         cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
1776         if (cfqq) {
1777                 cfq_init_prio_data(cfqq);
1778                 cfq_prio_boost(cfqq);
1779
1780                 return __cfq_may_queue(cfqq);
1781         }
1782
1783         return ELV_MQUEUE_MAY;
1784 }
1785
1786 /*
1787  * queue lock held here
1788  */
1789 static void cfq_put_request(struct request *rq)
1790 {
1791         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1792
1793         if (cfqq) {
1794                 const int rw = rq_data_dir(rq);
1795
1796                 BUG_ON(!cfqq->allocated[rw]);
1797                 cfqq->allocated[rw]--;
1798
1799                 put_io_context(RQ_CIC(rq)->ioc);
1800
1801                 rq->elevator_private = NULL;
1802                 rq->elevator_private2 = NULL;
1803
1804                 cfq_put_queue(cfqq);
1805         }
1806 }
1807
1808 /*
1809  * Allocate cfq data structures associated with this request.
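      * The cic and cfqq references taken here are dropped again in
      * cfq_put_request().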
1810  */
1811 static int
1812 cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
1813 {
1814         struct cfq_data *cfqd = q->elevator->elevator_data;
1815         struct task_struct *tsk = current;
1816         struct cfq_io_context *cic;
1817         const int rw = rq_data_dir(rq);
1818         const int is_sync = rq_is_sync(rq);
1819         pid_t key = cfq_queue_pid(tsk, rw, is_sync);
1820         struct cfq_queue *cfqq;
1821         unsigned long flags;
1822
1823         might_sleep_if(gfp_mask & __GFP_WAIT);
1824
1825         cic = cfq_get_io_context(cfqd, gfp_mask);
1826
1827         spin_lock_irqsave(q->queue_lock, flags);
1828
1829         if (!cic)
1830                 goto queue_fail;
1831
1832         if (!cic->cfqq[is_sync]) {
1833                 cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
1834                 if (!cfqq)
1835                         goto queue_fail;
1836
1837                 cic->cfqq[is_sync] = cfqq;
1838         } else
1839                 cfqq = cic->cfqq[is_sync];
1840
1841         cfqq->allocated[rw]++;
1842         cfq_clear_cfqq_must_alloc(cfqq);
1843         atomic_inc(&cfqq->ref);
1844
1845         spin_unlock_irqrestore(q->queue_lock, flags);
1846
1847         rq->elevator_private = cic;
1848         rq->elevator_private2 = cfqq;
1849         return 0;
1850
1851 queue_fail:
1852         if (cic)
1853                 put_io_context(cic->ioc);
1854
1855         cfq_schedule_dispatch(cfqd);
1856         spin_unlock_irqrestore(q->queue_lock, flags);
1857         return 1;
1858 }
1859
1860 static void cfq_kick_queue(struct work_struct *work)
1861 {
1862         struct cfq_data *cfqd =
1863                 container_of(work, struct cfq_data, unplug_work);
1864         request_queue_t *q = cfqd->queue;
1865         unsigned long flags;
1866
1867         spin_lock_irqsave(q->queue_lock, flags);
1868         blk_start_queueing(q);
1869         spin_unlock_irqrestore(q->queue_lock, flags);
1870 }
1871
1872 /*
1873  * Timer running if the active_queue is currently idling inside its time slice
1874  */
1875 static void cfq_idle_slice_timer(unsigned long data)
1876 {
1877         struct cfq_data *cfqd = (struct cfq_data *) data;
1878         struct cfq_queue *cfqq;
1879         unsigned long flags;
1880
1881         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1882
1883         if ((cfqq = cfqd->active_queue) != NULL) {
1884                 unsigned long now = jiffies;
1885
1886                 /*
1887                  * expired
1888                  */
1889                 if (time_after(now, cfqq->slice_end))
1890                         goto expire;
1891
1892                 /*
1893          * only expire and reinvoke the request handler if there are
1894                  * other queues with pending requests
1895                  */
1896                 if (!cfqd->busy_queues)
1897                         goto out_cont;
1898
1899                 /*
1900                  * not expired and it has a request pending, let it dispatch
1901                  */
1902                 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
1903                         cfq_mark_cfqq_must_dispatch(cfqq);
1904                         goto out_kick;
1905                 }
1906         }
1907 expire:
1908         cfq_slice_expired(cfqd, 0);
1909 out_kick:
1910         cfq_schedule_dispatch(cfqd);
1911 out_cont:
1912         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1913 }
1914
1915 /*
1916  * Timer running if an idle class queue is waiting for service
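      * It only kicks dispatch once CFQ_IDLE_GRACE has passed since the
      * last request completion; until then it simply re-arms itself.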
1917  */
1918 static void cfq_idle_class_timer(unsigned long data)
1919 {
1920         struct cfq_data *cfqd = (struct cfq_data *) data;
1921         unsigned long flags, end;
1922
1923         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1924
1925         /*
1926          * race with a non-idle queue, reset timer
1927          */
1928         end = cfqd->last_end_request + CFQ_IDLE_GRACE;
1929         if (!time_after_eq(jiffies, end))
1930                 mod_timer(&cfqd->idle_class_timer, end);
1931         else
1932                 cfq_schedule_dispatch(cfqd);
1933
1934         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1935 }
1936
1937 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
1938 {
1939         del_timer_sync(&cfqd->idle_slice_timer);
1940         del_timer_sync(&cfqd->idle_class_timer);
1941         blk_sync_queue(cfqd->queue);
1942 }
1943
1944 static void cfq_exit_queue(elevator_t *e)
1945 {
1946         struct cfq_data *cfqd = e->elevator_data;
1947         request_queue_t *q = cfqd->queue;
1948
1949         cfq_shutdown_timer_wq(cfqd);
1950
1951         spin_lock_irq(q->queue_lock);
1952
1953         if (cfqd->active_queue)
1954                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
1955
1956         while (!list_empty(&cfqd->cic_list)) {
1957                 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
1958                                                         struct cfq_io_context,
1959                                                         queue_list);
1960
1961                 __cfq_exit_single_io_context(cfqd, cic);
1962         }
1963
1964         spin_unlock_irq(q->queue_lock);
1965
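             /*
              * exiting the cics above may have re-armed the timers or the
              * unplug work, so shut them down once more
              */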
1966         cfq_shutdown_timer_wq(cfqd);
1967
1968         kfree(cfqd->cfq_hash);
1969         kfree(cfqd);
1970 }
1971
1972 static void *cfq_init_queue(request_queue_t *q)
1973 {
1974         struct cfq_data *cfqd;
1975         int i;
1976
1977         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
1978         if (!cfqd)
1979                 return NULL;
1980
1981         memset(cfqd, 0, sizeof(*cfqd));
1982
1983         for (i = 0; i < CFQ_PRIO_LISTS; i++)
1984                 INIT_LIST_HEAD(&cfqd->rr_list[i]);
1985
1986         INIT_LIST_HEAD(&cfqd->busy_rr);
1987         INIT_LIST_HEAD(&cfqd->cur_rr);
1988         INIT_LIST_HEAD(&cfqd->idle_rr);
1989         INIT_LIST_HEAD(&cfqd->cic_list);
1990
1991         cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
1992         if (!cfqd->cfq_hash)
1993                 goto out_free;
1994
1995         for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
1996                 INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
1997
1998         cfqd->queue = q;
1999
2000         init_timer(&cfqd->idle_slice_timer);
2001         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2002         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2003
2004         init_timer(&cfqd->idle_class_timer);
2005         cfqd->idle_class_timer.function = cfq_idle_class_timer;
2006         cfqd->idle_class_timer.data = (unsigned long) cfqd;
2007
2008         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
2009
2010         cfqd->cfq_quantum = cfq_quantum;
2011         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
2012         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
2013         cfqd->cfq_back_max = cfq_back_max;
2014         cfqd->cfq_back_penalty = cfq_back_penalty;
2015         cfqd->cfq_slice[0] = cfq_slice_async;
2016         cfqd->cfq_slice[1] = cfq_slice_sync;
2017         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2018         cfqd->cfq_slice_idle = cfq_slice_idle;
2019
2020         return cfqd;
2021 out_free:
2022         kfree(cfqd);
2023         return NULL;
2024 }
2025
2026 static void cfq_slab_kill(void)
2027 {
2028         if (cfq_pool)
2029                 kmem_cache_destroy(cfq_pool);
2030         if (cfq_ioc_pool)
2031                 kmem_cache_destroy(cfq_ioc_pool);
2032 }
2033
2034 static int __init cfq_slab_setup(void)
2035 {
2036         cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
2037                                         NULL, NULL);
2038         if (!cfq_pool)
2039                 goto fail;
2040
2041         cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
2042                         sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
2043         if (!cfq_ioc_pool)
2044                 goto fail;
2045
2046         return 0;
2047 fail:
2048         cfq_slab_kill();
2049         return -ENOMEM;
2050 }
2051
2052 /*
2053  * sysfs parts below -->
2054  */
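
     /*
      * These attributes appear under /sys/block/<dev>/queue/iosched/ once
      * cfq is the active elevator. For example, with a hypothetical
      * device sda:
      *
      *      echo 8 > /sys/block/sda/queue/iosched/quantum
      *      cat /sys/block/sda/queue/iosched/slice_idle
      *
      * Values flagged with __CONV below are shown and stored in
      * milliseconds and converted to/from jiffies internally.
      */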
2055
2056 static ssize_t
2057 cfq_var_show(unsigned int var, char *page)
2058 {
2059         return sprintf(page, "%d\n", var);
2060 }
2061
2062 static ssize_t
2063 cfq_var_store(unsigned int *var, const char *page, size_t count)
2064 {
2065         char *p = (char *) page;
2066
2067         *var = simple_strtoul(p, &p, 10);
2068         return count;
2069 }
2070
2071 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
2072 static ssize_t __FUNC(elevator_t *e, char *page)                        \
2073 {                                                                       \
2074         struct cfq_data *cfqd = e->elevator_data;                       \
2075         unsigned int __data = __VAR;                                    \
2076         if (__CONV)                                                     \
2077                 __data = jiffies_to_msecs(__data);                      \
2078         return cfq_var_show(__data, (page));                            \
2079 }
2080 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
2081 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
2082 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
2083 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
2084 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
2085 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2086 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2087 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
2088 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
2089 #undef SHOW_FUNCTION
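
     /*
      * For reference, SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1)
      * expands to roughly:
      *
      *      static ssize_t cfq_slice_idle_show(elevator_t *e, char *page)
      *      {
      *              struct cfq_data *cfqd = e->elevator_data;
      *              unsigned int __data = cfqd->cfq_slice_idle;
      *              __data = jiffies_to_msecs(__data);
      *              return cfq_var_show(__data, (page));
      *      }
      */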
2090
2091 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
2092 static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)    \
2093 {                                                                       \
2094         struct cfq_data *cfqd = e->elevator_data;                       \
2095         unsigned int __data;                                            \
2096         int ret = cfq_var_store(&__data, (page), count);                \
2097         if (__data < (MIN))                                             \
2098                 __data = (MIN);                                         \
2099         else if (__data > (MAX))                                        \
2100                 __data = (MAX);                                         \
2101         if (__CONV)                                                     \
2102                 *(__PTR) = msecs_to_jiffies(__data);                    \
2103         else                                                            \
2104                 *(__PTR) = __data;                                      \
2105         return ret;                                                     \
2106 }
2107 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
2108 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
2109 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
2110 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
2111 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
2112 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
2113 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2114 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
2115 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
2116 #undef STORE_FUNCTION
2117
2118 #define CFQ_ATTR(name) \
2119         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
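
     /*
      * e.g. CFQ_ATTR(quantum) expands to
      * __ATTR(quantum, S_IRUGO|S_IWUSR, cfq_quantum_show, cfq_quantum_store),
      * tying each sysfs file to the SHOW/STORE pair generated above.
      */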
2120
2121 static struct elv_fs_entry cfq_attrs[] = {
2122         CFQ_ATTR(quantum),
2123         CFQ_ATTR(fifo_expire_sync),
2124         CFQ_ATTR(fifo_expire_async),
2125         CFQ_ATTR(back_seek_max),
2126         CFQ_ATTR(back_seek_penalty),
2127         CFQ_ATTR(slice_sync),
2128         CFQ_ATTR(slice_async),
2129         CFQ_ATTR(slice_async_rq),
2130         CFQ_ATTR(slice_idle),
2131         __ATTR_NULL
2132 };
2133
2134 static struct elevator_type iosched_cfq = {
2135         .ops = {
2136                 .elevator_merge_fn =            cfq_merge,
2137                 .elevator_merged_fn =           cfq_merged_request,
2138                 .elevator_merge_req_fn =        cfq_merged_requests,
2139                 .elevator_allow_merge_fn =      cfq_allow_merge,
2140                 .elevator_dispatch_fn =         cfq_dispatch_requests,
2141                 .elevator_add_req_fn =          cfq_insert_request,
2142                 .elevator_activate_req_fn =     cfq_activate_request,
2143                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
2144                 .elevator_queue_empty_fn =      cfq_queue_empty,
2145                 .elevator_completed_req_fn =    cfq_completed_request,
2146                 .elevator_former_req_fn =       elv_rb_former_request,
2147                 .elevator_latter_req_fn =       elv_rb_latter_request,
2148                 .elevator_set_req_fn =          cfq_set_request,
2149                 .elevator_put_req_fn =          cfq_put_request,
2150                 .elevator_may_queue_fn =        cfq_may_queue,
2151                 .elevator_init_fn =             cfq_init_queue,
2152                 .elevator_exit_fn =             cfq_exit_queue,
2153                 .trim =                         cfq_free_io_context,
2154         },
2155         .elevator_attrs =       cfq_attrs,
2156         .elevator_name =        "cfq",
2157         .elevator_owner =       THIS_MODULE,
2158 };
2159
2160 static int __init cfq_init(void)
2161 {
2162         int ret;
2163
2164         /*
2165          * could be 0 on HZ < 1000 setups
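      * (e.g. HZ=100 makes cfq_slice_idle = HZ/125 truncate to 0)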
2166          */
2167         if (!cfq_slice_async)
2168                 cfq_slice_async = 1;
2169         if (!cfq_slice_idle)
2170                 cfq_slice_idle = 1;
2171
2172         if (cfq_slab_setup())
2173                 return -ENOMEM;
2174
2175         ret = elv_register(&iosched_cfq);
2176         if (ret)
2177                 cfq_slab_kill();
2178
2179         return ret;
2180 }
2181
2182 static void __exit cfq_exit(void)
2183 {
2184         DECLARE_COMPLETION_ONSTACK(all_gone);
2185         elv_unregister(&iosched_cfq);
2186         ioc_gone = &all_gone;
2187         /* ioc_gone's update must be visible before reading ioc_count */
2188         smp_wmb();
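             /* the last cfq_io_context to be freed completes ioc_gone */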
2189         if (elv_ioc_count_read(ioc_count))
2190                 wait_for_completion(ioc_gone);
2191         synchronize_rcu();
2192         cfq_slab_kill();
2193 }
2194
2195 module_init(cfq_init);
2196 module_exit(cfq_exit);
2197
2198 MODULE_AUTHOR("Jens Axboe");
2199 MODULE_LICENSE("GPL");
2200 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");