1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/blkdev.h>
11 #include <linux/elevator.h>
12 #include <linux/hash.h>
13 #include <linux/rbtree.h>
14 #include <linux/ioprio.h>
15
16 /*
17  * tunables
18  */
19 static const int cfq_quantum = 4;               /* max queue in one round of service */
20 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
21 static const int cfq_back_max = 16 * 1024;      /* maximum backwards seek, in KiB */
22 static const int cfq_back_penalty = 2;          /* penalty of a backwards seek */
23
24 static const int cfq_slice_sync = HZ / 10;
25 static int cfq_slice_async = HZ / 25;
26 static const int cfq_slice_async_rq = 2;
27 static int cfq_slice_idle = HZ / 125;
28
29 #define CFQ_IDLE_GRACE          (HZ / 10)
30 #define CFQ_SLICE_SCALE         (5)
31
32 #define CFQ_KEY_ASYNC           (0)
33
34 /*
35  * for the hash of cfqq inside the cfqd
36  */
37 #define CFQ_QHASH_SHIFT         6
38 #define CFQ_QHASH_ENTRIES       (1 << CFQ_QHASH_SHIFT)
39 #define list_entry_qhash(entry) hlist_entry((entry), struct cfq_queue, cfq_hash)
40
41 #define list_entry_cfqq(ptr)    list_entry((ptr), struct cfq_queue, cfq_list)
42
43 #define RQ_CIC(rq)              ((struct cfq_io_context*)(rq)->elevator_private)
44 #define RQ_CFQQ(rq)             ((rq)->elevator_private2)
45
46 static struct kmem_cache *cfq_pool;
47 static struct kmem_cache *cfq_ioc_pool;
48
49 static DEFINE_PER_CPU(unsigned long, ioc_count);
50 static struct completion *ioc_gone;
51
52 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
53 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
54 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
55
56 #define ASYNC                   (0)
57 #define SYNC                    (1)
58
59 #define cfq_cfqq_dispatched(cfqq)       \
60         ((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])
61
62 #define cfq_cfqq_class_sync(cfqq)       ((cfqq)->key != CFQ_KEY_ASYNC)
63
64 #define cfq_cfqq_sync(cfqq)             \
65         (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
66
67 #define sample_valid(samples)   ((samples) > 80)
68
69 /*
70  * Per block device queue structure
71  */
72 struct cfq_data {
73         request_queue_t *queue;
74
75         /*
76          * rr list of queues with requests and the count of them
77          */
78         struct list_head rr_list[CFQ_PRIO_LISTS];
79         struct list_head busy_rr;
80         struct list_head cur_rr;
81         struct list_head idle_rr;
82         unsigned int busy_queues;
83
84         /*
85          * cfqq lookup hash
86          */
87         struct hlist_head *cfq_hash;
88
89         int rq_in_driver;
90         int hw_tag;
91
92         /*
93          * idle window management
94          */
95         struct timer_list idle_slice_timer;
96         struct work_struct unplug_work;
97
98         struct cfq_queue *active_queue;
99         struct cfq_io_context *active_cic;
100         int cur_prio, cur_end_prio;
101         unsigned int dispatch_slice;
102
103         struct timer_list idle_class_timer;
104
105         sector_t last_sector;
106         unsigned long last_end_request;
107
108         /*
109          * tunables, see top of file
110          */
111         unsigned int cfq_quantum;
112         unsigned int cfq_fifo_expire[2];
113         unsigned int cfq_back_penalty;
114         unsigned int cfq_back_max;
115         unsigned int cfq_slice[2];
116         unsigned int cfq_slice_async_rq;
117         unsigned int cfq_slice_idle;
118
119         struct list_head cic_list;
120 };
121
122 /*
123  * Per process-grouping structure
124  */
125 struct cfq_queue {
126         /* reference count */
127         atomic_t ref;
128         /* parent cfq_data */
129         struct cfq_data *cfqd;
130         /* cfqq lookup hash */
131         struct hlist_node cfq_hash;
132         /* hash key */
133         unsigned int key;
134         /* member of the rr/busy/cur/idle cfqd list */
135         struct list_head cfq_list;
136         /* sorted list of pending requests */
137         struct rb_root sort_list;
138         /* if fifo isn't expired, next request to serve */
139         struct request *next_rq;
140         /* requests queued in sort_list */
141         int queued[2];
142         /* currently allocated requests */
143         int allocated[2];
144         /* pending metadata requests */
145         int meta_pending;
146         /* fifo list of requests in sort_list */
147         struct list_head fifo;
148
149         unsigned long slice_start;
150         unsigned long slice_end;
151         unsigned long slice_left;
152
153         /* number of requests that are on the dispatch list */
154         int on_dispatch[2];
155
156         /* io prio of this group */
157         unsigned short ioprio, org_ioprio;
158         unsigned short ioprio_class, org_ioprio_class;
159
160         /* various state flags, see below */
161         unsigned int flags;
162 };
163
164 enum cfqq_state_flags {
165         CFQ_CFQQ_FLAG_on_rr = 0,
166         CFQ_CFQQ_FLAG_wait_request,
167         CFQ_CFQQ_FLAG_must_alloc,
168         CFQ_CFQQ_FLAG_must_alloc_slice,
169         CFQ_CFQQ_FLAG_must_dispatch,
170         CFQ_CFQQ_FLAG_fifo_expire,
171         CFQ_CFQQ_FLAG_idle_window,
172         CFQ_CFQQ_FLAG_prio_changed,
173         CFQ_CFQQ_FLAG_queue_new,
174 };
175
176 #define CFQ_CFQQ_FNS(name)                                              \
177 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
178 {                                                                       \
179         cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);                     \
180 }                                                                       \
181 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
182 {                                                                       \
183         cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                    \
184 }                                                                       \
185 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
186 {                                                                       \
187         return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;        \
188 }
189
190 CFQ_CFQQ_FNS(on_rr);
191 CFQ_CFQQ_FNS(wait_request);
192 CFQ_CFQQ_FNS(must_alloc);
193 CFQ_CFQQ_FNS(must_alloc_slice);
194 CFQ_CFQQ_FNS(must_dispatch);
195 CFQ_CFQQ_FNS(fifo_expire);
196 CFQ_CFQQ_FNS(idle_window);
197 CFQ_CFQQ_FNS(prio_changed);
198 CFQ_CFQQ_FNS(queue_new);
199 #undef CFQ_CFQQ_FNS
200
201 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
202 static void cfq_dispatch_insert(request_queue_t *, struct request *);
203 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
204
205 /*
206  * schedule a run of the queue if there are requests pending and no one in
207  * the driver will restart queueing
208  */
209 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
210 {
211         if (cfqd->busy_queues)
212                 kblockd_schedule_work(&cfqd->unplug_work);
213 }
214
215 static int cfq_queue_empty(request_queue_t *q)
216 {
217         struct cfq_data *cfqd = q->elevator->elevator_data;
218
219         return !cfqd->busy_queues;
220 }
221
222 static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
223 {
224         /*
225          * Use the per-process queue for read requests and synchronous writes
226          */
227         if (!(rw & REQ_RW) || is_sync)
228                 return task->pid;
229
230         return CFQ_KEY_ASYNC;
231 }
232
233 /*
234  * Lifted from AS - choose which of rq1 and rq2 that is best served now.
235  * We choose the request that is closest to the head right now. Distance
236  * behind the head is penalized and only allowed to a certain extent.
237  */
238 static struct request *
239 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
240 {
241         sector_t last, s1, s2, d1 = 0, d2 = 0;
242         unsigned long back_max;
243 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
244 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
245         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
246
247         if (rq1 == NULL || rq1 == rq2)
248                 return rq2;
249         if (rq2 == NULL)
250                 return rq1;
251
252         if (rq_is_sync(rq1) && !rq_is_sync(rq2))
253                 return rq1;
254         else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
255                 return rq2;
256         if (rq_is_meta(rq1) && !rq_is_meta(rq2))
257                 return rq1;
258         else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
259                 return rq2;
260
261         s1 = rq1->sector;
262         s2 = rq2->sector;
263
264         last = cfqd->last_sector;
265
266         /*
267          * by definition, 1KiB is 2 sectors
268          */
269         back_max = cfqd->cfq_back_max * 2;
270
271         /*
272          * Strict one way elevator _except_ in the case where we allow
273          * short backward seeks which are biased as twice the cost of a
274          * similar forward seek.
275          */
276         if (s1 >= last)
277                 d1 = s1 - last;
278         else if (s1 + back_max >= last)
279                 d1 = (last - s1) * cfqd->cfq_back_penalty;
280         else
281                 wrap |= CFQ_RQ1_WRAP;
282
283         if (s2 >= last)
284                 d2 = s2 - last;
285         else if (s2 + back_max >= last)
286                 d2 = (last - s2) * cfqd->cfq_back_penalty;
287         else
288                 wrap |= CFQ_RQ2_WRAP;
289
290         /* Found required data */
291
292         /*
293          * By doing switch() on the bit mask "wrap" we avoid having to
294          * check two variables for all permutations: --> faster!
295          */
296         switch (wrap) {
297         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
298                 if (d1 < d2)
299                         return rq1;
300                 else if (d2 < d1)
301                         return rq2;
302                 else {
303                         if (s1 >= s2)
304                                 return rq1;
305                         else
306                                 return rq2;
307                 }
308
309         case CFQ_RQ2_WRAP:
310                 return rq1;
311         case CFQ_RQ1_WRAP:
312                 return rq2;
313         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
314         default:
315                 /*
316                  * Since both rqs are wrapped,
317                  * start with the one that's further behind head
318                  * (--> only *one* back seek required),
319                  * since back seek takes more time than forward.
320                  */
321                 if (s1 <= s2)
322                         return rq1;
323                 else
324                         return rq2;
325         }
326 }
327
328 /*
329  * would be nice to take fifo expire time into account as well
330  */
331 static struct request *
332 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
333                   struct request *last)
334 {
335         struct rb_node *rbnext = rb_next(&last->rb_node);
336         struct rb_node *rbprev = rb_prev(&last->rb_node);
337         struct request *next = NULL, *prev = NULL;
338
339         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
340
341         if (rbprev)
342                 prev = rb_entry_rq(rbprev);
343
344         if (rbnext)
345                 next = rb_entry_rq(rbnext);
346         else {
347                 rbnext = rb_first(&cfqq->sort_list);
348                 if (rbnext && rbnext != &last->rb_node)
349                         next = rb_entry_rq(rbnext);
350         }
351
352         return cfq_choose_req(cfqd, next, prev);
353 }
354
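/*
 * Move cfqq to the service list matching its class and current dispatch
 * state. Preempted and never-serviced queues are queued ahead of older
 * queues, but behind other new queues, to keep startup latency fair.
 */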
355 static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
356 {
357         struct cfq_data *cfqd = cfqq->cfqd;
358         struct list_head *list;
359
360         BUG_ON(!cfq_cfqq_on_rr(cfqq));
361
362         list_del(&cfqq->cfq_list);
363
364         if (cfq_class_rt(cfqq))
365                 list = &cfqd->cur_rr;
366         else if (cfq_class_idle(cfqq))
367                 list = &cfqd->idle_rr;
368         else {
369                 /*
370                  * if cfqq has requests in flight, don't allow it to be
371                  * found in cfq_set_active_queue before it has finished them.
372                  * this is done to increase fairness between a process that
373                  * has lots of io pending vs one that only generates one
374                  * sporadically or synchronously
375                  */
376                 if (cfq_cfqq_dispatched(cfqq))
377                         list = &cfqd->busy_rr;
378                 else
379                         list = &cfqd->rr_list[cfqq->ioprio];
380         }
381
382         /*
383          * If this queue was preempted or is new (never been serviced), let
384          * it be added first for fairness but behind other new queues.
385          * Otherwise, just add to the back of the list.
386          */
387         if (preempted || cfq_cfqq_queue_new(cfqq)) {
388                 struct list_head *n = list;
389                 struct cfq_queue *__cfqq;
390
391                 while (n->next != list) {
392                         __cfqq = list_entry_cfqq(n->next);
393                         if (!cfq_cfqq_queue_new(__cfqq))
394                                 break;
395
396                         n = n->next;
397                 }
398
399                 list = n;
400         }
401
402         list_add_tail(&cfqq->cfq_list, list);
403 }
404
405 /*
406  * add to busy list of queues for service, trying to be fair in ordering
407  * the pending list according to last request service
408  */
409 static inline void
410 cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
411 {
412         BUG_ON(cfq_cfqq_on_rr(cfqq));
413         cfq_mark_cfqq_on_rr(cfqq);
414         cfqd->busy_queues++;
415
416         cfq_resort_rr_list(cfqq, 0);
417 }
418
419 static inline void
420 cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
421 {
422         BUG_ON(!cfq_cfqq_on_rr(cfqq));
423         cfq_clear_cfqq_on_rr(cfqq);
424         list_del_init(&cfqq->cfq_list);
425
426         BUG_ON(!cfqd->busy_queues);
427         cfqd->busy_queues--;
428 }
429
430 /*
431  * rb tree support functions
432  */
433 static inline void cfq_del_rq_rb(struct request *rq)
434 {
435         struct cfq_queue *cfqq = RQ_CFQQ(rq);
436         struct cfq_data *cfqd = cfqq->cfqd;
437         const int sync = rq_is_sync(rq);
438
439         BUG_ON(!cfqq->queued[sync]);
440         cfqq->queued[sync]--;
441
442         elv_rb_del(&cfqq->sort_list, rq);
443
444         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
445                 cfq_del_cfqq_rr(cfqd, cfqq);
446 }
447
448 static void cfq_add_rq_rb(struct request *rq)
449 {
450         struct cfq_queue *cfqq = RQ_CFQQ(rq);
451         struct cfq_data *cfqd = cfqq->cfqd;
452         struct request *__alias;
453
454         cfqq->queued[rq_is_sync(rq)]++;
455
456         /*
457          * looks a little odd, but the first insert might return an alias.
458          * if that happens, put the alias on the dispatch list
459          */
460         while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
461                 cfq_dispatch_insert(cfqd->queue, __alias);
462
463         if (!cfq_cfqq_on_rr(cfqq))
464                 cfq_add_cfqq_rr(cfqd, cfqq);
465 }
466
467 static inline void
468 cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
469 {
470         elv_rb_del(&cfqq->sort_list, rq);
471         cfqq->queued[rq_is_sync(rq)]--;
472         cfq_add_rq_rb(rq);
473 }
474
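/*
 * Find a queued request in the task's cfqq that starts where bio would end,
 * ie a candidate for a front merge.
 */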
475 static struct request *
476 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
477 {
478         struct task_struct *tsk = current;
479         pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
480         struct cfq_queue *cfqq;
481
482         cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
483         if (cfqq) {
484                 sector_t sector = bio->bi_sector + bio_sectors(bio);
485
486                 return elv_rb_find(&cfqq->sort_list, sector);
487         }
488
489         return NULL;
490 }
491
492 static void cfq_activate_request(request_queue_t *q, struct request *rq)
493 {
494         struct cfq_data *cfqd = q->elevator->elevator_data;
495
496         cfqd->rq_in_driver++;
497
498         /*
499          * If the depth is larger than 1, it really could be queueing. But let's
500          * make the mark a little higher - idling could still be good for
501          * low queueing, and a low queueing number could also just indicate
502          * a SCSI mid layer like behaviour where limit+1 is often seen.
503          */
504         if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
505                 cfqd->hw_tag = 1;
506 }
507
508 static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
509 {
510         struct cfq_data *cfqd = q->elevator->elevator_data;
511
512         WARN_ON(!cfqd->rq_in_driver);
513         cfqd->rq_in_driver--;
514 }
515
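/*
 * Take rq out of the fifo and the sort tree, picking a new next_rq for the
 * queue if this was it, and dropping the pending meta count if needed.
 */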
516 static void cfq_remove_request(struct request *rq)
517 {
518         struct cfq_queue *cfqq = RQ_CFQQ(rq);
519
520         if (cfqq->next_rq == rq)
521                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
522
523         list_del_init(&rq->queuelist);
524         cfq_del_rq_rb(rq);
525
526         if (rq_is_meta(rq)) {
527                 WARN_ON(!cfqq->meta_pending);
528                 cfqq->meta_pending--;
529         }
530 }
531
532 static int
533 cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
534 {
535         struct cfq_data *cfqd = q->elevator->elevator_data;
536         struct request *__rq;
537
538         __rq = cfq_find_rq_fmerge(cfqd, bio);
539         if (__rq && elv_rq_merge_ok(__rq, bio)) {
540                 *req = __rq;
541                 return ELEVATOR_FRONT_MERGE;
542         }
543
544         return ELEVATOR_NO_MERGE;
545 }
546
547 static void cfq_merged_request(request_queue_t *q, struct request *req,
548                                int type)
549 {
550         if (type == ELEVATOR_FRONT_MERGE) {
551                 struct cfq_queue *cfqq = RQ_CFQQ(req);
552
553                 cfq_reposition_rq_rb(cfqq, req);
554         }
555 }
556
557 static void
558 cfq_merged_requests(request_queue_t *q, struct request *rq,
559                     struct request *next)
560 {
561         /*
562          * reposition in fifo if next is older than rq
563          */
564         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
565             time_before(next->start_time, rq->start_time))
566                 list_move(&rq->queuelist, &next->queuelist);
567
568         cfq_remove_request(next);
569 }
570
571 static int cfq_allow_merge(request_queue_t *q, struct request *rq,
572                            struct bio *bio)
573 {
574         struct cfq_data *cfqd = q->elevator->elevator_data;
575         const int rw = bio_data_dir(bio);
576         struct cfq_queue *cfqq;
577         pid_t key;
578
579         /*
580          * If bio is async or a write, always allow merge
581          */
582         if (!bio_sync(bio) || rw == WRITE)
583                 return 1;
584
585         /*
586          * bio is sync. if request is not, disallow.
587          */
588         if (!rq_is_sync(rq))
589                 return 0;
590
591         /*
592          * Ok, both bio and request are sync. Allow merge if they are
593          * from the same queue.
594          */
595         key = cfq_queue_pid(current, rw, 1);
596         cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
597         if (cfqq != RQ_CFQQ(rq))
598                 return 0;
599
600         return 1;
601 }
602
603 static inline void
604 __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
605 {
606         if (cfqq) {
607                 /*
608                  * stop potential idle class queues waiting service
609                  */
610                 del_timer(&cfqd->idle_class_timer);
611
612                 cfqq->slice_start = jiffies;
613                 cfqq->slice_end = 0;
614                 cfqq->slice_left = 0;
615                 cfq_clear_cfqq_must_alloc_slice(cfqq);
616                 cfq_clear_cfqq_fifo_expire(cfqq);
617         }
618
619         cfqd->active_queue = cfqq;
620 }
621
622 /*
623  * current cfqq expired its slice (or was too idle), select new one
624  */
625 static void
626 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
627                     int preempted)
628 {
629         unsigned long now = jiffies;
630
631         if (cfq_cfqq_wait_request(cfqq))
632                 del_timer(&cfqd->idle_slice_timer);
633
634         if (!preempted && !cfq_cfqq_dispatched(cfqq))
635                 cfq_schedule_dispatch(cfqd);
636
637         cfq_clear_cfqq_must_dispatch(cfqq);
638         cfq_clear_cfqq_wait_request(cfqq);
639         cfq_clear_cfqq_queue_new(cfqq);
640
641         /*
642          * store what was left of this slice, if the queue idled out
643          * or was preempted
644          */
645         if (time_after(cfqq->slice_end, now))
646                 cfqq->slice_left = cfqq->slice_end - now;
647         else
648                 cfqq->slice_left = 0;
649
650         if (cfq_cfqq_on_rr(cfqq))
651                 cfq_resort_rr_list(cfqq, preempted);
652
653         if (cfqq == cfqd->active_queue)
654                 cfqd->active_queue = NULL;
655
656         if (cfqd->active_cic) {
657                 put_io_context(cfqd->active_cic->ioc);
658                 cfqd->active_cic = NULL;
659         }
660
661         cfqd->dispatch_slice = 0;
662 }
663
664 static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
665 {
666         struct cfq_queue *cfqq = cfqd->active_queue;
667
668         if (cfqq)
669                 __cfq_slice_expired(cfqd, cfqq, preempted);
670 }
671
672 /*
 * Priority levels are serviced in expanding passes: each pass widens the
 * scanned window by one level, so the levels picked on successive passes are:
 *
673  * 0
674  * 0,1
675  * 0,1,2
676  * 0,1,2,3
677  * 0,1,2,3,4
678  * 0,1,2,3,4,5
679  * 0,1,2,3,4,5,6
680  * 0,1,2,3,4,5,6,7
681  */
682 static int cfq_get_next_prio_level(struct cfq_data *cfqd)
683 {
684         int prio, wrap;
685
686         prio = -1;
687         wrap = 0;
688         do {
689                 int p;
690
691                 for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
692                         if (!list_empty(&cfqd->rr_list[p])) {
693                                 prio = p;
694                                 break;
695                         }
696                 }
697
698                 if (prio != -1)
699                         break;
700                 cfqd->cur_prio = 0;
701                 if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
702                         cfqd->cur_end_prio = 0;
703                         if (wrap)
704                                 break;
705                         wrap = 1;
706                 }
707         } while (1);
708
709         if (unlikely(prio == -1))
710                 return -1;
711
712         BUG_ON(prio >= CFQ_PRIO_LISTS);
713
714         list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);
715
716         cfqd->cur_prio = prio + 1;
717         if (cfqd->cur_prio > cfqd->cur_end_prio) {
718                 cfqd->cur_end_prio = cfqd->cur_prio;
719                 cfqd->cur_prio = 0;
720         }
721         if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
722                 cfqd->cur_prio = 0;
723                 cfqd->cur_end_prio = 0;
724         }
725
726         return prio;
727 }
728
729 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
730 {
731         struct cfq_queue *cfqq = NULL;
732
733         if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) {
734                 /*
735                  * if current list is non-empty, grab first entry. if it is
736                  * empty, get the next prio level and, if any queues were
737                  * spliced in, grab the first of those
738                  */
739                 cfqq = list_entry_cfqq(cfqd->cur_rr.next);
740         } else if (!list_empty(&cfqd->busy_rr)) {
741                 /*
742                  * If no new queues are available, check if the busy list has
743                  * some before falling back to idle io.
744                  */
745                 cfqq = list_entry_cfqq(cfqd->busy_rr.next);
746         } else if (!list_empty(&cfqd->idle_rr)) {
747                 /*
748                  * if we have idle queues and no rt or be queues had pending
749                  * requests, either allow immediate service if the grace period
750                  * has passed or arm the idle grace timer
751                  */
752                 unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
753
754                 if (time_after_eq(jiffies, end))
755                         cfqq = list_entry_cfqq(cfqd->idle_rr.next);
756                 else
757                         mod_timer(&cfqd->idle_class_timer, end);
758         }
759
760         __cfq_set_active_queue(cfqd, cfqq);
761         return cfqq;
762 }
763
764 #define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024))
765
766 static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
768 {
769         struct cfq_io_context *cic;
770         unsigned long sl;
771
772         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
773         WARN_ON(cfqq != cfqd->active_queue);
774
775         /*
776          * idle is disabled, either manually or by past process history
777          */
778         if (!cfqd->cfq_slice_idle)
779                 return 0;
780         if (!cfq_cfqq_idle_window(cfqq))
781                 return 0;
782         /*
783          * task has exited, don't wait
784          */
785         cic = cfqd->active_cic;
786         if (!cic || !cic->ioc->task)
787                 return 0;
788
789         cfq_mark_cfqq_must_dispatch(cfqq);
790         cfq_mark_cfqq_wait_request(cfqq);
791
792         sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
793
794         /*
795          * we don't want to idle for seeks, but we do want to allow
796          * fair distribution of slice time for a process doing back-to-back
797          * seeks. so allow a little bit of time for him to submit a new rq
798          */
799         if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
800                 sl = min(sl, msecs_to_jiffies(2));
801
802         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
803         return 1;
804 }
805
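/*
 * Move a request from cfq's internal structures onto the device dispatch
 * list, and remember where the dispatch list now ends for later seek
 * distance decisions.
 */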
806 static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
807 {
808         struct cfq_data *cfqd = q->elevator->elevator_data;
809         struct cfq_queue *cfqq = RQ_CFQQ(rq);
810
811         cfq_remove_request(rq);
812         cfqq->on_dispatch[rq_is_sync(rq)]++;
813         elv_dispatch_sort(q, rq);
814
815         rq = list_entry(q->queue_head.prev, struct request, queuelist);
816         cfqd->last_sector = rq->sector + rq->nr_sectors;
817 }
818
819 /*
820  * return expired entry, or NULL to just start from scratch in rbtree
821  */
822 static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
823 {
824         struct cfq_data *cfqd = cfqq->cfqd;
825         struct request *rq;
826         int fifo;
827
828         if (cfq_cfqq_fifo_expire(cfqq))
829                 return NULL;
830         if (list_empty(&cfqq->fifo))
831                 return NULL;
832
833         fifo = cfq_cfqq_class_sync(cfqq);
834         rq = rq_entry_fifo(cfqq->fifo.next);
835
836         if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
837                 cfq_mark_cfqq_fifo_expire(cfqq);
838                 return rq;
839         }
840
841         return NULL;
842 }
843
844 /*
845  * Scale schedule slice based on io priority. Use the sync time slice only
846  * if a queue is marked sync and has sync io queued. A sync queue with async
847  * io only should not get the full sync slice length.
848  */
849 static inline int
850 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
851 {
852         const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];
853
854         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
855
856         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
857 }
858
859 static inline void
860 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
861 {
862         cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
863 }
864
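/*
 * How many requests an async queue may dispatch in one slice, scaled by io
 * priority so that higher priority queues get a larger allowance.
 */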
865 static inline int
866 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
867 {
868         const int base_rq = cfqd->cfq_slice_async_rq;
869
870         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
871
872         return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
873 }
874
875 /*
876  * get next queue for service
877  */
878 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
879 {
880         unsigned long now = jiffies;
881         struct cfq_queue *cfqq;
882
883         cfqq = cfqd->active_queue;
884         if (!cfqq)
885                 goto new_queue;
886
887         /*
888          * slice has expired
889          */
890         if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end))
891                 goto expire;
892
893         /*
894          * if queue has requests, dispatch one. if not, check if
895          * enough slice is left to wait for one
896          */
897         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
898                 goto keep_queue;
899         else if (cfq_cfqq_dispatched(cfqq)) {
900                 cfqq = NULL;
901                 goto keep_queue;
902         } else if (cfq_cfqq_class_sync(cfqq)) {
903                 if (cfq_arm_slice_timer(cfqd, cfqq))
904                         return NULL;
905         }
906
907 expire:
908         cfq_slice_expired(cfqd, 0);
909 new_queue:
910         cfqq = cfq_set_active_queue(cfqd);
911 keep_queue:
912         return cfqq;
913 }
914
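/*
 * Dispatch up to max_dispatch requests from cfqq, preferring fifo-expired
 * requests, then expire the slice early for async queues that used up their
 * request allowance, for idle class queues, and for queues with idling
 * disabled.
 */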
915 static int
916 __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
917                         int max_dispatch)
918 {
919         int dispatched = 0;
920
921         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
922
923         do {
924                 struct request *rq;
925
926                 /*
927                  * follow expired path, else get first next available
928                  */
929                 if ((rq = cfq_check_fifo(cfqq)) == NULL)
930                         rq = cfqq->next_rq;
931
932                 /*
933                  * finally, insert request into driver dispatch list
934                  */
935                 cfq_dispatch_insert(cfqd->queue, rq);
936
937                 cfqd->dispatch_slice++;
938                 dispatched++;
939
940                 if (!cfqd->active_cic) {
941                         atomic_inc(&RQ_CIC(rq)->ioc->refcount);
942                         cfqd->active_cic = RQ_CIC(rq);
943                 }
944
945                 if (RB_EMPTY_ROOT(&cfqq->sort_list))
946                         break;
947
948         } while (dispatched < max_dispatch);
949
950         /*
951          * if slice end isn't set yet, set it.
952          */
953         if (!cfqq->slice_end)
954                 cfq_set_prio_slice(cfqd, cfqq);
955
956         /*
957          * expire an async queue immediately if it has used up its slice. idle
958          * queues always expire after 1 dispatch round.
959          */
960         if ((!cfq_cfqq_sync(cfqq) &&
961             cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
962             cfq_class_idle(cfqq) ||
963             !cfq_cfqq_idle_window(cfqq))
964                 cfq_slice_expired(cfqd, 0);
965
966         return dispatched;
967 }
968
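/*
 * Drain every pending request from each queue on the given list straight to
 * the dispatch list; used when the elevator must empty itself completely.
 */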
969 static int
970 cfq_forced_dispatch_cfqqs(struct list_head *list)
971 {
972         struct cfq_queue *cfqq, *next;
973         int dispatched;
974
975         dispatched = 0;
976         list_for_each_entry_safe(cfqq, next, list, cfq_list) {
977                 while (cfqq->next_rq) {
978                         cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
979                         dispatched++;
980                 }
981                 BUG_ON(!list_empty(&cfqq->fifo));
982         }
983
984         return dispatched;
985 }
986
987 static int
988 cfq_forced_dispatch(struct cfq_data *cfqd)
989 {
990         int i, dispatched = 0;
991
992         for (i = 0; i < CFQ_PRIO_LISTS; i++)
993                 dispatched += cfq_forced_dispatch_cfqqs(&cfqd->rr_list[i]);
994
995         dispatched += cfq_forced_dispatch_cfqqs(&cfqd->busy_rr);
996         dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr);
997         dispatched += cfq_forced_dispatch_cfqqs(&cfqd->idle_rr);
998
999         cfq_slice_expired(cfqd, 0);
1000
1001         BUG_ON(cfqd->busy_queues);
1002
1003         return dispatched;
1004 }
1005
1006 static int
1007 cfq_dispatch_requests(request_queue_t *q, int force)
1008 {
1009         struct cfq_data *cfqd = q->elevator->elevator_data;
1010         struct cfq_queue *cfqq, *prev_cfqq;
1011         int dispatched;
1012
1013         if (!cfqd->busy_queues)
1014                 return 0;
1015
1016         if (unlikely(force))
1017                 return cfq_forced_dispatch(cfqd);
1018
1019         dispatched = 0;
1020         prev_cfqq = NULL;
1021         while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
1022                 int max_dispatch;
1023
1024                 /*
1025                  * Don't repeat dispatch from the previous queue.
1026                  */
1027                 if (prev_cfqq == cfqq)
1028                         break;
1029
1030                 cfq_clear_cfqq_must_dispatch(cfqq);
1031                 cfq_clear_cfqq_wait_request(cfqq);
1032                 del_timer(&cfqd->idle_slice_timer);
1033
1034                 max_dispatch = cfqd->cfq_quantum;
1035                 if (cfq_class_idle(cfqq))
1036                         max_dispatch = 1;
1037
1038                 dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
1039
1040                 /*
1041                  * If the dispatch cfqq has idling enabled and is still
1042                  * the active queue, break out.
1043                  */
1044                 if (cfq_cfqq_idle_window(cfqq) && cfqd->active_queue)
1045                         break;
1046
1047                 prev_cfqq = cfqq;
1048         }
1049
1050         return dispatched;
1051 }
1052
1053 /*
1054  * task holds one reference to the queue, dropped when task exits. each rq
1055  * in-flight on this queue also holds a reference, dropped when rq is freed.
1056  *
1057  * queue lock must be held here.
1058  */
1059 static void cfq_put_queue(struct cfq_queue *cfqq)
1060 {
1061         struct cfq_data *cfqd = cfqq->cfqd;
1062
1063         BUG_ON(atomic_read(&cfqq->ref) <= 0);
1064
1065         if (!atomic_dec_and_test(&cfqq->ref))
1066                 return;
1067
1068         BUG_ON(rb_first(&cfqq->sort_list));
1069         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
1070         BUG_ON(cfq_cfqq_on_rr(cfqq));
1071
1072         if (unlikely(cfqd->active_queue == cfqq))
1073                 __cfq_slice_expired(cfqd, cfqq, 0);
1074
1075         /*
1076          * it's on the empty list and still hashed
1077          */
1078         list_del(&cfqq->cfq_list);
1079         hlist_del(&cfqq->cfq_hash);
1080         kmem_cache_free(cfq_pool, cfqq);
1081 }
1082
1083 static struct cfq_queue *
1084 __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
1085                     const int hashval)
1086 {
1087         struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
1088         struct hlist_node *entry;
1089         struct cfq_queue *__cfqq;
1090
1091         hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
1092                 const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
1093
1094                 if (__cfqq->key == key && (__p == prio || !prio))
1095                         return __cfqq;
1096         }
1097
1098         return NULL;
1099 }
1100
1101 static struct cfq_queue *
1102 cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
1103 {
1104         return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
1105 }
1106
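/*
 * Release all cfq io contexts hanging off an io_context, and complete
 * ioc_gone once the global count drops to zero (module unload path).
 */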
1107 static void cfq_free_io_context(struct io_context *ioc)
1108 {
1109         struct cfq_io_context *__cic;
1110         struct rb_node *n;
1111         int freed = 0;
1112
1113         while ((n = rb_first(&ioc->cic_root)) != NULL) {
1114                 __cic = rb_entry(n, struct cfq_io_context, rb_node);
1115                 rb_erase(&__cic->rb_node, &ioc->cic_root);
1116                 kmem_cache_free(cfq_ioc_pool, __cic);
1117                 freed++;
1118         }
1119
1120         elv_ioc_count_mod(ioc_count, -freed);
1121
1122         if (ioc_gone && !elv_ioc_count_read(ioc_count))
1123                 complete(ioc_gone);
1124 }
1125
1126 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1127 {
1128         if (unlikely(cfqq == cfqd->active_queue))
1129                 __cfq_slice_expired(cfqd, cfqq, 0);
1130
1131         cfq_put_queue(cfqq);
1132 }
1133
1134 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
1135                                          struct cfq_io_context *cic)
1136 {
1137         list_del_init(&cic->queue_list);
1138         smp_wmb();
1139         cic->key = NULL;
1140
1141         if (cic->cfqq[ASYNC]) {
1142                 cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
1143                 cic->cfqq[ASYNC] = NULL;
1144         }
1145
1146         if (cic->cfqq[SYNC]) {
1147                 cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
1148                 cic->cfqq[SYNC] = NULL;
1149         }
1150 }
1151
1152
1153 /*
1154  * Called with interrupts disabled
1155  */
1156 static void cfq_exit_single_io_context(struct cfq_io_context *cic)
1157 {
1158         struct cfq_data *cfqd = cic->key;
1159
1160         if (cfqd) {
1161                 request_queue_t *q = cfqd->queue;
1162
1163                 spin_lock_irq(q->queue_lock);
1164                 __cfq_exit_single_io_context(cfqd, cic);
1165                 spin_unlock_irq(q->queue_lock);
1166         }
1167 }
1168
1169 static void cfq_exit_io_context(struct io_context *ioc)
1170 {
1171         struct cfq_io_context *__cic;
1172         struct rb_node *n;
1173
1174         /*
1175          * put the reference this task is holding to the various queues
1176          */
1177
1178         n = rb_first(&ioc->cic_root);
1179         while (n != NULL) {
1180                 __cic = rb_entry(n, struct cfq_io_context, rb_node);
1181
1182                 cfq_exit_single_io_context(__cic);
1183                 n = rb_next(n);
1184         }
1185 }
1186
1187 static struct cfq_io_context *
1188 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1189 {
1190         struct cfq_io_context *cic;
1191
1192         cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
1193         if (cic) {
1194                 memset(cic, 0, sizeof(*cic));
1195                 cic->last_end_request = jiffies;
1196                 INIT_LIST_HEAD(&cic->queue_list);
1197                 cic->dtor = cfq_free_io_context;
1198                 cic->exit = cfq_exit_io_context;
1199                 elv_ioc_count_inc(ioc_count);
1200         }
1201
1202         return cic;
1203 }
1204
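/*
 * Set up the queue's io priority and class from the current task, falling
 * back to a nice-derived best-effort priority when none has been set.
 */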
1205 static void cfq_init_prio_data(struct cfq_queue *cfqq)
1206 {
1207         struct task_struct *tsk = current;
1208         int ioprio_class;
1209
1210         if (!cfq_cfqq_prio_changed(cfqq))
1211                 return;
1212
1213         ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
1214         switch (ioprio_class) {
1215                 default:
1216                         printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
1217                 case IOPRIO_CLASS_NONE:
1218                         /*
1219                          * no prio set, place us in the middle of the BE classes
1220                          */
1221                         cfqq->ioprio = task_nice_ioprio(tsk);
1222                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1223                         break;
1224                 case IOPRIO_CLASS_RT:
1225                         cfqq->ioprio = task_ioprio(tsk);
1226                         cfqq->ioprio_class = IOPRIO_CLASS_RT;
1227                         break;
1228                 case IOPRIO_CLASS_BE:
1229                         cfqq->ioprio = task_ioprio(tsk);
1230                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1231                         break;
1232                 case IOPRIO_CLASS_IDLE:
1233                         cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
1234                         cfqq->ioprio = 7;
1235                         cfq_clear_cfqq_idle_window(cfqq);
1236                         break;
1237         }
1238
1239         /*
1240          * keep track of original prio settings in case we have to temporarily
1241          * elevate the priority of this queue
1242          */
1243         cfqq->org_ioprio = cfqq->ioprio;
1244         cfqq->org_ioprio_class = cfqq->ioprio_class;
1245
1246         if (cfq_cfqq_on_rr(cfqq))
1247                 cfq_resort_rr_list(cfqq, 0);
1248
1249         cfq_clear_cfqq_prio_changed(cfqq);
1250 }
1251
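/*
 * The owning task changed its io priority: swap the async queue for one
 * matching the new priority and flag the sync queue for a prio update.
 */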
1252 static inline void changed_ioprio(struct cfq_io_context *cic)
1253 {
1254         struct cfq_data *cfqd = cic->key;
1255         struct cfq_queue *cfqq;
1256         unsigned long flags;
1257
1258         if (unlikely(!cfqd))
1259                 return;
1260
1261         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1262
1263         cfqq = cic->cfqq[ASYNC];
1264         if (cfqq) {
1265                 struct cfq_queue *new_cfqq;
1266                 new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
1267                                          GFP_ATOMIC);
1268                 if (new_cfqq) {
1269                         cic->cfqq[ASYNC] = new_cfqq;
1270                         cfq_put_queue(cfqq);
1271                 }
1272         }
1273
1274         cfqq = cic->cfqq[SYNC];
1275         if (cfqq)
1276                 cfq_mark_cfqq_prio_changed(cfqq);
1277
1278         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1279 }
1280
1281 static void cfq_ioc_set_ioprio(struct io_context *ioc)
1282 {
1283         struct cfq_io_context *cic;
1284         struct rb_node *n;
1285
1286         ioc->ioprio_changed = 0;
1287
1288         n = rb_first(&ioc->cic_root);
1289         while (n != NULL) {
1290                 cic = rb_entry(n, struct cfq_io_context, rb_node);
1291
1292                 changed_ioprio(cic);
1293                 n = rb_next(n);
1294         }
1295 }
1296
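/*
 * Find the cfq_queue for this key and io priority, or set up a new one,
 * possibly dropping the queue lock to allocate if the caller may sleep.
 */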
1297 static struct cfq_queue *
1298 cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
1299               gfp_t gfp_mask)
1300 {
1301         const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
1302         struct cfq_queue *cfqq, *new_cfqq = NULL;
1303         unsigned short ioprio;
1304
1305 retry:
1306         ioprio = tsk->ioprio;
1307         cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
1308
1309         if (!cfqq) {
1310                 if (new_cfqq) {
1311                         cfqq = new_cfqq;
1312                         new_cfqq = NULL;
1313                 } else if (gfp_mask & __GFP_WAIT) {
1314                         /*
1315                          * Inform the allocator of the fact that we will
1316                          * just repeat this allocation if it fails, to allow
1317                          * the allocator to do whatever it needs to attempt to
1318                          * free memory.
1319                          */
1320                         spin_unlock_irq(cfqd->queue->queue_lock);
1321                         new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
1322                         spin_lock_irq(cfqd->queue->queue_lock);
1323                         goto retry;
1324                 } else {
1325                         cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
1326                         if (!cfqq)
1327                                 goto out;
1328                 }
1329
1330                 memset(cfqq, 0, sizeof(*cfqq));
1331
1332                 INIT_HLIST_NODE(&cfqq->cfq_hash);
1333                 INIT_LIST_HEAD(&cfqq->cfq_list);
1334                 INIT_LIST_HEAD(&cfqq->fifo);
1335
1336                 cfqq->key = key;
1337                 hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
1338                 atomic_set(&cfqq->ref, 0);
1339                 cfqq->cfqd = cfqd;
1340                 /*
1341                  * set ->slice_left to allow preemption for a new process
1342                  */
1343                 cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
1344                 cfq_mark_cfqq_idle_window(cfqq);
1345                 cfq_mark_cfqq_prio_changed(cfqq);
1346                 cfq_mark_cfqq_queue_new(cfqq);
1347                 cfq_init_prio_data(cfqq);
1348         }
1349
1350         if (new_cfqq)
1351                 kmem_cache_free(cfq_pool, new_cfqq);
1352
1353         atomic_inc(&cfqq->ref);
1354 out:
1355         WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
1356         return cfqq;
1357 }
1358
1359 static void
1360 cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
1361 {
1362         WARN_ON(!list_empty(&cic->queue_list));
1363         rb_erase(&cic->rb_node, &ioc->cic_root);
1364         kmem_cache_free(cfq_ioc_pool, cic);
1365         elv_ioc_count_dec(ioc_count);
1366 }
1367
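/*
 * Look up the cfq_io_context for this device (keyed by cfqd) in the
 * io_context's rbtree, reaping entries whose queue has already exited.
 */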
1368 static struct cfq_io_context *
1369 cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
1370 {
1371         struct rb_node *n;
1372         struct cfq_io_context *cic;
1373         void *k, *key = cfqd;
1374
1375 restart:
1376         n = ioc->cic_root.rb_node;
1377         while (n) {
1378                 cic = rb_entry(n, struct cfq_io_context, rb_node);
1379                 /* ->key must be copied to avoid race with cfq_exit_queue() */
1380                 k = cic->key;
1381                 if (unlikely(!k)) {
1382                         cfq_drop_dead_cic(ioc, cic);
1383                         goto restart;
1384                 }
1385
1386                 if (key < k)
1387                         n = n->rb_left;
1388                 else if (key > k)
1389                         n = n->rb_right;
1390                 else
1391                         return cic;
1392         }
1393
1394         return NULL;
1395 }
1396
1397 static inline void
1398 cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
1399              struct cfq_io_context *cic)
1400 {
1401         struct rb_node **p;
1402         struct rb_node *parent;
1403         struct cfq_io_context *__cic;
1404         unsigned long flags;
1405         void *k;
1406
1407         cic->ioc = ioc;
1408         cic->key = cfqd;
1409
1410 restart:
1411         parent = NULL;
1412         p = &ioc->cic_root.rb_node;
1413         while (*p) {
1414                 parent = *p;
1415                 __cic = rb_entry(parent, struct cfq_io_context, rb_node);
1416                 /* ->key must be copied to avoid race with cfq_exit_queue() */
1417                 k = __cic->key;
1418                 if (unlikely(!k)) {
1419                         cfq_drop_dead_cic(ioc, __cic);
1420                         goto restart;
1421                 }
1422
1423                 if (cic->key < k)
1424                         p = &(*p)->rb_left;
1425                 else if (cic->key > k)
1426                         p = &(*p)->rb_right;
1427                 else
1428                         BUG();
1429         }
1430
1431         rb_link_node(&cic->rb_node, parent, p);
1432         rb_insert_color(&cic->rb_node, &ioc->cic_root);
1433
1434         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1435         list_add(&cic->queue_list, &cfqd->cic_list);
1436         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1437 }
1438
1439 /*
1440  * Setup general io context and cfq io context. There can be several cfq
1441  * io contexts per general io context, if this process is doing io to more
1442  * than one device managed by cfq.
1443  */
1444 static struct cfq_io_context *
1445 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1446 {
1447         struct io_context *ioc = NULL;
1448         struct cfq_io_context *cic;
1449
1450         might_sleep_if(gfp_mask & __GFP_WAIT);
1451
1452         ioc = get_io_context(gfp_mask, cfqd->queue->node);
1453         if (!ioc)
1454                 return NULL;
1455
1456         cic = cfq_cic_rb_lookup(cfqd, ioc);
1457         if (cic)
1458                 goto out;
1459
1460         cic = cfq_alloc_io_context(cfqd, gfp_mask);
1461         if (cic == NULL)
1462                 goto err;
1463
1464         cfq_cic_link(cfqd, ioc, cic);
1465 out:
1466         smp_read_barrier_depends();
1467         if (unlikely(ioc->ioprio_changed))
1468                 cfq_ioc_set_ioprio(ioc);
1469
1470         return cic;
1471 err:
1472         put_io_context(ioc);
1473         return NULL;
1474 }
1475
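/*
 * Update the decaying mean "think time" for this context: how long the task
 * takes to issue a new request after its previous one completed.
 */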
1476 static void
1477 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
1478 {
1479         unsigned long elapsed, ttime;
1480
1481         /*
1482          * if this context already has stuff queued, thinktime is from
1483          * last queue not last end
1484          */
1485 #if 0
1486         if (time_after(cic->last_end_request, cic->last_queue))
1487                 elapsed = jiffies - cic->last_end_request;
1488         else
1489                 elapsed = jiffies - cic->last_queue;
1490 #else
1491         elapsed = jiffies - cic->last_end_request;
1492 #endif
1493
1494         ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
1495
1496         cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
1497         cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
1498         cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
1499 }
1500
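/*
 * Maintain a decaying mean seek distance for this context, clamping each
 * sample so that a single large seek doesn't skew the average.
 */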
1501 static void
1502 cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
1503 {
1504         sector_t sdist;
1505         u64 total;
1506
1507         if (cic->last_request_pos < rq->sector)
1508                 sdist = rq->sector - cic->last_request_pos;
1509         else
1510                 sdist = cic->last_request_pos - rq->sector;
1511
1512         /*
1513          * Don't allow the seek distance to get too large from the
1514          * odd fragment, pagein, etc
1515          */
1516         if (cic->seek_samples <= 60) /* second&third seek */
1517                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
1518         else
1519                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
1520
1521         cic->seek_samples = (7*cic->seek_samples + 256) / 8;
1522         cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
1523         total = cic->seek_total + (cic->seek_samples/2);
1524         do_div(total, cic->seek_samples);
1525         cic->seek_mean = (sector_t)total;
1526 }
1527
1528 /*
1529  * Disable idle window if the process thinks too long or seeks so much that
1530  * it doesn't matter
1531  */
1532 static void
1533 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1534                        struct cfq_io_context *cic)
1535 {
1536         int enable_idle = cfq_cfqq_idle_window(cfqq);
1537
1538         if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
1539             (cfqd->hw_tag && CIC_SEEKY(cic)))
1540                 enable_idle = 0;
1541         else if (sample_valid(cic->ttime_samples)) {
1542                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
1543                         enable_idle = 0;
1544                 else
1545                         enable_idle = 1;
1546         }
1547
1548         if (enable_idle)
1549                 cfq_mark_cfqq_idle_window(cfqq);
1550         else
1551                 cfq_clear_cfqq_idle_window(cfqq);
1552 }
1553
1554
1555 /*
1556  * Check if new_cfqq should preempt the currently active queue. Return 0 if
1557  * not (or if we aren't sure); returning 1 will cause a preempt.
1558  */
1559 static int
1560 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
1561                    struct request *rq)
1562 {
1563         struct cfq_queue *cfqq = cfqd->active_queue;
1564
1565         if (cfq_class_idle(new_cfqq))
1566                 return 0;
1567
1568         if (!cfqq)
1569                 return 0;
1570
1571         if (cfq_class_idle(cfqq))
1572                 return 1;
1573         if (!cfq_cfqq_wait_request(new_cfqq))
1574                 return 0;
1575         /*
1576          * if it doesn't have slice left, forget it
1577          */
1578         if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
1579                 return 0;
1580         /*
1581          * if the new request is sync, but the currently running queue is
1582          * not, let the sync request have priority.
1583          */
1584         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
1585                 return 1;
1586         /*
1587          * So both queues are sync. Let the new request get disk time if
1588          * it's a metadata request and the current queue is doing regular IO.
1589          */
1590         if (rq_is_meta(rq) && !cfqq->meta_pending)
1591                 return 1;
1592
1593         return 0;
1594 }
1595
1596 /*
1597  * cfqq preempts the active queue. if we allowed preempt with no slice left,
1598  * let it have half of its nominal slice.
1599  */
1600 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1601 {
1602         cfq_slice_expired(cfqd, 1);
1603
1604         if (!cfqq->slice_left)
1605                 cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
1606
1607         /*
1608          * Put the new queue at the front of the current list,
1609          * so we know that it will be selected next.
1610          */
1611         BUG_ON(!cfq_cfqq_on_rr(cfqq));
1612         list_move(&cfqq->cfq_list, &cfqd->cur_rr);
1613
1614         cfqq->slice_end = cfqq->slice_left + jiffies;
1615 }
1616
1617 /*
1618  * Called when a new fs request (rq) is added (to cfqq). Check if there's
1619  * something we should do about it
1620  */
1621 static void
1622 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1623                 struct request *rq)
1624 {
1625         struct cfq_io_context *cic = RQ_CIC(rq);
1626
1627         if (rq_is_meta(rq))
1628                 cfqq->meta_pending++;
1629
1630         /*
1631          * check if this request is a better next-serve candidate
1632          */
1633         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
1634         BUG_ON(!cfqq->next_rq);
1635
1636         /*
1637          * we never wait for an async request and we don't allow preemption
1638          * of an async request. so just return early
1639          */
1640         if (!rq_is_sync(rq)) {
1641                 /*
1642                  * sync process issued an async request, if it's waiting
1643                  * then expire it and kick rq handling.
1644                  */
1645                 if (cic == cfqd->active_cic &&
1646                     del_timer(&cfqd->idle_slice_timer)) {
1647                         cfq_slice_expired(cfqd, 0);
1648                         blk_start_queueing(cfqd->queue);
1649                 }
1650                 return;
1651         }
1652
1653         cfq_update_io_thinktime(cfqd, cic);
1654         cfq_update_io_seektime(cic, rq);
1655         cfq_update_idle_window(cfqd, cfqq, cic);
1656
1657         cic->last_queue = jiffies;
1658         cic->last_request_pos = rq->sector + rq->nr_sectors;
1659
1660         if (cfqq == cfqd->active_queue) {
1661                 /*
1662                  * if we are waiting for a request for this queue, let it rip
1663                  * immediately and flag that we must not expire this queue
1664                  * just now
1665                  */
1666                 if (cfq_cfqq_wait_request(cfqq)) {
1667                         cfq_mark_cfqq_must_dispatch(cfqq);
1668                         del_timer(&cfqd->idle_slice_timer);
1669                         blk_start_queueing(cfqd->queue);
1670                 }
1671         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
1672                 /*
1673                  * not the active queue - expire current slice if it is
1674                  * idle and has expired its mean thinktime, or this new queue
1675                  * has some old slice time left and is of higher priority
1676                  */
1677                 cfq_preempt_queue(cfqd, cfqq);
1678                 cfq_mark_cfqq_must_dispatch(cfqq);
1679                 blk_start_queueing(cfqd->queue);
1680         }
1681 }
1682
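/*
 * a new request enters the elevator: sort it into the cfqq rbtree, add it
 * to the fifo for expiry handling and let cfq_rq_enqueued() decide whether
 * it should kick off dispatching
 */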
1683 static void cfq_insert_request(request_queue_t *q, struct request *rq)
1684 {
1685         struct cfq_data *cfqd = q->elevator->elevator_data;
1686         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1687
1688         cfq_init_prio_data(cfqq);
1689
1690         cfq_add_rq_rb(rq);
1691
1692         list_add_tail(&rq->queuelist, &cfqq->fifo);
1693
1694         cfq_rq_enqueued(cfqd, cfqq, rq);
1695 }
1696
1697 static void cfq_completed_request(request_queue_t *q, struct request *rq)
1698 {
1699         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1700         struct cfq_data *cfqd = cfqq->cfqd;
1701         const int sync = rq_is_sync(rq);
1702         unsigned long now;
1703
1704         now = jiffies;
1705
1706         WARN_ON(!cfqd->rq_in_driver);
1707         WARN_ON(!cfqq->on_dispatch[sync]);
1708         cfqd->rq_in_driver--;
1709         cfqq->on_dispatch[sync]--;
1710
1711         if (!cfq_class_idle(cfqq))
1712                 cfqd->last_end_request = now;
1713
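        /*
         * the queue has no more requests in flight, re-sort it within its
         * round robin list
         */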
1714         if (!cfq_cfqq_dispatched(cfqq) && cfq_cfqq_on_rr(cfqq))
1715                 cfq_resort_rr_list(cfqq, 0);
1716
1717         if (sync)
1718                 RQ_CIC(rq)->last_end_request = now;
1719
1720         /*
1721          * If this is the active queue, check if it needs to be expired,
1722          * or if we want to idle in case it has no pending requests.
1723          */
1724         if (cfqd->active_queue == cfqq) {
1725                 if (time_after(now, cfqq->slice_end))
1726                         cfq_slice_expired(cfqd, 0);
1727                 else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1728                         if (!cfq_arm_slice_timer(cfqd, cfqq))
1729                                 cfq_schedule_dispatch(cfqd);
1730                 }
1731         }
1732 }
1733
1734 /*
1735  * we temporarily boost lower priority queues if they are holding fs exclusive
1736  * resources. they are boosted to normal prio (CLASS_BE/4)
1737  */
1738 static void cfq_prio_boost(struct cfq_queue *cfqq)
1739 {
1740         const int ioprio_class = cfqq->ioprio_class;
1741         const int ioprio = cfqq->ioprio;
1742
1743         if (has_fs_excl()) {
1744                 /*
1745                  * boost idle prio on transactions that would lock out other
1746                  * users of the filesystem
1747                  */
1748                 if (cfq_class_idle(cfqq))
1749                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1750                 if (cfqq->ioprio > IOPRIO_NORM)
1751                         cfqq->ioprio = IOPRIO_NORM;
1752         } else {
1753                 /*
1754                  * check if we need to unboost the queue
1755                  */
1756                 if (cfqq->ioprio_class != cfqq->org_ioprio_class)
1757                         cfqq->ioprio_class = cfqq->org_ioprio_class;
1758                 if (cfqq->ioprio != cfqq->org_ioprio)
1759                         cfqq->ioprio = cfqq->org_ioprio;
1760         }
1761
1762         /*
1763          * refile between round-robin lists if the priority class or level changed
1764          */
1765         if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) &&
1766             cfq_cfqq_on_rr(cfqq))
1767                 cfq_resort_rr_list(cfqq, 0);
1768 }
1769
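/*
 * grant a guaranteed allocation (ELV_MQUEUE_MUST) to a queue that is idling
 * for a request or was flagged must_alloc; the must_alloc_slice flag makes
 * sure we only hand this out once until the flag is cleared again
 */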
1770 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
1771 {
1772         if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
1773             !cfq_cfqq_must_alloc_slice(cfqq)) {
1774                 cfq_mark_cfqq_must_alloc_slice(cfqq);
1775                 return ELV_MQUEUE_MUST;
1776         }
1777
1778         return ELV_MQUEUE_MAY;
1779 }
1780
1781 static int cfq_may_queue(request_queue_t *q, int rw)
1782 {
1783         struct cfq_data *cfqd = q->elevator->elevator_data;
1784         struct task_struct *tsk = current;
1785         struct cfq_queue *cfqq;
1786         unsigned int key;
1787
1788         key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
1789
1790         /*
1791          * don't force setup of a queue from here, as a call to may_queue
1792          * does not necessarily imply that a request actually will be queued.
1793          * so just lookup a possibly existing queue, or return 'may queue'
1794          * if that fails
1795          */
1796         cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
1797         if (cfqq) {
1798                 cfq_init_prio_data(cfqq);
1799                 cfq_prio_boost(cfqq);
1800
1801                 return __cfq_may_queue(cfqq);
1802         }
1803
1804         return ELV_MQUEUE_MAY;
1805 }
1806
1807 /*
1808  * queue lock held here
1809  */
1810 static void cfq_put_request(struct request *rq)
1811 {
1812         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1813
1814         if (cfqq) {
1815                 const int rw = rq_data_dir(rq);
1816
1817                 BUG_ON(!cfqq->allocated[rw]);
1818                 cfqq->allocated[rw]--;
1819
1820                 put_io_context(RQ_CIC(rq)->ioc);
1821
1822                 rq->elevator_private = NULL;
1823                 rq->elevator_private2 = NULL;
1824
1825                 cfq_put_queue(cfqq);
1826         }
1827 }
1828
1829 /*
1830  * Allocate cfq data structures associated with this request.
1831  */
1832 static int
1833 cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
1834 {
1835         struct cfq_data *cfqd = q->elevator->elevator_data;
1836         struct task_struct *tsk = current;
1837         struct cfq_io_context *cic;
1838         const int rw = rq_data_dir(rq);
1839         const int is_sync = rq_is_sync(rq);
1840         pid_t key = cfq_queue_pid(tsk, rw, is_sync);
1841         struct cfq_queue *cfqq;
1842         unsigned long flags;
1843
1844         might_sleep_if(gfp_mask & __GFP_WAIT);
1845
1846         cic = cfq_get_io_context(cfqd, gfp_mask);
1847
1848         spin_lock_irqsave(q->queue_lock, flags);
1849
1850         if (!cic)
1851                 goto queue_fail;
1852
1853         if (!cic->cfqq[is_sync]) {
1854                 cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
1855                 if (!cfqq)
1856                         goto queue_fail;
1857
1858                 cic->cfqq[is_sync] = cfqq;
1859         } else
1860                 cfqq = cic->cfqq[is_sync];
1861
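        /*
         * account the request against the queue and take a reference so the
         * cfqq stays around until cfq_put_request() drops it
         */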
1862         cfqq->allocated[rw]++;
1863         cfq_clear_cfqq_must_alloc(cfqq);
1864         atomic_inc(&cfqq->ref);
1865
1866         spin_unlock_irqrestore(q->queue_lock, flags);
1867
1868         rq->elevator_private = cic;
1869         rq->elevator_private2 = cfqq;
1870         return 0;
1871
1872 queue_fail:
1873         if (cic)
1874                 put_io_context(cic->ioc);
1875
1876         cfq_schedule_dispatch(cfqd);
1877         spin_unlock_irqrestore(q->queue_lock, flags);
1878         return 1;
1879 }
1880
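/*
 * unplug work handler: grab the queue lock and restart request processing
 * on the queue
 */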
1881 static void cfq_kick_queue(struct work_struct *work)
1882 {
1883         struct cfq_data *cfqd =
1884                 container_of(work, struct cfq_data, unplug_work);
1885         request_queue_t *q = cfqd->queue;
1886         unsigned long flags;
1887
1888         spin_lock_irqsave(q->queue_lock, flags);
1889         blk_start_queueing(q);
1890         spin_unlock_irqrestore(q->queue_lock, flags);
1891 }
1892
1893 /*
1894  * Timer running if the active_queue is currently idling inside its time slice
1895  */
1896 static void cfq_idle_slice_timer(unsigned long data)
1897 {
1898         struct cfq_data *cfqd = (struct cfq_data *) data;
1899         struct cfq_queue *cfqq;
1900         unsigned long flags;
1901
1902         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1903
1904         if ((cfqq = cfqd->active_queue) != NULL) {
1905                 unsigned long now = jiffies;
1906
1907                 /*
1908                  * expired
1909                  */
1910                 if (time_after(now, cfqq->slice_end))
1911                         goto expire;
1912
1913                 /*
1914                  * only expire and reinvoke request handler, if there are
1915                  * other queues with pending requests
1916                  */
1917                 if (!cfqd->busy_queues)
1918                         goto out_cont;
1919
1920                 /*
1921                  * not expired and it has a request pending, let it dispatch
1922                  */
1923                 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
1924                         cfq_mark_cfqq_must_dispatch(cfqq);
1925                         goto out_kick;
1926                 }
1927         }
1928 expire:
1929         cfq_slice_expired(cfqd, 0);
1930 out_kick:
1931         cfq_schedule_dispatch(cfqd);
1932 out_cont:
1933         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1934 }
1935
1936 /*
1937  * Timer running if an idle class queue is waiting for service
1938  */
1939 static void cfq_idle_class_timer(unsigned long data)
1940 {
1941         struct cfq_data *cfqd = (struct cfq_data *) data;
1942         unsigned long flags, end;
1943
1944         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1945
1946         /*
1947          * race with a non-idle queue: re-arm the timer if the idle grace period has not elapsed yet
1948          */
1949         end = cfqd->last_end_request + CFQ_IDLE_GRACE;
1950         if (!time_after_eq(jiffies, end))
1951                 mod_timer(&cfqd->idle_class_timer, end);
1952         else
1953                 cfq_schedule_dispatch(cfqd);
1954
1955         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1956 }
1957
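/*
 * make sure neither of our timers is still running and sync any pending
 * block layer work for this queue
 */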
1958 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
1959 {
1960         del_timer_sync(&cfqd->idle_slice_timer);
1961         del_timer_sync(&cfqd->idle_class_timer);
1962         blk_sync_queue(cfqd->queue);
1963 }
1964
1965 static void cfq_exit_queue(elevator_t *e)
1966 {
1967         struct cfq_data *cfqd = e->elevator_data;
1968         request_queue_t *q = cfqd->queue;
1969
1970         cfq_shutdown_timer_wq(cfqd);
1971
1972         spin_lock_irq(q->queue_lock);
1973
1974         if (cfqd->active_queue)
1975                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
1976
1977         while (!list_empty(&cfqd->cic_list)) {
1978                 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
1979                                                         struct cfq_io_context,
1980                                                         queue_list);
1981
1982                 __cfq_exit_single_io_context(cfqd, cic);
1983         }
1984
1985         spin_unlock_irq(q->queue_lock);
1986
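        /*
         * run the shutdown again in case exiting the io contexts re-armed
         * the timers or dispatch work
         */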
1987         cfq_shutdown_timer_wq(cfqd);
1988
1989         kfree(cfqd->cfq_hash);
1990         kfree(cfqd);
1991 }
1992
1993 static void *cfq_init_queue(request_queue_t *q)
1994 {
1995         struct cfq_data *cfqd;
1996         int i;
1997
1998         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
1999         if (!cfqd)
2000                 return NULL;
2001
2002         memset(cfqd, 0, sizeof(*cfqd));
2003
2004         for (i = 0; i < CFQ_PRIO_LISTS; i++)
2005                 INIT_LIST_HEAD(&cfqd->rr_list[i]);
2006
2007         INIT_LIST_HEAD(&cfqd->busy_rr);
2008         INIT_LIST_HEAD(&cfqd->cur_rr);
2009         INIT_LIST_HEAD(&cfqd->idle_rr);
2010         INIT_LIST_HEAD(&cfqd->cic_list);
2011
2012         cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
2013         if (!cfqd->cfq_hash)
2014                 goto out_free;
2015
2016         for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
2017                 INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
2018
2019         cfqd->queue = q;
2020
2021         init_timer(&cfqd->idle_slice_timer);
2022         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2023         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2024
2025         init_timer(&cfqd->idle_class_timer);
2026         cfqd->idle_class_timer.function = cfq_idle_class_timer;
2027         cfqd->idle_class_timer.data = (unsigned long) cfqd;
2028
2029         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
2030
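        /*
         * default tunables; the time based ones are kept in jiffies
         */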
2031         cfqd->cfq_quantum = cfq_quantum;
2032         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
2033         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
2034         cfqd->cfq_back_max = cfq_back_max;
2035         cfqd->cfq_back_penalty = cfq_back_penalty;
2036         cfqd->cfq_slice[0] = cfq_slice_async;
2037         cfqd->cfq_slice[1] = cfq_slice_sync;
2038         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2039         cfqd->cfq_slice_idle = cfq_slice_idle;
2040
2041         return cfqd;
2042 out_free:
2043         kfree(cfqd);
2044         return NULL;
2045 }
2046
2047 static void cfq_slab_kill(void)
2048 {
2049         if (cfq_pool)
2050                 kmem_cache_destroy(cfq_pool);
2051         if (cfq_ioc_pool)
2052                 kmem_cache_destroy(cfq_ioc_pool);
2053 }
2054
2055 static int __init cfq_slab_setup(void)
2056 {
2057         cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
2058                                         NULL, NULL);
2059         if (!cfq_pool)
2060                 goto fail;
2061
2062         cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
2063                         sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
2064         if (!cfq_ioc_pool)
2065                 goto fail;
2066
2067         return 0;
2068 fail:
2069         cfq_slab_kill();
2070         return -ENOMEM;
2071 }
2072
2073 /*
2074  * sysfs parts below -->
2075  */
2076
2077 static ssize_t
2078 cfq_var_show(unsigned int var, char *page)
2079 {
2080         return sprintf(page, "%d\n", var);
2081 }
2082
2083 static ssize_t
2084 cfq_var_store(unsigned int *var, const char *page, size_t count)
2085 {
2086         char *p = (char *) page;
2087
2088         *var = simple_strtoul(p, &p, 10);
2089         return count;
2090 }
2091
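/*
 * show helpers: values flagged with __CONV are stored in jiffies internally
 * and exported to sysfs in milliseconds
 */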
2092 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
2093 static ssize_t __FUNC(elevator_t *e, char *page)                        \
2094 {                                                                       \
2095         struct cfq_data *cfqd = e->elevator_data;                       \
2096         unsigned int __data = __VAR;                                    \
2097         if (__CONV)                                                     \
2098                 __data = jiffies_to_msecs(__data);                      \
2099         return cfq_var_show(__data, (page));                            \
2100 }
2101 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
2102 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
2103 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
2104 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
2105 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
2106 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2107 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2108 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
2109 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
2110 #undef SHOW_FUNCTION
2111
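/*
 * store helpers: clamp the user supplied value to [MIN, MAX] and convert it
 * from milliseconds to jiffies when __CONV is set
 */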
2112 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
2113 static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)    \
2114 {                                                                       \
2115         struct cfq_data *cfqd = e->elevator_data;                       \
2116         unsigned int __data;                                            \
2117         int ret = cfq_var_store(&__data, (page), count);                \
2118         if (__data < (MIN))                                             \
2119                 __data = (MIN);                                         \
2120         else if (__data > (MAX))                                        \
2121                 __data = (MAX);                                         \
2122         if (__CONV)                                                     \
2123                 *(__PTR) = msecs_to_jiffies(__data);                    \
2124         else                                                            \
2125                 *(__PTR) = __data;                                      \
2126         return ret;                                                     \
2127 }
2128 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
2129 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
2130 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
2131 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
2132 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
2133 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
2134 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2135 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
2136 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
2137 #undef STORE_FUNCTION
2138
2139 #define CFQ_ATTR(name) \
2140         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
2141
2142 static struct elv_fs_entry cfq_attrs[] = {
2143         CFQ_ATTR(quantum),
2144         CFQ_ATTR(fifo_expire_sync),
2145         CFQ_ATTR(fifo_expire_async),
2146         CFQ_ATTR(back_seek_max),
2147         CFQ_ATTR(back_seek_penalty),
2148         CFQ_ATTR(slice_sync),
2149         CFQ_ATTR(slice_async),
2150         CFQ_ATTR(slice_async_rq),
2151         CFQ_ATTR(slice_idle),
2152         __ATTR_NULL
2153 };
2154
2155 static struct elevator_type iosched_cfq = {
2156         .ops = {
2157                 .elevator_merge_fn =            cfq_merge,
2158                 .elevator_merged_fn =           cfq_merged_request,
2159                 .elevator_merge_req_fn =        cfq_merged_requests,
2160                 .elevator_allow_merge_fn =      cfq_allow_merge,
2161                 .elevator_dispatch_fn =         cfq_dispatch_requests,
2162                 .elevator_add_req_fn =          cfq_insert_request,
2163                 .elevator_activate_req_fn =     cfq_activate_request,
2164                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
2165                 .elevator_queue_empty_fn =      cfq_queue_empty,
2166                 .elevator_completed_req_fn =    cfq_completed_request,
2167                 .elevator_former_req_fn =       elv_rb_former_request,
2168                 .elevator_latter_req_fn =       elv_rb_latter_request,
2169                 .elevator_set_req_fn =          cfq_set_request,
2170                 .elevator_put_req_fn =          cfq_put_request,
2171                 .elevator_may_queue_fn =        cfq_may_queue,
2172                 .elevator_init_fn =             cfq_init_queue,
2173                 .elevator_exit_fn =             cfq_exit_queue,
2174                 .trim =                         cfq_free_io_context,
2175         },
2176         .elevator_attrs =       cfq_attrs,
2177         .elevator_name =        "cfq",
2178         .elevator_owner =       THIS_MODULE,
2179 };
2180
2181 static int __init cfq_init(void)
2182 {
2183         int ret;
2184
2185         /*
2186          * could be 0 on HZ < 1000 setups
2187          */
2188         if (!cfq_slice_async)
2189                 cfq_slice_async = 1;
2190         if (!cfq_slice_idle)
2191                 cfq_slice_idle = 1;
2192
2193         if (cfq_slab_setup())
2194                 return -ENOMEM;
2195
2196         ret = elv_register(&iosched_cfq);
2197         if (ret)
2198                 cfq_slab_kill();
2199
2200         return ret;
2201 }
2202
2203 static void __exit cfq_exit(void)
2204 {
2205         DECLARE_COMPLETION_ONSTACK(all_gone);
2206         elv_unregister(&iosched_cfq);
2207         ioc_gone = &all_gone;
2208         /* ioc_gone's update must be visible before reading ioc_count */
2209         smp_wmb();
2210         if (elv_ioc_count_read(ioc_count))
2211                 wait_for_completion(ioc_gone);
2212         synchronize_rcu();
2213         cfq_slab_kill();
2214 }
2215
2216 module_init(cfq_init);
2217 module_exit(cfq_exit);
2218
2219 MODULE_AUTHOR("Jens Axboe");
2220 MODULE_LICENSE("GPL");
2221 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");