/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>		/* kzalloc()/kfree() used below */
#include "blk-cgroup.h"
#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);
static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
void blkio_group_init(struct blkio_group *blkg)
{
	spin_lock_init(&blkg->stats_lock);
}
EXPORT_SYMBOL_GPL(blkio_group_init);
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
				bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
void blkiocg_update_set_active_queue_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_active_queue_stats);
#else	/* CONFIG_DEBUG_BLK_CGROUP */
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
void blkiocg_update_request_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_add_stats);
void blkiocg_update_request_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_remove_stats);
void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
void blkiocg_set_start_empty_time(struct blkio_group *blkg, bool ignore)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * If ignore is set, we do not panic on the empty flag being set
	 * already. This is to avoid cases where there are superfluous timeslice
	 * complete events (for eg., forced_dispatch in CFQ) when no IOs are
	 * served which could result in triggering the empty check incorrectly.
	 */
	BUG_ON(!ignore && blkio_blkg_empty(stats));
	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
/*
 * Account request completion: wait time covers start_time -> io_start_time
 * (time spent queued before dispatch), service time covers
 * io_start_time -> now (time spent on the device).
 */
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}
/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blk_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				  struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
					blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
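/*
 * Illustrative usage note (not part of the original source): the weight is
 * normally set from userspace through the "weight" cgroup file created
 * below, e.g., assuming the blkio controller is mounted at /cgroup/blkio:
 *
 *	echo 500 > /cgroup/blkio/mygroup/blkio.weight
 *
 * Values outside [BLKIO_WEIGHT_MIN, BLKIO_WEIGHT_MAX] are rejected with
 * -EINVAL by blkiocg_weight_write() above.
 */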
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		/* queued stats shouldn't be cleared */
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}
/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)		\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
		struct cftype *cftype, struct cgroup_map_cb *cb)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
	uint64_t cgroup_total = 0;					\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev) {					\
			spin_lock_irq(&blkg->stats_lock);		\
			cgroup_total += blkio_get_stat(blkg, cb,	\
						blkg->dev, type);	\
			spin_unlock_irq(&blkg->stats_lock);		\
		}							\
	}								\
	if (show_total)							\
		cb->fill(cb, "Total", cgroup_total);			\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
SHOW_FUNCTION_PER_GROUP(group_wait_time, BLKIO_STAT_GROUP_WAIT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(idle_time, BLKIO_STAT_IDLE_TIME, 0);
SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP
#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#endif
struct cftype blkio_files[] = {
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_map = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_map = blkiocg_sectors_read,
	},
	{
		.name = "io_service_bytes",
		.read_map = blkiocg_io_service_bytes_read,
	},
	{
		.name = "io_serviced",
		.read_map = blkiocg_io_serviced_read,
	},
	{
		.name = "io_service_time",
		.read_map = blkiocg_io_service_time_read,
	},
	{
		.name = "io_wait_time",
		.read_map = blkiocg_io_wait_time_read,
	},
	{
		.name = "io_merged",
		.read_map = blkiocg_io_merged_read,
	},
	{
		.name = "io_queued",
		.read_map = blkiocg_io_queued_read,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_map = blkiocg_avg_queue_size_read,
	},
	{
		.name = "group_wait_time",
		.read_map = blkiocg_group_wait_time_read,
	},
	{
		.name = "idle_time",
		.read_map = blkiocg_idle_time_read,
	},
	{
		.name = "empty_time",
		.read_map = blkiocg_empty_time_read,
	},
	{
		.name = "dequeue",
		.read_map = blkiocg_dequeue_read,
	},
#endif
};
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
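/*
 * Illustrative note (not part of the original source): cgroup exposes the
 * entries above as "blkio.<name>" files. The per-device map files use the
 * "major:minor <Read|Write|Sync|Async|Total> <value>" keys built by
 * blkio_get_key_name(), plus a per-cgroup "Total" row when show_total is
 * set, so reading e.g. blkio.io_serviced might look like (numbers are made
 * up for illustration):
 *
 *	8:16 Read 120
 *	8:16 Write 30
 *	8:16 Sync 100
 *	8:16 Async 50
 *	8:16 Total 150
 *	Total 150
 */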
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked as associated cgroup is going
	 * away. Let all the IO controlling policies know about this event.
	 *
	 * Currently this is static call to one io controlling policy. Once
	 * we have more policies in place, we need some dynamic registration
	 * of callback function.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);
	goto remove_entry;
done:
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support hierarchy deeper than two level (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}
/*
 * We cannot support shared io contexts, as we have no mean to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}
static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
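/*
 * Illustrative sketch (not part of the original source): an IO scheduler
 * that wants per-cgroup control registers a blkio_policy_type carrying the
 * two callbacks invoked from this file, roughly along these lines (the
 * "foo" names are hypothetical):
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_unlink_group_fn = foo_unlink_blkio_group,
 *			.blkio_update_group_weight_fn = foo_update_group_weight,
 *		},
 *	};
 *
 *	blkio_policy_register(&blkio_policy_foo);	at module init
 *	blkio_policy_unregister(&blkio_policy_foo);	at module exit
 */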
static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);

MODULE_LICENSE("GPL");