/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100
static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);
static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* Which policy owns the file: proportional weight or throttling */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
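/*
 * Illustration (not part of the original source): for the throttling
 * policy's read_bps file, cft->private would be built as
 *
 *	BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, BLKIO_THROTL_read_bps_device)
 *
 * so the owning policy id lands in the upper 16 bits and the per-policy
 * file attribute in the lower 16 bits; BLKIOFILE_POLICY() and
 * BLKIOFILE_ATTR() simply undo that packing in the read/write handlers.
 */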
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
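/*
 * Typical usage from userspace (illustrative; this follows the standard
 * cgroup mount procedure rather than anything specific to this file):
 *
 *	# mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
 *	# mkdir /sys/fs/cgroup/blkio/group1
 *
 * after which the per-policy files declared in blkio_files[] further down
 * appear inside group1/.
 */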
static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}
static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}
/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}
/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}
/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
			 enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);
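/*
 * Illustrative caller pattern (a sketch of how policies such as CFQ and
 * blk-throttle use the helper above; not code from this file): the owning
 * cgroup of the current task is resolved under RCU.
 *
 *	rcu_read_lock();
 *	blkcg = task_blkio_cgroup(current);
 *	...
 *	rcu_read_unlock();
 */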
static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
							blkg, weight);
	}
}
static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
								blkg, bps);
	}
}
static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
								blkg, iops);
	}
}
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
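/*
 * The direction/sync flags are derived by callers from the request being
 * accounted, roughly as follows (illustrative, not taken from this file):
 *
 *	direction = rq_data_dir(rq);	READ or WRITE
 *	sync = rq_is_sync(rq);
 *
 * so every sample lands in exactly one of Read/Write and one of Sync/Async.
 */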
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
	    stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				   unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
/*
 * This function allocates the per cpu stats for blkio_group. Should be called
 * from sleepable context as alloc_percpu() requires that.
 */
int blkio_alloc_blkg_stats(struct blkio_group *blkg)
{
	/* Allocate memory for per cpu stats */
	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	if (!blkg->stats_cpu)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
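/*
 * Rough order of operations a policy follows when it sets up a new group
 * (modelled on how CFQ and blk-throttle use this interface; names here are
 * illustrative and error handling is omitted):
 *
 *	allocate the policy's own group object, which embeds a blkio_group
 *	blkio_alloc_blkg_stats(&grp->blkg);	from sleepable context
 *	blkiocg_add_blkio_group(blkcg, &grp->blkg, key, dev, plid);
 */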
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			     struct blkio_group *blkg, void *key, dev_t dev,
			     enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
}
/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blk_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
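/*
 * Example (a sketch of how a policy uses the lookup; names are illustrative):
 * the "key" is whatever per-queue pointer the policy passed to
 * blkiocg_add_blkio_group(), e.g. its private per-device data.
 *
 *	rcu_read_lock();
 *	blkg = blkiocg_lookup_group(blkcg, key);
 *	rcu_read_unlock();
 */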
static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;
	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);

		/* Reset Per cpu stats which don't take blkg->stats_lock */
		blkio_reset_stats_cpu(blkg);
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
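/*
 * The resulting keys are what show up in the map-style cgroup files, e.g. a
 * line of blkio.io_service_bytes looks roughly like (illustrative values):
 *
 *	8:16 Read 1048576
 *
 * i.e. major:minor of the disk, the sub-type suffix appended above, and the
 * value filled in by the callers below.
 */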
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}
static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}
static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		val = blkio_read_stat_cpu(blkg, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.unaccounted_time, cb, dev);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}
static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor, temp;
	int i = 0;
	dev_t dev;
	u64 bps, iops;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;
		s[i++] = p;

		/* Prevent from inputting too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		ret = strict_strtoul(s[1], 10, &temp);
		if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		    temp > BLKIO_WEIGHT_MAX)
			return -EINVAL;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			ret = strict_strtoull(s[1], 10, &bps);
			if (ret)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			ret = strict_strtoull(s[1], 10, &iops);
			if (ret)
				return -EINVAL;

			if (iops > THROTL_IOPS_MAX)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)iops;
			break;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
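/*
 * The accepted input is "major:minor value", e.g. (illustrative device
 * numbers):
 *
 *	echo "8:16 500"     > blkio.weight_device
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * Input that does not split into exactly two fields, names a non-existent
 * device, or carries an out-of-range value is rejected.
 */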
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device);
	if (pn)
		return pn->val.weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);
uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}
/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
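/*
 * In other words, writing a value of 0 for an existing device removes the
 * rule, e.g. (illustrative device numbers):
 *
 *	echo "8:16 0" > blkio.throttle.read_bps_device
 *
 * clears any read bandwidth limit previously configured for device 8:16.
 */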
static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
				     struct blkio_policy_node *newpn)
{
	switch (oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch (newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}
/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
				blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}
/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
				struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		kfree(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}
static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}
/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}
static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			if (pcpu)
				cgroup_total += blkio_get_stat_cpu(blkg, cb,
						blkg->dev, type);
			else {
				spin_lock_irq(&blkg->stats_lock);
				cgroup_total += blkio_get_stat(blkg, cb,
						blkg->dev, type);
				spin_unlock_irq(&blkg->stats_lock);
			}
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}
/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_MERGED, 1, 0);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
static u64 blkiocg_file_read_u64 (struct cgroup *cgrp, struct cftype *cft) {
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}
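/*
 * Quick reference for the user-visible files declared below (illustrative
 * values; see Documentation/cgroups/blkio-controller.txt for details):
 *
 *	echo 500 > blkio.weight			     default weight of the group
 *	echo "8:16 300" > blkio.weight_device	     per-device override
 *	echo "8:16 1048576" > blkio.throttle.write_bps_device
 *	cat blkio.io_serviced			     per-device, per-type counters
 */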
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
			      struct cgroup *cgroup, struct task_struct *tsk,
			      bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}
static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
			   struct cgroup *prev, struct task_struct *tsk,
			   bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
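/*
 * A blkio policy (CFQ's proportional weight code, or the block throttling
 * code) hooks itself up roughly like this (a sketch; the exact ops each
 * policy wires up differ, and "foo" is a hypothetical name):
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_unlink_group_fn		= foo_unlink_blkio_group,
 *			.blkio_update_group_weight_fn	= foo_update_group_weight,
 *		},
 *		.plid = BLKIO_POLICY_PROP,
 *	};
 *
 *	blkio_policy_register(&blkio_policy_foo);	at init
 *	blkio_policy_unregister(&blkio_policy_foo);	at exit
 */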
static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");