/*
 * net/sched/cls_cgroup.c       Control Group Classifier
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/cgroup.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>

struct cgroup_cls_state {
        struct cgroup_subsys_state css;
        u32 classid;
};

static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
                                               struct cgroup *cgrp);
static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);

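/*
 * When the classifier is built into the kernel, net_cls_subsys_id is a
 * compile-time constant from the cgroup subsys id enum.  When built as a
 * module, the id is assigned dynamically by cgroup_load_subsys(), so the
 * macro below aliases net_cls_subsys_id to the id stored in the structure
 * itself.
 */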
struct cgroup_subsys net_cls_subsys = {
        .name           = "net_cls",
        .create         = cgrp_create,
        .destroy        = cgrp_destroy,
        .populate       = cgrp_populate,
#ifdef CONFIG_NET_CLS_CGROUP
        .subsys_id      = net_cls_subsys_id,
#else
#define net_cls_subsys_id net_cls_subsys.subsys_id
#endif
        .module         = THIS_MODULE,
};

static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
{
        return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
                            struct cgroup_cls_state, css);
}

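/*
 * task_subsys_state() dereferences RCU-protected state, so callers of
 * task_cls_state() must hold rcu_read_lock() (as cls_cgroup_classify()
 * below does).
 */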
static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
        return container_of(task_subsys_state(p, net_cls_subsys_id),
                            struct cgroup_cls_state, css);
}

static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
                                               struct cgroup *cgrp)
{
        struct cgroup_cls_state *cs;

        cs = kzalloc(sizeof(*cs), GFP_KERNEL);
        if (!cs)
                return ERR_PTR(-ENOMEM);

        /* A new cgroup starts out with its parent's classid. */
        if (cgrp->parent)
                cs->classid = cgrp_cls_state(cgrp->parent)->classid;

        return &cs->css;
}

static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
        kfree(cgrp_cls_state(cgrp));
}

static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
{
        return cgrp_cls_state(cgrp)->classid;
}

static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
{
        cgrp_cls_state(cgrp)->classid = (u32) value;
        return 0;
}

static struct cftype ss_files[] = {
        {
                .name = "classid",
                .read_u64 = read_classid,
                .write_u64 = write_classid,
        },
};

static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
        return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
}

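/*
 * Userspace usage sketch (device name and paths are illustrative, not
 * mandated by this file).  The "classid" file above shows up in the
 * cgroup filesystem as net_cls.classid and takes a 32-bit major:minor
 * handle encoded as 0xAAAABBBB, e.g. 0x00100001 for 10:1:
 *
 *   mount -t cgroup -o net_cls net_cls /sys/fs/cgroup/net_cls
 *   mkdir /sys/fs/cgroup/net_cls/foo
 *   echo 0x00100001 > /sys/fs/cgroup/net_cls/foo/net_cls.classid
 *   tc filter add dev eth0 parent 10: protocol ip prio 10 handle 1: cgroup
 */
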
struct cls_cgroup_head {
        u32                     handle;
        struct tcf_exts         exts;
        struct tcf_ematch_tree  ematches;
};

static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
                               struct tcf_result *res)
{
        struct cls_cgroup_head *head = tp->root;
        u32 classid;

        /*
         * Due to the nature of the classifier it is required to ignore all
         * packets originating from softirq context as accessing `current'
         * would lead to false results.
         *
         * This test assumes that all callers of dev_queue_xmit() explicitly
         * disable bh. Knowing this, it is possible to detect softirq based
         * calls by looking at the number of nested bh disable calls because
         * softirqs always disable bh.
         */
        if (softirq_count() != SOFTIRQ_OFFSET)
                return -1;

        rcu_read_lock();
        classid = task_cls_state(current)->classid;
        rcu_read_unlock();

        if (!classid)
                return -1;

        if (!tcf_em_tree_match(skb, &head->ematches, NULL))
                return -1;

        res->classid = classid;
        res->class = 0;
        return tcf_exts_exec(skb, &head->exts, res);
}

/*
 * Individual elements cannot be looked up or reference counted; the
 * classifier keeps a single implicit instance per tcf_proto, so get()
 * and put() are no-ops.
 */
static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
        return 0UL;
}

static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
        return 0;
}

static const struct tcf_ext_map cgroup_ext_map = {
        .action = TCA_CGROUP_ACT,
        .police = TCA_CGROUP_POLICE,
};

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
        [TCA_CGROUP_EMATCHES]   = { .type = NLA_NESTED },
};

static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
                             u32 handle, struct nlattr **tca,
                             unsigned long *arg)
{
        struct nlattr *tb[TCA_CGROUP_MAX + 1];
        struct cls_cgroup_head *head = tp->root;
        struct tcf_ematch_tree t;
        struct tcf_exts e;
        int err;

        if (!tca[TCA_OPTIONS])
                return -EINVAL;

        if (head == NULL) {
                if (!handle)
                        return -EINVAL;

                head = kzalloc(sizeof(*head), GFP_KERNEL);
                if (head == NULL)
                        return -ENOBUFS;

                head->handle = handle;

                tcf_tree_lock(tp);
                tp->root = head;
                tcf_tree_unlock(tp);
        }

        if (handle != head->handle)
                return -ENOENT;

        err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
                               cgroup_policy);
        if (err < 0)
                return err;

        err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &cgroup_ext_map);
        if (err < 0)
                return err;

        err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
        if (err < 0) {
                /* Drop the already validated extensions on error. */
                tcf_exts_destroy(tp, &e);
                return err;
        }

        tcf_exts_change(tp, &head->exts, &e);
        tcf_em_tree_change(tp, &head->ematches, &t);

        return 0;
}

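/*
 * There is only one cls_cgroup_head per tcf_proto: change() creates it on
 * first use and destroy() below tears down the whole instance.  This is
 * why delete() rejects removal of individual elements and walk() visits
 * exactly one entry.
 */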
static void cls_cgroup_destroy(struct tcf_proto *tp)
{
        struct cls_cgroup_head *head = tp->root;

        if (head) {
                tcf_exts_destroy(tp, &head->exts);
                tcf_em_tree_destroy(tp, &head->ematches);
                kfree(head);
        }
}

static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
        return -EOPNOTSUPP;
}

static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct cls_cgroup_head *head = tp->root;

        if (arg->count < arg->skip)
                goto skip;

        if (arg->fn(tp, (unsigned long) head, arg) < 0) {
                arg->stop = 1;
                return;
        }
skip:
        arg->count++;
}

static int cls_cgroup_dump(struct tcf_proto *tp, unsigned long fh,
                           struct sk_buff *skb, struct tcmsg *t)
{
        struct cls_cgroup_head *head = tp->root;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;

        t->tcm_handle = head->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &head->exts, &cgroup_ext_map) < 0 ||
            tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &head->exts, &cgroup_ext_map) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
        .kind           =       "cgroup",
        .init           =       cls_cgroup_init,
        .change         =       cls_cgroup_change,
        .classify       =       cls_cgroup_classify,
        .destroy        =       cls_cgroup_destroy,
        .get            =       cls_cgroup_get,
        .put            =       cls_cgroup_put,
        .delete         =       cls_cgroup_delete,
        .walk           =       cls_cgroup_walk,
        .dump           =       cls_cgroup_dump,
        .owner          =       THIS_MODULE,
};

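/*
 * Module lifecycle: register the classifier ops first, then load the
 * net_cls cgroup subsystem.  If loading the subsystem fails, unregister
 * the ops again so the module comes up all-or-nothing; exit reverses
 * both steps.
 */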
static int __init init_cgroup_cls(void)
{
        int ret;

        ret = register_tcf_proto_ops(&cls_cgroup_ops);
        if (ret)
                return ret;

        ret = cgroup_load_subsys(&net_cls_subsys);
        if (ret)
                unregister_tcf_proto_ops(&cls_cgroup_ops);

        return ret;
}

static void __exit exit_cgroup_cls(void)
{
        unregister_tcf_proto_ops(&cls_cgroup_ops);
        cgroup_unload_subsys(&net_cls_subsys);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");