net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
/* ip_conntrack proc compat - based on ip_conntrack_standalone.c
 *
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <linux/security.h>
#include <net/net_namespace.h>

#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_acct.h>

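/*
 * Iterator state for /proc/net/ip_conntrack: remembers which bucket of
 * the per-netns conntrack hash table is currently being walked.
 */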
struct ct_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

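/*
 * Find the first entry by scanning hash buckets under RCU until a
 * non-empty nulls list is found.  ct_get_next() continues from a given
 * node; if the nulls value at the end of a chain does not match the
 * bucket being walked, the entry was moved to another chain during the
 * RCU walk and the current bucket is rescanned.
 */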
static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
{
        struct net *net = seq_file_net(seq);
        struct ct_iter_state *st = seq->private;
        struct hlist_nulls_node *n;

        for (st->bucket = 0;
             st->bucket < net->ct.htable_size;
             st->bucket++) {
                n = rcu_dereference(net->ct.hash[st->bucket].first);
                if (!is_a_nulls(n))
                        return n;
        }
        return NULL;
}

static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
                                      struct hlist_nulls_node *head)
{
        struct net *net = seq_file_net(seq);
        struct ct_iter_state *st = seq->private;

        head = rcu_dereference(head->next);
        while (is_a_nulls(head)) {
                if (likely(get_nulls_value(head) == st->bucket)) {
                        if (++st->bucket >= net->ct.htable_size)
                                return NULL;
                }
                head = rcu_dereference(net->ct.hash[st->bucket].first);
        }
        return head;
}

static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_nulls_node *head = ct_get_first(seq);

        if (head)
                while (pos && (head = ct_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

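/*
 * seq_file callbacks: the whole table walk runs inside one RCU read-side
 * critical section, taken in ->start() and dropped in ->stop().
 */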
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_get_idx(seq, *pos);
}

static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_get_next(s, v);
}

static void ct_seq_stop(struct seq_file *s, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

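/*
 * With CONFIG_NF_CONNTRACK_SECMARK the security context attached to the
 * connection is translated back to a string and printed as "secctx=...";
 * otherwise the helper is an inline no-op.
 */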
#ifdef CONFIG_NF_CONNTRACK_SECMARK
static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
        int ret;
        u32 len;
        char *secctx;

        ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
        if (ret)
                return ret;

        ret = seq_printf(s, "secctx=%s ", secctx);

        security_release_secctx(secctx, len);
        return ret;
}
#else
static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
        return 0;
}
#endif

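/*
 * Emit one connection in the legacy /proc/net/ip_conntrack format:
 * protocol name and number, remaining timeout, protocol state, the
 * original and reply tuples with optional per-direction accounting,
 * then status flags, mark, secctx and the reference count, e.g.
 * (illustrative only):
 *   tcp      6 431999 ESTABLISHED src=... dst=... sport=... dport=... \
 *   src=... dst=... sport=... dport=... [ASSURED] use=1
 */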
static int ct_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_tuple_hash *hash = v;
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
        const struct nf_conntrack_l3proto *l3proto;
        const struct nf_conntrack_l4proto *l4proto;
        int ret = 0;

        NF_CT_ASSERT(ct);
        if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
                return 0;

        /* we only want to print DIR_ORIGINAL */
        if (NF_CT_DIRECTION(hash))
                goto release;
        if (nf_ct_l3num(ct) != AF_INET)
                goto release;

        l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
        NF_CT_ASSERT(l3proto);
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        NF_CT_ASSERT(l4proto);

        ret = -ENOSPC;
        if (seq_printf(s, "%-8s %u %ld ",
                      l4proto->name, nf_ct_protonum(ct),
                      timer_pending(&ct->timeout)
                      ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
                goto release;

        if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
                goto release;

        if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                        l3proto, l4proto))
                goto release;

        if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
                goto release;

        if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
                if (seq_printf(s, "[UNREPLIED] "))
                        goto release;

        if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                        l3proto, l4proto))
                goto release;

        if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
                goto release;

        if (test_bit(IPS_ASSURED_BIT, &ct->status))
                if (seq_printf(s, "[ASSURED] "))
                        goto release;

#ifdef CONFIG_NF_CONNTRACK_MARK
        if (seq_printf(s, "mark=%u ", ct->mark))
                goto release;
#endif

        if (ct_show_secctx(s, ct))
                goto release;

        if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
                goto release;
        ret = 0;
release:
        nf_ct_put(ct);
        return ret;
}

static const struct seq_operations ct_seq_ops = {
        .start = ct_seq_start,
        .next  = ct_seq_next,
        .stop  = ct_seq_stop,
        .show  = ct_seq_show
};

static int ct_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &ct_seq_ops,
                            sizeof(struct ct_iter_state));
}

static const struct file_operations ct_file_ops = {
        .owner   = THIS_MODULE,
        .open    = ct_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};

/* expects */
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

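/*
 * /proc/net/ip_conntrack_expect: same bucket-walking pattern as above,
 * but over the expectation hash table (a plain hlist, no nulls markers).
 */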
static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(head->next);
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *exp;
        const struct hlist_node *n = v;

        exp = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (exp->tuple.src.l3num != AF_INET)
                return 0;

        if (exp->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&exp->timeout)
                           ? (long)(exp->timeout.expires - jiffies)/HZ : 0);
        else
                seq_printf(s, "- ");

        seq_printf(s, "proto=%u ", exp->tuple.dst.protonum);

        print_tuple(s, &exp->tuple,
                    __nf_ct_l3proto_find(exp->tuple.src.l3num),
                    __nf_ct_l4proto_find(exp->tuple.src.l3num,
                                         exp->tuple.dst.protonum));
        return seq_putc(s, '\n');
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &exp_seq_ops,
                            sizeof(struct ct_expect_iter_state));
}

static const struct file_operations ip_exp_file_ops = {
        .owner   = THIS_MODULE,
        .open    = exp_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};

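/*
 * /proc/net/stat/ip_conntrack: one line of per-cpu statistics for every
 * possible CPU.  *pos encodes "CPU index + 1"; position 0 returns
 * SEQ_START_TOKEN so that ->show() can emit the header line first.
 */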
static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct net *net = seq_file_net(seq);
        int cpu;

        if (*pos == 0)
                return SEQ_START_TOKEN;

        for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return per_cpu_ptr(net->ct.stat, cpu);
        }

        return NULL;
}

static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct net *net = seq_file_net(seq);
        int cpu;

        for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return per_cpu_ptr(net->ct.stat, cpu);
        }

        return NULL;
}

static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

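/*
 * Print the header row for the start token, otherwise one row of hex
 * counters for the given CPU; the first column is the global connection
 * count rather than a per-cpu value.
 */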
static int ct_cpu_seq_show(struct seq_file *seq, void *v)
{
        struct net *net = seq_file_net(seq);
        unsigned int nr_conntracks = atomic_read(&net->ct.count);
        const struct ip_conntrack_stat *st = v;

        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "entries  searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error  expect_new expect_create expect_delete search_restart\n");
                return 0;
        }

        seq_printf(seq, "%08x  %08x %08x %08x %08x %08x %08x %08x "
                        "%08x %08x %08x %08x %08x  %08x %08x %08x %08x\n",
                   nr_conntracks,
                   st->searched,
                   st->found,
                   st->new,
                   st->invalid,
                   st->ignore,
                   st->delete,
                   st->delete_list,
                   st->insert,
                   st->insert_failed,
                   st->drop,
                   st->early_drop,
                   st->error,

                   st->expect_new,
                   st->expect_create,
                   st->expect_delete,
                   st->search_restart
                );
        return 0;
}

static const struct seq_operations ct_cpu_seq_ops = {
        .start  = ct_cpu_seq_start,
        .next   = ct_cpu_seq_next,
        .stop   = ct_cpu_seq_stop,
        .show   = ct_cpu_seq_show,
};

static int ct_cpu_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &ct_cpu_seq_ops,
                            sizeof(struct seq_net_private));
}

static const struct file_operations ct_cpu_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = ct_cpu_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};

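/*
 * Per-namespace setup: create the three compat proc files
 * (/proc/net/ip_conntrack, /proc/net/ip_conntrack_expect and
 * /proc/net/stat/ip_conntrack), unwinding on failure.
 */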
static int __net_init ip_conntrack_net_init(struct net *net)
{
        struct proc_dir_entry *proc, *proc_exp, *proc_stat;

        proc = proc_net_fops_create(net, "ip_conntrack", 0440, &ct_file_ops);
        if (!proc)
                goto err1;

        proc_exp = proc_net_fops_create(net, "ip_conntrack_expect", 0440,
                                        &ip_exp_file_ops);
        if (!proc_exp)
                goto err2;

        proc_stat = proc_create("ip_conntrack", S_IRUGO,
                                net->proc_net_stat, &ct_cpu_seq_fops);
        if (!proc_stat)
                goto err3;
        return 0;

err3:
        proc_net_remove(net, "ip_conntrack_expect");
err2:
        proc_net_remove(net, "ip_conntrack");
err1:
        return -ENOMEM;
}

static void __net_exit ip_conntrack_net_exit(struct net *net)
{
        remove_proc_entry("ip_conntrack", net->proc_net_stat);
        proc_net_remove(net, "ip_conntrack_expect");
        proc_net_remove(net, "ip_conntrack");
}

static struct pernet_operations ip_conntrack_net_ops = {
        .init = ip_conntrack_net_init,
        .exit = ip_conntrack_net_exit,
};

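/*
 * Registration hooks for the compat interface: the proc files are set up
 * as a pernet subsystem so every network namespace gets its own copies.
 */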
int __init nf_conntrack_ipv4_compat_init(void)
{
        return register_pernet_subsys(&ip_conntrack_net_ops);
}

void __exit nf_conntrack_ipv4_compat_fini(void)
{
        unregister_pernet_subsys(&ip_conntrack_net_ops);
}