/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

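/* Expectation table size and the global expectation limit.
 * nf_ct_expect_hsize can only be set at boot via the expect_hashsize
 * module parameter declared below; nf_ct_expect_max defaults to four
 * times the hash size (see nf_conntrack_expect_init()).
 */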
unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;

static HLIST_HEAD(nf_ct_userspace_expect_list);

/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
                                u32 pid, int report)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);

        NF_CT_ASSERT(!timer_pending(&exp->timeout));

        hlist_del_rcu(&exp->hnode);
        net->ct.expect_count--;

        hlist_del(&exp->lnode);
        if (!(exp->flags & NF_CT_EXPECT_USERSPACE))
                master_help->expecting[exp->class]--;

        nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report);
        nf_ct_expect_put(exp);

        NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

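/* Timer callback: the expectation expired without being matched.
 * Unlink it under nf_conntrack_lock and drop the reference that the
 * timer was holding.
 */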
static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
        struct nf_conntrack_expect *exp = (void *)ul_expect;

        spin_lock_bh(&nf_conntrack_lock);
        nf_ct_unlink_expect(exp);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_expect_put(exp);
}

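/* Hash the destination part of a tuple into the expectation table:
 * jhash2 over the destination address, keyed with the protocol
 * numbers and destination port.  The final multiply-and-shift scales
 * the 32-bit hash into [0, nf_ct_expect_hsize) without a division.
 */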
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash;

        if (unlikely(!nf_conntrack_hash_rnd))
                init_nf_conntrack_hash_rnd();

        hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
                      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
                       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
        return ((u64)hash * nf_ct_expect_hsize) >> 32;
}

struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
                    const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;
        struct hlist_node *n;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
                if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone(i->master) == zone)
                        return i;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
                      const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        rcu_read_lock();
        i = __nf_ct_expect_find(net, zone, tuple);
        if (i && !atomic_inc_not_zero(&i->use))
                i = NULL;
        rcu_read_unlock();

        return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it gets deleted from
 * the global list and then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
                       const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i, *exp = NULL;
        struct hlist_node *n;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone(i->master) == zone) {
                        exp = i;
                        break;
                }
        }
        if (!exp)
                return NULL;

        /* If the master is not in the hash table yet (ie. the packet
           hasn't left this machine yet), how can the other end know
           about the expected connection?  Hence these are not the
           droids you are looking for (if the master ct never got
           confirmed, we'd hold a reference to it and weird things
           would happen to future packets). */
        if (!nf_ct_is_confirmed(exp->master))
                return NULL;

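        /* A PERMANENT expectation stays in the table and only gains a
         * reference; a one-shot expectation is unlinked, but only if we
         * beat its timer (otherwise the timeout handler will free it). */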
        if (exp->flags & NF_CT_EXPECT_PERMANENT) {
                atomic_inc(&exp->use);
                return exp;
        } else if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                return exp;
        }

        return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
        struct hlist_node *n, *next;

        /* Optimization: most connections never expect any others. */
        if (!help)
                return;

        hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
                               const struct nf_conntrack_expect *b)
{
        /* The parts covered by the intersection of the two masks must
           differ, otherwise the expectations clash. */
        struct nf_conntrack_tuple_mask intersect_mask;
        int count;

        intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
                intersect_mask.src.u3.all[count] =
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }

        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}

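/* Exact identity, as opposed to a clash: same master, class, tuple,
 * mask and zone.  Used to detect a re-request of an expectation that
 * already exists. */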
static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
        return a->master == b->master && a->class == b->class &&
                nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
                nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
                nf_ct_zone(a->master) == nf_ct_zone(b->master);
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
        spin_lock_bh(&nf_conntrack_lock);
        if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                nf_ct_expect_put(exp);
        }
        spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't take a reference on the master conntrack for unfulfilled
 * expectations.  During conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
        struct nf_conntrack_expect *new;

        new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
        if (!new)
                return NULL;

        new->master = me;
        atomic_set(&new->use, 1);
        return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

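/* Fill in the tuple and mask of an expectation.  A NULL saddr or src
 * leaves the corresponding mask zeroed, so any source address or port
 * will match. */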
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
                       u_int8_t family,
                       const union nf_inet_addr *saddr,
                       const union nf_inet_addr *daddr,
                       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
        int len;

        if (family == AF_INET)
                len = 4;
        else
                len = 16;

        exp->flags = 0;
        exp->class = class;
        exp->expectfn = NULL;
        exp->helper = NULL;
        exp->tuple.src.l3num = family;
        exp->tuple.dst.protonum = proto;

        if (saddr) {
                memcpy(&exp->tuple.src.u3, saddr, len);
                if (sizeof(exp->tuple.src.u3) > len)
                        /* address needs to be cleared for nf_ct_tuple_equal */
                        memset((void *)&exp->tuple.src.u3 + len, 0x00,
                               sizeof(exp->tuple.src.u3) - len);
                memset(&exp->mask.src.u3, 0xFF, len);
                if (sizeof(exp->mask.src.u3) > len)
                        memset((void *)&exp->mask.src.u3 + len, 0x00,
                               sizeof(exp->mask.src.u3) - len);
        } else {
                memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
                memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
        }

        if (src) {
                exp->tuple.src.u.all = *src;
                exp->mask.src.u.all = htons(0xFFFF);
        } else {
                exp->tuple.src.u.all = 0;
                exp->mask.src.u.all = 0;
        }

        memcpy(&exp->tuple.dst.u3, daddr, len);
        if (sizeof(exp->tuple.dst.u3) > len)
                /* address needs to be cleared for nf_ct_tuple_equal */
                memset((void *)&exp->tuple.dst.u3 + len, 0x00,
                       sizeof(exp->tuple.dst.u3) - len);

        exp->tuple.dst.u.all = *dst;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
        struct nf_conntrack_expect *exp;

        exp = container_of(head, struct nf_conntrack_expect, rcu);
        kmem_cache_free(nf_ct_expect_cachep, exp);
}

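/* Drop a reference; the final put frees the expectation via RCU so
 * that lockless readers in __nf_ct_expect_find() can still safely
 * dereference it. */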
void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
        if (atomic_dec_and_test(&exp->use))
                call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

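/* Link the expectation into the master's helper list (or the global
 * userspace list) and into the hash table, then arm its timeout.
 * Note: when there is no helper, exp->timeout.expires is presumably
 * set up by the caller before insertion. */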
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);
        const struct nf_conntrack_expect_policy *p;
        unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

        /* two references: one for hash insert, one for the timer */
        atomic_add(2, &exp->use);

        if (master_help) {
                hlist_add_head(&exp->lnode, &master_help->expectations);
                master_help->expecting[exp->class]++;
        } else if (exp->flags & NF_CT_EXPECT_USERSPACE)
                hlist_add_head(&exp->lnode, &nf_ct_userspace_expect_list);

        hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
        net->ct.expect_count++;

        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
                    (unsigned long)exp);
        if (master_help) {
                p = &rcu_dereference_protected(
                                master_help->helper,
                                lockdep_is_held(&nf_conntrack_lock)
                                )->expect_policy[exp->class];
                exp->timeout.expires = jiffies + p->timeout * HZ;
        }
        add_timer(&exp->timeout);

        NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK.
 * Expectations are added at the head of the helper's list, so the last
 * entry with a matching class is the oldest one. */
static void evict_oldest_expect(struct nf_conn *master,
                                struct nf_conntrack_expect *new)
{
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_expect *exp, *last = NULL;
        struct hlist_node *n;

        hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
                if (exp->class == new->class)
                        last = exp;
        }

        if (last && del_timer(&last->timeout)) {
                nf_ct_unlink_expect(last);
                nf_ct_expect_put(last);
        }
}

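/* Try to re-arm the timer of an existing expectation.  Returns 0 if
 * the timer has already fired (the expectation is being destroyed),
 * 1 if it was refreshed with the helper policy timeout. */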
static inline int refresh_timer(struct nf_conntrack_expect *i)
{
        struct nf_conn_help *master_help = nfct_help(i->master);
        const struct nf_conntrack_expect_policy *p;

        if (!del_timer(&i->timeout))
                return 0;

        p = &rcu_dereference_protected(
                master_help->helper,
                lockdep_is_held(&nf_conntrack_lock)
                )->expect_policy[i->class];
        i->timeout.expires = jiffies + p->timeout * HZ;
        add_timer(&i->timeout);
        return 1;
}

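/* Validate an expectation before insertion.  Returns 1 if it should be
 * inserted, 0 if an identical expectation was found and refreshed
 * instead, or a negative error on a clash or when a per-class or
 * global limit is hit; callers treat ret <= 0 as "do not insert". */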
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
        const struct nf_conntrack_expect_policy *p;
        struct nf_conntrack_expect *i;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        struct net *net = nf_ct_exp_net(expect);
        struct hlist_node *n;
        unsigned int h;
        int ret = 1;

        /* Don't allow expectations created from kernel-space with no helper */
        if (!(expect->flags & NF_CT_EXPECT_USERSPACE) &&
            (!master_help || !master_help->helper)) {
                ret = -ESHUTDOWN;
                goto out;
        }
        h = nf_ct_expect_dst_hash(&expect->tuple);
        hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
                        /* Refresh timer: if it's dying, ignore. */
                        if (refresh_timer(i)) {
                                ret = 0;
                                goto out;
                        }
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        /* Will be over limit? */
        if (master_help) {
                p = &rcu_dereference_protected(
                        master_help->helper,
                        lockdep_is_held(&nf_conntrack_lock)
                        )->expect_policy[expect->class];
                if (p->max_expected &&
                    master_help->expecting[expect->class] >= p->max_expected) {
                        evict_oldest_expect(master, expect);
                        if (master_help->expecting[expect->class]
                                                >= p->max_expected) {
                                ret = -EMFILE;
                                goto out;
                        }
                }
        }

        if (net->ct.expect_count >= nf_ct_expect_max) {
                if (net_ratelimit())
                        printk(KERN_WARNING
                               "nf_conntrack: expectation table full\n");
                ret = -EMFILE;
        }
out:
        return ret;
}

int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                                u32 pid, int report)
{
        int ret;

        spin_lock_bh(&nf_conntrack_lock);
        ret = __nf_ct_expect_check(expect);
        if (ret <= 0)
                goto out;

        ret = 0;
        nf_ct_expect_insert(expect);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
        return ret;
out:
        spin_unlock_bh(&nf_conntrack_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

void nf_ct_remove_userspace_expectations(void)
{
        struct nf_conntrack_expect *exp;
        struct hlist_node *n, *next;

        hlist_for_each_entry_safe(exp, n, next,
                                  &nf_ct_userspace_expect_list, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
}
EXPORT_SYMBOL_GPL(nf_ct_remove_userspace_expectations);

#ifdef CONFIG_PROC_FS
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

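/* seq_file iterator for /proc/net/nf_conntrack_expect: walk the hash
 * buckets under the rcu_read_lock() taken in exp_seq_start(). */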
static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *expect;
        struct nf_conntrack_helper *helper;
        struct hlist_node *n = v;
        char *delim = "";

        expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (expect->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&expect->timeout)
                           ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
        else
                seq_printf(s, "- ");
        seq_printf(s, "l3proto = %u proto=%u ",
                   expect->tuple.src.l3num,
                   expect->tuple.dst.protonum);
        print_tuple(s, &expect->tuple,
                    __nf_ct_l3proto_find(expect->tuple.src.l3num),
                    __nf_ct_l4proto_find(expect->tuple.src.l3num,
                                         expect->tuple.dst.protonum));

        if (expect->flags & NF_CT_EXPECT_PERMANENT) {
                seq_printf(s, "PERMANENT");
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_INACTIVE) {
                seq_printf(s, "%sINACTIVE", delim);
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_USERSPACE)
                seq_printf(s, "%sUSERSPACE", delim);

        helper = rcu_dereference(nfct_help(expect->master)->helper);
        if (helper) {
                seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
                if (helper->expect_policy[expect->class].name)
                        seq_printf(s, "/%s",
                                   helper->expect_policy[expect->class].name);
        }

        return seq_putc(s, '\n');
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &exp_seq_ops,
                            sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
        .owner   = THIS_MODULE,
        .open    = exp_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};
#endif /* CONFIG_PROC_FS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *proc;

        proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
        if (!proc)
                return -ENOMEM;
#endif /* CONFIG_PROC_FS */
        return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_PROC_FS
        proc_net_remove(net, "nf_conntrack_expect");
#endif /* CONFIG_PROC_FS */
}

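/* Boot/load-time tunable only: mode 0400 keeps the parameter read-only
 * at runtime, as the expectation table is not resized. */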
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

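/* Per-namespace setup.  The hash size, expectation limit and the slab
 * cache are globals shared by all namespaces, so they are only set up
 * for the initial one. */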
int nf_conntrack_expect_init(struct net *net)
{
        int err = -ENOMEM;

        if (net_eq(net, &init_net)) {
                if (!nf_ct_expect_hsize) {
                        nf_ct_expect_hsize = net->ct.htable_size / 256;
                        if (!nf_ct_expect_hsize)
                                nf_ct_expect_hsize = 1;
                }
                nf_ct_expect_max = nf_ct_expect_hsize * 4;
        }

        net->ct.expect_count = 0;
        net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
        if (net->ct.expect_hash == NULL)
                goto err1;

        if (net_eq(net, &init_net)) {
                nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                                        sizeof(struct nf_conntrack_expect),
                                        0, 0, NULL);
                if (!nf_ct_expect_cachep)
                        goto err2;
        }

        err = exp_proc_init(net);
        if (err < 0)
                goto err3;

        return 0;

err3:
        if (net_eq(net, &init_net))
                kmem_cache_destroy(nf_ct_expect_cachep);
err2:
        nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
        return err;
}

void nf_conntrack_expect_fini(struct net *net)
{
        exp_proc_remove(net);
        if (net_eq(net, &init_net)) {
                rcu_barrier(); /* Wait for call_rcu() before destroy */
                kmem_cache_destroy(nf_ct_expect_cachep);
        }
        nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}