/* net/netfilter/nf_conntrack_expect.c */
/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;

static HLIST_HEAD(nf_ct_userspace_expect_list);
/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
                                u32 pid, int report)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);

        NF_CT_ASSERT(!timer_pending(&exp->timeout));

        hlist_del_rcu(&exp->hnode);
        net->ct.expect_count--;

        hlist_del(&exp->lnode);
        if (!(exp->flags & NF_CT_EXPECT_USERSPACE))
                master_help->expecting[exp->class]--;

        nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report);
        nf_ct_expect_put(exp);

        NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
        struct nf_conntrack_expect *exp = (void *)ul_expect;

        spin_lock_bh(&nf_conntrack_lock);
        nf_ct_unlink_expect(exp);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_expect_put(exp);
}

static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash;

        if (unlikely(!nf_conntrack_hash_rnd))
                init_nf_conntrack_hash_rnd();

        hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
                      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
                       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
        return ((u64)hash * nf_ct_expect_hsize) >> 32;
}
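
/*
 * Illustrative sketch (not part of the original file): the return
 * statement above maps a 32-bit jhash value onto [0, nf_ct_expect_hsize)
 * without a hardware divide.  The high 32 bits of the 64-bit product
 * hash * N equal hash * N / 2^32, which is spread uniformly over
 * [0, N).  A hypothetical stand-alone equivalent:
 */
#if 0   /* example only */
static inline u32 example_scale_hash(u32 hash, u32 nbuckets)
{
        /* same distribution as hash % nbuckets, but cheaper */
        return ((u64)hash * nbuckets) >> 32;
}
#endif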

struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
                    const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;
        struct hlist_node *n;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
                if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone(i->master) == zone)
                        return i;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
                      const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        rcu_read_lock();
        i = __nf_ct_expect_find(net, zone, tuple);
        if (i && !atomic_inc_not_zero(&i->use))
                i = NULL;
        rcu_read_unlock();

        return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
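
/*
 * Usage sketch (hypothetical caller, not part of this file): look up
 * an expectation and drop the reference when done.  Inside the RCU
 * read side above, atomic_inc_not_zero() refuses to resurrect an
 * entry whose refcount has already dropped to zero.
 */
#if 0   /* example only */
static void example_expect_peek(struct net *net, u16 zone,
                                const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *exp;

        exp = nf_ct_expect_find_get(net, zone, tuple);
        if (exp == NULL)
                return;
        /* ... inspect exp->tuple, exp->flags, exp->master ... */
        nf_ct_expect_put(exp);  /* release the reference from _find_get() */
}
#endif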

/* If an expectation for this connection is found, it is deleted from
 * the global list and then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
                       const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i, *exp = NULL;
        struct hlist_node *n;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone(i->master) == zone) {
                        exp = i;
                        break;
                }
        }
        if (!exp)
                return NULL;

        /* If master is not in hash table yet (ie. packet hasn't left
           this machine yet), how can other end know about expected?
           Hence these are not the droids you are looking for (if
           master ct never got confirmed, we'd hold a reference to it
           and weird things would happen to future packets). */
        if (!nf_ct_is_confirmed(exp->master))
                return NULL;

        if (exp->flags & NF_CT_EXPECT_PERMANENT) {
                atomic_inc(&exp->use);
                return exp;
        } else if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                return exp;
        }

        return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
        struct hlist_node *n, *next;

        /* Optimization: most connections never expect any others. */
        if (!help)
                return;

        hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
                               const struct nf_conntrack_expect *b)
{
        /* Two expectations clash if their tuples agree on every part
           covered by the intersection of the two masks. */
        struct nf_conntrack_tuple_mask intersect_mask;
        int count;

        intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
                intersect_mask.src.u3.all[count] =
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }

        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
               nf_ct_zone(a->master) == nf_ct_zone(b->master);
}

static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
        return a->master == b->master && a->class == b->class &&
                nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
                nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
                nf_ct_zone(a->master) == nf_ct_zone(b->master);
}
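
/*
 * Illustrative note (not part of the original file): expect_matches()
 * is the stricter test, used below to spot an exact re-registration
 * (same master, class, tuple and mask), while expect_clash() only asks
 * whether two masked tuples could both match a single incoming packet,
 * e.g. two expectations for the same destination address and port whose
 * masks both leave the source port wildcarded.
 */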

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
        spin_lock_bh(&nf_conntrack_lock);
        if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                nf_ct_expect_put(exp);
        }
        spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * expectations. During the conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
        struct nf_conntrack_expect *new;

        new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
        if (!new)
                return NULL;

        new->master = me;
        atomic_set(&new->use, 1);
        return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
                       u_int8_t family,
                       const union nf_inet_addr *saddr,
                       const union nf_inet_addr *daddr,
                       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
        int len;

        if (family == AF_INET)
                len = 4;
        else
                len = 16;

        exp->flags = 0;
        exp->class = class;
        exp->expectfn = NULL;
        exp->helper = NULL;
        exp->tuple.src.l3num = family;
        exp->tuple.dst.protonum = proto;

        if (saddr) {
                memcpy(&exp->tuple.src.u3, saddr, len);
                if (sizeof(exp->tuple.src.u3) > len)
                        /* address needs to be cleared for nf_ct_tuple_equal */
                        memset((void *)&exp->tuple.src.u3 + len, 0x00,
                               sizeof(exp->tuple.src.u3) - len);
                memset(&exp->mask.src.u3, 0xFF, len);
                if (sizeof(exp->mask.src.u3) > len)
                        memset((void *)&exp->mask.src.u3 + len, 0x00,
                               sizeof(exp->mask.src.u3) - len);
        } else {
                memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
                memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
        }

        if (src) {
                exp->tuple.src.u.all = *src;
                exp->mask.src.u.all = htons(0xFFFF);
        } else {
                exp->tuple.src.u.all = 0;
                exp->mask.src.u.all = 0;
        }

        memcpy(&exp->tuple.dst.u3, daddr, len);
        if (sizeof(exp->tuple.dst.u3) > len)
                /* address needs to be cleared for nf_ct_tuple_equal */
                memset((void *)&exp->tuple.dst.u3 + len, 0x00,
                       sizeof(exp->tuple.dst.u3) - len);

        exp->tuple.dst.u.all = *dst;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
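
/*
 * Usage sketch modelled on in-tree helpers such as nf_conntrack_ftp
 * (function name and values hypothetical): expect one related TCP
 * connection to daddr:port from any source port, on behalf of master
 * conntrack @ct.  nf_ct_expect_related() is the no-report wrapper
 * around nf_ct_expect_related_report() below.
 */
#if 0   /* example only */
static int example_expect_data_conn(struct nf_conn *ct,
                                    const union nf_inet_addr *saddr,
                                    const union nf_inet_addr *daddr,
                                    __be16 port)
{
        struct nf_conntrack_expect *exp;
        int ret;

        exp = nf_ct_expect_alloc(ct);
        if (exp == NULL)
                return -ENOMEM;
        nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, AF_INET,
                          saddr, daddr, IPPROTO_TCP, NULL, &port);
        ret = nf_ct_expect_related(exp);
        nf_ct_expect_put(exp);  /* drop the reference from _alloc() */
        return ret;
}
#endif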

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
        struct nf_conntrack_expect *exp;

        exp = container_of(head, struct nf_conntrack_expect, rcu);
        kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
        if (atomic_dec_and_test(&exp->use))
                call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);
        const struct nf_conntrack_expect_policy *p;
        unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

        /* two references: one for hash insert, one for the timer */
        atomic_add(2, &exp->use);

        if (master_help) {
                hlist_add_head(&exp->lnode, &master_help->expectations);
                master_help->expecting[exp->class]++;
        } else if (exp->flags & NF_CT_EXPECT_USERSPACE)
                hlist_add_head(&exp->lnode, &nf_ct_userspace_expect_list);

        hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
        net->ct.expect_count++;

        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
                    (unsigned long)exp);
        if (master_help) {
                p = &rcu_dereference_protected(
                                master_help->helper,
                                lockdep_is_held(&nf_conntrack_lock)
                                )->expect_policy[exp->class];
                exp->timeout.expires = jiffies + p->timeout * HZ;
        }
        add_timer(&exp->timeout);

        NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
                                struct nf_conntrack_expect *new)
{
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_expect *exp, *last = NULL;
        struct hlist_node *n;

        hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
                if (exp->class == new->class)
                        last = exp;
        }

        if (last && del_timer(&last->timeout)) {
                nf_ct_unlink_expect(last);
                nf_ct_expect_put(last);
        }
}

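/* Added summary (not in the original file): validate @expect under
 * nf_conntrack_lock.  An identical pending expectation is replaced, a
 * clashing one rejects the new entry with -EBUSY, and both the helper's
 * per-class max_expected and the global nf_ct_expect_max are enforced.
 * Returns 1 if the expectation may be inserted, or a negative errno. */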
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
        const struct nf_conntrack_expect_policy *p;
        struct nf_conntrack_expect *i;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        struct net *net = nf_ct_exp_net(expect);
        struct hlist_node *n, *next;
        unsigned int h;
        int ret = 1;

        /* Don't allow expectations created from kernel-space with no helper */
        if (!(expect->flags & NF_CT_EXPECT_USERSPACE) &&
            (!master_help || !master_help->helper)) {
                ret = -ESHUTDOWN;
                goto out;
        }
        h = nf_ct_expect_dst_hash(&expect->tuple);
        hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
                        if (del_timer(&i->timeout)) {
                                nf_ct_unlink_expect(i);
                                nf_ct_expect_put(i);
                                break;
                        }
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        /* Will be over limit? */
        if (master_help) {
                p = &rcu_dereference_protected(
                        master_help->helper,
                        lockdep_is_held(&nf_conntrack_lock)
                        )->expect_policy[expect->class];
                if (p->max_expected &&
                    master_help->expecting[expect->class] >= p->max_expected) {
                        evict_oldest_expect(master, expect);
                        if (master_help->expecting[expect->class]
                                                >= p->max_expected) {
                                ret = -EMFILE;
                                goto out;
                        }
                }
        }

        if (net->ct.expect_count >= nf_ct_expect_max) {
                if (net_ratelimit())
                        printk(KERN_WARNING
                               "nf_conntrack: expectation table full\n");
                ret = -EMFILE;
        }
out:
        return ret;
}

int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                                u32 pid, int report)
{
        int ret;

        spin_lock_bh(&nf_conntrack_lock);
        ret = __nf_ct_expect_check(expect);
        if (ret <= 0)
                goto out;

        ret = 0;
        nf_ct_expect_insert(expect);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
        return ret;
out:
        spin_unlock_bh(&nf_conntrack_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

void nf_ct_remove_userspace_expectations(void)
{
        struct nf_conntrack_expect *exp;
        struct hlist_node *n, *next;

        hlist_for_each_entry_safe(exp, n, next,
                                  &nf_ct_userspace_expect_list, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
}
EXPORT_SYMBOL_GPL(nf_ct_remove_userspace_expectations);

#ifdef CONFIG_PROC_FS
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *expect;
        struct nf_conntrack_helper *helper;
        struct hlist_node *n = v;
        char *delim = "";

        expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (expect->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&expect->timeout)
                           ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
        else
                seq_printf(s, "- ");
        seq_printf(s, "l3proto = %u proto=%u ",
                   expect->tuple.src.l3num,
                   expect->tuple.dst.protonum);
        print_tuple(s, &expect->tuple,
                    __nf_ct_l3proto_find(expect->tuple.src.l3num),
                    __nf_ct_l4proto_find(expect->tuple.src.l3num,
                                       expect->tuple.dst.protonum));

        if (expect->flags & NF_CT_EXPECT_PERMANENT) {
                seq_printf(s, "PERMANENT");
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_INACTIVE) {
                seq_printf(s, "%sINACTIVE", delim);
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_USERSPACE)
                seq_printf(s, "%sUSERSPACE", delim);

        helper = rcu_dereference(nfct_help(expect->master)->helper);
        if (helper) {
                seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
                if (helper->expect_policy[expect->class].name)
                        seq_printf(s, "/%s",
                                   helper->expect_policy[expect->class].name);
        }

        return seq_putc(s, '\n');
}
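
/*
 * Illustrative /proc/net/nf_conntrack_expect line as produced above
 * (addresses, ports and timeout hypothetical):
 *
 *   296 l3proto = 2 proto=6 src=10.0.0.1 dst=10.0.0.2 sport=0 dport=35821 PERMANENT ftp
 *
 * i.e. seconds until timeout, l3/l4 protocol numbers, the masked tuple,
 * the flag set, and the helper (and policy name) when present.
 */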

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &exp_seq_ops,
                        sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
        .owner   = THIS_MODULE,
        .open    = exp_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};
#endif /* CONFIG_PROC_FS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *proc;

        proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
        if (!proc)
                return -ENOMEM;
#endif /* CONFIG_PROC_FS */
        return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_PROC_FS
        proc_net_remove(net, "nf_conntrack_expect");
#endif /* CONFIG_PROC_FS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
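
/*
 * Illustrative note (not part of the original file): this file is built
 * into the nf_conntrack module, so the read-only (0400) parameter above
 * is set at load time, e.g.
 *   modprobe nf_conntrack expect_hashsize=2048
 * or "nf_conntrack.expect_hashsize=2048" on the kernel command line
 * when conntrack is built in.  When left unset, a default is derived
 * from the conntrack hash size in nf_conntrack_expect_init() below.
 */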

int nf_conntrack_expect_init(struct net *net)
{
        int err = -ENOMEM;

        if (net_eq(net, &init_net)) {
                if (!nf_ct_expect_hsize) {
                        nf_ct_expect_hsize = net->ct.htable_size / 256;
                        if (!nf_ct_expect_hsize)
                                nf_ct_expect_hsize = 1;
                }
                nf_ct_expect_max = nf_ct_expect_hsize * 4;
        }

        net->ct.expect_count = 0;
        net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
        if (net->ct.expect_hash == NULL)
                goto err1;

        if (net_eq(net, &init_net)) {
                nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                                        sizeof(struct nf_conntrack_expect),
                                        0, 0, NULL);
                if (!nf_ct_expect_cachep)
                        goto err2;
        }

        err = exp_proc_init(net);
        if (err < 0)
                goto err3;

        return 0;

err3:
        if (net_eq(net, &init_net))
                kmem_cache_destroy(nf_ct_expect_cachep);
err2:
        nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
        return err;
}

void nf_conntrack_expect_fini(struct net *net)
{
        exp_proc_remove(net);
        if (net_eq(net, &init_net)) {
                rcu_barrier(); /* Wait for call_rcu() before destroy */
                kmem_cache_destroy(nf_ct_expect_cachep);
        }
        nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}