/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11
12 #include <linux/types.h>
13 #include <linux/netfilter.h>
14 #include <linux/skbuff.h>
15 #include <linux/proc_fs.h>
16 #include <linux/seq_file.h>
17 #include <linux/stddef.h>
18 #include <linux/slab.h>
19 #include <linux/err.h>
20 #include <linux/percpu.h>
21 #include <linux/kernel.h>
22 #include <linux/jhash.h>
23 #include <linux/moduleparam.h>
24 #include <linux/export.h>
25 #include <net/net_namespace.h>
26
27 #include <net/netfilter/nf_conntrack.h>
28 #include <net/netfilter/nf_conntrack_core.h>
29 #include <net/netfilter/nf_conntrack_expect.h>
30 #include <net/netfilter/nf_conntrack_helper.h>
31 #include <net/netfilter/nf_conntrack_tuple.h>
32 #include <net/netfilter/nf_conntrack_zones.h>
33
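/* Module-wide state: the size of the expectation hash table (settable
 * via the expect_hashsize module parameter), the global cap on the
 * number of expectations, the slab cache that expectations are
 * allocated from, and the list of expectations injected from userspace
 * through ctnetlink.
 */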
unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;

static HLIST_HEAD(nf_ct_userspace_expect_list);

/* nf_conntrack_expect helper functions */
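/* Unlink an expectation from the global hash and from its master's
 * list, drop the hash reference and report IPEXP_DESTROY.  The caller
 * must hold nf_conntrack_lock and must already have stopped the
 * expectation's timer.
 */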
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
                                u32 pid, int report)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);

        NF_CT_ASSERT(!timer_pending(&exp->timeout));

        hlist_del_rcu(&exp->hnode);
        net->ct.expect_count--;

        hlist_del(&exp->lnode);
        if (!(exp->flags & NF_CT_EXPECT_USERSPACE))
                master_help->expecting[exp->class]--;

        nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report);
        nf_ct_expect_put(exp);

        NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
        struct nf_conntrack_expect *exp = (void *)ul_expect;

        spin_lock_bh(&nf_conntrack_lock);
        nf_ct_unlink_expect(exp);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_expect_put(exp);
}

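/* Hash an expectation by the destination part of its tuple: jhash2
 * over the destination address, keyed with the layer 3/4 protocol
 * numbers and the destination port/id, then scaled to the table size
 * with a 32x32->64 bit multiply instead of a modulo.
 */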
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash;

        if (unlikely(!nf_conntrack_hash_rnd))
                init_nf_conntrack_hash_rnd();

        hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
                      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
                       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
        return ((u64)hash * nf_ct_expect_hsize) >> 32;
}

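/* Lockless lookup: walk one hash chain under RCU and return the first
 * expectation whose tuple matches under its stored mask and whose
 * master is in the right conntrack zone.  The caller must hold
 * rcu_read_lock() and must not assume a reference is taken.
 */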
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
                    const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;
        struct hlist_node *n;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
                if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone(i->master) == zone)
                        return i;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
                      const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        rcu_read_lock();
        i = __nf_ct_expect_find(net, zone, tuple);
        if (i && !atomic_inc_not_zero(&i->use))
                i = NULL;
        rcu_read_unlock();

        return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

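/* Called by the conntrack core when a new connection is set up, with
 * nf_conntrack_lock held.  A matching non-permanent expectation is
 * unlinked before it is returned, so it can only be fulfilled once.
 */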
/* If an expectation for this connection is found, it is deleted from
 * the global list and then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
                       const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i, *exp = NULL;
        struct hlist_node *n;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone(i->master) == zone) {
                        exp = i;
                        break;
                }
        }
        if (!exp)
                return NULL;

        /* If master is not in hash table yet (ie. packet hasn't left
           this machine yet), how can other end know about expected?
           Hence these are not the droids you are looking for (if
           master ct never got confirmed, we'd hold a reference to it
           and weird things would happen to future packets). */
        if (!nf_ct_is_confirmed(exp->master))
                return NULL;

        if (exp->flags & NF_CT_EXPECT_PERMANENT) {
                atomic_inc(&exp->use);
                return exp;
        } else if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                return exp;
        }

        return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
        struct hlist_node *n, *next;

        /* Optimization: most connections never expect any others. */
        if (!help)
                return;

        hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
                               const struct nf_conntrack_expect *b)
{
        /* Part covered by intersection of masks must be unequal,
           otherwise they clash */
        struct nf_conntrack_tuple_mask intersect_mask;
        int count;

        intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
                intersect_mask.src.u3.all[count] =
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }

        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}

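/* Exact identity: same master, class, tuple, mask and zone. */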
static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
        return a->master == b->master && a->class == b->class &&
                nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
                nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
                nf_ct_zone(a->master) == nf_ct_zone(b->master);
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
        spin_lock_bh(&nf_conntrack_lock);
        if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                nf_ct_expect_put(exp);
        }
        spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * conntracks. During the conntrack destruction, the expectations are
 * always killed before the conntrack itself */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
        struct nf_conntrack_expect *new;

        new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
        if (!new)
                return NULL;

        new->master = me;
        atomic_set(&new->use, 1);
        return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

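/* Fill in the tuple and mask of an expectation.  A NULL saddr or src
 * leaves that field wildcarded.  A typical helper registers an
 * expectation roughly like this (a minimal sketch; error handling and
 * the helper-specific daddr/port values are omitted):
 *
 *      exp = nf_ct_expect_alloc(ct);
 *      if (exp == NULL)
 *              return NF_DROP;
 *      nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
 *                        nf_ct_l3num(ct), NULL, &daddr,
 *                        IPPROTO_TCP, NULL, &port);
 *      if (nf_ct_expect_related(exp) != 0)
 *              ret = NF_DROP;
 *      nf_ct_expect_put(exp);
 */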
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
                       u_int8_t family,
                       const union nf_inet_addr *saddr,
                       const union nf_inet_addr *daddr,
                       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
        int len;

        if (family == AF_INET)
                len = 4;
        else
                len = 16;

        exp->flags = 0;
        exp->class = class;
        exp->expectfn = NULL;
        exp->helper = NULL;
        exp->tuple.src.l3num = family;
        exp->tuple.dst.protonum = proto;

        if (saddr) {
                memcpy(&exp->tuple.src.u3, saddr, len);
                if (sizeof(exp->tuple.src.u3) > len)
                        /* address needs to be cleared for nf_ct_tuple_equal */
                        memset((void *)&exp->tuple.src.u3 + len, 0x00,
                               sizeof(exp->tuple.src.u3) - len);
                memset(&exp->mask.src.u3, 0xFF, len);
                if (sizeof(exp->mask.src.u3) > len)
                        memset((void *)&exp->mask.src.u3 + len, 0x00,
                               sizeof(exp->mask.src.u3) - len);
        } else {
                memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
                memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
        }

        if (src) {
                exp->tuple.src.u.all = *src;
                exp->mask.src.u.all = htons(0xFFFF);
        } else {
                exp->tuple.src.u.all = 0;
                exp->mask.src.u.all = 0;
        }

        memcpy(&exp->tuple.dst.u3, daddr, len);
        if (sizeof(exp->tuple.dst.u3) > len)
                /* address needs to be cleared for nf_ct_tuple_equal */
                memset((void *)&exp->tuple.dst.u3 + len, 0x00,
                       sizeof(exp->tuple.dst.u3) - len);

        exp->tuple.dst.u.all = *dst;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
        struct nf_conntrack_expect *exp;

        exp = container_of(head, struct nf_conntrack_expect, rcu);
        kmem_cache_free(nf_ct_expect_cachep, exp);
}

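/* Drop a reference.  The final put frees the expectation via RCU so
 * that lockless readers in __nf_ct_expect_find() never touch freed
 * memory.
 */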
void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
        if (atomic_dec_and_test(&exp->use))
                call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

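/* Insert an expectation into the hash table and its master's list and
 * arm its timeout.  The timeout comes from the attached helper's
 * expect_policy for this class.  Called with nf_conntrack_lock held.
 */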
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);
        const struct nf_conntrack_expect_policy *p;
        unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

        /* two references : one for hash insert, one for the timer */
        atomic_add(2, &exp->use);

        if (master_help) {
                hlist_add_head(&exp->lnode, &master_help->expectations);
                master_help->expecting[exp->class]++;
        } else if (exp->flags & NF_CT_EXPECT_USERSPACE)
                hlist_add_head(&exp->lnode, &nf_ct_userspace_expect_list);

        hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
        net->ct.expect_count++;

        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
                    (unsigned long)exp);
        if (master_help) {
                p = &rcu_dereference_protected(
                                master_help->helper,
                                lockdep_is_held(&nf_conntrack_lock)
                                )->expect_policy[exp->class];
                exp->timeout.expires = jiffies + p->timeout * HZ;
        }
        add_timer(&exp->timeout);

        NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
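/* New expectations are added at the head of the per-master list, so
 * the last entry of the matching class seen while walking it is the
 * oldest one.
 */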
static void evict_oldest_expect(struct nf_conn *master,
                                struct nf_conntrack_expect *new)
{
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_expect *exp, *last = NULL;
        struct hlist_node *n;

        hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
                if (exp->class == new->class)
                        last = exp;
        }

        if (last && del_timer(&last->timeout)) {
                nf_ct_unlink_expect(last);
                nf_ct_expect_put(last);
        }
}

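/* Validate an expectation before insertion, with nf_conntrack_lock
 * held: drop and replace an identical existing expectation, reject
 * clashing ones, enforce the helper's per-class limit (evicting the
 * oldest entry once) and the global table limit.  Returns 1 if the
 * expectation may be inserted, a negative errno otherwise.
 */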
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
        const struct nf_conntrack_expect_policy *p;
        struct nf_conntrack_expect *i;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        struct net *net = nf_ct_exp_net(expect);
        struct hlist_node *n, *next;
        unsigned int h;
        int ret = 1;

        /* Don't allow expectations created from kernel-space with no helper */
        if (!(expect->flags & NF_CT_EXPECT_USERSPACE) &&
            (!master_help || !master_help->helper)) {
                ret = -ESHUTDOWN;
                goto out;
        }
        h = nf_ct_expect_dst_hash(&expect->tuple);
        hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
                        if (del_timer(&i->timeout)) {
                                nf_ct_unlink_expect(i);
                                nf_ct_expect_put(i);
                                break;
                        }
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        /* Will be over limit? */
        if (master_help) {
                p = &rcu_dereference_protected(
                        master_help->helper,
                        lockdep_is_held(&nf_conntrack_lock)
                        )->expect_policy[expect->class];
                if (p->max_expected &&
                    master_help->expecting[expect->class] >= p->max_expected) {
                        evict_oldest_expect(master, expect);
                        if (master_help->expecting[expect->class]
                                                >= p->max_expected) {
                                ret = -EMFILE;
                                goto out;
                        }
                }
        }

        if (net->ct.expect_count >= nf_ct_expect_max) {
                if (net_ratelimit())
                        printk(KERN_WARNING
                               "nf_conntrack: expectation table full\n");
                ret = -EMFILE;
        }
out:
        return ret;
}

int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                                u32 pid, int report)
{
        int ret;

        spin_lock_bh(&nf_conntrack_lock);
        ret = __nf_ct_expect_check(expect);
        if (ret <= 0)
                goto out;

        ret = 0;
        nf_ct_expect_insert(expect);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
        return ret;
out:
        spin_unlock_bh(&nf_conntrack_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

void nf_ct_remove_userspace_expectations(void)
{
        struct nf_conntrack_expect *exp;
        struct hlist_node *n, *next;

        hlist_for_each_entry_safe(exp, n, next,
                                  &nf_ct_userspace_expect_list, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
}
EXPORT_SYMBOL_GPL(nf_ct_remove_userspace_expectations);

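/* /proc/net/nf_conntrack_expect: one line per pending expectation.
 * Each line shows the remaining timeout in seconds, the layer 3 and 4
 * protocol numbers, the (masked) tuple, any flags, and the helper
 * name.  An IPv4/TCP entry might look like this (illustrative values):
 *
 *      296 l3proto = 2 proto=6 src=0.0.0.0 dst=10.0.0.2 sport=0 dport=40837 ftp
 */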
#ifdef CONFIG_PROC_FS
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *expect;
        struct nf_conntrack_helper *helper;
        struct hlist_node *n = v;
        char *delim = "";

        expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (expect->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&expect->timeout)
                           ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
        else
                seq_printf(s, "- ");
        seq_printf(s, "l3proto = %u proto=%u ",
                   expect->tuple.src.l3num,
                   expect->tuple.dst.protonum);
        print_tuple(s, &expect->tuple,
                    __nf_ct_l3proto_find(expect->tuple.src.l3num),
                    __nf_ct_l4proto_find(expect->tuple.src.l3num,
                                         expect->tuple.dst.protonum));

        if (expect->flags & NF_CT_EXPECT_PERMANENT) {
                seq_printf(s, "PERMANENT");
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_INACTIVE) {
                seq_printf(s, "%sINACTIVE", delim);
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_USERSPACE)
                seq_printf(s, "%sUSERSPACE", delim);

        helper = rcu_dereference(nfct_help(expect->master)->helper);
        if (helper) {
                seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
                if (helper->expect_policy[expect->class].name)
                        seq_printf(s, "/%s",
                                   helper->expect_policy[expect->class].name);
        }

        return seq_putc(s, '\n');
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &exp_seq_ops,
                        sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
        .owner   = THIS_MODULE,
        .open    = exp_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};
#endif /* CONFIG_PROC_FS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *proc;

        proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
        if (!proc)
                return -ENOMEM;
#endif /* CONFIG_PROC_FS */
        return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_PROC_FS
        proc_net_remove(net, "nf_conntrack_expect");
#endif /* CONFIG_PROC_FS */
}

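/* The expectation hash size can only be set when the module loads;
 * the parameter is read-only (0400) afterwards.
 */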
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

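/* Per-netns setup.  The defaults, chosen once for init_net, are one
 * expectation hash bucket per 256 conntrack hash buckets and a global
 * cap of four expectations per bucket on average.  The slab cache is
 * shared by all namespaces and is created only for init_net.
 */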
int nf_conntrack_expect_init(struct net *net)
{
        int err = -ENOMEM;

        if (net_eq(net, &init_net)) {
                if (!nf_ct_expect_hsize) {
                        nf_ct_expect_hsize = net->ct.htable_size / 256;
                        if (!nf_ct_expect_hsize)
                                nf_ct_expect_hsize = 1;
                }
                nf_ct_expect_max = nf_ct_expect_hsize * 4;
        }

        net->ct.expect_count = 0;
        net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
        if (net->ct.expect_hash == NULL)
                goto err1;

        if (net_eq(net, &init_net)) {
                nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                                        sizeof(struct nf_conntrack_expect),
                                        0, 0, NULL);
                if (!nf_ct_expect_cachep)
                        goto err2;
        }

        err = exp_proc_init(net);
        if (err < 0)
                goto err3;

        return 0;

err3:
        if (net_eq(net, &init_net))
                kmem_cache_destroy(nf_ct_expect_cachep);
err2:
        nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
        return err;
}

void nf_conntrack_expect_fini(struct net *net)
{
        exp_proc_remove(net);
        if (net_eq(net, &init_net)) {
                rcu_barrier(); /* Wait for call_rcu() before destroy */
                kmem_cache_destroy(nf_ct_expect_cachep);
        }
        nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}