net/netfilter/nf_conntrack_core.c
1 /* Connection state tracking for netfilter.  This is separated from,
2    but required by, the NAT layer; it can also be used by an iptables
3    extension. */
4
5 /* (C) 1999-2001 Paul `Rusty' Russell
6  * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
7  * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13
14 #include <linux/types.h>
15 #include <linux/netfilter.h>
16 #include <linux/module.h>
17 #include <linux/sched.h>
18 #include <linux/skbuff.h>
19 #include <linux/proc_fs.h>
20 #include <linux/vmalloc.h>
21 #include <linux/stddef.h>
22 #include <linux/slab.h>
23 #include <linux/random.h>
24 #include <linux/jhash.h>
25 #include <linux/err.h>
26 #include <linux/percpu.h>
27 #include <linux/moduleparam.h>
28 #include <linux/notifier.h>
29 #include <linux/kernel.h>
30 #include <linux/netdevice.h>
31 #include <linux/socket.h>
32 #include <linux/mm.h>
33 #include <linux/nsproxy.h>
34 #include <linux/rculist_nulls.h>
35
36 #include <net/netfilter/nf_conntrack.h>
37 #include <net/netfilter/nf_conntrack_l3proto.h>
38 #include <net/netfilter/nf_conntrack_l4proto.h>
39 #include <net/netfilter/nf_conntrack_expect.h>
40 #include <net/netfilter/nf_conntrack_helper.h>
41 #include <net/netfilter/nf_conntrack_core.h>
42 #include <net/netfilter/nf_conntrack_extend.h>
43 #include <net/netfilter/nf_conntrack_acct.h>
44 #include <net/netfilter/nf_conntrack_ecache.h>
45 #include <net/netfilter/nf_conntrack_zones.h>
46 #include <net/netfilter/nf_nat.h>
47 #include <net/netfilter/nf_nat_core.h>
48
49 #define NF_CONNTRACK_VERSION    "0.5.0"
50
51 int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
52                                       enum nf_nat_manip_type manip,
53                                       const struct nlattr *attr) __read_mostly;
54 EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
55
56 DEFINE_SPINLOCK(nf_conntrack_lock);
57 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
58
59 unsigned int nf_conntrack_htable_size __read_mostly;
60 EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
61
62 unsigned int nf_conntrack_max __read_mostly;
63 EXPORT_SYMBOL_GPL(nf_conntrack_max);
64
65 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
66 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
67
68 static unsigned int nf_conntrack_hash_rnd __read_mostly;
69
70 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
71 {
72         unsigned int n;
73
74         /* The direction must be ignored, so we hash everything up to the
75          * destination port (an offset that is a multiple of 4) and fold the
76          * last three bytes (dst port and protocol number) into the seed.
77          */
78         n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
79         return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
80                       (((__force __u16)tuple->dst.u.all << 16) |
81                       tuple->dst.protonum));
82 }
83
84 static u32 __hash_bucket(u32 hash, unsigned int size)
85 {
86         return ((u64)hash * size) >> 32;
87 }
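
/*
 * Note on the hashing scheme: __hash_bucket() maps a 32-bit hash value onto
 * [0, size) with a multiply-and-shift instead of a modulo, which is cheap
 * and does not require the table size to be a power of two.  As an
 * illustration, with hash = 0x80000000 and size = 16384 the product is
 * 2^31 * 2^14 = 2^45, and shifting right by 32 gives bucket 8192, i.e. the
 * middle of the table.  The raw hash itself is jhash2() over the source
 * part and destination address of the tuple, with the zone, the random
 * seed, the destination port and the protocol number folded into the seed.
 */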
88
89 static u32 hash_bucket(u32 hash, const struct net *net)
90 {
91         return __hash_bucket(hash, net->ct.htable_size);
92 }
93
94 static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
95                                   u16 zone, unsigned int size)
96 {
97         return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
98 }
99
100 static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
101                                        const struct nf_conntrack_tuple *tuple)
102 {
103         return __hash_conntrack(tuple, zone, net->ct.htable_size);
104 }
105
106 bool
107 nf_ct_get_tuple(const struct sk_buff *skb,
108                 unsigned int nhoff,
109                 unsigned int dataoff,
110                 u_int16_t l3num,
111                 u_int8_t protonum,
112                 struct nf_conntrack_tuple *tuple,
113                 const struct nf_conntrack_l3proto *l3proto,
114                 const struct nf_conntrack_l4proto *l4proto)
115 {
116         memset(tuple, 0, sizeof(*tuple));
117
118         tuple->src.l3num = l3num;
119         if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
120                 return false;
121
122         tuple->dst.protonum = protonum;
123         tuple->dst.dir = IP_CT_DIR_ORIGINAL;
124
125         return l4proto->pkt_to_tuple(skb, dataoff, tuple);
126 }
127 EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
128
129 bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
130                        u_int16_t l3num, struct nf_conntrack_tuple *tuple)
131 {
132         struct nf_conntrack_l3proto *l3proto;
133         struct nf_conntrack_l4proto *l4proto;
134         unsigned int protoff;
135         u_int8_t protonum;
136         int ret;
137
138         rcu_read_lock();
139
140         l3proto = __nf_ct_l3proto_find(l3num);
141         ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
142         if (ret != NF_ACCEPT) {
143                 rcu_read_unlock();
144                 return false;
145         }
146
147         l4proto = __nf_ct_l4proto_find(l3num, protonum);
148
149         ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
150                               l3proto, l4proto);
151
152         rcu_read_unlock();
153         return ret;
154 }
155 EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
156
157 bool
158 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
159                    const struct nf_conntrack_tuple *orig,
160                    const struct nf_conntrack_l3proto *l3proto,
161                    const struct nf_conntrack_l4proto *l4proto)
162 {
163         memset(inverse, 0, sizeof(*inverse));
164
165         inverse->src.l3num = orig->src.l3num;
166         if (l3proto->invert_tuple(inverse, orig) == 0)
167                 return false;
168
169         inverse->dst.dir = !orig->dst.dir;
170
171         inverse->dst.protonum = orig->dst.protonum;
172         return l4proto->invert_tuple(inverse, orig);
173 }
174 EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
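
/*
 * Illustrative example: for a TCP connection 10.0.0.1:12345 -> 10.0.0.2:80
 * (addresses chosen arbitrarily), the ORIGINAL tuple has src 10.0.0.1:12345
 * and dst 10.0.0.2:80, while the inverted tuple built above has src
 * 10.0.0.2:80, dst 10.0.0.1:12345 and dst.dir = IP_CT_DIR_REPLY, so packets
 * flowing in the reply direction match the REPLY tuplehash of the same
 * conntrack entry.
 */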
175
176 static void
177 clean_from_lists(struct nf_conn *ct)
178 {
179         pr_debug("clean_from_lists(%p)\n", ct);
180         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
181         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
182
183         /* Destroy all pending expectations */
184         nf_ct_remove_expectations(ct);
185 }
186
187 static void
188 destroy_conntrack(struct nf_conntrack *nfct)
189 {
190         struct nf_conn *ct = (struct nf_conn *)nfct;
191         struct net *net = nf_ct_net(ct);
192         struct nf_conntrack_l4proto *l4proto;
193
194         pr_debug("destroy_conntrack(%p)\n", ct);
195         NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
196         NF_CT_ASSERT(!timer_pending(&ct->timeout));
197
198         /* To make sure we don't get any weird locking issues here:
199          * destroy_conntrack() MUST NOT be called with a write lock
200          * to nf_conntrack_lock!!! -HW */
201         rcu_read_lock();
202         l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
203         if (l4proto && l4proto->destroy)
204                 l4proto->destroy(ct);
205
206         rcu_read_unlock();
207
208         spin_lock_bh(&nf_conntrack_lock);
209         /* Expectations will have been removed in clean_from_lists,
210          * except that TFTP can create an expectation on the first packet,
211          * before the connection is in the list, so we need to clean
212          * here too. */
213         nf_ct_remove_expectations(ct);
214
215         /* We overload first tuple to link into unconfirmed list. */
216         if (!nf_ct_is_confirmed(ct)) {
217                 BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
218                 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
219         }
220
221         NF_CT_STAT_INC(net, delete);
222         spin_unlock_bh(&nf_conntrack_lock);
223
224         if (ct->master)
225                 nf_ct_put(ct->master);
226
227         pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
228         nf_conntrack_free(ct);
229 }
230
231 void nf_ct_delete_from_lists(struct nf_conn *ct)
232 {
233         struct net *net = nf_ct_net(ct);
234
235         nf_ct_helper_destroy(ct);
236         spin_lock_bh(&nf_conntrack_lock);
237         /* Inside the lock, so preemption is disabled on the module
238          * removal path.  Otherwise we can get spurious warnings. */
239         NF_CT_STAT_INC(net, delete_list);
240         clean_from_lists(ct);
241         spin_unlock_bh(&nf_conntrack_lock);
242 }
243 EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
244
245 static void death_by_event(unsigned long ul_conntrack)
246 {
247         struct nf_conn *ct = (void *)ul_conntrack;
248         struct net *net = nf_ct_net(ct);
249
250         if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
251                 /* bad luck, let's retry again */
252                 ct->timeout.expires = jiffies +
253                         (random32() % net->ct.sysctl_events_retry_timeout);
254                 add_timer(&ct->timeout);
255                 return;
256         }
257         /* we've got the event delivered, now it's dying */
258         set_bit(IPS_DYING_BIT, &ct->status);
259         spin_lock(&nf_conntrack_lock);
260         hlist_nulls_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
261         spin_unlock(&nf_conntrack_lock);
262         nf_ct_put(ct);
263 }
264
265 void nf_ct_insert_dying_list(struct nf_conn *ct)
266 {
267         struct net *net = nf_ct_net(ct);
268
269         /* add this conntrack to the dying list */
270         spin_lock_bh(&nf_conntrack_lock);
271         hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
272                              &net->ct.dying);
273         spin_unlock_bh(&nf_conntrack_lock);
274         /* set a new timer to retry event delivery */
275         setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
276         ct->timeout.expires = jiffies +
277                 (random32() % net->ct.sysctl_events_retry_timeout);
278         add_timer(&ct->timeout);
279 }
280 EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
281
282 static void death_by_timeout(unsigned long ul_conntrack)
283 {
284         struct nf_conn *ct = (void *)ul_conntrack;
285
286         if (!test_bit(IPS_DYING_BIT, &ct->status) &&
287             unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
288                 /* destroy event was not delivered */
289                 nf_ct_delete_from_lists(ct);
290                 nf_ct_insert_dying_list(ct);
291                 return;
292         }
293         set_bit(IPS_DYING_BIT, &ct->status);
294         nf_ct_delete_from_lists(ct);
295         nf_ct_put(ct);
296 }
297
298 /*
299  * Warning:
300  * - Caller must take a reference on returned object
301  *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
302  * OR
303  * - Caller must lock nf_conntrack_lock before calling this function
304  */
305 static struct nf_conntrack_tuple_hash *
306 ____nf_conntrack_find(struct net *net, u16 zone,
307                       const struct nf_conntrack_tuple *tuple, u32 hash)
308 {
309         struct nf_conntrack_tuple_hash *h;
310         struct hlist_nulls_node *n;
311         unsigned int bucket = hash_bucket(hash, net);
312
313         /* Disable BHs the entire time since we normally need to disable them
314          * at least once for the stats anyway.
315          */
316         local_bh_disable();
317 begin:
318         hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
319                 if (nf_ct_tuple_equal(tuple, &h->tuple) &&
320                     nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
321                         NF_CT_STAT_INC(net, found);
322                         local_bh_enable();
323                         return h;
324                 }
325                 NF_CT_STAT_INC(net, searched);
326         }
327         /*
328          * If the nulls value we got at the end of this lookup is
329          * not the expected one, we must restart the lookup.
330          * We probably met an item that was moved to another chain.
331          */
332         if (get_nulls_value(n) != bucket) {
333                 NF_CT_STAT_INC(net, search_restart);
334                 goto begin;
335         }
336         local_bh_enable();
337
338         return NULL;
339 }
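
/*
 * Why the restart above is needed: conntrack objects come from a
 * SLAB_DESTROY_BY_RCU cache, so an entry can be freed and reused for a
 * different connection while a lockless reader is still traversing it,
 * leaving the reader on another hash chain.  The "nulls" terminator of each
 * chain encodes the bucket number (see nf_ct_alloc_hashtable()), so hitting
 * a terminator that does not match the bucket we started in means the walk
 * went astray and must be restarted.  This is also why callers must re-check
 * the tuple and take a reference, as the comment above the function says.
 */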
340
341 struct nf_conntrack_tuple_hash *
342 __nf_conntrack_find(struct net *net, u16 zone,
343                     const struct nf_conntrack_tuple *tuple)
344 {
345         return ____nf_conntrack_find(net, zone, tuple,
346                                      hash_conntrack_raw(tuple, zone));
347 }
348 EXPORT_SYMBOL_GPL(__nf_conntrack_find);
349
350 /* Find a connection corresponding to a tuple. */
351 static struct nf_conntrack_tuple_hash *
352 __nf_conntrack_find_get(struct net *net, u16 zone,
353                         const struct nf_conntrack_tuple *tuple, u32 hash)
354 {
355         struct nf_conntrack_tuple_hash *h;
356         struct nf_conn *ct;
357
358         rcu_read_lock();
359 begin:
360         h = ____nf_conntrack_find(net, zone, tuple, hash);
361         if (h) {
362                 ct = nf_ct_tuplehash_to_ctrack(h);
363                 if (unlikely(nf_ct_is_dying(ct) ||
364                              !atomic_inc_not_zero(&ct->ct_general.use)))
365                         h = NULL;
366                 else {
367                         if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
368                                      nf_ct_zone(ct) != zone)) {
369                                 nf_ct_put(ct);
370                                 goto begin;
371                         }
372                 }
373         }
374         rcu_read_unlock();
375
376         return h;
377 }
378
379 struct nf_conntrack_tuple_hash *
380 nf_conntrack_find_get(struct net *net, u16 zone,
381                       const struct nf_conntrack_tuple *tuple)
382 {
383         return __nf_conntrack_find_get(net, zone, tuple,
384                                        hash_conntrack_raw(tuple, zone));
385 }
386 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
387
388 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
389                                        unsigned int hash,
390                                        unsigned int repl_hash)
391 {
392         struct net *net = nf_ct_net(ct);
393
394         hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
395                            &net->ct.hash[hash]);
396         hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
397                            &net->ct.hash[repl_hash]);
398 }
399
400 void nf_conntrack_hash_insert(struct nf_conn *ct)
401 {
402         struct net *net = nf_ct_net(ct);
403         unsigned int hash, repl_hash;
404         u16 zone;
405
406         zone = nf_ct_zone(ct);
407         hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
408         repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
409
410         __nf_conntrack_hash_insert(ct, hash, repl_hash);
411 }
412 EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);
413
414 /* Confirm a connection given skb; places it in hash table */
415 int
416 __nf_conntrack_confirm(struct sk_buff *skb)
417 {
418         unsigned int hash, repl_hash;
419         struct nf_conntrack_tuple_hash *h;
420         struct nf_conn *ct;
421         struct nf_conn_help *help;
422         struct hlist_nulls_node *n;
423         enum ip_conntrack_info ctinfo;
424         struct net *net;
425         u16 zone;
426
427         ct = nf_ct_get(skb, &ctinfo);
428         net = nf_ct_net(ct);
429
430         /* ipt_REJECT uses nf_conntrack_attach to attach related
431            ICMP/TCP RST packets in the other direction.  The actual
432            packet which created the connection will be IP_CT_NEW, or
433            IP_CT_RELATED for an expected connection. */
434         if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
435                 return NF_ACCEPT;
436
437         zone = nf_ct_zone(ct);
438         /* reuse the hash saved before */
439         hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
440         hash = hash_bucket(hash, net);
441         repl_hash = hash_conntrack(net, zone,
442                                    &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
443
444         /* We're not in the hash table, and we refuse to set up related
445            connections for unconfirmed conns.  But packet copies and
446            REJECT will give spurious warnings here. */
447         /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
448
449         /* No external references means no one else could have
450            confirmed us. */
451         NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
452         pr_debug("Confirming conntrack %p\n", ct);
453
454         spin_lock_bh(&nf_conntrack_lock);
455
456         /* We have to check the DYING flag inside the lock to prevent
457            a race against nf_ct_get_next_corpse() possibly called from
458            user context, else we insert an already 'dead' hash, blocking
459            further use of that particular connection -JM */
460
461         if (unlikely(nf_ct_is_dying(ct))) {
462                 spin_unlock_bh(&nf_conntrack_lock);
463                 return NF_ACCEPT;
464         }
465
466         /* See if there's one in the list already, including reverse:
467            NAT could have grabbed it without realizing, since we're
468            not in the hash.  If there is, we lost race. */
469         hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
470                 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
471                                       &h->tuple) &&
472                     zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
473                         goto out;
474         hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
475                 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
476                                       &h->tuple) &&
477                     zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
478                         goto out;
479
480         /* Remove from unconfirmed list */
481         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
482
483         /* Timer relative to confirmation time, not original
484            setting time, otherwise we'd get timer wrap in
485            weird delay cases. */
486         ct->timeout.expires += jiffies;
487         add_timer(&ct->timeout);
488         atomic_inc(&ct->ct_general.use);
489         set_bit(IPS_CONFIRMED_BIT, &ct->status);
490
491         /* Since the lookup is lockless, hash insertion must be done after
492          * starting the timer and setting the CONFIRMED bit. The RCU barriers
493          * guarantee that no other CPU can find the conntrack before the above
494          * stores are visible.
495          */
496         __nf_conntrack_hash_insert(ct, hash, repl_hash);
497         NF_CT_STAT_INC(net, insert);
498         spin_unlock_bh(&nf_conntrack_lock);
499
500         help = nfct_help(ct);
501         if (help && help->helper)
502                 nf_conntrack_event_cache(IPCT_HELPER, ct);
503
504         nf_conntrack_event_cache(master_ct(ct) ?
505                                  IPCT_RELATED : IPCT_NEW, ct);
506         return NF_ACCEPT;
507
508 out:
509         NF_CT_STAT_INC(net, insert_failed);
510         spin_unlock_bh(&nf_conntrack_lock);
511         return NF_DROP;
512 }
513 EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
514
515 /* Returns true if a connection corresponds to the tuple (required
516    for NAT). */
517 int
518 nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
519                          const struct nf_conn *ignored_conntrack)
520 {
521         struct net *net = nf_ct_net(ignored_conntrack);
522         struct nf_conntrack_tuple_hash *h;
523         struct hlist_nulls_node *n;
524         struct nf_conn *ct;
525         u16 zone = nf_ct_zone(ignored_conntrack);
526         unsigned int hash = hash_conntrack(net, zone, tuple);
527
528         /* Disable BHs the entire time since we need to disable them at
529          * least once for the stats anyway.
530          */
531         rcu_read_lock_bh();
532         hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
533                 ct = nf_ct_tuplehash_to_ctrack(h);
534                 if (ct != ignored_conntrack &&
535                     nf_ct_tuple_equal(tuple, &h->tuple) &&
536                     nf_ct_zone(ct) == zone) {
537                         NF_CT_STAT_INC(net, found);
538                         rcu_read_unlock_bh();
539                         return 1;
540                 }
541                 NF_CT_STAT_INC(net, searched);
542         }
543         rcu_read_unlock_bh();
544
545         return 0;
546 }
547 EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
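
/*
 * Usage note: the main caller of nf_conntrack_tuple_taken() is the NAT core
 * (typically via nf_nat_used_tuple() in nf_nat_core.c); when picking a
 * unique tuple it tries candidate ports/addresses and rejects any for which
 * this function returns 1, while ignoring the conntrack being NATed itself
 * via the ignored_conntrack argument.
 */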
548
549 #define NF_CT_EVICTION_RANGE    8
550
551 /* There's a small race here where we may free a just-assured
552    connection.  Too bad: we're in trouble anyway. */
553 static noinline int early_drop(struct net *net, unsigned int hash)
554 {
555         /* Use oldest entry, which is roughly LRU */
556         struct nf_conntrack_tuple_hash *h;
557         struct nf_conn *ct = NULL, *tmp;
558         struct hlist_nulls_node *n;
559         unsigned int i, cnt = 0;
560         int dropped = 0;
561
562         rcu_read_lock();
563         for (i = 0; i < net->ct.htable_size; i++) {
564                 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
565                                          hnnode) {
566                         tmp = nf_ct_tuplehash_to_ctrack(h);
567                         if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
568                                 ct = tmp;
569                         cnt++;
570                 }
571
572                 if (ct != NULL) {
573                         if (likely(!nf_ct_is_dying(ct) &&
574                                    atomic_inc_not_zero(&ct->ct_general.use)))
575                                 break;
576                         else
577                                 ct = NULL;
578                 }
579
580                 if (cnt >= NF_CT_EVICTION_RANGE)
581                         break;
582
583                 hash = (hash + 1) % net->ct.htable_size;
584         }
585         rcu_read_unlock();
586
587         if (!ct)
588                 return dropped;
589
590         if (del_timer(&ct->timeout)) {
591                 death_by_timeout((unsigned long)ct);
592                 dropped = 1;
593                 NF_CT_STAT_INC_ATOMIC(net, early_drop);
594         }
595         nf_ct_put(ct);
596         return dropped;
597 }
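
/*
 * Note on the eviction policy: early_drop() scans a small window of roughly
 * NF_CT_EVICTION_RANGE (8) entries, starting in the bucket the new
 * connection hashes into and wrapping to the following buckets if needed.
 * It remembers the last entry that is not ASSURED and kills it by expiring
 * its timer, so established, assured connections are never sacrificed to
 * make room for new ones.
 */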
598
599 static struct nf_conn *
600 __nf_conntrack_alloc(struct net *net, u16 zone,
601                      const struct nf_conntrack_tuple *orig,
602                      const struct nf_conntrack_tuple *repl,
603                      gfp_t gfp, u32 hash)
604 {
605         struct nf_conn *ct;
606
607         if (unlikely(!nf_conntrack_hash_rnd)) {
608                 unsigned int rand;
609
610                 /*
611                  * Why not initialize nf_conntrack_hash_rnd in an init function?
612                  * Because there isn't enough entropy when the system is
613                  * initializing, so we initialize it as late as possible.
614                  */
615                 do {
616                         get_random_bytes(&rand, sizeof(rand));
617                 } while (!rand);
618                 cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
619
620                 /* recompute the hash as nf_conntrack_hash_rnd is initialized */
621                 hash = hash_conntrack_raw(orig, zone);
622         }
623
624         /* We don't want any race condition at early drop stage */
625         atomic_inc(&net->ct.count);
626
627         if (nf_conntrack_max &&
628             unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
629                 if (!early_drop(net, hash_bucket(hash, net))) {
630                         atomic_dec(&net->ct.count);
631                         if (net_ratelimit())
632                                 printk(KERN_WARNING
633                                        "nf_conntrack: table full, dropping"
634                                        " packet.\n");
635                         return ERR_PTR(-ENOMEM);
636                 }
637         }
638
639         /*
640          * Do not use kmem_cache_zalloc(), as this cache uses
641          * SLAB_DESTROY_BY_RCU.
642          */
643         ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
644         if (ct == NULL) {
645                 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
646                 atomic_dec(&net->ct.count);
647                 return ERR_PTR(-ENOMEM);
648         }
649         /*
650          * Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
651          * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
652          */
653         memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
654                sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
655         spin_lock_init(&ct->lock);
656         ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
657         ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
658         ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
659         /* save hash for reusing when confirming */
660         *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
661         /* Don't set timer yet: wait for confirmation */
662         setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
663         write_pnet(&ct->ct_net, net);
664 #ifdef CONFIG_NF_CONNTRACK_ZONES
665         if (zone) {
666                 struct nf_conntrack_zone *nf_ct_zone;
667
668                 nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
669                 if (!nf_ct_zone)
670                         goto out_free;
671                 nf_ct_zone->id = zone;
672         }
673 #endif
674         /*
675          * changes to lookup keys must be done before setting refcnt to 1
676          */
677         smp_wmb();
678         atomic_set(&ct->ct_general.use, 1);
679         return ct;
680
681 #ifdef CONFIG_NF_CONNTRACK_ZONES
682 out_free:
683         kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
684         return ERR_PTR(-ENOMEM);
685 #endif
686 }
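
/*
 * Note on the hash stashing in __nf_conntrack_alloc(): an unconfirmed
 * conntrack's REPLY tuplehash is not linked into any list yet, so its
 * hnnode.pprev field is unused.  The precomputed raw hash is stored there
 * and read back by __nf_conntrack_confirm() instead of hashing the tuple a
 * second time; the field is overwritten when the entry is finally inserted
 * into the hash table.
 */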
687
688 struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
689                                    const struct nf_conntrack_tuple *orig,
690                                    const struct nf_conntrack_tuple *repl,
691                                    gfp_t gfp)
692 {
693         return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
694 }
695 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
696
697 void nf_conntrack_free(struct nf_conn *ct)
698 {
699         struct net *net = nf_ct_net(ct);
700
701         nf_ct_ext_destroy(ct);
702         atomic_dec(&net->ct.count);
703         nf_ct_ext_free(ct);
704         kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
705 }
706 EXPORT_SYMBOL_GPL(nf_conntrack_free);
707
708 /* Allocate a new conntrack: we return -ENOMEM if classification
709    failed due to stress.  Otherwise it really is unclassifiable. */
710 static struct nf_conntrack_tuple_hash *
711 init_conntrack(struct net *net, struct nf_conn *tmpl,
712                const struct nf_conntrack_tuple *tuple,
713                struct nf_conntrack_l3proto *l3proto,
714                struct nf_conntrack_l4proto *l4proto,
715                struct sk_buff *skb,
716                unsigned int dataoff, u32 hash)
717 {
718         struct nf_conn *ct;
719         struct nf_conn_help *help;
720         struct nf_conntrack_tuple repl_tuple;
721         struct nf_conntrack_ecache *ecache;
722         struct nf_conntrack_expect *exp;
723         u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
724
725         if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
726                 pr_debug("Can't invert tuple.\n");
727                 return NULL;
728         }
729
730         ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
731                                   hash);
732         if (IS_ERR(ct)) {
733                 pr_debug("Can't allocate conntrack.\n");
734                 return (struct nf_conntrack_tuple_hash *)ct;
735         }
736
737         if (!l4proto->new(ct, skb, dataoff)) {
738                 nf_conntrack_free(ct);
739                 pr_debug("init conntrack: can't track with proto module\n");
740                 return NULL;
741         }
742
743         nf_ct_acct_ext_add(ct, GFP_ATOMIC);
744
745         ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
746         nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
747                                  ecache ? ecache->expmask : 0,
748                              GFP_ATOMIC);
749
750         spin_lock_bh(&nf_conntrack_lock);
751         exp = nf_ct_find_expectation(net, zone, tuple);
752         if (exp) {
753                 pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
754                          ct, exp);
755                 /* Welcome, Mr. Bond.  We've been expecting you... */
756                 __set_bit(IPS_EXPECTED_BIT, &ct->status);
757                 ct->master = exp->master;
758                 if (exp->helper) {
759                         help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
760                         if (help)
761                                 rcu_assign_pointer(help->helper, exp->helper);
762                 }
763
764 #ifdef CONFIG_NF_CONNTRACK_MARK
765                 ct->mark = exp->master->mark;
766 #endif
767 #ifdef CONFIG_NF_CONNTRACK_SECMARK
768                 ct->secmark = exp->master->secmark;
769 #endif
770                 nf_conntrack_get(&ct->master->ct_general);
771                 NF_CT_STAT_INC(net, expect_new);
772         } else {
773                 __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
774                 NF_CT_STAT_INC(net, new);
775         }
776
777         /* Overload tuple linked list to put us in unconfirmed list. */
778         hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
779                        &net->ct.unconfirmed);
780
781         spin_unlock_bh(&nf_conntrack_lock);
782
783         if (exp) {
784                 if (exp->expectfn)
785                         exp->expectfn(ct, exp);
786                 nf_ct_expect_put(exp);
787         }
788
789         return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
790 }
791
792 /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
793 static inline struct nf_conn *
794 resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
795                   struct sk_buff *skb,
796                   unsigned int dataoff,
797                   u_int16_t l3num,
798                   u_int8_t protonum,
799                   struct nf_conntrack_l3proto *l3proto,
800                   struct nf_conntrack_l4proto *l4proto,
801                   int *set_reply,
802                   enum ip_conntrack_info *ctinfo)
803 {
804         struct nf_conntrack_tuple tuple;
805         struct nf_conntrack_tuple_hash *h;
806         struct nf_conn *ct;
807         u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
808         u32 hash;
809
810         if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
811                              dataoff, l3num, protonum, &tuple, l3proto,
812                              l4proto)) {
813                 pr_debug("resolve_normal_ct: Can't get tuple\n");
814                 return NULL;
815         }
816
817         /* look for tuple match */
818         hash = hash_conntrack_raw(&tuple, zone);
819         h = __nf_conntrack_find_get(net, zone, &tuple, hash);
820         if (!h) {
821                 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
822                                    skb, dataoff, hash);
823                 if (!h)
824                         return NULL;
825                 if (IS_ERR(h))
826                         return (void *)h;
827         }
828         ct = nf_ct_tuplehash_to_ctrack(h);
829
830         /* It exists; we have (non-exclusive) reference. */
831         if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
832                 *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
833                 /* Please set reply bit if this packet OK */
834                 *set_reply = 1;
835         } else {
836                 /* Once we've had two way comms, always ESTABLISHED. */
837                 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
838                         pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
839                         *ctinfo = IP_CT_ESTABLISHED;
840                 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
841                         pr_debug("nf_conntrack_in: related packet for %p\n",
842                                  ct);
843                         *ctinfo = IP_CT_RELATED;
844                 } else {
845                         pr_debug("nf_conntrack_in: new packet for %p\n", ct);
846                         *ctinfo = IP_CT_NEW;
847                 }
848                 *set_reply = 0;
849         }
850         skb->nfct = &ct->ct_general;
851         skb->nfctinfo = *ctinfo;
852         return ct;
853 }
854
855 unsigned int
856 nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
857                 struct sk_buff *skb)
858 {
859         struct nf_conn *ct, *tmpl = NULL;
860         enum ip_conntrack_info ctinfo;
861         struct nf_conntrack_l3proto *l3proto;
862         struct nf_conntrack_l4proto *l4proto;
863         unsigned int dataoff;
864         u_int8_t protonum;
865         int set_reply = 0;
866         int ret;
867
868         if (skb->nfct) {
869                 /* Previously seen (loopback or untracked)?  Ignore. */
870                 tmpl = (struct nf_conn *)skb->nfct;
871                 if (!nf_ct_is_template(tmpl)) {
872                         NF_CT_STAT_INC_ATOMIC(net, ignore);
873                         return NF_ACCEPT;
874                 }
875                 skb->nfct = NULL;
876         }
877
878         /* rcu_read_lock()ed by nf_hook_slow */
879         l3proto = __nf_ct_l3proto_find(pf);
880         ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
881                                    &dataoff, &protonum);
882         if (ret <= 0) {
883                 pr_debug("not prepared to track yet or error occurred\n");
884                 NF_CT_STAT_INC_ATOMIC(net, error);
885                 NF_CT_STAT_INC_ATOMIC(net, invalid);
886                 ret = -ret;
887                 goto out;
888         }
889
890         l4proto = __nf_ct_l4proto_find(pf, protonum);
891
892         /* It may be a special packet, error, unclean...
893          * The inverse of the return code tells the netfilter
894          * core what to do with the packet. */
895         if (l4proto->error != NULL) {
896                 ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
897                                      pf, hooknum);
898                 if (ret <= 0) {
899                         NF_CT_STAT_INC_ATOMIC(net, error);
900                         NF_CT_STAT_INC_ATOMIC(net, invalid);
901                         ret = -ret;
902                         goto out;
903                 }
904         }
905
906         ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
907                                l3proto, l4proto, &set_reply, &ctinfo);
908         if (!ct) {
909                 /* Not valid part of a connection */
910                 NF_CT_STAT_INC_ATOMIC(net, invalid);
911                 ret = NF_ACCEPT;
912                 goto out;
913         }
914
915         if (IS_ERR(ct)) {
916                 /* Too stressed to deal. */
917                 NF_CT_STAT_INC_ATOMIC(net, drop);
918                 ret = NF_DROP;
919                 goto out;
920         }
921
922         NF_CT_ASSERT(skb->nfct);
923
924         ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
925         if (ret <= 0) {
926                 /* Invalid: inverse of the return code tells
927                  * the netfilter core what to do */
928                 pr_debug("nf_conntrack_in: Can't track with proto module\n");
929                 nf_conntrack_put(skb->nfct);
930                 skb->nfct = NULL;
931                 NF_CT_STAT_INC_ATOMIC(net, invalid);
932                 if (ret == -NF_DROP)
933                         NF_CT_STAT_INC_ATOMIC(net, drop);
934                 ret = -ret;
935                 goto out;
936         }
937
938         if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
939                 nf_conntrack_event_cache(IPCT_REPLY, ct);
940 out:
941         if (tmpl)
942                 nf_ct_put(tmpl);
943
944         return ret;
945 }
946 EXPORT_SYMBOL_GPL(nf_conntrack_in);
947
948 bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
949                           const struct nf_conntrack_tuple *orig)
950 {
951         bool ret;
952
953         rcu_read_lock();
954         ret = nf_ct_invert_tuple(inverse, orig,
955                                  __nf_ct_l3proto_find(orig->src.l3num),
956                                  __nf_ct_l4proto_find(orig->src.l3num,
957                                                       orig->dst.protonum));
958         rcu_read_unlock();
959         return ret;
960 }
961 EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
962
963 /* Alter reply tuple (maybe alter helper).  This is for NAT, and is
964    implicitly racy: see __nf_conntrack_confirm */
965 void nf_conntrack_alter_reply(struct nf_conn *ct,
966                               const struct nf_conntrack_tuple *newreply)
967 {
968         struct nf_conn_help *help = nfct_help(ct);
969
970         /* Should be unconfirmed, so not in hash table yet */
971         NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
972
973         pr_debug("Altering reply tuple of %p to ", ct);
974         nf_ct_dump_tuple(newreply);
975
976         ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
977         if (ct->master || (help && !hlist_empty(&help->expectations)))
978                 return;
979
980         rcu_read_lock();
981         __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
982         rcu_read_unlock();
983 }
984 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
985
986 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
987 void __nf_ct_refresh_acct(struct nf_conn *ct,
988                           enum ip_conntrack_info ctinfo,
989                           const struct sk_buff *skb,
990                           unsigned long extra_jiffies,
991                           int do_acct)
992 {
993         NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
994         NF_CT_ASSERT(skb);
995
996         /* Only update if this is not a fixed timeout */
997         if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
998                 goto acct;
999
1000         /* If not in hash table, timer will not be active yet */
1001         if (!nf_ct_is_confirmed(ct)) {
1002                 ct->timeout.expires = extra_jiffies;
1003         } else {
1004                 unsigned long newtime = jiffies + extra_jiffies;
1005
1006                 /* Only update the timeout if the new timeout is at least
1007                    HZ jiffies from the old timeout.  mod_timer_pending() is
1008                    used so a conntrack whose timer is no longer pending (it
1009                    may already be dying) is not resurrected. */
1009                 if (newtime - ct->timeout.expires >= HZ)
1010                         mod_timer_pending(&ct->timeout, newtime);
1011         }
1012
1013 acct:
1014         if (do_acct) {
1015                 struct nf_conn_counter *acct;
1016
1017                 acct = nf_conn_acct_find(ct);
1018                 if (acct) {
1019                         spin_lock_bh(&ct->lock);
1020                         acct[CTINFO2DIR(ctinfo)].packets++;
1021                         acct[CTINFO2DIR(ctinfo)].bytes += skb->len;
1022                         spin_unlock_bh(&ct->lock);
1023                 }
1024         }
1025 }
1026 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
1027
1028 bool __nf_ct_kill_acct(struct nf_conn *ct,
1029                        enum ip_conntrack_info ctinfo,
1030                        const struct sk_buff *skb,
1031                        int do_acct)
1032 {
1033         if (do_acct) {
1034                 struct nf_conn_counter *acct;
1035
1036                 acct = nf_conn_acct_find(ct);
1037                 if (acct) {
1038                         spin_lock_bh(&ct->lock);
1039                         acct[CTINFO2DIR(ctinfo)].packets++;
1040                         acct[CTINFO2DIR(ctinfo)].bytes +=
1041                                 skb->len - skb_network_offset(skb);
1042                         spin_unlock_bh(&ct->lock);
1043                 }
1044         }
1045
1046         if (del_timer(&ct->timeout)) {
1047                 ct->timeout.function((unsigned long)ct);
1048                 return true;
1049         }
1050         return false;
1051 }
1052 EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
1053
1054 #ifdef CONFIG_NF_CONNTRACK_ZONES
1055 static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
1056         .len    = sizeof(struct nf_conntrack_zone),
1057         .align  = __alignof__(struct nf_conntrack_zone),
1058         .id     = NF_CT_EXT_ZONE,
1059 };
1060 #endif
1061
1062 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
1063
1064 #include <linux/netfilter/nfnetlink.h>
1065 #include <linux/netfilter/nfnetlink_conntrack.h>
1066 #include <linux/mutex.h>
1067
1068 /* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
1069  * in nf_conntrack_core, since we don't want the protocols to autoload
1070  * or depend on ctnetlink */
1071 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1072                                const struct nf_conntrack_tuple *tuple)
1073 {
1074         NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
1075         NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
1076         return 0;
1077
1078 nla_put_failure:
1079         return -1;
1080 }
1081 EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
1082
1083 const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
1084         [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
1085         [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
1086 };
1087 EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
1088
1089 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
1090                                struct nf_conntrack_tuple *t)
1091 {
1092         if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
1093                 return -EINVAL;
1094
1095         t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
1096         t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
1097
1098         return 0;
1099 }
1100 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
1101
1102 int nf_ct_port_nlattr_tuple_size(void)
1103 {
1104         return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1105 }
1106 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
1107 #endif
1108
1109 /* Used by ipt_REJECT and ip6t_REJECT. */
1110 static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
1111 {
1112         struct nf_conn *ct;
1113         enum ip_conntrack_info ctinfo;
1114
1115         /* This ICMP is in the reverse direction of the packet which caused it */
1116         ct = nf_ct_get(skb, &ctinfo);
1117         if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1118                 ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
1119         else
1120                 ctinfo = IP_CT_RELATED;
1121
1122         /* Attach to new skbuff, and increment count */
1123         nskb->nfct = &ct->ct_general;
1124         nskb->nfctinfo = ctinfo;
1125         nf_conntrack_get(nskb->nfct);
1126 }
1127
1128 /* Bring out ya dead! */
1129 static struct nf_conn *
1130 get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1131                 void *data, unsigned int *bucket)
1132 {
1133         struct nf_conntrack_tuple_hash *h;
1134         struct nf_conn *ct;
1135         struct hlist_nulls_node *n;
1136
1137         spin_lock_bh(&nf_conntrack_lock);
1138         for (; *bucket < net->ct.htable_size; (*bucket)++) {
1139                 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
1140                         ct = nf_ct_tuplehash_to_ctrack(h);
1141                         if (iter(ct, data))
1142                                 goto found;
1143                 }
1144         }
1145         hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
1146                 ct = nf_ct_tuplehash_to_ctrack(h);
1147                 if (iter(ct, data))
1148                         set_bit(IPS_DYING_BIT, &ct->status);
1149         }
1150         spin_unlock_bh(&nf_conntrack_lock);
1151         return NULL;
1152 found:
1153         atomic_inc(&ct->ct_general.use);
1154         spin_unlock_bh(&nf_conntrack_lock);
1155         return ct;
1156 }
1157
1158 void nf_ct_iterate_cleanup(struct net *net,
1159                            int (*iter)(struct nf_conn *i, void *data),
1160                            void *data)
1161 {
1162         struct nf_conn *ct;
1163         unsigned int bucket = 0;
1164
1165         while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
1166                 /* Time to push up daisies... */
1167                 if (del_timer(&ct->timeout))
1168                         death_by_timeout((unsigned long)ct);
1169                 /* ... else the timer will get him soon. */
1170
1171                 nf_ct_put(ct);
1172         }
1173 }
1174 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
1175
1176 struct __nf_ct_flush_report {
1177         u32 pid;
1178         int report;
1179 };
1180
1181 static int kill_report(struct nf_conn *i, void *data)
1182 {
1183         struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
1184
1185         /* If we fail to deliver the event, death_by_timeout() will retry */
1186         if (nf_conntrack_event_report(IPCT_DESTROY, i,
1187                                       fr->pid, fr->report) < 0)
1188                 return 1;
1189
1190         /* Avoid the delivery of the destroy event in death_by_timeout(). */
1191         set_bit(IPS_DYING_BIT, &i->status);
1192         return 1;
1193 }
1194
1195 static int kill_all(struct nf_conn *i, void *data)
1196 {
1197         return 1;
1198 }
1199
1200 void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
1201 {
1202         if (vmalloced)
1203                 vfree(hash);
1204         else
1205                 free_pages((unsigned long)hash,
1206                            get_order(sizeof(struct hlist_head) * size));
1207 }
1208 EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
1209
1210 void nf_conntrack_flush_report(struct net *net, u32 pid, int report)
1211 {
1212         struct __nf_ct_flush_report fr = {
1213                 .pid    = pid,
1214                 .report = report,
1215         };
1216         nf_ct_iterate_cleanup(net, kill_report, &fr);
1217 }
1218 EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
1219
1220 static void nf_ct_release_dying_list(struct net *net)
1221 {
1222         struct nf_conntrack_tuple_hash *h;
1223         struct nf_conn *ct;
1224         struct hlist_nulls_node *n;
1225
1226         spin_lock_bh(&nf_conntrack_lock);
1227         hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) {
1228                 ct = nf_ct_tuplehash_to_ctrack(h);
1229                 /* never fails to remove them, no listeners at this point */
1230                 nf_ct_kill(ct);
1231         }
1232         spin_unlock_bh(&nf_conntrack_lock);
1233 }
1234
1235 static int untrack_refs(void)
1236 {
1237         int cnt = 0, cpu;
1238
1239         for_each_possible_cpu(cpu) {
1240                 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1241
1242                 cnt += atomic_read(&ct->ct_general.use) - 1;
1243         }
1244         return cnt;
1245 }
1246
1247 static void nf_conntrack_cleanup_init_net(void)
1248 {
1249         while (untrack_refs() > 0)
1250                 schedule();
1251
1252         nf_conntrack_helper_fini();
1253         nf_conntrack_proto_fini();
1254 #ifdef CONFIG_NF_CONNTRACK_ZONES
1255         nf_ct_extend_unregister(&nf_ct_zone_extend);
1256 #endif
1257 }
1258
1259 static void nf_conntrack_cleanup_net(struct net *net)
1260 {
1261  i_see_dead_people:
1262         nf_ct_iterate_cleanup(net, kill_all, NULL);
1263         nf_ct_release_dying_list(net);
1264         if (atomic_read(&net->ct.count) != 0) {
1265                 schedule();
1266                 goto i_see_dead_people;
1267         }
1268
1269         nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1270                              net->ct.htable_size);
1271         nf_conntrack_ecache_fini(net);
1272         nf_conntrack_acct_fini(net);
1273         nf_conntrack_expect_fini(net);
1274         kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1275         kfree(net->ct.slabname);
1276         free_percpu(net->ct.stat);
1277 }
1278
1279 /* Mishearing the voices in his head, our hero wonders how he's
1280    supposed to kill the mall. */
1281 void nf_conntrack_cleanup(struct net *net)
1282 {
1283         if (net_eq(net, &init_net))
1284                 rcu_assign_pointer(ip_ct_attach, NULL);
1285
1286         /* This makes sure all current packets have passed through
1287            netfilter framework.  Roll on, two-stage module
1288            delete... */
1289         synchronize_net();
1290
1291         nf_conntrack_cleanup_net(net);
1292
1293         if (net_eq(net, &init_net)) {
1294                 rcu_assign_pointer(nf_ct_destroy, NULL);
1295                 nf_conntrack_cleanup_init_net();
1296         }
1297 }
1298
1299 void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
1300 {
1301         struct hlist_nulls_head *hash;
1302         unsigned int nr_slots, i;
1303         size_t sz;
1304
1305         *vmalloced = 0;
1306
1307         BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
1308         nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
1309         sz = nr_slots * sizeof(struct hlist_nulls_head);
1310         hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1311                                         get_order(sz));
1312         if (!hash) {
1313                 *vmalloced = 1;
1314                 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
1315                 hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
1316                                  PAGE_KERNEL);
1317         }
1318
1319         if (hash && nulls)
1320                 for (i = 0; i < nr_slots; i++)
1321                         INIT_HLIST_NULLS_HEAD(&hash[i], i);
1322
1323         return hash;
1324 }
1325 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
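
/*
 * Sizing illustration: the requested size is rounded up to a whole number
 * of pages worth of buckets.  On a 64-bit machine with 4 KB pages and
 * 8-byte struct hlist_nulls_head entries that is a multiple of 512 buckets,
 * so asking for 600 buckets, for instance, yields 1024.  The allocation
 * first tries __get_free_pages() and falls back to vmalloc() (reported via
 * *vmalloced) when enough contiguous pages are not available.
 */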
1326
1327 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1328 {
1329         int i, bucket, vmalloced, old_vmalloced;
1330         unsigned int hashsize, old_size;
1331         struct hlist_nulls_head *hash, *old_hash;
1332         struct nf_conntrack_tuple_hash *h;
1333         struct nf_conn *ct;
1334
1335         if (current->nsproxy->net_ns != &init_net)
1336                 return -EOPNOTSUPP;
1337
1338         /* On boot, we can set this without any fancy locking. */
1339         if (!nf_conntrack_htable_size)
1340                 return param_set_uint(val, kp);
1341
1342         hashsize = simple_strtoul(val, NULL, 0);
1343         if (!hashsize)
1344                 return -EINVAL;
1345
1346         hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
1347         if (!hash)
1348                 return -ENOMEM;
1349
1350         /* Lookups in the old hash might happen in parallel, which means we
1351          * might get false negatives during connection lookup. New connections
1352          * created because of a false negative won't make it into the hash
1353          * though since that requires taking the lock.
1354          */
1355         spin_lock_bh(&nf_conntrack_lock);
1356         for (i = 0; i < init_net.ct.htable_size; i++) {
1357                 while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
1358                         h = hlist_nulls_entry(init_net.ct.hash[i].first,
1359                                         struct nf_conntrack_tuple_hash, hnnode);
1360                         ct = nf_ct_tuplehash_to_ctrack(h);
1361                         hlist_nulls_del_rcu(&h->hnnode);
1362                         bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
1363                                                   hashsize);
1364                         hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1365                 }
1366         }
1367         old_size = init_net.ct.htable_size;
1368         old_vmalloced = init_net.ct.hash_vmalloc;
1369         old_hash = init_net.ct.hash;
1370
1371         init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
1372         init_net.ct.hash_vmalloc = vmalloced;
1373         init_net.ct.hash = hash;
1374         spin_unlock_bh(&nf_conntrack_lock);
1375
1376         nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
1377         return 0;
1378 }
1379 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
1380
1381 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
1382                   &nf_conntrack_htable_size, 0600);
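
/*
 * Usage note: thanks to the 0600 module_param_call() above, the hash size
 * can be changed at runtime by root, typically with something like
 *
 *   echo 16384 > /sys/module/nf_conntrack/parameters/hashsize
 *
 * which ends up in nf_conntrack_set_hashsize(): a new table is allocated,
 * all entries are rehashed under nf_conntrack_lock and the old table is
 * freed.  As checked in that function, this only works from the initial
 * network namespace.
 */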
1383
1384 void nf_ct_untracked_status_or(unsigned long bits)
1385 {
1386         int cpu;
1387
1388         for_each_possible_cpu(cpu)
1389                 per_cpu(nf_conntrack_untracked, cpu).status |= bits;
1390 }
1391 EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
1392
1393 static int nf_conntrack_init_init_net(void)
1394 {
1395         int max_factor = 8;
1396         int ret, cpu;
1397
1398         /* Idea from tcp.c: use 1/16384 of memory.  On i386, a 32MB
1399          * machine has 512 buckets; >= 1GB machines have 16384 buckets. */
1400         if (!nf_conntrack_htable_size) {
1401                 nf_conntrack_htable_size
1402                         = (((totalram_pages << PAGE_SHIFT) / 16384)
1403                            / sizeof(struct hlist_head));
1404                 if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
1405                         nf_conntrack_htable_size = 16384;
1406                 if (nf_conntrack_htable_size < 32)
1407                         nf_conntrack_htable_size = 32;
1408
1409                 /* Use a max. factor of four by default to get the same max as
1410                  * with the old struct list_heads. When a table size is given
1411                  * we use the old value of 8 to avoid reducing the max.
1412                  * entries. */
1413                 max_factor = 4;
1414         }
1415         nf_conntrack_max = max_factor * nf_conntrack_htable_size;
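
        /*
         * Worked example: on a 64-bit machine with 1 GB of RAM and 4 KB
         * pages, (totalram_pages << PAGE_SHIFT) / 16384 is 65536 bytes,
         * which divided by an 8-byte struct hlist_head gives 8192 buckets,
         * so nf_conntrack_max defaults to 4 * 8192 = 32768 entries; machines
         * with more than 1 GB are clamped to 16384 buckets (65536 entries).
         */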
1416
1417         printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
1418                NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1419                nf_conntrack_max);
1420
1421         ret = nf_conntrack_proto_init();
1422         if (ret < 0)
1423                 goto err_proto;
1424
1425         ret = nf_conntrack_helper_init();
1426         if (ret < 0)
1427                 goto err_helper;
1428
1429 #ifdef CONFIG_NF_CONNTRACK_ZONES
1430         ret = nf_ct_extend_register(&nf_ct_zone_extend);
1431         if (ret < 0)
1432                 goto err_extend;
1433 #endif
1434         /* Set up fake conntrack: to never be deleted, not in any hashes */
1435         for_each_possible_cpu(cpu) {
1436                 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1437                 write_pnet(&ct->ct_net, &init_net);
1438                 atomic_set(&ct->ct_general.use, 1);
1439         }
1440         /*  - and make it look like a confirmed connection */
1441         nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
1442         return 0;
1443
1444 #ifdef CONFIG_NF_CONNTRACK_ZONES
1445 err_extend:
1446         nf_conntrack_helper_fini();
1447 #endif
1448 err_helper:
1449         nf_conntrack_proto_fini();
1450 err_proto:
1451         return ret;
1452 }
1453
1454 /*
1455  * We need to use special "null" values, not used in the hash table
1456  */
1457 #define UNCONFIRMED_NULLS_VAL   ((1<<30)+0)
1458 #define DYING_NULLS_VAL         ((1<<30)+1)
1459
1460 static int nf_conntrack_init_net(struct net *net)
1461 {
1462         int ret;
1463
1464         atomic_set(&net->ct.count, 0);
1465         INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
1466         INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
1467         net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
1468         if (!net->ct.stat) {
1469                 ret = -ENOMEM;
1470                 goto err_stat;
1471         }
1472
1473         net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
1474         if (!net->ct.slabname) {
1475                 ret = -ENOMEM;
1476                 goto err_slabname;
1477         }
1478
1479         net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
1480                                                         sizeof(struct nf_conn), 0,
1481                                                         SLAB_DESTROY_BY_RCU, NULL);
1482         if (!net->ct.nf_conntrack_cachep) {
1483                 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1484                 ret = -ENOMEM;
1485                 goto err_cache;
1486         }
1487
1488         net->ct.htable_size = nf_conntrack_htable_size;
1489         net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
1490                                              &net->ct.hash_vmalloc, 1);
1491         if (!net->ct.hash) {
1492                 ret = -ENOMEM;
1493                 printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
1494                 goto err_hash;
1495         }
1496         ret = nf_conntrack_expect_init(net);
1497         if (ret < 0)
1498                 goto err_expect;
1499         ret = nf_conntrack_acct_init(net);
1500         if (ret < 0)
1501                 goto err_acct;
1502         ret = nf_conntrack_ecache_init(net);
1503         if (ret < 0)
1504                 goto err_ecache;
1505
1506         return 0;
1507
1508 err_ecache:
1509         nf_conntrack_acct_fini(net);
1510 err_acct:
1511         nf_conntrack_expect_fini(net);
1512 err_expect:
1513         nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1514                              net->ct.htable_size);
1515 err_hash:
1516         kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1517 err_cache:
1518         kfree(net->ct.slabname);
1519 err_slabname:
1520         free_percpu(net->ct.stat);
1521 err_stat:
1522         return ret;
1523 }
1524
1525 s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
1526                         enum ip_conntrack_dir dir,
1527                         u32 seq);
1528 EXPORT_SYMBOL_GPL(nf_ct_nat_offset);
1529
1530 int nf_conntrack_init(struct net *net)
1531 {
1532         int ret;
1533
1534         if (net_eq(net, &init_net)) {
1535                 ret = nf_conntrack_init_init_net();
1536                 if (ret < 0)
1537                         goto out_init_net;
1538         }
1539         ret = nf_conntrack_init_net(net);
1540         if (ret < 0)
1541                 goto out_net;
1542
1543         if (net_eq(net, &init_net)) {
1544                 /* For use by REJECT target */
1545                 rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
1546                 rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);
1547
1548                 /* How to get NAT offsets */
1549                 rcu_assign_pointer(nf_ct_nat_offset, NULL);
1550         }
1551         return 0;
1552
1553 out_net:
1554         if (net_eq(net, &init_net))
1555                 nf_conntrack_cleanup_init_net();
1556 out_init_net:
1557         return ret;
1558 }