/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   connection tracking module. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * 23 Apr 2001: Harald Welte <laforge@gnumonks.org>
 *	- new API and handling of conntrack/nat helpers
 *	- now capable of multiple expectations for one master
 * 16 Jul 2002: Harald Welte <laforge@gnumonks.org>
 *	- add usage/reference counts to ip_conntrack_expect
 *	- export ip_conntrack[_expect]_{find_get,put} functions
 * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *	- generalize L3 protocol dependent part.
 * 23 Mar 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *	- add support for various sizes of conntrack structures.
 * 26 Jan 2006: Harald Welte <laforge@netfilter.org>
 *	- restructure nf_conn (introduce nf_conn_help)
 *	- redesign 'features' how they were originally intended
 *
 * Derived from net/ipv4/netfilter/ip_conntrack_core.c
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
/* This rwlock protects the main hash table, protocol/helper/expected
   registrations, conntrack timers */
#define ASSERT_READ_LOCK(x)
#define ASSERT_WRITE_LOCK(x)

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_protocol.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <linux/netfilter_ipv4/listhelp.h>

#define NF_CONNTRACK_VERSION	"0.5.0"
#if 0
#define DEBUGP printk
#else
#define DEBUGP(format, args...)
#endif
DEFINE_RWLOCK(nf_conntrack_lock);

/* nf_conntrack_standalone needs this */
atomic_t nf_conntrack_count = ATOMIC_INIT(0);

void (*nf_conntrack_destroyed)(struct nf_conn *conntrack) = NULL;
LIST_HEAD(nf_conntrack_expect_list);
struct nf_conntrack_protocol **nf_ct_protos[PF_MAX];
struct nf_conntrack_l3proto *nf_ct_l3protos[PF_MAX];
static LIST_HEAD(helpers);
unsigned int nf_conntrack_htable_size = 0;
int nf_conntrack_max;
struct list_head *nf_conntrack_hash;
static kmem_cache_t *nf_conntrack_expect_cachep;
struct nf_conn nf_conntrack_untracked;
unsigned int nf_ct_log_invalid;
static LIST_HEAD(unconfirmed);
static int nf_conntrack_vmalloc;

static unsigned int nf_conntrack_next_id = 1;
static unsigned int nf_conntrack_expect_next_id = 1;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
struct notifier_block *nf_conntrack_chain;
struct notifier_block *nf_conntrack_expect_chain;

DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);

/* deliver cached events and clear cache entry - must be called with locally
 * disabled softirqs */
static inline void
__nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
{
	DEBUGP("ecache: delivering events for %p\n", ecache->ct);
	if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct)
	    && ecache->events)
		notifier_call_chain(&nf_conntrack_chain, ecache->events,
				    ecache->ct);
	ecache->events = 0;
	nf_ct_put(ecache->ct);
	ecache->ct = NULL;
}
/* Deliver all cached events for a particular conntrack. This is called
 * by code prior to async packet handling for freeing the skb */
void nf_ct_deliver_cached_events(const struct nf_conn *ct)
{
	struct nf_conntrack_ecache *ecache;

	local_bh_disable();
	ecache = &__get_cpu_var(nf_conntrack_ecache);
	if (ecache->ct == ct)
		__nf_ct_deliver_cached_events(ecache);
	local_bh_enable();
}
/* Deliver cached events for old pending events, if current conntrack != old */
void __nf_ct_event_cache_init(struct nf_conn *ct)
{
	struct nf_conntrack_ecache *ecache;

	/* take care of delivering potentially old events */
	ecache = &__get_cpu_var(nf_conntrack_ecache);
	BUG_ON(ecache->ct == ct);
	if (ecache->ct)
		__nf_ct_deliver_cached_events(ecache);
	/* initialize for this conntrack/packet */
	ecache->ct = ct;
	nf_conntrack_get(&ct->ct_general);
}
/* flush the event cache - touches other CPU's data and must not be called
 * while packets are still passing through the code */
static void nf_ct_event_cache_flush(void)
{
	struct nf_conntrack_ecache *ecache;
	int cpu;

	for_each_cpu(cpu) {
		ecache = &per_cpu(nf_conntrack_ecache, cpu);
		if (ecache->ct)
			nf_ct_put(ecache->ct);
	}
}
#else /* CONFIG_NF_CONNTRACK_EVENTS */
static inline void nf_ct_event_cache_flush(void) {}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */
DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);
/*
 * This scheme offers various sizes of "struct nf_conn" depending on
 * features (helper, nat, ...)
 */

#define NF_CT_FEATURES_NAMELEN	256
static struct {
	/* name of slab cache. printed in /proc/slabinfo */
	char *name;
	/* size of slab cache */
	size_t size;
	/* slab cache pointer */
	kmem_cache_t *cachep;
	/* allocated slab cache + modules which use this slab cache */
	int use;
	/* Initialization */
	int (*init_conntrack)(struct nf_conn *, u_int32_t);
} nf_ct_cache[NF_CT_F_NUM];
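
/*
 * For example, a connection that needs no extra features is allocated
 * from the NF_CT_F_BASIC cache as a plain struct nf_conn, while one
 * whose reply tuple matches a helper gets NF_CT_F_HELP OR'ed in by
 * __nf_conntrack_alloc() below and comes from a larger cache that
 * appends struct nf_conn_help (see nf_conntrack_helper_register()).
 * The feature bits index this array directly, one slab per combination.
 */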
/* protect members of nf_ct_cache except "use" */
DEFINE_RWLOCK(nf_ct_cache_lock);

/* This avoids calling kmem_cache_create() with same name simultaneously */
DECLARE_MUTEX(nf_ct_cache_mutex);
extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;
struct nf_conntrack_protocol *
__nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol)
{
	if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL))
		return &nf_conntrack_generic_protocol;

	return nf_ct_protos[l3proto][protocol];
}

/* this is guaranteed to always return a valid protocol helper, since
 * it falls back to generic_protocol */
struct nf_conntrack_protocol *
nf_ct_proto_find_get(u_int16_t l3proto, u_int8_t protocol)
{
	struct nf_conntrack_protocol *p;

	preempt_disable();
	p = __nf_ct_proto_find(l3proto, protocol);
	if (!try_module_get(p->me))
		p = &nf_conntrack_generic_protocol;
	preempt_enable();

	return p;
}

void nf_ct_proto_put(struct nf_conntrack_protocol *p)
{
	module_put(p->me);
}
struct nf_conntrack_l3proto *
nf_ct_l3proto_find_get(u_int16_t l3proto)
{
	struct nf_conntrack_l3proto *p;

	preempt_disable();
	p = __nf_ct_l3proto_find(l3proto);
	if (!try_module_get(p->me))
		p = &nf_conntrack_generic_l3proto;
	preempt_enable();

	return p;
}

void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p)
{
	module_put(p->me);
}
static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
				  unsigned int size, unsigned int rnd)
{
	unsigned int a, b;
	a = jhash((void *)tuple->src.u3.all, sizeof(tuple->src.u3.all),
		  ((tuple->src.l3num) << 16) | tuple->dst.protonum);
	b = jhash((void *)tuple->dst.u3.all, sizeof(tuple->dst.u3.all),
		  (tuple->src.u.all << 16) | tuple->dst.u.all);

	return jhash_2words(a, b, rnd) % size;
}

static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
	return __hash_conntrack(tuple, nf_conntrack_htable_size,
				nf_conntrack_hash_rnd);
}
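
/*
 * Hashing sketch: the two L3 addresses are folded with jhash, keyed by
 * (l3num << 16 | protonum) and (src port << 16 | dst port) respectively,
 * then mixed with a boot-time random seed so that remote hosts cannot
 * deliberately collide connections into one bucket.  For example, a TCP
 * tuple 192.0.2.1:1234 -> 198.51.100.2:80 (illustrative addresses) yields
 * a and b above, and jhash_2words(a, b, rnd) % size picks the bucket.
 * set_hashsize() below reuses __hash_conntrack() with a fresh seed when
 * the table is resized at runtime.
 */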
int nf_conntrack_register_cache(u_int32_t features, const char *name,
				size_t size)
{
	int ret = 0;
	char *cache_name;
	kmem_cache_t *cachep;

	DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n",
	       features, name, size);

	if (features < NF_CT_F_BASIC || features >= NF_CT_F_NUM) {
		DEBUGP("nf_conntrack_register_cache: invalid features: 0x%x\n",
			features);
		return -EINVAL;
	}

	down(&nf_ct_cache_mutex);

	write_lock_bh(&nf_ct_cache_lock);
	/* e.g: multiple helpers are loaded */
	if (nf_ct_cache[features].use > 0) {
		DEBUGP("nf_conntrack_register_cache: already registered.\n");
		if ((!strncmp(nf_ct_cache[features].name, name,
			      NF_CT_FEATURES_NAMELEN))
		    && nf_ct_cache[features].size == size) {
			DEBUGP("nf_conntrack_register_cache: reusing.\n");
			nf_ct_cache[features].use++;
			ret = 0;
		} else
			ret = -EBUSY;
		write_unlock_bh(&nf_ct_cache_lock);
		up(&nf_ct_cache_mutex);
		return ret;
	}
	write_unlock_bh(&nf_ct_cache_lock);

	/*
	 * The memory space for name of slab cache must be alive until
	 * cache is destroyed.
	 */
	cache_name = kmalloc(sizeof(char)*NF_CT_FEATURES_NAMELEN, GFP_ATOMIC);
	if (cache_name == NULL) {
		DEBUGP("nf_conntrack_register_cache: can't alloc cache_name\n");
		ret = -ENOMEM;
		goto out_up_mutex;
	}

	if (strlcpy(cache_name, name, NF_CT_FEATURES_NAMELEN)
						>= NF_CT_FEATURES_NAMELEN) {
		printk("nf_conntrack_register_cache: name too long\n");
		ret = -EINVAL;
		goto out_free_name;
	}

	cachep = kmem_cache_create(cache_name, size, 0, 0, NULL, NULL);
	if (!cachep) {
		printk("nf_conntrack_register_cache: Can't create slab cache "
		       "for the features = 0x%x\n", features);
		ret = -ENOMEM;
		goto out_free_name;
	}

	write_lock_bh(&nf_ct_cache_lock);
	nf_ct_cache[features].use = 1;
	nf_ct_cache[features].size = size;
	nf_ct_cache[features].cachep = cachep;
	nf_ct_cache[features].name = cache_name;
	write_unlock_bh(&nf_ct_cache_lock);

	goto out_up_mutex;

out_free_name:
	kfree(cache_name);
out_up_mutex:
	up(&nf_ct_cache_mutex);
	return ret;
}
/* FIXME: Currently, only nf_conntrack_cleanup() can call this function. */
void nf_conntrack_unregister_cache(u_int32_t features)
{
	kmem_cache_t *cachep;
	char *name;

	/*
	 * This assures that kmem_cache_create() isn't called before destroying
	 * slab cache.
	 */
	DEBUGP("nf_conntrack_unregister_cache: 0x%04x\n", features);
	down(&nf_ct_cache_mutex);

	write_lock_bh(&nf_ct_cache_lock);
	if (--nf_ct_cache[features].use > 0) {
		write_unlock_bh(&nf_ct_cache_lock);
		up(&nf_ct_cache_mutex);
		return;
	}
	cachep = nf_ct_cache[features].cachep;
	name = nf_ct_cache[features].name;
	nf_ct_cache[features].cachep = NULL;
	nf_ct_cache[features].name = NULL;
	nf_ct_cache[features].size = 0;
	write_unlock_bh(&nf_ct_cache_lock);

	synchronize_net();

	kmem_cache_destroy(cachep);
	kfree(name);

	up(&nf_ct_cache_mutex);
}
int
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_l3proto *l3proto,
		const struct nf_conntrack_protocol *protocol)
{
	NF_CT_TUPLE_U_BLANK(tuple);

	tuple->src.l3num = l3num;
	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
		return 0;

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return protocol->pkt_to_tuple(skb, dataoff, tuple);
}
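
/*
 * Example: for a TCP SYN 192.0.2.1:1234 -> 198.51.100.2:80 (illustrative
 * values), the IPv4 l3proto fills in the addresses and the TCP protocol
 * module fills in the ports, yielding roughly:
 *
 *	tuple = { src = 192.0.2.1:1234, dst = 198.51.100.2:80,
 *		  l3num = AF_INET, protonum = IPPROTO_TCP,
 *		  dir = IP_CT_DIR_ORIGINAL }
 */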
int
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_l3proto *l3proto,
		   const struct nf_conntrack_protocol *protocol)
{
	NF_CT_TUPLE_U_BLANK(inverse);

	inverse->src.l3num = orig->src.l3num;
	if (l3proto->invert_tuple(inverse, orig) == 0)
		return 0;

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;
	return protocol->invert_tuple(inverse, orig);
}
/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);

	NF_CT_ASSERT(master_help);
	ASSERT_WRITE_LOCK(&nf_conntrack_lock);
	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	list_del(&exp->list);
	NF_CT_STAT_INC(expect_delete);
	master_help->expecting--;
	nf_conntrack_expect_put(exp);
}
static void expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	write_lock_bh(&nf_conntrack_lock);
	nf_ct_unlink_expect(exp);
	write_unlock_bh(&nf_conntrack_lock);
	nf_conntrack_expect_put(exp);
}
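
/*
 * Note the two reference drops on expiry: nf_ct_unlink_expect() puts the
 * reference held by the global expectation list, and the explicit
 * nf_conntrack_expect_put() here drops the one held by the timer itself
 * (both were taken in nf_conntrack_expect_insert() below).
 */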
struct nf_conntrack_expect *
__nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	list_for_each_entry(i, &nf_conntrack_expect_list, list) {
		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) {
			atomic_inc(&i->use);
			return i;
		}
	}
	return NULL;
}

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	read_lock_bh(&nf_conntrack_lock);
	i = __nf_conntrack_expect_find(tuple);
	read_unlock_bh(&nf_conntrack_lock);

	return i;
}
/* If an expectation for this connection is found, it gets deleted from
 * the global list then returned. */
static struct nf_conntrack_expect *
find_expectation(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	list_for_each_entry(i, &nf_conntrack_expect_list, list) {
		/* If master is not in hash table yet (ie. packet hasn't left
		   this machine yet), how can other end know about expected?
		   Hence these are not the droids you are looking for (if
		   master ct never got confirmed, we'd hold a reference to it
		   and weird things would happen to future packets). */
		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)
		    && nf_ct_is_confirmed(i->master)) {
			if (i->flags & NF_CT_EXPECT_PERMANENT) {
				atomic_inc(&i->use);
				return i;
			} else if (del_timer(&i->timeout)) {
				nf_ct_unlink_expect(i);
				return i;
			}
		}
	}
	return NULL;
}
/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conntrack_expect *i, *tmp;
	struct nf_conn_help *help = nfct_help(ct);

	/* Optimization: most connections never expect any others. */
	if (!help || help->expecting == 0)
		return;

	list_for_each_entry_safe(i, tmp, &nf_conntrack_expect_list, list) {
		if (i->master == ct && del_timer(&i->timeout)) {
			nf_ct_unlink_expect(i);
			nf_conntrack_expect_put(i);
		}
	}
}
static void
clean_from_lists(struct nf_conn *ct)
{
	unsigned int ho, hr;

	DEBUGP("clean_from_lists(%p)\n", ct);
	ASSERT_WRITE_LOCK(&nf_conntrack_lock);

	ho = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	hr = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
	LIST_DELETE(&nf_conntrack_hash[ho], &ct->tuplehash[IP_CT_DIR_ORIGINAL]);
	LIST_DELETE(&nf_conntrack_hash[hr], &ct->tuplehash[IP_CT_DIR_REPLY]);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}
static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_protocol *proto;

	DEBUGP("destroy_conntrack(%p)\n", ct);
	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
	NF_CT_ASSERT(!timer_pending(&ct->timeout));

	nf_conntrack_event(IPCT_DESTROY, ct);
	set_bit(IPS_DYING_BIT, &ct->status);

	/* To make sure we don't get any weird locking issues here:
	 * destroy_conntrack() MUST NOT be called with a write lock
	 * to nf_conntrack_lock!!! -HW */
	l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num);
	if (l3proto && l3proto->destroy)
		l3proto->destroy(ct);

	proto = __nf_ct_proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num,
				   ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
	if (proto && proto->destroy)
		proto->destroy(ct);

	if (nf_conntrack_destroyed)
		nf_conntrack_destroyed(ct);

	write_lock_bh(&nf_conntrack_lock);
	/* Expectations will have been removed in clean_from_lists,
	 * except TFTP can create an expectation on the first packet,
	 * before connection is in the list, so we need to clean here,
	 * too. */
	nf_ct_remove_expectations(ct);

	/* We overload first tuple to link into unconfirmed list. */
	if (!nf_ct_is_confirmed(ct)) {
		BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list));
		list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
	}

	NF_CT_STAT_INC(delete);
	write_unlock_bh(&nf_conntrack_lock);

	if (ct->master)
		nf_ct_put(ct->master);

	DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}
static void death_by_timeout(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;

	write_lock_bh(&nf_conntrack_lock);
	/* Inside lock so preempt is disabled on module removal path.
	 * Otherwise we can get spurious warnings. */
	NF_CT_STAT_INC(delete_list);
	clean_from_lists(ct);
	write_unlock_bh(&nf_conntrack_lock);
	nf_ct_put(ct);
}
static inline int
conntrack_tuple_cmp(const struct nf_conntrack_tuple_hash *i,
		    const struct nf_conntrack_tuple *tuple,
		    const struct nf_conn *ignored_conntrack)
{
	ASSERT_READ_LOCK(&nf_conntrack_lock);
	return nf_ct_tuplehash_to_ctrack(i) != ignored_conntrack
		&& nf_ct_tuple_equal(tuple, &i->tuple);
}

struct nf_conntrack_tuple_hash *
__nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
		    const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple_hash *h;
	unsigned int hash = hash_conntrack(tuple);

	ASSERT_READ_LOCK(&nf_conntrack_lock);
	list_for_each_entry(h, &nf_conntrack_hash[hash], list) {
		if (conntrack_tuple_cmp(h, tuple, ignored_conntrack)) {
			NF_CT_STAT_INC(found);
			return h;
		}
		NF_CT_STAT_INC(searched);
	}

	return NULL;
}
/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple,
		      const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple_hash *h;

	read_lock_bh(&nf_conntrack_lock);
	h = __nf_conntrack_find(tuple, ignored_conntrack);
	if (h)
		atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
	read_unlock_bh(&nf_conntrack_lock);

	return h;
}
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int repl_hash)
{
	ct->id = ++nf_conntrack_next_id;
	list_prepend(&nf_conntrack_hash[hash],
		     &ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
	list_prepend(&nf_conntrack_hash[repl_hash],
		     &ct->tuplehash[IP_CT_DIR_REPLY].list);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
	unsigned int hash, repl_hash;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	write_lock_bh(&nf_conntrack_lock);
	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	write_unlock_bh(&nf_conntrack_lock);
}
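
/*
 * A confirmed connection therefore occupies two hash buckets: one for
 * the ORIGINAL tuple and one for the REPLY tuple, so a single lookup
 * finds it regardless of the direction a packet travels.
 */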
/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff **pskb)
{
	unsigned int hash, repl_hash;
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	ct = nf_ct_get(*pskb, &ctinfo);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in other direction.  Actual packet
	   which created connection will be IP_CT_NEW or for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	/* We're not in hash table, and we refuse to set up related
	   connections for unconfirmed conns.  But packet copies and
	   REJECT will give spurious warnings here. */
	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

	/* No external references means no one else could have
	   confirmed us. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
	DEBUGP("Confirming conntrack %p\n", ct);

	write_lock_bh(&nf_conntrack_lock);

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash.  If there is, we lost race. */
	if (!LIST_FIND(&nf_conntrack_hash[hash],
		       conntrack_tuple_cmp,
		       struct nf_conntrack_tuple_hash *,
		       &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, NULL)
	    && !LIST_FIND(&nf_conntrack_hash[repl_hash],
			  conntrack_tuple_cmp,
			  struct nf_conntrack_tuple_hash *,
			  &ct->tuplehash[IP_CT_DIR_REPLY].tuple, NULL)) {
		struct nf_conn_help *help;
		/* Remove from unconfirmed list */
		list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);

		__nf_conntrack_hash_insert(ct, hash, repl_hash);
		/* Timer relative to confirmation time, not original
		   setting time, otherwise we'd get timer wrap in
		   weird delay cases. */
		ct->timeout.expires += jiffies;
		add_timer(&ct->timeout);
		atomic_inc(&ct->ct_general.use);
		set_bit(IPS_CONFIRMED_BIT, &ct->status);
		NF_CT_STAT_INC(insert);
		write_unlock_bh(&nf_conntrack_lock);
		help = nfct_help(ct);
		if (help && help->helper)
			nf_conntrack_event_cache(IPCT_HELPER, *pskb);
#ifdef CONFIG_NF_NAT_NEEDED
		if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
		    test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
			nf_conntrack_event_cache(IPCT_NATINFO, *pskb);
#endif
		nf_conntrack_event_cache(master_ct(ct) ?
					 IPCT_RELATED : IPCT_NEW, *pskb);
		return NF_ACCEPT;
	}

	NF_CT_STAT_INC(insert_failed);
	write_unlock_bh(&nf_conntrack_lock);
	return NF_DROP;
}
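
/*
 * Confirmation is deliberately deferred until the packet has traversed
 * the other hooks (and NAT has possibly rewritten the reply tuple):
 * only here do we know the final tuples, which is why the hash insertion
 * and the race check against both directions happen now rather than at
 * allocation time in init_conntrack().
 */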
/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple_hash *h;

	read_lock_bh(&nf_conntrack_lock);
	h = __nf_conntrack_find(tuple, ignored_conntrack);
	read_unlock_bh(&nf_conntrack_lock);

	return h != NULL;
}
/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static inline int unreplied(const struct nf_conntrack_tuple_hash *i)
{
	return !(test_bit(IPS_ASSURED_BIT,
			  &nf_ct_tuplehash_to_ctrack(i)->status));
}

static int early_drop(struct list_head *chain)
{
	/* Traverse backwards: gives us oldest, which is roughly LRU */
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL;
	int dropped = 0;

	read_lock_bh(&nf_conntrack_lock);
	h = LIST_FIND_B(chain, unreplied, struct nf_conntrack_tuple_hash *);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		atomic_inc(&ct->ct_general.use);
	}
	read_unlock_bh(&nf_conntrack_lock);

	if (!ct)
		return dropped;

	if (del_timer(&ct->timeout)) {
		death_by_timeout((unsigned long)ct);
		dropped = 1;
		NF_CT_STAT_INC(early_drop);
	}
	nf_ct_put(ct);
	return dropped;
}
static inline int helper_cmp(const struct nf_conntrack_helper *i,
			     const struct nf_conntrack_tuple *rtuple)
{
	return nf_ct_tuple_mask_cmp(rtuple, &i->tuple, &i->mask);
}

static struct nf_conntrack_helper *
__nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
{
	return LIST_FIND(&helpers, helper_cmp,
			 struct nf_conntrack_helper *,
			 tuple);
}

struct nf_conntrack_helper *
nf_ct_helper_find_get(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_helper *helper;

	/* need nf_conntrack_lock to assure that helper exists until
	 * try_module_get() is called */
	read_lock_bh(&nf_conntrack_lock);

	helper = __nf_ct_helper_find(tuple);
	if (helper) {
		/* need to increase module usage count to assure helper will
		 * not go away while the caller is e.g. busy putting a
		 * conntrack in the hash that uses the helper */
		if (!try_module_get(helper->me))
			helper = NULL;
	}

	read_unlock_bh(&nf_conntrack_lock);

	return helper;
}

void nf_ct_helper_put(struct nf_conntrack_helper *helper)
{
	module_put(helper->me);
}
static struct nf_conn *
__nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
		     const struct nf_conntrack_tuple *repl,
		     const struct nf_conntrack_l3proto *l3proto)
{
	struct nf_conn *conntrack = NULL;
	u_int32_t features = 0;
	struct nf_conntrack_helper *helper;

	if (unlikely(!nf_conntrack_hash_rnd_initted)) {
		get_random_bytes(&nf_conntrack_hash_rnd, 4);
		nf_conntrack_hash_rnd_initted = 1;
	}

	if (nf_conntrack_max
	    && atomic_read(&nf_conntrack_count) >= nf_conntrack_max) {
		unsigned int hash = hash_conntrack(orig);
		/* Try dropping from this hash chain. */
		if (!early_drop(&nf_conntrack_hash[hash])) {
			if (net_ratelimit())
				printk(KERN_WARNING
				       "nf_conntrack: table full, dropping"
				       " packet.\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	/* find features needed by this conntrack. */
	features = l3proto->get_features(orig);

	/* FIXME: protect helper list per RCU */
	read_lock_bh(&nf_conntrack_lock);
	helper = __nf_ct_helper_find(repl);
	if (helper)
		features |= NF_CT_F_HELP;
	read_unlock_bh(&nf_conntrack_lock);

	DEBUGP("nf_conntrack_alloc: features=0x%x\n", features);

	read_lock_bh(&nf_ct_cache_lock);

	if (unlikely(!nf_ct_cache[features].use)) {
		DEBUGP("nf_conntrack_alloc: not supported features = 0x%x\n",
			features);
		goto out;
	}

	conntrack = kmem_cache_alloc(nf_ct_cache[features].cachep, GFP_ATOMIC);
	if (conntrack == NULL) {
		DEBUGP("nf_conntrack_alloc: Can't alloc conntrack from cache\n");
		goto out;
	}

	memset(conntrack, 0, nf_ct_cache[features].size);
	conntrack->features = features;
	if (helper) {
		struct nf_conn_help *help = nfct_help(conntrack);
		help->helper = helper;
	}

	atomic_set(&conntrack->ct_general.use, 1);
	conntrack->ct_general.destroy = destroy_conntrack;
	conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	/* Don't set timer yet: wait for confirmation */
	init_timer(&conntrack->timeout);
	conntrack->timeout.data = (unsigned long)conntrack;
	conntrack->timeout.function = death_by_timeout;

	atomic_inc(&nf_conntrack_count);
out:
	read_unlock_bh(&nf_ct_cache_lock);
	return conntrack;
}
struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl)
{
	struct nf_conntrack_l3proto *l3proto;

	l3proto = __nf_ct_l3proto_find(orig->src.l3num);
	return __nf_conntrack_alloc(orig, repl, l3proto);
}

void nf_conntrack_free(struct nf_conn *conntrack)
{
	u_int32_t features = conntrack->features;
	NF_CT_ASSERT(features >= NF_CT_F_BASIC && features < NF_CT_F_NUM);
	DEBUGP("nf_conntrack_free: features = 0x%x, conntrack=%p\n", features,
	       conntrack);
	kmem_cache_free(nf_ct_cache[features].cachep, conntrack);
	atomic_dec(&nf_conntrack_count);
}
/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(const struct nf_conntrack_tuple *tuple,
	       struct nf_conntrack_l3proto *l3proto,
	       struct nf_conntrack_protocol *protocol,
	       struct sk_buff *skb,
	       unsigned int dataoff)
{
	struct nf_conn *conntrack;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_expect *exp;

	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, protocol)) {
		DEBUGP("Can't invert tuple.\n");
		return NULL;
	}

	conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto);
	if (conntrack == NULL || IS_ERR(conntrack)) {
		DEBUGP("Can't allocate conntrack.\n");
		return (struct nf_conntrack_tuple_hash *)conntrack;
	}

	if (!protocol->new(conntrack, skb, dataoff)) {
		nf_conntrack_free(conntrack);
		DEBUGP("init conntrack: can't track with proto module\n");
		return NULL;
	}

	write_lock_bh(&nf_conntrack_lock);
	exp = find_expectation(tuple);

	if (exp) {
		DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",
			conntrack, exp);
		/* Welcome, Mr. Bond.  We've been expecting you... */
		__set_bit(IPS_EXPECTED_BIT, &conntrack->status);
		conntrack->master = exp->master;
#ifdef CONFIG_NF_CONNTRACK_MARK
		conntrack->mark = exp->master->mark;
#endif
		nf_conntrack_get(&conntrack->master->ct_general);
		NF_CT_STAT_INC(expect_new);
	} else
		NF_CT_STAT_INC(new);

	/* Overload tuple linked list to put us in unconfirmed list. */
	list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed);

	write_unlock_bh(&nf_conntrack_lock);

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(conntrack, exp);
		nf_conntrack_expect_put(exp);
	}

	return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
}
/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int16_t l3num,
		  u_int8_t protonum,
		  struct nf_conntrack_l3proto *l3proto,
		  struct nf_conntrack_protocol *proto,
		  int *set_reply,
		  enum ip_conntrack_info *ctinfo)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (!nf_ct_get_tuple(skb, (unsigned int)(skb->nh.raw - skb->data),
			     dataoff, l3num, protonum, &tuple, l3proto,
			     proto)) {
		DEBUGP("resolve_normal_ct: Can't get tuple\n");
		return NULL;
	}

	/* look for tuple match */
	h = nf_conntrack_find_get(&tuple, NULL);
	if (!h) {
		h = init_conntrack(&tuple, l3proto, proto, skb, dataoff);
		if (!h)
			return NULL;
		if (IS_ERR(h))
			return (void *)h;
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
		/* Please set reply bit if this packet OK */
		*set_reply = 1;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			DEBUGP("nf_conntrack_in: normal packet for %p\n", ct);
			*ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			DEBUGP("nf_conntrack_in: related packet for %p\n", ct);
			*ctinfo = IP_CT_RELATED;
		} else {
			DEBUGP("nf_conntrack_in: new packet for %p\n", ct);
			*ctinfo = IP_CT_NEW;
		}
		*set_reply = 0;
	}
	skb->nfct = &ct->ct_general;
	skb->nfctinfo = *ctinfo;
	return ct;
}
unsigned int
nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_protocol *proto;
	unsigned int dataoff;
	u_int8_t protonum;
	int set_reply = 0;
	int ret;

	/* Previously seen (loopback or untracked)?  Ignore. */
	if ((*pskb)->nfct) {
		NF_CT_STAT_INC(ignore);
		return NF_ACCEPT;
	}

	l3proto = __nf_ct_l3proto_find((u_int16_t)pf);
	if ((ret = l3proto->prepare(pskb, hooknum, &dataoff, &protonum)) <= 0) {
		DEBUGP("not prepared to track yet or error occurred\n");
		return -ret;
	}

	proto = __nf_ct_proto_find((u_int16_t)pf, protonum);

	/* It may be a special packet, error, unclean...
	 * inverse of the return code tells the netfilter
	 * core what to do with the packet. */
	if (proto->error != NULL &&
	    (ret = proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
		NF_CT_STAT_INC(error);
		NF_CT_STAT_INC(invalid);
		return -ret;
	}

	ct = resolve_normal_ct(*pskb, dataoff, pf, protonum, l3proto, proto,
			       &set_reply, &ctinfo);
	if (!ct) {
		/* Not valid part of a connection */
		NF_CT_STAT_INC(invalid);
		return NF_ACCEPT;
	}

	if (IS_ERR(ct)) {
		/* Too stressed to deal. */
		NF_CT_STAT_INC(drop);
		return NF_DROP;
	}

	NF_CT_ASSERT((*pskb)->nfct);

	ret = proto->packet(ct, *pskb, dataoff, ctinfo, pf, hooknum);
	if (ret < 0) {
		/* Invalid: inverse of the return code tells
		 * the netfilter core what to do */
		DEBUGP("nf_conntrack_in: Can't track with proto module\n");
		nf_conntrack_put((*pskb)->nfct);
		(*pskb)->nfct = NULL;
		NF_CT_STAT_INC(invalid);
		return -ret;
	}

	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_STATUS, *pskb);

	return ret;
}
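
/*
 * Call-flow sketch (assuming an L3 module such as nf_conntrack_ipv4):
 * its early hooks (PRE_ROUTING/LOCAL_OUT) invoke nf_conntrack_in() to
 * classify the packet and update protocol state, and its late hooks
 * (POST_ROUTING/LOCAL_IN) call __nf_conntrack_confirm() to move the
 * connection from the unconfirmed list into the hash table.
 */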
int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
			 const struct nf_conntrack_tuple *orig)
{
	return nf_ct_invert_tuple(inverse, orig,
				  __nf_ct_l3proto_find(orig->src.l3num),
				  __nf_ct_proto_find(orig->src.l3num,
						     orig->dst.protonum));
}
/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
			       const struct nf_conntrack_expect *b)
{
	/* Part covered by intersection of masks must be unequal,
	   otherwise they clash */
	struct nf_conntrack_tuple intersect_mask;
	int count;

	intersect_mask.src.l3num = a->mask.src.l3num & b->mask.src.l3num;
	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
	intersect_mask.dst.u.all = a->mask.dst.u.all & b->mask.dst.u.all;
	intersect_mask.dst.protonum = a->mask.dst.protonum
					& b->mask.dst.protonum;

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.src.u3.all[count] =
			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
	}

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.dst.u3.all[count] =
			a->mask.dst.u3.all[count] & b->mask.dst.u3.all[count];
	}

	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}
static inline int expect_matches(const struct nf_conntrack_expect *a,
				 const struct nf_conntrack_expect *b)
{
	return a->master == b->master
		&& nf_ct_tuple_equal(&a->tuple, &b->tuple)
		&& nf_ct_tuple_equal(&a->mask, &b->mask);
}
/* Generally a bad idea to call this: could have matched already. */
void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp)
{
	struct nf_conntrack_expect *i;

	write_lock_bh(&nf_conntrack_lock);
	/* choose the oldest expectation to evict */
	list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
		if (expect_matches(i, exp) && del_timer(&i->timeout)) {
			nf_ct_unlink_expect(i);
			write_unlock_bh(&nf_conntrack_lock);
			nf_conntrack_expect_put(i);
			return;
		}
	}
	write_unlock_bh(&nf_conntrack_lock);
}
/* We don't increase the master conntrack refcount for non-fulfilled
 * conntracks. During the conntrack destruction, the expectations are
 * always killed before the conntrack itself */
struct nf_conntrack_expect *nf_conntrack_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	new = kmem_cache_alloc(nf_conntrack_expect_cachep, GFP_ATOMIC);
	if (!new) {
		DEBUGP("expect_related: OOM allocating expect\n");
		return NULL;
	}
	new->master = me;
	atomic_set(&new->use, 1);
	return new;
}

void nf_conntrack_expect_put(struct nf_conntrack_expect *exp)
{
	if (atomic_dec_and_test(&exp->use))
		kmem_cache_free(nf_conntrack_expect_cachep, exp);
}
static void nf_conntrack_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);

	atomic_inc(&exp->use);
	master_help->expecting++;
	list_add(&exp->list, &nf_conntrack_expect_list);

	init_timer(&exp->timeout);
	exp->timeout.data = (unsigned long)exp;
	exp->timeout.function = expectation_timed_out;
	exp->timeout.expires = jiffies + master_help->helper->timeout * HZ;
	add_timer(&exp->timeout);

	exp->id = ++nf_conntrack_expect_next_id;
	atomic_inc(&exp->use);
	NF_CT_STAT_INC(expect_create);
}
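
/*
 * After insertion the expectation holds three references in total: the
 * caller's from nf_conntrack_expect_alloc(), one for its place on the
 * global list and one for the running timer (the two atomic_inc()s
 * above).  Each is eventually dropped via nf_conntrack_expect_put().
 */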
/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master)
{
	struct nf_conntrack_expect *i;

	list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
		if (i->master == master) {
			if (del_timer(&i->timeout)) {
				nf_ct_unlink_expect(i);
				nf_conntrack_expect_put(i);
			}
			break;
		}
	}
}

static inline int refresh_timer(struct nf_conntrack_expect *i)
{
	struct nf_conn_help *master_help = nfct_help(i->master);

	if (!del_timer(&i->timeout))
		return 0;

	i->timeout.expires = jiffies + master_help->helper->timeout*HZ;
	add_timer(&i->timeout);
	return 1;
}
int nf_conntrack_expect_related(struct nf_conntrack_expect *expect)
{
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	int ret;

	NF_CT_ASSERT(master_help);

	DEBUGP("nf_conntrack_expect_related %p\n", expect);
	DEBUGP("tuple: "); NF_CT_DUMP_TUPLE(&expect->tuple);
	DEBUGP("mask:  "); NF_CT_DUMP_TUPLE(&expect->mask);

	write_lock_bh(&nf_conntrack_lock);
	list_for_each_entry(i, &nf_conntrack_expect_list, list) {
		if (expect_matches(i, expect)) {
			/* Refresh timer: if it's dying, ignore.. */
			if (refresh_timer(i)) {
				ret = 0;
				goto out;
			}
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}

	/* Will be over limit? */
	if (master_help->helper->max_expected &&
	    master_help->expecting >= master_help->helper->max_expected)
		evict_oldest_expect(master);

	nf_conntrack_expect_insert(expect);
	nf_conntrack_expect_event(IPEXP_NEW, expect);
	ret = 0;
out:
	write_unlock_bh(&nf_conntrack_lock);
	return ret;
}
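
#if 0
/* Usage sketch (not compiled): how a connection tracking helper might
 * register an expectation for a related data connection.  The function
 * name and the blanket exact-match mask below are illustrative
 * assumptions; real helpers (e.g. FTP) fill in tuple/mask field by
 * field, wildcarding e.g. the source port. */
static int example_expect_data_conn(struct nf_conn *ct,
				    const struct nf_conntrack_tuple *data)
{
	struct nf_conntrack_expect *exp;
	int ret;

	exp = nf_conntrack_expect_alloc(ct);	/* use = 1, master = ct */
	if (exp == NULL)
		return -ENOMEM;

	exp->tuple = *data;			/* what we expect to see */
	memset(&exp->mask, 0xff, sizeof(exp->mask));	/* match all fields */
	exp->expectfn = NULL;			/* no callback on match */
	exp->flags = 0;				/* one-shot, not PERMANENT */

	ret = nf_conntrack_expect_related(exp);	/* insert + arm timeout */
	nf_conntrack_expect_put(exp);		/* drop our allocation ref */
	return ret;
}
#endif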
int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
{
	int ret;
	BUG_ON(me->timeout == 0);

	ret = nf_conntrack_register_cache(NF_CT_F_HELP, "nf_conntrack:help",
					  sizeof(struct nf_conn)
					  + sizeof(struct nf_conn_help)
					  + __alignof__(struct nf_conn_help));
	if (ret < 0) {
		printk(KERN_ERR "nf_conntrack_helper_register: Unable to create slab cache for conntracks\n");
		return ret;
	}
	write_lock_bh(&nf_conntrack_lock);
	list_prepend(&helpers, me);
	write_unlock_bh(&nf_conntrack_lock);

	return 0;
}
struct nf_conntrack_helper *
__nf_conntrack_helper_find_byname(const char *name)
{
	struct nf_conntrack_helper *h;

	list_for_each_entry(h, &helpers, list) {
		if (!strcmp(h->name, name))
			return h;
	}
	return NULL;
}

static inline int unhelp(struct nf_conntrack_tuple_hash *i,
			 const struct nf_conntrack_helper *me)
{
	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i);
	struct nf_conn_help *help = nfct_help(ct);

	if (help && help->helper == me) {
		nf_conntrack_event(IPCT_HELPER, ct);
		help->helper = NULL;
	}
	return 0;
}
void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
{
	unsigned int i;
	struct nf_conntrack_expect *exp, *tmp;

	/* Need write lock here, to delete helper. */
	write_lock_bh(&nf_conntrack_lock);
	LIST_DELETE(&helpers, me);

	/* Get rid of expectations */
	list_for_each_entry_safe(exp, tmp, &nf_conntrack_expect_list, list) {
		struct nf_conn_help *help = nfct_help(exp->master);
		if (help->helper == me && del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_conntrack_expect_put(exp);
		}
	}

	/* Get rid of expecteds, set helpers to NULL. */
	LIST_FIND_W(&unconfirmed, unhelp, struct nf_conntrack_tuple_hash*, me);
	for (i = 0; i < nf_conntrack_htable_size; i++)
		LIST_FIND_W(&nf_conntrack_hash[i], unhelp,
			    struct nf_conntrack_tuple_hash *, me);
	write_unlock_bh(&nf_conntrack_lock);

	/* Someone could be still looking at the helper in a bh. */
	synchronize_net();
}
/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  unsigned long extra_jiffies,
			  int do_acct)
{
	int event = 0;

	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
	NF_CT_ASSERT(skb);

	write_lock_bh(&nf_conntrack_lock);

	/* If not in hash table, timer will not be active yet */
	if (!nf_ct_is_confirmed(ct)) {
		ct->timeout.expires = extra_jiffies;
		event = IPCT_REFRESH;
	} else {
		/* Need del_timer for race avoidance (may already be dying). */
		if (del_timer(&ct->timeout)) {
			ct->timeout.expires = jiffies + extra_jiffies;
			add_timer(&ct->timeout);
			event = IPCT_REFRESH;
		}
	}

#ifdef CONFIG_NF_CT_ACCT
	if (do_acct) {
		ct->counters[CTINFO2DIR(ctinfo)].packets++;
		ct->counters[CTINFO2DIR(ctinfo)].bytes +=
			skb->len - (unsigned int)(skb->nh.raw - skb->data);
		if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
		    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
			event |= IPCT_COUNTER_FILLING;
	}
#endif

	write_unlock_bh(&nf_conntrack_lock);

	/* must be unlocked when calling event cache */
	if (event)
		nf_conntrack_event_cache(event, skb);
}
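
/*
 * The 0x80000000 tests above fire once a 32-bit counter is half full;
 * IPCT_COUNTER_FILLING then signals event listeners (e.g. ctnetlink)
 * that the counters should be harvested before they wrap.
 */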
#if defined(CONFIG_NF_CT_NETLINK) || \
    defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nfattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	NFA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(u_int16_t),
		&tuple->src.u.tcp.port);
	NFA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(u_int16_t),
		&tuple->dst.u.tcp.port);
	return 0;

nfattr_failure:
	return -1;
}

static const size_t cta_min_proto[CTA_PROTO_MAX] = {
	[CTA_PROTO_SRC_PORT-1]	= sizeof(u_int16_t),
	[CTA_PROTO_DST_PORT-1]	= sizeof(u_int16_t)
};

int nf_ct_port_nfattr_to_tuple(struct nfattr *tb[],
			       struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_PROTO_SRC_PORT-1] || !tb[CTA_PROTO_DST_PORT-1])
		return -EINVAL;

	if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto))
		return -EINVAL;

	t->src.u.tcp.port =
		*(u_int16_t *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]);
	t->dst.u.tcp.port =
		*(u_int16_t *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]);

	return 0;
}
#endif
/* Used by ipt_REJECT and ip6t_REJECT. */
void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	/* This ICMP is in reverse direction to the packet which caused it */
	ct = nf_ct_get(skb, &ctinfo);
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
	else
		ctinfo = IP_CT_RELATED;

	/* Attach to new skbuff, and increment count */
	nskb->nfct = &ct->ct_general;
	nskb->nfctinfo = ctinfo;
	nf_conntrack_get(nskb->nfct);
}
static inline int
do_iter(const struct nf_conntrack_tuple_hash *i,
	int (*iter)(struct nf_conn *i, void *data),
	void *data)
{
	return iter(nf_ct_tuplehash_to_ctrack(i), data);
}

/* Bring out ya dead! */
static struct nf_conntrack_tuple_hash *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h = NULL;

	write_lock_bh(&nf_conntrack_lock);
	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
		h = LIST_FIND_W(&nf_conntrack_hash[*bucket], do_iter,
				struct nf_conntrack_tuple_hash *, iter, data);
		if (h)
			break;
	}
	if (!h)
		h = LIST_FIND_W(&unconfirmed, do_iter,
				struct nf_conntrack_tuple_hash *, iter, data);
	if (h)
		atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
	write_unlock_bh(&nf_conntrack_lock);

	return h;
}
void
nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data)
{
	struct nf_conntrack_tuple_hash *h;
	unsigned int bucket = 0;

	while ((h = get_next_corpse(iter, data, &bucket)) != NULL) {
		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
		/* Time to push up daisies... */
		if (del_timer(&ct->timeout))
			death_by_timeout((unsigned long)ct);
		/* ... else the timer will get him soon. */

		nf_ct_put(ct);
	}
}

static int kill_all(struct nf_conn *i, void *data)
{
	return 1;
}
static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size)
{
	if (vmalloced)
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct list_head) * size));
}

void nf_conntrack_flush(void)
{
	nf_ct_iterate_cleanup(kill_all, NULL);
}
/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(void)
{
	int i;

	ip_ct_attach = NULL;

	/* This makes sure all current packets have passed through
	   netfilter framework.  Roll on, two-stage module
	   delete... */
	synchronize_net();

	nf_ct_event_cache_flush();
 i_see_dead_people:
	nf_conntrack_flush();
	if (atomic_read(&nf_conntrack_count) != 0) {
		schedule();
		goto i_see_dead_people;
	}
	/* wait until all references to nf_conntrack_untracked are dropped */
	while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
		schedule();

	for (i = 0; i < NF_CT_F_NUM; i++) {
		if (nf_ct_cache[i].use == 0)
			continue;

		NF_CT_ASSERT(nf_ct_cache[i].use == 1);
		nf_ct_cache[i].use = 1;
		nf_conntrack_unregister_cache(i);
	}
	kmem_cache_destroy(nf_conntrack_expect_cachep);
	free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
			    nf_conntrack_htable_size);

	/* free l3proto protocol tables */
	for (i = 0; i < PF_MAX; i++)
		if (nf_ct_protos[i]) {
			kfree(nf_ct_protos[i]);
			nf_ct_protos[i] = NULL;
		}
}
static struct list_head *alloc_hashtable(int size, int *vmalloced)
{
	struct list_head *hash;
	unsigned int i;

	*vmalloced = 0;
	hash = (void*)__get_free_pages(GFP_KERNEL,
				       get_order(sizeof(struct list_head)
						 * size));
	if (!hash) {
		*vmalloced = 1;
		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
		hash = vmalloc(sizeof(struct list_head) * size);
	}
	if (hash)
		for (i = 0; i < size; i++)
			INIT_LIST_HEAD(&hash[i]);
	return hash;
}
int set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket, hashsize, vmalloced;
	int old_vmalloced, old_size;
	int rnd;
	struct list_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;

	/* On boot, we can set this without any fancy locking. */
	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	hashsize = simple_strtol(val, NULL, 0);
	if (!hashsize)
		return -EINVAL;

	hash = alloc_hashtable(hashsize, &vmalloced);
	if (!hash)
		return -ENOMEM;

	/* We have to rehash for the new table anyway, so we also can
	 * use a new random seed */
	get_random_bytes(&rnd, 4);

	write_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < nf_conntrack_htable_size; i++) {
		while (!list_empty(&nf_conntrack_hash[i])) {
			h = list_entry(nf_conntrack_hash[i].next,
				       struct nf_conntrack_tuple_hash, list);
			list_del(&h->list);
			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
			list_add_tail(&h->list, &hash[bucket]);
		}
	}
	old_size = nf_conntrack_htable_size;
	old_vmalloced = nf_conntrack_vmalloc;
	old_hash = nf_conntrack_hash;

	nf_conntrack_htable_size = hashsize;
	nf_conntrack_vmalloc = vmalloced;
	nf_conntrack_hash = hash;
	nf_conntrack_hash_rnd = rnd;
	write_unlock_bh(&nf_conntrack_lock);

	free_conntrack_hash(old_hash, old_vmalloced, old_size);
	return 0;
}
module_param_call(hashsize, set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);
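
/*
 * With module_param_call() wired to set_hashsize(), the table can be
 * resized at runtime, e.g. (as root, assuming the module is named
 * nf_conntrack):
 *
 *	echo 16384 > /sys/module/nf_conntrack/parameters/hashsize
 */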
int __init nf_conntrack_init(void)
{
	unsigned int i;
	int ret;

	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
	 * machine has 256 buckets. >= 1GB machines have 8192 buckets. */
	if (!nf_conntrack_htable_size) {
		nf_conntrack_htable_size
			= (((num_physpages << PAGE_SHIFT) / 16384)
			   / sizeof(struct list_head));
		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 8192;
		if (nf_conntrack_htable_size < 16)
			nf_conntrack_htable_size = 16;
	}
	nf_conntrack_max = 8 * nf_conntrack_htable_size;
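	/*
	 * Worked example (illustrative): on a 512 MB machine this gives
	 * 512 MB / 16384 = 32768 bytes of list heads; at 8 bytes per
	 * struct list_head that is 4096 buckets, and nf_conntrack_max
	 * becomes 8 * 4096 = 32768 tracked connections.
	 */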
	printk("nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);

	nf_conntrack_hash = alloc_hashtable(nf_conntrack_htable_size,
					    &nf_conntrack_vmalloc);
	if (!nf_conntrack_hash) {
		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
		goto err_out;
	}

	ret = nf_conntrack_register_cache(NF_CT_F_BASIC, "nf_conntrack:basic",
					  sizeof(struct nf_conn));
	if (ret < 0) {
		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
		goto err_free_hash;
	}

	nf_conntrack_expect_cachep = kmem_cache_create("nf_conntrack_expect",
					sizeof(struct nf_conntrack_expect),
					0, 0, NULL, NULL);
	if (!nf_conntrack_expect_cachep) {
		printk(KERN_ERR "Unable to create nf_expect slab cache\n");
		goto err_free_conntrack_slab;
	}

	/* Don't NEED lock here, but good form anyway. */
	write_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < PF_MAX; i++)
		nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto;
	write_unlock_bh(&nf_conntrack_lock);

	/* For use by REJECT target */
	ip_ct_attach = __nf_conntrack_attach;

	/* Set up fake conntrack:
	    - to never be deleted, not in any hashes */
	atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
	/*  - and make it look like a confirmed connection */
	set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

	return ret;

err_free_conntrack_slab:
	nf_conntrack_unregister_cache(NF_CT_F_BASIC);
err_free_hash:
	free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
			    nf_conntrack_htable_size);
err_out:
	return -ENOMEM;
}