/* NAT for netfilter; shared with compatibility layer. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <net/checksum.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/tcp.h>  /* For tcp_prot in getorigdst */
#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/jhash.h>

#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>

static DEFINE_SPINLOCK(nf_nat_lock);

static struct nf_conntrack_l3proto *l3proto __read_mostly;

/* Calculated at init based on memory size */
static unsigned int nf_nat_htable_size __read_mostly;

#define MAX_IP_NAT_PROTO 256
static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
						__read_mostly;

static inline const struct nf_nat_protocol *
__nf_nat_proto_find(u_int8_t protonum)
{
	return rcu_dereference(nf_nat_protos[protonum]);
}

const struct nf_nat_protocol *
nf_nat_proto_find_get(u_int8_t protonum)
{
	const struct nf_nat_protocol *p;

	rcu_read_lock();
	p = __nf_nat_proto_find(protonum);
	if (!try_module_get(p->me))
		p = &nf_nat_unknown_protocol;
	rcu_read_unlock();

	return p;
}
EXPORT_SYMBOL_GPL(nf_nat_proto_find_get);

void
nf_nat_proto_put(const struct nf_nat_protocol *p)
{
	module_put(p->me);
}
EXPORT_SYMBOL_GPL(nf_nat_proto_put);

/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	/* Original src, to ensure we map it consistently if possible. */
	hash = jhash_3words((__force u32)tuple->src.u3.ip,
			    (__force u32)tuple->src.u.all,
			    tuple->dst.protonum, 0);
	return ((u64)hash * nf_nat_htable_size) >> 32;
}
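
/* Note on the return expression above: ((u64)hash * size) >> 32 maps a
 * 32-bit hash uniformly onto [0, size) with a multiply and a shift,
 * avoiding the division a modulo would cost.  E.g. with size = 4096,
 * hash = 0x80000000 selects bucket 2048. */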

/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	   incoming ones.  NAT means they don't have a fixed mapping,
	   so we invert the tuple and look for the incoming reply.

	   We could keep a separate hash if this proves too slow. */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);

/* If we source-map this tuple so the reply looks like reply_tuple, will
 * that meet the constraints of range? */
static int
in_range(const struct nf_conntrack_tuple *tuple,
	 const struct nf_nat_range *range)
{
	const struct nf_nat_protocol *proto;
	int ret = 0;

	/* If we are supposed to map IPs, then we must be in the
	   range specified, otherwise let this drag us onto a new src IP. */
	if (range->flags & IP_NAT_RANGE_MAP_IPS) {
		if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
		    ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
			return 0;
	}

	rcu_read_lock();
	proto = __nf_nat_proto_find(tuple->dst.protonum);
	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	    proto->in_range(tuple, IP_NAT_MANIP_SRC,
			    &range->min, &range->max))
		ret = 1;
	rcu_read_unlock();

	return ret;
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		t->src.u3.ip == tuple->src.u3.ip &&
		t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	unsigned int h = hash_by_src(tuple);
	const struct nf_conn_nat *nat;
	const struct nf_conn *ct;
	const struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
		ct = nat->ct;
		if (same_src(ct, tuple)) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuplepr(result,
				       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(result, range)) {
				rcu_read_unlock();
				return 1;
			}
		}
	}
	rcu_read_unlock();
	return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
   src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
   if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
   1-65535, we don't do pro-rata allocation based on ports; we choose
   the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
find_best_ips_proto(struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	__be32 *var_ipp;
	/* Host order */
	u_int32_t minip, maxip, j;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & IP_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == IP_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3.ip;
	else
		var_ipp = &tuple->dst.u3.ip;

	/* Fast path: only one choice. */
	if (range->min_ip == range->max_ip) {
		*var_ipp = range->min_ip;
		return;
	}

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots. */
	minip = ntohl(range->min_ip);
	maxip = ntohl(range->max_ip);
	j = jhash_2words((__force u32)tuple->src.u3.ip,
			 (__force u32)tuple->dst.u3.ip, 0);
	j = ((u64)j * (maxip - minip + 1)) >> 32;
	*var_ipp = htonl(minip + j);
}
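
/* Worked example for the scaling above (illustrative values): with a
 * range of 1.2.3.4-1.2.3.7 (4 addresses) and jhash_2words() returning
 * 0xc0000000, j becomes ((u64)0xc0000000 * 4) >> 32 = 3, so this
 * src/dst pair is consistently mapped to 1.2.3.7. */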

/* Manipulate the tuple into the range given.  For NF_INET_POST_ROUTING,
 * we change the source to map into the range.  For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range.  It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
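
/* Concrete example (illustrative, not from the original source): a LAN
 * host opens 10.0.0.5:1024 -> 198.51.100.1:80 and is source-NATed to
 * 203.0.113.9 at POST_ROUTING.  The tuple becomes
 * 203.0.113.9:1024 -> 198.51.100.1:80; only if that tuple is already
 * taken does the per-protocol part (here the source port) get remapped
 * too. */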
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	const struct nf_nat_protocol *proto;

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	   and that same mapping gives a unique tuple within the given
	   range, use that.

	   This is only required for source (ie. NAT/masq) mappings.
	   So far, we don't do local source mappings, so multiple
	   manips not an issue. */
	if (maniptype == IP_NAT_MANIP_SRC &&
	    !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
		if (find_appropriate_src(net, orig_tuple, tuple, range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				return;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given
	   range. */
	*tuple = *orig_tuple;
	find_best_ips_proto(tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	   the range to make a unique tuple. */

	rcu_read_lock();
	proto = __nf_nat_proto_find(orig_tuple->dst.protonum);

	/* Change protocol info to have some randomization */
	if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
		proto->unique_tuple(tuple, range, maniptype, ct);
		goto out;
	}

	/* Only bother mapping if it's not already in range and unique */
	if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	     proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
	    !nf_nat_used_tuple(tuple, ct))
		goto out;

	/* Last chance: get protocol to try to obtain unique tuple. */
	proto->unique_tuple(tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_tuple curr_tuple, new_tuple;
	struct nf_conn_nat *nat;
	int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);

	/* A nat helper or nfnetlink may also set up the binding. */
	nat = nfct_nat(ct);
	if (!nat) {
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
		if (nat == NULL) {
			pr_debug("failed to add NAT extension\n");
			return NF_ACCEPT;
		}
	}

	NF_CT_ASSERT(maniptype == IP_NAT_MANIP_SRC ||
		     maniptype == IP_NAT_MANIP_DST);
	BUG_ON(nf_nat_initialized(ct, maniptype));

	/* What we've got will look like inverse of reply.  Normally
	   this is what is in the conntrack, except for prior
	   manipulations (future optimization: if num_manips == 0,
	   orig_tp = conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so it will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == IP_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;
	}

	/* Place in source hash if this is the first time. */
	if (have_to_hash) {
		unsigned int srchash;

		srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		spin_lock_bh(&nf_nat_lock);
		/* nf_conntrack_alter_reply might re-allocate extension area */
		nat = nfct_nat(ct);
		nat->ct = ct;
		hlist_add_head_rcu(&nat->bysource,
				   &net->ipv4.nat_bysource[srchash]);
		spin_unlock_bh(&nf_nat_lock);
	}

	/* It's done. */
	if (maniptype == IP_NAT_MANIP_DST)
		set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
	else
		set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
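
/* A typical caller, sketched after the MASQUERADE target
 * (net/ipv4/netfilter/ipt_MASQUERADE.c), where newsrc is the address
 * of the outgoing interface; the exact field values are illustrative:
 *
 *	struct nf_nat_range newrange = {
 *		.flags	= IP_NAT_RANGE_MAP_IPS,
 *		.min_ip	= newsrc,
 *		.max_ip	= newsrc,
 *	};
 *	return nf_nat_setup_info(ct, &newrange, IP_NAT_MANIP_SRC);
 */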

/* Returns true if succeeded. */
static bool
manip_pkt(u_int16_t proto,
	  struct sk_buff *skb,
	  unsigned int iphdroff,
	  const struct nf_conntrack_tuple *target,
	  enum nf_nat_manip_type maniptype)
{
	struct iphdr *iph;
	const struct nf_nat_protocol *p;

	if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
		return false;

	iph = (void *)skb->data + iphdroff;

	/* Manipulate protocol part. */

	/* rcu_read_lock()ed by nf_hook_slow */
	p = __nf_nat_proto_find(proto);
	if (!p->manip_pkt(skb, iphdroff, target, maniptype))
		return false;

	iph = (void *)skb->data + iphdroff;

	if (maniptype == IP_NAT_MANIP_SRC) {
		csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
		iph->saddr = target->src.u3.ip;
	} else {
		csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
		iph->daddr = target->dst.u3.ip;
	}
	return true;
}

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		if (!manip_pkt(target.dst.protonum, skb, 0, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
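
/* nf_nat_packet() is driven from the NAT netfilter hooks; a minimal
 * sketch of a caller, modeled on nf_nat_fn() in nf_nat_standalone.c
 * (details elided and illustrative):
 *
 *	ct = nf_ct_get(skb, &ctinfo);
 *	if (ct && nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
 *		return nf_nat_packet(ct, ctinfo, hooknum, skb);
 */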

/* Dir is direction ICMP is coming from (opposite to packet it contains) */
int nf_nat_icmp_reply_translation(struct nf_conn *ct,
				  enum ip_conntrack_info ctinfo,
				  unsigned int hooknum,
				  struct sk_buff *skb)
{
	struct {
		struct icmphdr icmp;
		struct iphdr ip;
	} *inside;
	const struct nf_conntrack_l4proto *l4proto;
	struct nf_conntrack_tuple inner, target;
	int hdrlen = ip_hdrlen(skb);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);

	if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
		return 0;

	inside = (void *)skb->data + ip_hdrlen(skb);

	/* We're actually going to mangle it beyond trivial checksum
	   adjustment, so make sure the current checksum is correct. */
	if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
		return 0;

	/* Must be RELATED */
	NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED ||
		     skb->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);

	/* Redirects on non-null nats must be dropped, else they'll
	   start talking to each other without our translation, and be
	   frustrated. */
	if (inside->icmp.type == ICMP_REDIRECT) {
		/* If NAT isn't finished, assume it and drop. */
		if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
			return 0;

		if (ct->status & IPS_NAT_MASK)
			return 0;
	}

	pr_debug("icmp_reply_translation: translating error %p manip %u "
		 "dir %s\n", skb, manip,
		 dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");

	/* rcu_read_lock()ed by nf_hook_slow */
	l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);

	if (!nf_ct_get_tuple(skb,
			     ip_hdrlen(skb) + sizeof(struct icmphdr),
			     (ip_hdrlen(skb) +
			      sizeof(struct icmphdr) + inside->ip.ihl * 4),
			     (u_int16_t)AF_INET,
			     inside->ip.protocol,
			     &inner, l3proto, l4proto))
		return 0;

	/* Change inner back to look like incoming packet.  We do the
	   opposite manip on this hook to normal, because it might not
	   pass all hooks (locally-generated ICMP).  Consider incoming
	   packet: PREROUTING (DST manip), routing produces ICMP, goes
	   through POSTROUTING (which must correct the DST manip). */
	if (!manip_pkt(inside->ip.protocol, skb,
		       ip_hdrlen(skb) + sizeof(inside->icmp),
		       &ct->tuplehash[!dir].tuple,
		       !manip))
		return 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Reload "inside"; manip_pkt may have reallocated skb data. */
		inside = (void *)skb->data + ip_hdrlen(skb);
		inside->icmp.checksum = 0;
		inside->icmp.checksum =
			csum_fold(skb_checksum(skb, hdrlen,
					       skb->len - hdrlen, 0));
	}

	/* Change outer to look like the reply to an incoming packet
	 * (proto 0 means don't invert per-proto part). */
	if (manip == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
		if (!manip_pkt(0, skb, 0, &target, manip))
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
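
/* Worked example (illustrative): LAN host 10.0.0.5 sends UDP out via
 * SNAT to 203.0.113.9, and a router on the path answers with an ICMP
 * port-unreachable quoting the *translated* UDP header.  On the way
 * back in, this function first rewrites the quoted inner header to
 * name 10.0.0.5 again, then the outer IP header, so the origin host
 * can match the error to its own socket. */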

/* Protocol registration. */
int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
{
	int ret = 0;

	spin_lock_bh(&nf_nat_lock);
	if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
		ret = -EBUSY;
		goto out;
	}
	rcu_assign_pointer(nf_nat_protos[proto->protonum], proto);
 out:
	spin_unlock_bh(&nf_nat_lock);
	return ret;
}
EXPORT_SYMBOL(nf_nat_protocol_register);

/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
{
	spin_lock_bh(&nf_nat_lock);
	rcu_assign_pointer(nf_nat_protos[proto->protonum],
			   &nf_nat_unknown_protocol);
	spin_unlock_bh(&nf_nat_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);

	if (nat == NULL || nat->ct == NULL)
		return;

	NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK);

	spin_lock_bh(&nf_nat_lock);
	hlist_del_rcu(&nat->bysource);
	spin_unlock_bh(&nf_nat_lock);
}

static void nf_nat_move_storage(void *new, void *old)
{
	struct nf_conn_nat *new_nat = new;
	struct nf_conn_nat *old_nat = old;
	struct nf_conn *ct = old_nat->ct;

	if (!ct || !(ct->status & IPS_NAT_DONE_MASK))
		return;

	spin_lock_bh(&nf_nat_lock);
	new_nat->ct = ct;
	hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
	spin_unlock_bh(&nf_nat_lock);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.move		= nf_nat_move_storage,
	.id		= NF_CT_EXT_NAT,
	.flags		= NF_CT_EXT_F_PREALLOC,
};

static int __net_init nf_nat_net_init(struct net *net)
{
	net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
						       &net->ipv4.nat_vmalloced);
	if (!net->ipv4.nat_bysource)
		return -ENOMEM;

	return 0;
}

/* Clear NAT section of all conntracks, in case we're loaded again. */
static int clean_nat(struct nf_conn *i, void *data)
{
	struct nf_conn_nat *nat = nfct_nat(i);

	if (!nat)
		return 0;
	memset(nat, 0, sizeof(*nat));
	i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
	return 0;
}

static void __net_exit nf_nat_net_exit(struct net *net)
{
	nf_ct_iterate_cleanup(net, &clean_nat, NULL);
	synchronize_rcu();
	nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
			     nf_nat_htable_size);
}

static struct pernet_operations nf_nat_net_ops = {
	.init = nf_nat_net_init,
	.exit = nf_nat_net_exit,
};

static int __init nf_nat_init(void)
{
	size_t i;
	int ret;

	need_ipv4_conntrack();

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
		return ret;
	}

	/* Leave them the same for the moment. */
	nf_nat_htable_size = nf_conntrack_htable_size;

	ret = register_pernet_subsys(&nf_nat_net_ops);
	if (ret < 0)
		goto cleanup_extend;

	/* Sew in builtin protocols. */
	spin_lock_bh(&nf_nat_lock);
	for (i = 0; i < MAX_IP_NAT_PROTO; i++)
		rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
	spin_unlock_bh(&nf_nat_lock);

	/* Initialize fake conntrack so that NAT will skip it */
	nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;

	l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);

	BUG_ON(nf_nat_seq_adjust_hook != NULL);
	rcu_assign_pointer(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
	return 0;

 cleanup_extend:
	nf_ct_extend_unregister(&nat_extend);
	return ret;
}

static void __exit nf_nat_cleanup(void)
{
	unregister_pernet_subsys(&nf_nat_net_ops);
	nf_ct_l3proto_put(l3proto);
	nf_ct_extend_unregister(&nat_extend);
	rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL);
	synchronize_net();
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);