/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/
/* Debug helpers: all three compile to nothing unless the corresponding
 * debug option is enabled, so they cost nothing in production builds. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
/* Assertion that logs (but does not halt) when the condition is false.
 * Wrapped in do { } while(0) so it behaves as a single statement. */
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
66 /* All the better to debug you with... */
71 void *ip6t_alloc_initial_table(const struct xt_table *info)
73 return xt_alloc_initial_table(ip6t, IP6T);
75 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
86 /* Check for an extension */
88 ip6t_ext_hdr(u8 nexthdr)
90 return ( (nexthdr == IPPROTO_HOPOPTS) ||
91 (nexthdr == IPPROTO_ROUTING) ||
92 (nexthdr == IPPROTO_FRAGMENT) ||
93 (nexthdr == IPPROTO_ESP) ||
94 (nexthdr == IPPROTO_AH) ||
95 (nexthdr == IPPROTO_NONE) ||
96 (nexthdr == IPPROTO_DSTOPTS) );
99 /* Returns whether matches rule or not. */
100 /* Performance critical - called for every packet */
102 ip6_packet_match(const struct sk_buff *skb,
105 const struct ip6t_ip6 *ip6info,
106 unsigned int *protoff,
107 int *fragoff, bool *hotdrop)
110 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
112 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
114 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
115 &ip6info->src), IP6T_INV_SRCIP) ||
116 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
117 &ip6info->dst), IP6T_INV_DSTIP)) {
118 dprintf("Source or dest mismatch.\n");
120 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
121 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
122 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
123 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
124 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
125 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
129 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
131 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
132 dprintf("VIA in mismatch (%s vs %s).%s\n",
133 indev, ip6info->iniface,
134 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
138 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
140 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
141 dprintf("VIA out mismatch (%s vs %s).%s\n",
142 outdev, ip6info->outiface,
143 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
147 /* ... might want to do something with class and flowlabel here ... */
149 /* look for the desired protocol header */
150 if((ip6info->flags & IP6T_F_PROTO)) {
152 unsigned short _frag_off;
154 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
160 *fragoff = _frag_off;
162 dprintf("Packet protocol %hi ?= %s%hi.\n",
164 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
167 if (ip6info->proto == protohdr) {
168 if(ip6info->invflags & IP6T_INV_PROTO) {
174 /* We need match for the '-p all', too! */
175 if ((ip6info->proto != 0) &&
176 !(ip6info->invflags & IP6T_INV_PROTO))
182 /* should be ip6 safe */
184 ip6_checkentry(const struct ip6t_ip6 *ipv6)
186 if (ipv6->flags & ~IP6T_F_MASK) {
187 duprintf("Unknown flag bits set: %08X\n",
188 ipv6->flags & ~IP6T_F_MASK);
191 if (ipv6->invflags & ~IP6T_INV_MASK) {
192 duprintf("Unknown invflag bits set: %08X\n",
193 ipv6->invflags & ~IP6T_INV_MASK);
200 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
203 printk("ip6_tables: error: `%s'\n",
204 (const char *)par->targinfo);
209 /* Performance critical - called for every packet */
211 do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
212 struct xt_match_param *par)
214 par->match = m->u.kernel.match;
215 par->matchinfo = m->data;
217 /* Stop iteration if it doesn't match */
218 if (!m->u.kernel.match->match(skb, par))
/* Returns the rule entry located @offset bytes into the table blob. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
230 /* All zeroes == unconditional rule. */
231 /* Mildly perf critical (only if packet tracing is on) */
232 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
234 static const struct ip6t_ip6 uncond;
236 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* const-qualified wrapper around ip6t_get_target(). */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
245 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
246 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
247 /* This cries for unification! */
248 static const char *const hooknames[] = {
249 [NF_INET_PRE_ROUTING] = "PREROUTING",
250 [NF_INET_LOCAL_IN] = "INPUT",
251 [NF_INET_FORWARD] = "FORWARD",
252 [NF_INET_LOCAL_OUT] = "OUTPUT",
253 [NF_INET_POST_ROUTING] = "POSTROUTING",
256 enum nf_ip_trace_comments {
257 NF_IP6_TRACE_COMMENT_RULE,
258 NF_IP6_TRACE_COMMENT_RETURN,
259 NF_IP6_TRACE_COMMENT_POLICY,
262 static const char *const comments[] = {
263 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
264 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
265 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
268 static struct nf_loginfo trace_loginfo = {
269 .type = NF_LOG_TYPE_LOG,
273 .logflags = NF_LOG_MASK,
278 /* Mildly perf critical (only if packet tracing is on) */
280 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
281 const char *hookname, const char **chainname,
282 const char **comment, unsigned int *rulenum)
284 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
286 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
287 /* Head of user chain: ERROR target with chainname */
288 *chainname = t->target.data;
293 if (s->target_offset == sizeof(struct ip6t_entry) &&
294 strcmp(t->target.u.kernel.target->name,
295 IP6T_STANDARD_TARGET) == 0 &&
297 unconditional(&s->ipv6)) {
298 /* Tail of chains: STANDARD target (return/policy) */
299 *comment = *chainname == hookname
300 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
301 : comments[NF_IP6_TRACE_COMMENT_RETURN];
310 static void trace_packet(const struct sk_buff *skb,
312 const struct net_device *in,
313 const struct net_device *out,
314 const char *tablename,
315 const struct xt_table_info *private,
316 const struct ip6t_entry *e)
318 const void *table_base;
319 const struct ip6t_entry *root;
320 const char *hookname, *chainname, *comment;
321 const struct ip6t_entry *iter;
322 unsigned int rulenum = 0;
324 table_base = private->entries[smp_processor_id()];
325 root = get_entry(table_base, private->hook_entry[hook]);
327 hookname = chainname = hooknames[hook];
328 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
330 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
331 if (get_chainname_rulenum(iter, e, hookname,
332 &chainname, &comment, &rulenum) != 0)
335 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
336 "TRACE: %s:%s:%s:%u ",
337 tablename, chainname, comment, rulenum);
341 static inline __pure struct ip6t_entry *
342 ip6t_next_entry(const struct ip6t_entry *entry)
344 return (void *)entry + entry->next_offset;
347 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
349 ip6t_do_table(struct sk_buff *skb,
351 const struct net_device *in,
352 const struct net_device *out,
353 struct xt_table *table)
355 #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
357 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
358 bool hotdrop = false;
359 /* Initializing verdict to NF_DROP keeps gcc happy. */
360 unsigned int verdict = NF_DROP;
361 const char *indev, *outdev;
362 const void *table_base;
363 struct ip6t_entry *e, *back;
364 const struct xt_table_info *private;
365 struct xt_match_param mtpar;
366 struct xt_target_param tgpar;
369 indev = in ? in->name : nulldevname;
370 outdev = out ? out->name : nulldevname;
371 /* We handle fragments by dealing with the first fragment as
372 * if it was a normal packet. All other fragments are treated
373 * normally, except that they will NEVER match rules that ask
374 * things we don't know, ie. tcp syn flag or ports). If the
375 * rule is also a fragment-specific rule, non-fragments won't
377 mtpar.hotdrop = &hotdrop;
378 mtpar.in = tgpar.in = in;
379 mtpar.out = tgpar.out = out;
380 mtpar.family = tgpar.family = NFPROTO_IPV6;
381 mtpar.hooknum = tgpar.hooknum = hook;
383 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
386 private = table->private;
387 table_base = private->entries[smp_processor_id()];
389 e = get_entry(table_base, private->hook_entry[hook]);
391 /* For return from builtin chain */
392 back = get_entry(table_base, private->underflow[hook]);
395 const struct ip6t_entry_target *t;
399 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
400 &mtpar.thoff, &mtpar.fragoff, &hotdrop) ||
401 IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
402 e = ip6t_next_entry(e);
406 ADD_COUNTER(e->counters,
407 ntohs(ipv6_hdr(skb)->payload_len) +
408 sizeof(struct ipv6hdr), 1);
410 t = ip6t_get_target_c(e);
411 IP_NF_ASSERT(t->u.kernel.target);
413 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
414 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
415 /* The packet is traced: log it */
416 if (unlikely(skb->nf_trace))
417 trace_packet(skb, hook, in, out,
418 table->name, private, e);
420 /* Standard target? */
421 if (!t->u.kernel.target->target) {
424 v = ((struct ip6t_standard_target *)t)->verdict;
426 /* Pop from stack? */
427 if (v != IP6T_RETURN) {
428 verdict = (unsigned)(-v) - 1;
432 back = get_entry(table_base, back->comefrom);
435 if (table_base + v != ip6t_next_entry(e) &&
436 !(e->ipv6.flags & IP6T_F_GOTO)) {
437 /* Save old back ptr in next entry */
438 struct ip6t_entry *next = ip6t_next_entry(e);
439 next->comefrom = (void *)back - table_base;
440 /* set back pointer to next entry */
444 e = get_entry(table_base, v);
448 /* Targets which reenter must return
450 tgpar.target = t->u.kernel.target;
451 tgpar.targinfo = t->data;
453 #ifdef CONFIG_NETFILTER_DEBUG
454 tb_comefrom = 0xeeeeeeec;
456 verdict = t->u.kernel.target->target(skb, &tgpar);
458 #ifdef CONFIG_NETFILTER_DEBUG
459 if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
460 printk("Target %s reentered!\n",
461 t->u.kernel.target->name);
464 tb_comefrom = 0x57acc001;
466 if (verdict == IP6T_CONTINUE)
467 e = ip6t_next_entry(e);
473 #ifdef CONFIG_NETFILTER_DEBUG
474 tb_comefrom = NETFILTER_LINK_POISON;
476 xt_info_rdunlock_bh();
478 #ifdef DEBUG_ALLOW_ALL
489 /* Figures out from what hook each rule can be called: returns 0 if
490 there are loops. Puts hook bitmask in comefrom. */
492 mark_source_chains(const struct xt_table_info *newinfo,
493 unsigned int valid_hooks, void *entry0)
497 /* No recursion; use packet counter to save back ptrs (reset
498 to 0 as we leave), and comefrom to save source hook bitmask */
499 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
500 unsigned int pos = newinfo->hook_entry[hook];
501 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
503 if (!(valid_hooks & (1 << hook)))
506 /* Set initial back pointer. */
507 e->counters.pcnt = pos;
510 const struct ip6t_standard_target *t
511 = (void *)ip6t_get_target_c(e);
512 int visited = e->comefrom & (1 << hook);
514 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
515 printk("iptables: loop hook %u pos %u %08X.\n",
516 hook, pos, e->comefrom);
519 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
521 /* Unconditional return/END. */
522 if ((e->target_offset == sizeof(struct ip6t_entry) &&
523 (strcmp(t->target.u.user.name,
524 IP6T_STANDARD_TARGET) == 0) &&
526 unconditional(&e->ipv6)) || visited) {
527 unsigned int oldpos, size;
529 if ((strcmp(t->target.u.user.name,
530 IP6T_STANDARD_TARGET) == 0) &&
531 t->verdict < -NF_MAX_VERDICT - 1) {
532 duprintf("mark_source_chains: bad "
533 "negative verdict (%i)\n",
538 /* Return: backtrack through the last
541 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
542 #ifdef DEBUG_IP_FIREWALL_USER
544 & (1 << NF_INET_NUMHOOKS)) {
545 duprintf("Back unset "
552 pos = e->counters.pcnt;
553 e->counters.pcnt = 0;
555 /* We're at the start. */
559 e = (struct ip6t_entry *)
561 } while (oldpos == pos + e->next_offset);
564 size = e->next_offset;
565 e = (struct ip6t_entry *)
566 (entry0 + pos + size);
567 e->counters.pcnt = pos;
570 int newpos = t->verdict;
572 if (strcmp(t->target.u.user.name,
573 IP6T_STANDARD_TARGET) == 0 &&
575 if (newpos > newinfo->size -
576 sizeof(struct ip6t_entry)) {
577 duprintf("mark_source_chains: "
578 "bad verdict (%i)\n",
582 /* This a jump; chase it. */
583 duprintf("Jump rule %u -> %u\n",
586 /* ... this is a fallthru */
587 newpos = pos + e->next_offset;
589 e = (struct ip6t_entry *)
591 e->counters.pcnt = pos;
596 duprintf("Finished chain %u\n", hook);
602 cleanup_match(struct ip6t_entry_match *m, struct net *net, unsigned int *i)
604 struct xt_mtdtor_param par;
606 if (i && (*i)-- == 0)
610 par.match = m->u.kernel.match;
611 par.matchinfo = m->data;
612 par.family = NFPROTO_IPV6;
613 if (par.match->destroy != NULL)
614 par.match->destroy(&par);
615 module_put(par.match->me);
620 check_entry(const struct ip6t_entry *e, const char *name)
622 const struct ip6t_entry_target *t;
624 if (!ip6_checkentry(&e->ipv6)) {
625 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
629 if (e->target_offset + sizeof(struct ip6t_entry_target) >
633 t = ip6t_get_target_c(e);
634 if (e->target_offset + t->u.target_size > e->next_offset)
640 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
643 const struct ip6t_ip6 *ipv6 = par->entryinfo;
646 par->match = m->u.kernel.match;
647 par->matchinfo = m->data;
649 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
650 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
652 duprintf("ip_tables: check failed for `%s'.\n",
661 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
664 struct xt_match *match;
667 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
669 "ip6t_%s", m->u.user.name);
670 if (IS_ERR(match) || !match) {
671 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
672 return match ? PTR_ERR(match) : -ENOENT;
674 m->u.kernel.match = match;
676 ret = check_match(m, par, i);
682 module_put(m->u.kernel.match->me);
686 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
688 struct ip6t_entry_target *t = ip6t_get_target(e);
689 struct xt_tgchk_param par = {
693 .target = t->u.kernel.target,
695 .hook_mask = e->comefrom,
696 .family = NFPROTO_IPV6,
700 t = ip6t_get_target(e);
701 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
702 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
704 duprintf("ip_tables: check failed for `%s'.\n",
705 t->u.kernel.target->name);
712 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
715 struct ip6t_entry_target *t;
716 struct xt_target *target;
719 struct xt_mtchk_param mtpar;
721 ret = check_entry(e, name);
728 mtpar.entryinfo = &e->ipv6;
729 mtpar.hook_mask = e->comefrom;
730 mtpar.family = NFPROTO_IPV6;
731 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
733 goto cleanup_matches;
735 t = ip6t_get_target(e);
736 target = try_then_request_module(xt_find_target(AF_INET6,
739 "ip6t_%s", t->u.user.name);
740 if (IS_ERR(target) || !target) {
741 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
742 ret = target ? PTR_ERR(target) : -ENOENT;
743 goto cleanup_matches;
745 t->u.kernel.target = target;
747 ret = check_target(e, net, name);
752 module_put(t->u.kernel.target->me);
754 IP6T_MATCH_ITERATE(e, cleanup_match, net, &j);
758 static bool check_underflow(const struct ip6t_entry *e)
760 const struct ip6t_entry_target *t;
761 unsigned int verdict;
763 if (!unconditional(&e->ipv6))
765 t = ip6t_get_target_c(e);
766 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
768 verdict = ((struct ip6t_standard_target *)t)->verdict;
769 verdict = -verdict - 1;
770 return verdict == NF_DROP || verdict == NF_ACCEPT;
774 check_entry_size_and_hooks(struct ip6t_entry *e,
775 struct xt_table_info *newinfo,
776 const unsigned char *base,
777 const unsigned char *limit,
778 const unsigned int *hook_entries,
779 const unsigned int *underflows,
780 unsigned int valid_hooks)
784 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
785 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
786 duprintf("Bad offset %p\n", e);
791 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
792 duprintf("checking: element %p size %u\n",
797 /* Check hooks & underflows */
798 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
799 if (!(valid_hooks & (1 << h)))
801 if ((unsigned char *)e - base == hook_entries[h])
802 newinfo->hook_entry[h] = hook_entries[h];
803 if ((unsigned char *)e - base == underflows[h]) {
804 if (!check_underflow(e)) {
805 pr_err("Underflows must be unconditional and "
806 "use the STANDARD target with "
810 newinfo->underflow[h] = underflows[h];
814 /* Clear counters and comefrom */
815 e->counters = ((struct xt_counters) { 0, 0 });
820 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
822 struct xt_tgdtor_param par;
823 struct ip6t_entry_target *t;
825 /* Cleanup all matches */
826 IP6T_MATCH_ITERATE(e, cleanup_match, net, NULL);
827 t = ip6t_get_target(e);
830 par.target = t->u.kernel.target;
831 par.targinfo = t->data;
832 par.family = NFPROTO_IPV6;
833 if (par.target->destroy != NULL)
834 par.target->destroy(&par);
835 module_put(par.target->me);
838 /* Checks and translates the user-supplied table segment (held in
841 translate_table(struct net *net,
843 unsigned int valid_hooks,
844 struct xt_table_info *newinfo,
848 const unsigned int *hook_entries,
849 const unsigned int *underflows)
851 struct ip6t_entry *iter;
855 newinfo->size = size;
856 newinfo->number = number;
858 /* Init all hooks to impossible value. */
859 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
860 newinfo->hook_entry[i] = 0xFFFFFFFF;
861 newinfo->underflow[i] = 0xFFFFFFFF;
864 duprintf("translate_table: size %u\n", newinfo->size);
866 /* Walk through entries, checking offsets. */
867 xt_entry_foreach(iter, entry0, newinfo->size) {
868 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
869 entry0 + size, hook_entries, underflows, valid_hooks);
876 duprintf("translate_table: %u not %u entries\n",
881 /* Check hooks all assigned */
882 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
883 /* Only hooks which are valid */
884 if (!(valid_hooks & (1 << i)))
886 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
887 duprintf("Invalid hook entry %u %u\n",
891 if (newinfo->underflow[i] == 0xFFFFFFFF) {
892 duprintf("Invalid underflow %u %u\n",
898 if (!mark_source_chains(newinfo, valid_hooks, entry0))
901 /* Finally, each sanity check must pass */
903 xt_entry_foreach(iter, entry0, newinfo->size) {
904 ret = find_check_entry(iter, net, name, size);
911 xt_entry_foreach(iter, entry0, newinfo->size) {
914 cleanup_entry(iter, net);
919 /* And one copy for every other CPU */
920 for_each_possible_cpu(i) {
921 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
922 memcpy(newinfo->entries[i], entry0, newinfo->size);
929 get_counters(const struct xt_table_info *t,
930 struct xt_counters counters[])
932 struct ip6t_entry *iter;
937 /* Instead of clearing (by a previous call to memset())
938 * the counters and using adds, we set the counters
939 * with data used by 'current' CPU
941 * Bottom half has to be disabled to prevent deadlock
942 * if new softirq were to run and call ipt_do_table
945 curcpu = smp_processor_id();
948 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
949 SET_COUNTER(counters[i], iter->counters.bcnt,
950 iter->counters.pcnt);
954 for_each_possible_cpu(cpu) {
959 xt_entry_foreach(iter, t->entries[cpu], t->size) {
960 ADD_COUNTER(counters[i], iter->counters.bcnt,
961 iter->counters.pcnt);
964 xt_info_wrunlock(cpu);
969 static struct xt_counters *alloc_counters(const struct xt_table *table)
971 unsigned int countersize;
972 struct xt_counters *counters;
973 const struct xt_table_info *private = table->private;
975 /* We need atomic snapshot of counters: rest doesn't change
976 (other than comefrom, which userspace doesn't care
978 countersize = sizeof(struct xt_counters) * private->number;
979 counters = vmalloc_node(countersize, numa_node_id());
981 if (counters == NULL)
982 return ERR_PTR(-ENOMEM);
984 get_counters(private, counters);
990 copy_entries_to_user(unsigned int total_size,
991 const struct xt_table *table,
992 void __user *userptr)
994 unsigned int off, num;
995 const struct ip6t_entry *e;
996 struct xt_counters *counters;
997 const struct xt_table_info *private = table->private;
999 const void *loc_cpu_entry;
1001 counters = alloc_counters(table);
1002 if (IS_ERR(counters))
1003 return PTR_ERR(counters);
1005 /* choose the copy that is on our node/cpu, ...
1006 * This choice is lazy (because current thread is
1007 * allowed to migrate to another cpu)
1009 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1010 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1015 /* FIXME: use iterator macros --RR */
1016 /* ... then go back and fix counters and names */
1017 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1019 const struct ip6t_entry_match *m;
1020 const struct ip6t_entry_target *t;
1022 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1023 if (copy_to_user(userptr + off
1024 + offsetof(struct ip6t_entry, counters),
1026 sizeof(counters[num])) != 0) {
1031 for (i = sizeof(struct ip6t_entry);
1032 i < e->target_offset;
1033 i += m->u.match_size) {
1036 if (copy_to_user(userptr + off + i
1037 + offsetof(struct ip6t_entry_match,
1039 m->u.kernel.match->name,
1040 strlen(m->u.kernel.match->name)+1)
1047 t = ip6t_get_target_c(e);
1048 if (copy_to_user(userptr + off + e->target_offset
1049 + offsetof(struct ip6t_entry_target,
1051 t->u.kernel.target->name,
1052 strlen(t->u.kernel.target->name)+1) != 0) {
1063 #ifdef CONFIG_COMPAT
1064 static void compat_standard_from_user(void *dst, const void *src)
1066 int v = *(compat_int_t *)src;
1069 v += xt_compat_calc_jump(AF_INET6, v);
1070 memcpy(dst, &v, sizeof(v));
1073 static int compat_standard_to_user(void __user *dst, const void *src)
1075 compat_int_t cv = *(int *)src;
1078 cv -= xt_compat_calc_jump(AF_INET6, cv);
1079 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1083 compat_calc_match(const struct ip6t_entry_match *m, int *size)
1085 *size += xt_compat_match_offset(m->u.kernel.match);
1089 static int compat_calc_entry(const struct ip6t_entry *e,
1090 const struct xt_table_info *info,
1091 const void *base, struct xt_table_info *newinfo)
1093 const struct ip6t_entry_target *t;
1094 unsigned int entry_offset;
1097 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1098 entry_offset = (void *)e - base;
1099 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1100 t = ip6t_get_target_c(e);
1101 off += xt_compat_target_offset(t->u.kernel.target);
1102 newinfo->size -= off;
1103 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1107 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1108 if (info->hook_entry[i] &&
1109 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1110 newinfo->hook_entry[i] -= off;
1111 if (info->underflow[i] &&
1112 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1113 newinfo->underflow[i] -= off;
1118 static int compat_table_info(const struct xt_table_info *info,
1119 struct xt_table_info *newinfo)
1121 struct ip6t_entry *iter;
1122 void *loc_cpu_entry;
1125 if (!newinfo || !info)
1128 /* we dont care about newinfo->entries[] */
1129 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1130 newinfo->initial_entries = 0;
1131 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1132 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1133 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1141 static int get_info(struct net *net, void __user *user,
1142 const int *len, int compat)
1144 char name[IP6T_TABLE_MAXNAMELEN];
1148 if (*len != sizeof(struct ip6t_getinfo)) {
1149 duprintf("length %u != %zu\n", *len,
1150 sizeof(struct ip6t_getinfo));
1154 if (copy_from_user(name, user, sizeof(name)) != 0)
1157 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1158 #ifdef CONFIG_COMPAT
1160 xt_compat_lock(AF_INET6);
1162 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1163 "ip6table_%s", name);
1164 if (t && !IS_ERR(t)) {
1165 struct ip6t_getinfo info;
1166 const struct xt_table_info *private = t->private;
1167 #ifdef CONFIG_COMPAT
1168 struct xt_table_info tmp;
1171 ret = compat_table_info(private, &tmp);
1172 xt_compat_flush_offsets(AF_INET6);
1176 info.valid_hooks = t->valid_hooks;
1177 memcpy(info.hook_entry, private->hook_entry,
1178 sizeof(info.hook_entry));
1179 memcpy(info.underflow, private->underflow,
1180 sizeof(info.underflow));
1181 info.num_entries = private->number;
1182 info.size = private->size;
1183 strcpy(info.name, name);
1185 if (copy_to_user(user, &info, *len) != 0)
1193 ret = t ? PTR_ERR(t) : -ENOENT;
1194 #ifdef CONFIG_COMPAT
1196 xt_compat_unlock(AF_INET6);
1202 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1206 struct ip6t_get_entries get;
1209 if (*len < sizeof(get)) {
1210 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1213 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1215 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1216 duprintf("get_entries: %u != %zu\n",
1217 *len, sizeof(get) + get.size);
1221 t = xt_find_table_lock(net, AF_INET6, get.name);
1222 if (t && !IS_ERR(t)) {
1223 struct xt_table_info *private = t->private;
1224 duprintf("t->private->number = %u\n", private->number);
1225 if (get.size == private->size)
1226 ret = copy_entries_to_user(private->size,
1227 t, uptr->entrytable);
1229 duprintf("get_entries: I've got %u not %u!\n",
1230 private->size, get.size);
1236 ret = t ? PTR_ERR(t) : -ENOENT;
1242 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1243 struct xt_table_info *newinfo, unsigned int num_counters,
1244 void __user *counters_ptr)
1248 struct xt_table_info *oldinfo;
1249 struct xt_counters *counters;
1250 const void *loc_cpu_old_entry;
1251 struct ip6t_entry *iter;
1254 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1261 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1262 "ip6table_%s", name);
1263 if (!t || IS_ERR(t)) {
1264 ret = t ? PTR_ERR(t) : -ENOENT;
1265 goto free_newinfo_counters_untrans;
1269 if (valid_hooks != t->valid_hooks) {
1270 duprintf("Valid hook crap: %08X vs %08X\n",
1271 valid_hooks, t->valid_hooks);
1276 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1280 /* Update module usage count based on number of rules */
1281 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1282 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1283 if ((oldinfo->number > oldinfo->initial_entries) ||
1284 (newinfo->number <= oldinfo->initial_entries))
1286 if ((oldinfo->number > oldinfo->initial_entries) &&
1287 (newinfo->number <= oldinfo->initial_entries))
1290 /* Get the old counters, and synchronize with replace */
1291 get_counters(oldinfo, counters);
1293 /* Decrease module usage counts and free resource */
1294 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1295 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1296 cleanup_entry(iter, net);
1298 xt_free_table_info(oldinfo);
1299 if (copy_to_user(counters_ptr, counters,
1300 sizeof(struct xt_counters) * num_counters) != 0)
1309 free_newinfo_counters_untrans:
1316 do_replace(struct net *net, const void __user *user, unsigned int len)
1319 struct ip6t_replace tmp;
1320 struct xt_table_info *newinfo;
1321 void *loc_cpu_entry;
1322 struct ip6t_entry *iter;
1324 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1327 /* overflow check */
1328 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1331 newinfo = xt_alloc_table_info(tmp.size);
1335 /* choose the copy that is on our node/cpu */
1336 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1337 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1343 ret = translate_table(net, tmp.name, tmp.valid_hooks,
1344 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1345 tmp.hook_entry, tmp.underflow);
1349 duprintf("ip_tables: Translated table\n");
1351 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1352 tmp.num_counters, tmp.counters);
1354 goto free_newinfo_untrans;
1357 free_newinfo_untrans:
1358 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1359 cleanup_entry(iter, net);
1361 xt_free_table_info(newinfo);
1366 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1369 unsigned int i, curcpu;
1370 struct xt_counters_info tmp;
1371 struct xt_counters *paddc;
1372 unsigned int num_counters;
1377 const struct xt_table_info *private;
1379 const void *loc_cpu_entry;
1380 struct ip6t_entry *iter;
1381 #ifdef CONFIG_COMPAT
1382 struct compat_xt_counters_info compat_tmp;
1386 size = sizeof(struct compat_xt_counters_info);
1391 size = sizeof(struct xt_counters_info);
1394 if (copy_from_user(ptmp, user, size) != 0)
1397 #ifdef CONFIG_COMPAT
1399 num_counters = compat_tmp.num_counters;
1400 name = compat_tmp.name;
1404 num_counters = tmp.num_counters;
1408 if (len != size + num_counters * sizeof(struct xt_counters))
1411 paddc = vmalloc_node(len - size, numa_node_id());
1415 if (copy_from_user(paddc, user + size, len - size) != 0) {
1420 t = xt_find_table_lock(net, AF_INET6, name);
1421 if (!t || IS_ERR(t)) {
1422 ret = t ? PTR_ERR(t) : -ENOENT;
1428 private = t->private;
1429 if (private->number != num_counters) {
1431 goto unlock_up_free;
1435 /* Choose the copy that is on our node */
1436 curcpu = smp_processor_id();
1437 xt_info_wrlock(curcpu);
1438 loc_cpu_entry = private->entries[curcpu];
1439 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1440 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1443 xt_info_wrunlock(curcpu);
1455 #ifdef CONFIG_COMPAT
1456 struct compat_ip6t_replace {
1457 char name[IP6T_TABLE_MAXNAMELEN];
1461 u32 hook_entry[NF_INET_NUMHOOKS];
1462 u32 underflow[NF_INET_NUMHOOKS];
1464 compat_uptr_t counters; /* struct ip6t_counters * */
1465 struct compat_ip6t_entry entries[0];
/*
 * compat_copy_entry_to_user - convert one native entry to the 32-bit
 * compat layout directly into userspace memory at *dstptr, appending the
 * entry's counters, its matches and target, and then patching the compat
 * target_offset/next_offset to account for the layout shrinkage.
 * *dstptr and *size are advanced/adjusted as the conversion proceeds.
 */
1469 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1470 unsigned int *size, struct xt_counters *counters,
1473 struct ip6t_entry_target *t;
1474 struct compat_ip6t_entry __user *ce;
1475 u_int16_t target_offset, next_offset;
1476 compat_uint_t origsize;
/* Copy the fixed part of the entry plus this rule's counters. */
1480 ce = (struct compat_ip6t_entry __user *)*dstptr;
1481 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1482 copy_to_user(&ce->counters, &counters[i],
1483 sizeof(counters[i])) != 0)
1486 *dstptr += sizeof(struct compat_ip6t_entry);
1487 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
/* Each match/target converter further shrinks *size by its own delta. */
1489 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1490 target_offset = e->target_offset - (origsize - *size);
1493 t = ip6t_get_target(e);
1494 ret = xt_compat_target_to_user(t, dstptr, size);
1497 next_offset = e->next_offset - (origsize - *size);
/* Fix up the offsets stored in the already-copied compat header. */
1498 if (put_user(target_offset, &ce->target_offset) != 0 ||
1499 put_user(next_offset, &ce->next_offset) != 0)
/*
 * compat_find_calc_match - look up the xt_match named in a compat match
 * blob (auto-loading "ip6t_<name>" if needed), cache it in
 * m->u.kernel.match, and add its native-vs-compat size delta to *size.
 * Returns 0 on success, -ENOENT/PTR_ERR on lookup failure.
 */
1505 compat_find_calc_match(struct ip6t_entry_match *m,
1507 const struct ip6t_ip6 *ipv6,
1508 unsigned int hookmask,
1509 int *size, unsigned int *i)
1511 struct xt_match *match;
1513 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1514 m->u.user.revision),
1515 "ip6t_%s", m->u.user.name);
1516 if (IS_ERR(match) || !match) {
1517 duprintf("compat_check_calc_match: `%s' not found\n",
1519 return match ? PTR_ERR(match) : -ENOENT;
1521 m->u.kernel.match = match;
/* Accumulate how much larger the native match is than the compat one. */
1522 *size += xt_compat_match_offset(match);
/*
 * compat_release_match - drop the module reference taken by
 * compat_find_calc_match.  When @i is non-NULL it is a countdown so that
 * only the first *i (successfully set up) matches are released; the
 * early-return when the countdown hits zero is not visible in this chunk.
 */
1529 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1531 if (i && (*i)-- == 0)
1534 module_put(m->u.kernel.match->me);
/*
 * compat_release_entry - drop the module references held by all matches
 * and the target of one compat entry (undo of the compat lookup phase).
 */
1538 static void compat_release_entry(struct compat_ip6t_entry *e)
1540 struct ip6t_entry_target *t;
1542 /* Cleanup all matches */
1543 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1544 t = compat_ip6t_get_target(e);
1545 module_put(t->u.kernel.target->me);
/*
 * check_compat_entry_size_and_hooks - first-pass validation of one compat
 * entry: alignment and bounds checks, basic entry sanity, lookup of all
 * matches and the target (taking module references), accumulation of the
 * native-vs-compat size delta in 'off', registration of the per-entry
 * offset with xt_compat_add_offset(), and recording of hook entry /
 * underflow positions.  On error the taken references are released.
 */
1549 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1550 struct xt_table_info *newinfo,
1552 const unsigned char *base,
1553 const unsigned char *limit,
1554 const unsigned int *hook_entries,
1555 const unsigned int *underflows,
1558 struct ip6t_entry_target *t;
1559 struct xt_target *target;
1560 unsigned int entry_offset;
1564 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Reject misaligned entries or entries whose header runs past the blob. */
1565 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1566 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1567 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* next_offset must leave room for at least the entry plus a target. */
1571 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1572 sizeof(struct compat_xt_entry_target)) {
1573 duprintf("checking: element %p size %u\n",
1578 /* For purposes of check_entry casting the compat entry is fine */
1579 ret = check_entry((struct ip6t_entry *)e, name);
/* 'off' tracks how much bigger the native entry will be than this one. */
1583 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1584 entry_offset = (void *)e - (void *)base;
1586 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1587 &e->ipv6, e->comefrom, &off, &j);
1589 goto release_matches;
1591 t = compat_ip6t_get_target(e);
1592 target = try_then_request_module(xt_find_target(AF_INET6,
1594 t->u.user.revision),
1595 "ip6t_%s", t->u.user.name);
1596 if (IS_ERR(target) || !target) {
1597 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1599 ret = target ? PTR_ERR(target) : -ENOENT;
1600 goto release_matches;
1602 t->u.kernel.target = target;
1604 off += xt_compat_target_offset(target);
/* Remember this entry's cumulative size delta for offset translation. */
1606 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1610 /* Check hooks & underflows */
1611 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1612 if ((unsigned char *)e - base == hook_entries[h])
1613 newinfo->hook_entry[h] = hook_entries[h];
1614 if ((unsigned char *)e - base == underflows[h])
1615 newinfo->underflow[h] = underflows[h];
1618 /* Clear counters and comefrom */
1619 memset(&e->counters, 0, sizeof(e->counters));
/* Error path: release target (if taken), then the first j matches. */
1624 module_put(t->u.kernel.target->me);
/* NOTE(review): 'e' is a compat entry here, yet the native
 * IP6T_MATCH_ITERATE macro is used; the two layouts place matches at
 * different offsets -- verify this against COMPAT_IP6T_MATCH_ITERATE. */
1626 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/*
 * compat_copy_entry_from_user - second-pass conversion of one validated
 * compat entry into native layout at *dstptr: copies the fixed header and
 * counters, converts each match and the target in place, patches
 * target_offset/next_offset for the growth, and shifts any hook entry /
 * underflow offsets that lie beyond this entry.
 */
1631 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1632 unsigned int *size, const char *name,
1633 struct xt_table_info *newinfo, unsigned char *base)
1635 struct ip6t_entry_target *t;
1636 struct xt_target *target;
1637 struct ip6t_entry *de;
1638 unsigned int origsize;
1643 de = (struct ip6t_entry *)*dstptr;
/* Fixed header copy; the native struct is larger, hence *size grows. */
1644 memcpy(de, e, sizeof(struct ip6t_entry));
1645 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1647 *dstptr += sizeof(struct ip6t_entry);
1648 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1650 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
/* origsize - *size is negative growth; offsets move by that delta. */
1654 de->target_offset = e->target_offset - (origsize - *size);
1655 t = compat_ip6t_get_target(e);
1656 target = t->u.kernel.target;
1657 xt_compat_target_from_user(t, dstptr, size);
1659 de->next_offset = e->next_offset - (origsize - *size);
/* Hooks/underflows located after this entry shift by the same delta. */
1660 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1661 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1662 newinfo->hook_entry[h] -= origsize - *size;
1663 if ((unsigned char *)de - base < newinfo->underflow[h])
1664 newinfo->underflow[h] -= origsize - *size;
/*
 * compat_check_entry - run ->checkentry validation on an entry that has
 * already been converted to native layout: check each match (counting
 * successes in j) and then the target; on failure, clean up the first j
 * matches that had been checked.
 */
1669 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1674 struct xt_mtchk_param mtpar;
1679 mtpar.entryinfo = &e->ipv6;
1680 mtpar.hook_mask = e->comefrom;
1681 mtpar.family = NFPROTO_IPV6;
1682 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1684 goto cleanup_matches;
1686 ret = check_target(e, net, name);
1688 goto cleanup_matches;
/* Undo only the matches whose checkentry succeeded. */
1692 IP6T_MATCH_ITERATE(e, cleanup_match, net, &j);
/*
 * translate_compat_table - convert a 32-bit userspace ruleset blob into a
 * native xt_table_info in two passes under xt_compat_lock:
 *  pass 1: check_compat_entry_size_and_hooks() on every entry (size and
 *          hook discovery, module refs, per-entry offset registration);
 *  pass 2: compat_copy_entry_from_user() into a freshly allocated native
 *          table, then mark_source_chains() and compat_check_entry().
 * On success the native table replaces *pinfo and is duplicated to every
 * possible CPU; on failure references are released and memory freed.
 * Note: several error-handling and bookkeeping lines are not visible in
 * this chunk.
 */
1697 translate_compat_table(struct net *net,
1699 unsigned int valid_hooks,
1700 struct xt_table_info **pinfo,
1702 unsigned int total_size,
1703 unsigned int number,
1704 unsigned int *hook_entries,
1705 unsigned int *underflows)
1708 struct xt_table_info *newinfo, *info;
1709 void *pos, *entry0, *entry1;
1710 struct compat_ip6t_entry *iter0;
1711 struct ip6t_entry *iter1;
1718 info->number = number;
1720 /* Init all hooks to impossible value. */
1721 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1722 info->hook_entry[i] = 0xFFFFFFFF;
1723 info->underflow[i] = 0xFFFFFFFF;
1726 duprintf("translate_compat_table: size %u\n", info->size);
1728 xt_compat_lock(AF_INET6);
1729 /* Walk through entries, checking offsets. */
1730 xt_entry_foreach(iter0, entry0, total_size) {
1731 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1732 entry0, entry0 + total_size, hook_entries, underflows,
/* The walk must have consumed exactly 'number' entries. */
1741 duprintf("translate_compat_table: %u not %u entries\n",
1746 /* Check hooks all assigned */
1747 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1748 /* Only hooks which are valid */
1749 if (!(valid_hooks & (1 << i)))
1751 if (info->hook_entry[i] == 0xFFFFFFFF) {
1752 duprintf("Invalid hook entry %u %u\n",
1753 i, hook_entries[i]);
1756 if (info->underflow[i] == 0xFFFFFFFF) {
1757 duprintf("Invalid underflow %u %u\n",
/* Pass 2: allocate the native-sized table and copy entries across. */
1764 newinfo = xt_alloc_table_info(size);
1768 newinfo->number = number;
1769 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1770 newinfo->hook_entry[i] = info->hook_entry[i];
1771 newinfo->underflow[i] = info->underflow[i];
1773 entry1 = newinfo->entries[raw_smp_processor_id()];
1776 xt_entry_foreach(iter0, entry0, total_size) {
1777 ret = compat_copy_entry_from_user(iter0, &pos,
1778 &size, name, newinfo, entry1);
/* Offset bookkeeping is only needed during translation. */
1782 xt_compat_flush_offsets(AF_INET6);
1783 xt_compat_unlock(AF_INET6);
1788 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1792 xt_entry_foreach(iter1, entry1, newinfo->size) {
1793 ret = compat_check_entry(iter1, net, name);
1800 * The first i matches need cleanup_entry (calls ->destroy)
1801 * because they had called ->check already. The other j-i
1802 * entries need only release.
1806 xt_entry_foreach(iter0, entry0, newinfo->size) {
1811 compat_release_entry(iter0);
1813 xt_entry_foreach(iter1, entry1, newinfo->size) {
1816 cleanup_entry(iter1, net);
1818 xt_free_table_info(newinfo);
1822 /* And one copy for every other CPU */
1823 for_each_possible_cpu(i)
1824 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1825 memcpy(newinfo->entries[i], entry1, newinfo->size);
1829 xt_free_table_info(info);
/* Error exits below: free the new table and release compat entry refs. */
1833 xt_free_table_info(newinfo);
1835 xt_entry_foreach(iter0, entry0, total_size) {
1838 compat_release_entry(iter0);
1842 xt_compat_flush_offsets(AF_INET6);
1843 xt_compat_unlock(AF_INET6);
/*
 * compat_do_replace - 32-bit analogue of do_replace: copy the compat
 * replace header and rule blob from userspace, translate the ruleset to
 * native layout via translate_compat_table(), then swap it in with
 * __do_replace().  On swap failure the translated entries are cleaned up.
 */
1848 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1851 struct compat_ip6t_replace tmp;
1852 struct xt_table_info *newinfo;
1853 void *loc_cpu_entry;
1854 struct ip6t_entry *iter;
1856 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1859 /* overflow check */
1860 if (tmp.size >= INT_MAX / num_possible_cpus())
1862 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1865 newinfo = xt_alloc_table_info(tmp.size);
1869 /* choose the copy that is on our node/cpu */
1870 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1871 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1877 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1878 &newinfo, &loc_cpu_entry, tmp.size,
1879 tmp.num_entries, tmp.hook_entry,
1884 duprintf("compat_do_replace: Translated table\n");
1886 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1887 tmp.num_counters, compat_ptr(tmp.counters));
1889 goto free_newinfo_untrans;
/* Swap failed: destroy the already-translated entries, free the table. */
1892 free_newinfo_untrans:
1893 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1894 cleanup_entry(iter, net);
1896 xt_free_table_info(newinfo);
/*
 * compat_do_ip6t_set_ctl - compat setsockopt dispatcher; requires
 * CAP_NET_ADMIN and routes REPLACE / ADD_COUNTERS to their compat-aware
 * handlers (do_add_counters is called with compat=1).
 */
1901 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1906 if (!capable(CAP_NET_ADMIN))
1910 case IP6T_SO_SET_REPLACE:
1911 ret = compat_do_replace(sock_net(sk), user, len);
1914 case IP6T_SO_SET_ADD_COUNTERS:
1915 ret = do_add_counters(sock_net(sk), user, len, 1);
1919 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * 32-bit userspace layout of struct ip6t_get_entries for the compat
 * getsockopt path; entries follow in compat layout.
 */
1926 struct compat_ip6t_get_entries {
1927 char name[IP6T_TABLE_MAXNAMELEN];
1929 struct compat_ip6t_entry entrytable[0];
/*
 * compat_copy_entries_to_user - dump a table's rules to a 32-bit caller:
 * snapshot the counters, then convert each entry with
 * compat_copy_entry_to_user() while walking one CPU's copy of the table.
 */
1933 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1934 void __user *userptr)
1936 struct xt_counters *counters;
1937 const struct xt_table_info *private = table->private;
1941 const void *loc_cpu_entry;
1943 struct ip6t_entry *iter;
1945 counters = alloc_counters(table);
1946 if (IS_ERR(counters))
1947 return PTR_ERR(counters);
1949 /* choose the copy that is on our node/cpu, ...
1950 * This choice is lazy (because current thread is
1951 * allowed to migrate to another cpu)
1953 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1956 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1957 ret = compat_copy_entry_to_user(iter, &pos,
1958 &size, counters, i++);
/*
 * compat_get_entries - IP6T_SO_GET_ENTRIES for 32-bit callers: validate
 * the userspace buffer length against the compat-sized table (via
 * compat_table_info()), then dump with compat_copy_entries_to_user().
 * Runs under xt_compat_lock so translation offsets are consistent.
 */
1968 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1972 struct compat_ip6t_get_entries get;
1975 if (*len < sizeof(get)) {
1976 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1980 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1983 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1984 duprintf("compat_get_entries: %u != %zu\n",
1985 *len, sizeof(get) + get.size);
1989 xt_compat_lock(AF_INET6);
1990 t = xt_find_table_lock(net, AF_INET6, get.name);
1991 if (t && !IS_ERR(t)) {
1992 const struct xt_table_info *private = t->private;
1993 struct xt_table_info info;
1994 duprintf("t->private->number = %u\n", private->number);
/* User-provided size must match the table's compat-converted size. */
1995 ret = compat_table_info(private, &info);
1996 if (!ret && get.size == info.size) {
1997 ret = compat_copy_entries_to_user(private->size,
1998 t, uptr->entrytable);
2000 duprintf("compat_get_entries: I've got %u not %u!\n",
2001 private->size, get.size);
2004 xt_compat_flush_offsets(AF_INET6);
2008 ret = t ? PTR_ERR(t) : -ENOENT;
2010 xt_compat_unlock(AF_INET6);
/* Forward declaration: the default case below falls back to the native
 * getsockopt handler defined later in this file. */
2014 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * compat_do_ip6t_get_ctl - compat getsockopt dispatcher; requires
 * CAP_NET_ADMIN.  GET_INFO/GET_ENTRIES use compat-aware paths; anything
 * else is layout-independent and is delegated to do_ip6t_get_ctl().
 */
2017 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2021 if (!capable(CAP_NET_ADMIN))
2025 case IP6T_SO_GET_INFO:
2026 ret = get_info(sock_net(sk), user, len, 1);
2028 case IP6T_SO_GET_ENTRIES:
2029 ret = compat_get_entries(sock_net(sk), user, len);
2032 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * do_ip6t_set_ctl - native setsockopt dispatcher; requires CAP_NET_ADMIN
 * and routes REPLACE / ADD_COUNTERS (with compat=0) to their handlers.
 */
2039 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2043 if (!capable(CAP_NET_ADMIN))
2047 case IP6T_SO_SET_REPLACE:
2048 ret = do_replace(sock_net(sk), user, len);
2051 case IP6T_SO_SET_ADD_COUNTERS:
2052 ret = do_add_counters(sock_net(sk), user, len, 0);
2056 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * do_ip6t_get_ctl - native getsockopt dispatcher; requires CAP_NET_ADMIN.
 * Handles table info/entry dumps and match/target revision queries (the
 * latter auto-load the "ip6t_<name>" module if the extension is missing).
 */
2064 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2068 if (!capable(CAP_NET_ADMIN))
2072 case IP6T_SO_GET_INFO:
2073 ret = get_info(sock_net(sk), user, len, 0);
2076 case IP6T_SO_GET_ENTRIES:
2077 ret = get_entries(sock_net(sk), user, len);
2080 case IP6T_SO_GET_REVISION_MATCH:
2081 case IP6T_SO_GET_REVISION_TARGET: {
2082 struct ip6t_get_revision rev;
/* The revision request struct must be passed whole, nothing more. */
2085 if (*len != sizeof(rev)) {
2089 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2094 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2099 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2102 "ip6t_%s", rev.name);
2107 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * ip6t_register_table - register a new ip6tables table for @net: allocate
 * an xt_table_info sized for the initial ruleset @repl, translate/validate
 * it, and hand it to x_tables via xt_register_table().  Returns the live
 * xt_table or an ERR_PTR; on failure the allocated info is freed.
 */
2114 struct xt_table *ip6t_register_table(struct net *net,
2115 const struct xt_table *table,
2116 const struct ip6t_replace *repl)
2119 struct xt_table_info *newinfo;
/* Empty placeholder table swapped out by xt_register_table(). */
2120 struct xt_table_info bootstrap
2121 = { 0, 0, 0, { 0 }, { 0 }, { } };
2122 void *loc_cpu_entry;
2123 struct xt_table *new_table;
2125 newinfo = xt_alloc_table_info(repl->size);
2131 /* choose the copy on our node/cpu, but dont care about preemption */
2132 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2133 memcpy(loc_cpu_entry, repl->entries, repl->size);
2135 ret = translate_table(net, table->name, table->valid_hooks,
2136 newinfo, loc_cpu_entry, repl->size,
2143 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2144 if (IS_ERR(new_table)) {
2145 ret = PTR_ERR(new_table);
2151 xt_free_table_info(newinfo);
2153 return ERR_PTR(ret);
/*
 * ip6t_unregister_table - tear down a registered table: unregister it from
 * x_tables, destroy every entry (releasing match/target module refs), drop
 * the table owner's module reference if user rules had pinned it, and free
 * the table info.
 */
2156 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2158 struct xt_table_info *private;
2159 void *loc_cpu_entry;
2160 struct module *table_owner = table->me;
2161 struct ip6t_entry *iter;
2163 private = xt_unregister_table(table);
2165 /* Decrease module usage counts and free resources */
2166 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2167 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2168 cleanup_entry(iter, net);
/* More rules than the built-ins means userspace held a module ref. */
2169 if (private->number > private->initial_entries)
2170 module_put(table_owner);
2171 xt_free_table_info(private);
2174 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2176 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2177 u_int8_t type, u_int8_t code,
/* Exact type match plus code within [min_code, max_code]; the invert
 * flag XOR (not visible here) flips the result. */
2180 return (type == test_type && code >= min_code && code <= max_code)
/*
 * icmp6_match - ->match hook for the built-in icmp6 match: pull the
 * ICMPv6 header at the transport offset and compare type/code against the
 * rule's configured range.  Non-first fragments never match; a truncated
 * header hot-drops the packet.
 */
2185 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2187 const struct icmp6hdr *ic;
2188 struct icmp6hdr _icmph;
2189 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2191 /* Must not be a fragment. */
2192 if (par->fragoff != 0)
2195 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2197 /* We've been asked to examine this packet, and we
2198 * can't. Hence, no choice but to drop.
2200 duprintf("Dropping evil ICMP tinygram.\n");
2201 *par->hotdrop = true;
2205 return icmp6_type_code_match(icmpinfo->type,
2208 ic->icmp6_type, ic->icmp6_code,
2209 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2212 /* Called when user tries to insert an entry of this type. */
2213 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2215 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2217 /* Must specify no unknown invflags */
2218 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2221 /* The built-in targets: standard (NULL) and error. */
/* Standard target: verdict is a plain int; compat hooks translate the
 * 32-bit verdict representation. */
2222 static struct xt_target ip6t_standard_target __read_mostly = {
2223 .name = IP6T_STANDARD_TARGET,
2224 .targetsize = sizeof(int),
2225 .family = NFPROTO_IPV6,
2226 #ifdef CONFIG_COMPAT
2227 .compatsize = sizeof(compat_int_t),
2228 .compat_from_user = compat_standard_from_user,
2229 .compat_to_user = compat_standard_to_user,
/* Error target: placed in error entries; its payload is a chain name. */
2233 static struct xt_target ip6t_error_target __read_mostly = {
2234 .name = IP6T_ERROR_TARGET,
2235 .target = ip6t_error,
2236 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2237 .family = NFPROTO_IPV6,
/*
 * Socket-option registration: wires the get/set (and compat) dispatchers
 * above into the netfilter sockopt range for ip6tables.
 */
2240 static struct nf_sockopt_ops ip6t_sockopts = {
2242 .set_optmin = IP6T_BASE_CTL,
2243 .set_optmax = IP6T_SO_SET_MAX+1,
2244 .set = do_ip6t_set_ctl,
2245 #ifdef CONFIG_COMPAT
2246 .compat_set = compat_do_ip6t_set_ctl,
2248 .get_optmin = IP6T_BASE_CTL,
2249 .get_optmax = IP6T_SO_GET_MAX+1,
2250 .get = do_ip6t_get_ctl,
2251 #ifdef CONFIG_COMPAT
2252 .compat_get = compat_do_ip6t_get_ctl,
2254 .owner = THIS_MODULE,
/* Built-in "icmp6" match registration (see icmp6_match/icmp6_checkentry). */
2257 static struct xt_match icmp6_matchstruct __read_mostly = {
2259 .match = icmp6_match,
2260 .matchsize = sizeof(struct ip6t_icmp),
2261 .checkentry = icmp6_checkentry,
2262 .proto = IPPROTO_ICMPV6,
2263 .family = NFPROTO_IPV6,
/* Per-network-namespace setup: register the IPv6 proto with x_tables. */
2266 static int __net_init ip6_tables_net_init(struct net *net)
2268 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-network-namespace teardown: mirror of ip6_tables_net_init. */
2271 static void __net_exit ip6_tables_net_exit(struct net *net)
2273 xt_proto_fini(net, NFPROTO_IPV6);
2276 static struct pernet_operations ip6_tables_net_ops = {
2277 .init = ip6_tables_net_init,
2278 .exit = ip6_tables_net_exit,
/*
 * ip6_tables_init - module init: register the pernet ops, the two built-in
 * targets, the icmp6 match and the sockopt interface.  Error paths below
 * unwind in reverse registration order.
 */
2281 static int __init ip6_tables_init(void)
2285 ret = register_pernet_subsys(&ip6_tables_net_ops);
2289 /* No one else will be downing sem now, so we won't sleep */
2290 ret = xt_register_target(&ip6t_standard_target);
2293 ret = xt_register_target(&ip6t_error_target);
2296 ret = xt_register_match(&icmp6_matchstruct);
2300 /* Register setsockopt */
2301 ret = nf_register_sockopt(&ip6t_sockopts);
2305 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Unwind labels (goto targets for the registrations above). */
2309 xt_unregister_match(&icmp6_matchstruct);
2311 xt_unregister_target(&ip6t_error_target);
2313 xt_unregister_target(&ip6t_standard_target);
2315 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything in reverse order of ip6_tables_init. */
2320 static void __exit ip6_tables_fini(void)
2322 nf_unregister_sockopt(&ip6t_sockopts);
2324 xt_unregister_match(&icmp6_matchstruct);
2325 xt_unregister_target(&ip6t_error_target);
2326 xt_unregister_target(&ip6t_standard_target);
2328 unregister_pernet_subsys(&ip6_tables_net_ops);
2332 * find the offset to specified header or the protocol number of last header
2333 * if target < 0. "last header" is transport protocol header, ESP, or
2336 * If target header is found, its offset is set in *offset and return protocol
2337 * number. Otherwise, return -1.
2339 * If the first fragment doesn't contain the final protocol header or
2340 * NEXTHDR_NONE it is considered invalid.
2342 * Note that non-1st fragment is special case that "the protocol number
2343 * of last header" is "next header" field in Fragment header. In this case,
2344 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2348 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2349 int target, unsigned short *fragoff)
/* Start scanning right after the fixed IPv6 header. */
2351 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2352 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2353 unsigned int len = skb->len - start;
/* Walk the extension-header chain until the target protocol is found. */
2358 while (nexthdr != target) {
2359 struct ipv6_opt_hdr _hdr, *hp;
2360 unsigned int hdrlen;
/* A non-extension header (or NEXTHDR_NONE) ends the chain. */
2362 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2368 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2371 if (nexthdr == NEXTHDR_FRAGMENT) {
2372 unsigned short _frag_off;
2374 fp = skb_header_pointer(skb,
2375 start+offsetof(struct frag_hdr,
/* Mask off the reserved/M-flag bits to get the fragment offset. */
2382 _frag_off = ntohs(*fp) & ~0x7;
2385 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2386 hp->nexthdr == NEXTHDR_NONE)) {
2388 *fragoff = _frag_off;
/* AH length is counted in 32-bit words; other ext hdrs in 64-bit units. */
2394 } else if (nexthdr == NEXTHDR_AUTH)
2395 hdrlen = (hp->hdrlen + 2) << 2;
2397 hdrlen = ipv6_optlen(hp);
2399 nexthdr = hp->nexthdr;
/* Public API exported to other modules (e.g. ip6table_filter, ip6t_*). */
2408 EXPORT_SYMBOL(ip6t_register_table);
2409 EXPORT_SYMBOL(ip6t_unregister_table);
2410 EXPORT_SYMBOL(ip6t_do_table);
2411 EXPORT_SYMBOL(ip6t_ext_hdr);
2412 EXPORT_SYMBOL(ipv6_find_hdr);
2414 module_init(ip6_tables_init);
2415 module_exit(ip6_tables_fini);