/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
/* Module metadata and debug helpers.
 * NOTE(review): this extract has lines elided (gaps in the leaked
 * line-number prefixes); the matching #else/#endif lines of the
 * conditional blocks below are missing from this view.
 */
32 MODULE_LICENSE("GPL");
33 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
34 MODULE_DESCRIPTION("IPv6 packet filter");
36 /*#define DEBUG_IP_FIREWALL*/
37 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
38 /*#define DEBUG_IP_FIREWALL_USER*/
/* dprintf: packet-path debug output; compiled out unless
 * DEBUG_IP_FIREWALL is defined. */
40 #ifdef DEBUG_IP_FIREWALL
41 #define dprintf(format, args...) printk(format , ## args)
43 #define dprintf(format, args...)
/* duprintf: userspace/configuration-path debug output; compiled out
 * unless DEBUG_IP_FIREWALL_USER is defined. */
46 #ifdef DEBUG_IP_FIREWALL_USER
47 #define duprintf(format, args...) printk(format , ## args)
49 #define duprintf(format, args...)
/* IP_NF_ASSERT: logs function/file/line when the assertion fails and
 * CONFIG_NETFILTER_DEBUG is enabled; otherwise a no-op. */
52 #ifdef CONFIG_NETFILTER_DEBUG
53 #define IP_NF_ASSERT(x) \
56 printk("IP_NF_ASSERT: %s:%s:%u\n", \
57 __FUNCTION__, __FILE__, __LINE__); \
60 #define IP_NF_ASSERT(x)
/* All the better to debug you with... */

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
78 /* Check for an extension */
80 ip6t_ext_hdr(u8 nexthdr)
82 return ( (nexthdr == IPPROTO_HOPOPTS) ||
83 (nexthdr == IPPROTO_ROUTING) ||
84 (nexthdr == IPPROTO_FRAGMENT) ||
85 (nexthdr == IPPROTO_ESP) ||
86 (nexthdr == IPPROTO_AH) ||
87 (nexthdr == IPPROTO_NONE) ||
88 (nexthdr == IPPROTO_DSTOPTS) );
91 /* Returns whether matches rule or not. */
/*
 * ip6_packet_match - test one packet against a rule's ip6t_ip6
 * selector: masked source/destination address, in/out interface name,
 * and the upper-layer protocol located via ipv6_find_hdr().  Each test
 * may be inverted by the corresponding IP6T_INV_* flag.
 * NOTE(review): lines are elided from this extract (gaps in the leaked
 * line-number prefixes); the code below is not the complete function.
 */
93 ip6_packet_match(const struct sk_buff *skb,
96 const struct ip6t_ip6 *ip6info,
97 unsigned int *protoff,
98 int *fragoff, bool *hotdrop)
102 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
/* FWINV: XOR the raw test result with the rule's inversion flag, so a
 * "true" means the packet fails the (possibly inverted) test. */
104 #define FWINV(bool,invflg) ((bool) ^ !!(ip6info->invflags & invflg))
106 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
107 &ip6info->src), IP6T_INV_SRCIP)
108 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
109 &ip6info->dst), IP6T_INV_DSTIP)) {
110 dprintf("Source or dest mismatch.\n");
/* NOTE(review): the dprintf lines below look like the tail of a
 * commented-out IPv4 debug fragment (note the stray comment
 * terminator at the end of the last one); its opening marker was
 * lost in extraction. */
112 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
113 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
114 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
115 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
116 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
117 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
/* Interface-name comparison is done a word at a time under the rule's
 * per-byte interface mask; a non-zero accumulator means mismatch. */
121 /* Look for ifname matches; this should unroll nicely. */
122 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
123 ret |= (((const unsigned long *)indev)[i]
124 ^ ((const unsigned long *)ip6info->iniface)[i])
125 & ((const unsigned long *)ip6info->iniface_mask)[i];
128 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
129 dprintf("VIA in mismatch (%s vs %s).%s\n",
130 indev, ip6info->iniface,
131 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
135 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
136 ret |= (((const unsigned long *)outdev)[i]
137 ^ ((const unsigned long *)ip6info->outiface)[i])
138 & ((const unsigned long *)ip6info->outiface_mask)[i];
141 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
142 dprintf("VIA out mismatch (%s vs %s).%s\n",
143 outdev, ip6info->outiface,
144 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
148 /* ... might want to do something with class and flowlabel here ... */
/* Protocol test: walk the extension-header chain to find the
 * upper-layer protocol; *protoff and *fragoff are updated for use by
 * later per-protocol matches. */
150 /* look for the desired protocol header */
151 if((ip6info->flags & IP6T_F_PROTO)) {
153 unsigned short _frag_off;
155 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
161 *fragoff = _frag_off;
163 dprintf("Packet protocol %hi ?= %s%hi.\n",
165 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
168 if (ip6info->proto == protohdr) {
169 if(ip6info->invflags & IP6T_INV_PROTO) {
175 /* We need match for the '-p all', too! */
176 if ((ip6info->proto != 0) &&
177 !(ip6info->invflags & IP6T_INV_PROTO))
183 /* should be ip6 safe */
185 ip6_checkentry(const struct ip6t_ip6 *ipv6)
187 if (ipv6->flags & ~IP6T_F_MASK) {
188 duprintf("Unknown flag bits set: %08X\n",
189 ipv6->flags & ~IP6T_F_MASK);
192 if (ipv6->invflags & ~IP6T_INV_MASK) {
193 duprintf("Unknown invflag bits set: %08X\n",
194 ipv6->invflags & ~IP6T_INV_MASK);
201 ip6t_error(struct sk_buff *skb,
202 const struct net_device *in,
203 const struct net_device *out,
204 unsigned int hooknum,
205 const struct xt_target *target,
206 const void *targinfo)
209 printk("ip6_tables: error: `%s'\n", (char *)targinfo);
215 bool do_match(struct ip6t_entry_match *m,
216 const struct sk_buff *skb,
217 const struct net_device *in,
218 const struct net_device *out,
220 unsigned int protoff,
223 /* Stop iteration if it doesn't match */
224 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
225 offset, protoff, hotdrop))
/* Translate a byte offset within a table blob into an entry pointer.
 * (void* arithmetic is a GNU extension, used throughout this file.)
 */
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
237 /* All zeroes == unconditional rule. */
239 unconditional(const struct ip6t_ip6 *ipv6)
243 for (i = 0; i < sizeof(*ipv6); i++)
244 if (((char *)ipv6)[i])
247 return (i == sizeof(*ipv6));
/* Packet-tracing support (TRACE target): names for hooks and rule
 * comments, plus helpers that walk a chain to find the rule a traced
 * packet hit and log it via nf_log_packet().
 * NOTE(review): lines are elided from this extract; closing braces and
 * several statements of the functions below are missing from view.
 */
250 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
251 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
252 /* This cries for unification! */
253 static const char *hooknames[] = {
254 [NF_INET_PRE_ROUTING] = "PREROUTING",
255 [NF_INET_LOCAL_IN] = "INPUT",
256 [NF_INET_FORWARD] = "FORWARD",
257 [NF_INET_LOCAL_OUT] = "OUTPUT",
258 [NF_INET_POST_ROUTING] = "POSTROUTING",
261 enum nf_ip_trace_comments {
262 NF_IP6_TRACE_COMMENT_RULE,
263 NF_IP6_TRACE_COMMENT_RETURN,
264 NF_IP6_TRACE_COMMENT_POLICY,
267 static const char *comments[] = {
268 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
269 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
270 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
273 static struct nf_loginfo trace_loginfo = {
274 .type = NF_LOG_TYPE_LOG,
278 .logflags = NF_LOG_MASK,
/* get_chainname_rulenum - iterator callback: while walking from the
 * chain head toward entry @e, record the current chain name (taken
 * from ERROR targets that head user chains) and count rules; also
 * classify chain tails as "policy" or "return". */
284 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
285 char *hookname, char **chainname,
286 char **comment, unsigned int *rulenum)
288 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
290 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
291 /* Head of user chain: ERROR target with chainname */
292 *chainname = t->target.data;
297 if (s->target_offset == sizeof(struct ip6t_entry)
298 && strcmp(t->target.u.kernel.target->name,
299 IP6T_STANDARD_TARGET) == 0
301 && unconditional(&s->ipv6)) {
302 /* Tail of chains: STANDARD target (return/policy) */
303 *comment = *chainname == hookname
304 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
305 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
/* trace_packet - emit one "TRACE: table:chain:comment:rulenum" log
 * line for a packet that hit entry @e in hook @hook. */
314 static void trace_packet(struct sk_buff *skb,
316 const struct net_device *in,
317 const struct net_device *out,
319 struct xt_table_info *private,
320 struct ip6t_entry *e)
323 struct ip6t_entry *root;
324 char *hookname, *chainname, *comment;
325 unsigned int rulenum = 0;
327 table_base = (void *)private->entries[smp_processor_id()];
328 root = get_entry(table_base, private->hook_entry[hook]);
330 hookname = chainname = (char *)hooknames[hook];
331 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
333 IP6T_ENTRY_ITERATE(root,
334 private->size - private->hook_entry[hook],
335 get_chainname_rulenum,
336 e, hookname, &chainname, &comment, &rulenum);
338 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
339 "TRACE: %s:%s:%s:%u ",
340 tablename, chainname, comment, rulenum);
344 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * ip6t_do_table - the packet-path rule interpreter.  Under the table
 * read lock it walks this CPU's copy of the ruleset starting at the
 * hook's entry point, matching each rule with ip6_packet_match() and
 * the rule's match extensions, bumping per-rule counters, and acting
 * on the target: standard verdicts terminate, RETURN pops the back
 * pointer stack (kept in the entries themselves), other targets are
 * invoked directly.
 * NOTE(review): many lines are elided from this extract (loop
 * structure, several branches and the function tail are missing);
 * the code below is not the complete function.
 */
346 ip6t_do_table(struct sk_buff *skb,
348 const struct net_device *in,
349 const struct net_device *out,
350 struct xt_table *table)
352 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
354 unsigned int protoff = 0;
355 bool hotdrop = false;
356 /* Initializing verdict to NF_DROP keeps gcc happy. */
357 unsigned int verdict = NF_DROP;
358 const char *indev, *outdev;
360 struct ip6t_entry *e, *back;
361 struct xt_table_info *private;
364 indev = in ? in->name : nulldevname;
365 outdev = out ? out->name : nulldevname;
366 /* We handle fragments by dealing with the first fragment as
367 * if it was a normal packet. All other fragments are treated
368 * normally, except that they will NEVER match rules that ask
369 * things we don't know, ie. tcp syn flag or ports). If the
370 * rule is also a fragment-specific rule, non-fragments won't
/* Read lock: the packet path only reads the ruleset; writers take the
 * write lock in user context (see the locking comment near the top of
 * the file). */
373 read_lock_bh(&table->lock);
374 private = table->private;
375 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
376 table_base = (void *)private->entries[smp_processor_id()];
377 e = get_entry(table_base, private->hook_entry[hook]);
379 /* For return from builtin chain */
380 back = get_entry(table_base, private->underflow[hook]);
385 if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
386 &protoff, &offset, &hotdrop)) {
387 struct ip6t_entry_target *t;
389 if (IP6T_MATCH_ITERATE(e, do_match,
391 offset, protoff, &hotdrop) != 0)
394 ADD_COUNTER(e->counters,
395 ntohs(ipv6_hdr(skb)->payload_len) +
396 sizeof(struct ipv6hdr), 1);
398 t = ip6t_get_target(e);
399 IP_NF_ASSERT(t->u.kernel.target);
401 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
402 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
403 /* The packet is traced: log it */
404 if (unlikely(skb->nf_trace))
405 trace_packet(skb, hook, in, out,
406 table->name, private, e);
/* A NULL target function marks the built-in standard target whose
 * verdict is stored inline in the rule. */
408 /* Standard target? */
409 if (!t->u.kernel.target->target) {
412 v = ((struct ip6t_standard_target *)t)->verdict;
414 /* Pop from stack? */
415 if (v != IP6T_RETURN) {
416 verdict = (unsigned)(-v) - 1;
420 back = get_entry(table_base,
424 if (table_base + v != (void *)e + e->next_offset
425 && !(e->ipv6.flags & IP6T_F_GOTO)) {
426 /* Save old back ptr in next entry */
427 struct ip6t_entry *next
428 = (void *)e + e->next_offset;
430 = (void *)back - table_base;
431 /* set back pointer to next entry */
435 e = get_entry(table_base, v);
437 /* Targets which reenter must return
439 #ifdef CONFIG_NETFILTER_DEBUG
440 ((struct ip6t_entry *)table_base)->comefrom
443 verdict = t->u.kernel.target->target(skb,
449 #ifdef CONFIG_NETFILTER_DEBUG
450 if (((struct ip6t_entry *)table_base)->comefrom
452 && verdict == IP6T_CONTINUE) {
453 printk("Target %s reentered!\n",
454 t->u.kernel.target->name);
457 ((struct ip6t_entry *)table_base)->comefrom
460 if (verdict == IP6T_CONTINUE)
461 e = (void *)e + e->next_offset;
469 e = (void *)e + e->next_offset;
473 #ifdef CONFIG_NETFILTER_DEBUG
474 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
476 read_unlock_bh(&table->lock);
478 #ifdef DEBUG_ALLOW_ALL
487 /* Figures out from what hook each rule can be called: returns 0 if
488 there are loops. Puts hook bitmask in comefrom. */
/*
 * mark_source_chains - iterative depth-first walk over a freshly
 * loaded ruleset.  For each valid hook it follows jumps and
 * fallthroughs, recording in each entry's `comefrom` the set of hooks
 * that can reach it; a revisit with the in-progress bit set means a
 * chain loop and the function fails.  Back pointers for the walk are
 * temporarily parked in e->counters.pcnt and reset on the way out.
 * NOTE(review): lines are elided from this extract (the do/while
 * structure and several branches are incomplete below).
 */
490 mark_source_chains(struct xt_table_info *newinfo,
491 unsigned int valid_hooks, void *entry0)
495 /* No recursion; use packet counter to save back ptrs (reset
496 to 0 as we leave), and comefrom to save source hook bitmask */
497 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
498 unsigned int pos = newinfo->hook_entry[hook];
500 = (struct ip6t_entry *)(entry0 + pos);
501 int visited = e->comefrom & (1 << hook);
503 if (!(valid_hooks & (1 << hook)))
506 /* Set initial back pointer. */
507 e->counters.pcnt = pos;
510 struct ip6t_standard_target *t
511 = (void *)ip6t_get_target(e);
/* Bit NF_INET_NUMHOOKS marks "walk in progress"; seeing it again
 * on the path means a loop. */
513 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
514 printk("iptables: loop hook %u pos %u %08X.\n",
515 hook, pos, e->comefrom);
519 |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
521 /* Unconditional return/END. */
522 if ((e->target_offset == sizeof(struct ip6t_entry)
523 && (strcmp(t->target.u.user.name,
524 IP6T_STANDARD_TARGET) == 0)
526 && unconditional(&e->ipv6)) || visited) {
527 unsigned int oldpos, size;
529 if (t->verdict < -NF_MAX_VERDICT - 1) {
530 duprintf("mark_source_chains: bad "
531 "negative verdict (%i)\n",
536 /* Return: backtrack through the last
539 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
540 #ifdef DEBUG_IP_FIREWALL_USER
542 & (1 << NF_INET_NUMHOOKS)) {
543 duprintf("Back unset "
550 pos = e->counters.pcnt;
551 e->counters.pcnt = 0;
553 /* We're at the start. */
557 e = (struct ip6t_entry *)
559 } while (oldpos == pos + e->next_offset);
562 size = e->next_offset;
563 e = (struct ip6t_entry *)
564 (entry0 + pos + size);
565 e->counters.pcnt = pos;
568 int newpos = t->verdict;
570 if (strcmp(t->target.u.user.name,
571 IP6T_STANDARD_TARGET) == 0
573 if (newpos > newinfo->size -
574 sizeof(struct ip6t_entry)) {
575 duprintf("mark_source_chains: "
576 "bad verdict (%i)\n",
580 /* This a jump; chase it. */
581 duprintf("Jump rule %u -> %u\n",
584 /* ... this is a fallthru */
585 newpos = pos + e->next_offset;
587 e = (struct ip6t_entry *)
589 e->counters.pcnt = pos;
594 duprintf("Finished chain %u\n", hook);
600 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
602 if (i && (*i)-- == 0)
605 if (m->u.kernel.match->destroy)
606 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
607 module_put(m->u.kernel.match->me);
612 check_entry(struct ip6t_entry *e, const char *name)
614 struct ip6t_entry_target *t;
616 if (!ip6_checkentry(&e->ipv6)) {
617 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
621 if (e->target_offset + sizeof(struct ip6t_entry_target) >
625 t = ip6t_get_target(e);
626 if (e->target_offset + t->u.target_size > e->next_offset)
/* Rule-validation helpers used while loading a table.
 * NOTE(review): lines are elided from this extract; braces, returns
 * and several statements of the functions below are missing from view.
 */
/* check_match - run xt_check_match() plus the match's own checkentry
 * hook against an already-resolved match. */
632 static inline int check_match(struct ip6t_entry_match *m, const char *name,
633 const struct ip6t_ip6 *ipv6,
634 unsigned int hookmask, unsigned int *i)
636 struct xt_match *match;
639 match = m->u.kernel.match;
640 ret = xt_check_match(match, AF_INET6, m->u.match_size - sizeof(*m),
641 name, hookmask, ipv6->proto,
642 ipv6->invflags & IP6T_INV_PROTO);
643 if (!ret && m->u.kernel.match->checkentry
644 && !m->u.kernel.match->checkentry(name, ipv6, match, m->data,
646 duprintf("ip_tables: check failed for `%s'.\n",
647 m->u.kernel.match->name);
/* find_check_match - resolve the match extension by name (loading the
 * ip6t_<name> module on demand), then validate it via check_match();
 * drops the module reference on validation failure. */
656 find_check_match(struct ip6t_entry_match *m,
658 const struct ip6t_ip6 *ipv6,
659 unsigned int hookmask,
662 struct xt_match *match;
665 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
667 "ip6t_%s", m->u.user.name);
668 if (IS_ERR(match) || !match) {
669 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
670 return match ? PTR_ERR(match) : -ENOENT;
672 m->u.kernel.match = match;
674 ret = check_match(m, name, ipv6, hookmask, i);
680 module_put(m->u.kernel.match->me);
/* check_target - run xt_check_target() plus the target's own
 * checkentry hook against an already-resolved target. */
684 static inline int check_target(struct ip6t_entry *e, const char *name)
686 struct ip6t_entry_target *t;
687 struct xt_target *target;
690 t = ip6t_get_target(e);
691 target = t->u.kernel.target;
692 ret = xt_check_target(target, AF_INET6, t->u.target_size - sizeof(*t),
693 name, e->comefrom, e->ipv6.proto,
694 e->ipv6.invflags & IP6T_INV_PROTO);
695 if (!ret && t->u.kernel.target->checkentry
696 && !t->u.kernel.target->checkentry(name, e, target, t->data,
698 duprintf("ip_tables: check failed for `%s'.\n",
699 t->u.kernel.target->name);
/* find_check_entry - full per-rule validation: basic checks, resolve
 * and check every match, then resolve and check the target; on any
 * failure, already-taken match references are released. */
706 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
709 struct ip6t_entry_target *t;
710 struct xt_target *target;
714 ret = check_entry(e, name);
719 ret = IP6T_MATCH_ITERATE(e, find_check_match, name, &e->ipv6,
722 goto cleanup_matches;
724 t = ip6t_get_target(e);
725 target = try_then_request_module(xt_find_target(AF_INET6,
728 "ip6t_%s", t->u.user.name);
729 if (IS_ERR(target) || !target) {
730 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
731 ret = target ? PTR_ERR(target) : -ENOENT;
732 goto cleanup_matches;
734 t->u.kernel.target = target;
736 ret = check_target(e, name);
743 module_put(t->u.kernel.target->me);
745 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/* check_entry_size_and_hooks - offset/alignment validation for one
 * entry during the first table walk, recording which entries sit at
 * the declared hook entry points and underflows, and zeroing the
 * counters/comefrom fields for later passes.
 * NOTE(review): lines are elided from this extract; error returns and
 * braces are missing from view.
 */
750 check_entry_size_and_hooks(struct ip6t_entry *e,
751 struct xt_table_info *newinfo,
753 unsigned char *limit,
754 const unsigned int *hook_entries,
755 const unsigned int *underflows,
760 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
761 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
762 duprintf("Bad offset %p\n", e);
767 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
768 duprintf("checking: element %p size %u\n",
773 /* Check hooks & underflows */
774 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
775 if ((unsigned char *)e - base == hook_entries[h])
776 newinfo->hook_entry[h] = hook_entries[h];
777 if ((unsigned char *)e - base == underflows[h])
778 newinfo->underflow[h] = underflows[h];
781 /* FIXME: underflows must be unconditional, standard verdicts
782 < 0 (not IP6T_RETURN). --RR */
784 /* Clear counters and comefrom */
785 e->counters = ((struct xt_counters) { 0, 0 });
793 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
795 struct ip6t_entry_target *t;
797 if (i && (*i)-- == 0)
800 /* Cleanup all matches */
801 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
802 t = ip6t_get_target(e);
803 if (t->u.kernel.target->destroy)
804 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
805 module_put(t->u.kernel.target->me);
809 /* Checks and translates the user-supplied table segment (held in
/*
 * translate_table - validate a whole userspace-supplied ruleset in
 * several passes: per-entry size/offset checks (which also locate hook
 * entry points and underflows), loop detection via
 * mark_source_chains(), then full match/target validation; finally the
 * checked blob is replicated to every possible CPU.
 * NOTE(review): lines are elided from this extract; error paths and
 * the function tail are missing from view.
 */
812 translate_table(const char *name,
813 unsigned int valid_hooks,
814 struct xt_table_info *newinfo,
818 const unsigned int *hook_entries,
819 const unsigned int *underflows)
824 newinfo->size = size;
825 newinfo->number = number;
827 /* Init all hooks to impossible value. */
828 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
829 newinfo->hook_entry[i] = 0xFFFFFFFF;
830 newinfo->underflow[i] = 0xFFFFFFFF;
833 duprintf("translate_table: size %u\n", newinfo->size);
835 /* Walk through entries, checking offsets. */
836 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
837 check_entry_size_and_hooks,
841 hook_entries, underflows, &i);
846 duprintf("translate_table: %u not %u entries\n",
851 /* Check hooks all assigned */
852 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
853 /* Only hooks which are valid */
854 if (!(valid_hooks & (1 << i)))
856 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
857 duprintf("Invalid hook entry %u %u\n",
861 if (newinfo->underflow[i] == 0xFFFFFFFF) {
862 duprintf("Invalid underflow %u %u\n",
868 if (!mark_source_chains(newinfo, valid_hooks, entry0))
871 /* Finally, each sanity check must pass */
873 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
874 find_check_entry, name, size, &i);
877 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
882 /* And one copy for every other CPU */
883 for_each_possible_cpu(i) {
884 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
885 memcpy(newinfo->entries[i], entry0, newinfo->size);
/* Counter snapshot helpers.
 * NOTE(review): lines are elided from this extract; signatures, braces
 * and returns of the helpers below are incomplete in this view.
 */
/* add_entry_to_counter - iterator callback: accumulate one entry's
 * byte/packet counters into total[*i]. */
893 add_entry_to_counter(const struct ip6t_entry *e,
894 struct xt_counters total[],
897 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* set_entry_to_counter - iterator callback: overwrite total[*i] with
 * one entry's counters (used for the first CPU to avoid a memset). */
904 set_entry_to_counter(const struct ip6t_entry *e,
905 struct ip6t_counters total[],
908 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* get_counters - sum the per-CPU rule counters into a single array:
 * SET from the current CPU's copy, then ADD every other CPU's copy. */
915 get_counters(const struct xt_table_info *t,
916 struct xt_counters counters[])
922 /* Instead of clearing (by a previous call to memset())
923 * the counters and using adds, we set the counters
924 * with data used by 'current' CPU
925 * We dont care about preemption here.
927 curcpu = raw_smp_processor_id();
930 IP6T_ENTRY_ITERATE(t->entries[curcpu],
932 set_entry_to_counter,
936 for_each_possible_cpu(cpu) {
940 IP6T_ENTRY_ITERATE(t->entries[cpu],
942 add_entry_to_counter,
/* alloc_counters - vmalloc a counter array sized for the table and
 * fill it atomically under the table write lock. */
948 static inline struct xt_counters *alloc_counters(struct xt_table *table)
950 unsigned int countersize;
951 struct xt_counters *counters;
952 struct xt_table_info *private = table->private;
954 /* We need atomic snapshot of counters: rest doesn't change
955 (other than comefrom, which userspace doesn't care
957 countersize = sizeof(struct xt_counters) * private->number;
958 counters = vmalloc_node(countersize, numa_node_id());
960 if (counters == NULL)
961 return ERR_PTR(-ENOMEM);
963 /* First, sum counters... */
964 write_lock_bh(&table->lock);
965 get_counters(private, counters);
966 write_unlock_bh(&table->lock);
/* copy_entries_to_user - export the ruleset to userspace: bulk-copy
 * this CPU's entry blob, then patch in the summed counters and replace
 * kernel pointers with match/target names entry by entry.
 * NOTE(review): lines are elided from this extract; error-path
 * cleanups and several statements are missing from view.
 */
972 copy_entries_to_user(unsigned int total_size,
973 struct xt_table *table,
974 void __user *userptr)
976 unsigned int off, num;
977 struct ip6t_entry *e;
978 struct xt_counters *counters;
979 struct xt_table_info *private = table->private;
983 counters = alloc_counters(table);
984 if (IS_ERR(counters))
985 return PTR_ERR(counters);
987 /* choose the copy that is on our node/cpu */
988 loc_cpu_entry = private->entries[raw_smp_processor_id()];
989 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
994 /* FIXME: use iterator macros --RR */
995 /* ... then go back and fix counters and names */
996 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
998 struct ip6t_entry_match *m;
999 struct ip6t_entry_target *t;
1001 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1002 if (copy_to_user(userptr + off
1003 + offsetof(struct ip6t_entry, counters),
1005 sizeof(counters[num])) != 0) {
1010 for (i = sizeof(struct ip6t_entry);
1011 i < e->target_offset;
1012 i += m->u.match_size) {
1015 if (copy_to_user(userptr + off + i
1016 + offsetof(struct ip6t_entry_match,
1018 m->u.kernel.match->name,
1019 strlen(m->u.kernel.match->name)+1)
1026 t = ip6t_get_target(e);
1027 if (copy_to_user(userptr + off + e->target_offset
1028 + offsetof(struct ip6t_entry_target,
1030 t->u.kernel.target->name,
1031 strlen(t->u.kernel.target->name)+1) != 0) {
1042 #ifdef CONFIG_COMPAT
1043 static void compat_standard_from_user(void *dst, void *src)
1045 int v = *(compat_int_t *)src;
1048 v += xt_compat_calc_jump(AF_INET6, v);
1049 memcpy(dst, &v, sizeof(v));
1052 static int compat_standard_to_user(void __user *dst, void *src)
1054 compat_int_t cv = *(int *)src;
1057 cv -= xt_compat_calc_jump(AF_INET6, cv);
1058 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1062 compat_calc_match(struct ip6t_entry_match *m, int *size)
1064 *size += xt_compat_match_offset(m->u.kernel.match);
/* compat_calc_entry - compute how much smaller one entry is in its
 * compat (32-bit) representation, register that delta with the
 * xt_compat offset table, and shrink newinfo's size and any hook
 * entry/underflow offsets that lie past this entry.
 * NOTE(review): lines are elided from this extract; declarations,
 * braces and the return are missing from view.
 */
1068 static int compat_calc_entry(struct ip6t_entry *e,
1069 const struct xt_table_info *info,
1070 void *base, struct xt_table_info *newinfo)
1072 struct ip6t_entry_target *t;
1073 unsigned int entry_offset;
1076 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1077 entry_offset = (void *)e - base;
1078 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1079 t = ip6t_get_target(e);
1080 off += xt_compat_target_offset(t->u.kernel.target);
1081 newinfo->size -= off;
1082 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1086 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1087 if (info->hook_entry[i] &&
1088 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1089 newinfo->hook_entry[i] -= off;
1090 if (info->underflow[i] &&
1091 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1092 newinfo->underflow[i] -= off;
1097 static int compat_table_info(const struct xt_table_info *info,
1098 struct xt_table_info *newinfo)
1100 void *loc_cpu_entry;
1102 if (!newinfo || !info)
1105 /* we dont care about newinfo->entries[] */
1106 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1107 newinfo->initial_entries = 0;
1108 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1109 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1110 compat_calc_entry, info, loc_cpu_entry,
/* get_info - IP6T_SO_GET_INFO handler: look up the named table
 * (loading its module on demand) and copy hook offsets, entry count
 * and size back to userspace; in compat mode the sizes are first
 * recomputed via compat_table_info().
 * NOTE(review): lines are elided from this extract; error returns,
 * #else/#endif lines and unlock paths are missing from view.
 */
1115 static int get_info(void __user *user, int *len, int compat)
1117 char name[IP6T_TABLE_MAXNAMELEN];
1121 if (*len != sizeof(struct ip6t_getinfo)) {
1122 duprintf("length %u != %u\n", *len,
1123 sizeof(struct ip6t_getinfo));
1127 if (copy_from_user(name, user, sizeof(name)) != 0)
1130 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1131 #ifdef CONFIG_COMPAT
1133 xt_compat_lock(AF_INET6);
1135 t = try_then_request_module(xt_find_table_lock(AF_INET6, name),
1136 "ip6table_%s", name);
1137 if (t && !IS_ERR(t)) {
1138 struct ip6t_getinfo info;
1139 struct xt_table_info *private = t->private;
1141 #ifdef CONFIG_COMPAT
1143 struct xt_table_info tmp;
1144 ret = compat_table_info(private, &tmp);
1145 xt_compat_flush_offsets(AF_INET6);
1149 info.valid_hooks = t->valid_hooks;
1150 memcpy(info.hook_entry, private->hook_entry,
1151 sizeof(info.hook_entry));
1152 memcpy(info.underflow, private->underflow,
1153 sizeof(info.underflow));
1154 info.num_entries = private->number;
1155 info.size = private->size;
1156 memcpy(info.name, name, sizeof(info.name));
1158 if (copy_to_user(user, &info, *len) != 0)
1166 ret = t ? PTR_ERR(t) : -ENOENT;
1167 #ifdef CONFIG_COMPAT
1169 xt_compat_unlock(AF_INET6);
/* get_entries - IP6T_SO_GET_ENTRIES handler: validate the requested
 * size against the live table and export the rules with
 * copy_entries_to_user().
 * NOTE(review): lines are elided from this extract; error returns and
 * the unlock/module-put tail are missing from view.
 */
1175 get_entries(struct ip6t_get_entries __user *uptr, int *len)
1178 struct ip6t_get_entries get;
1181 if (*len < sizeof(get)) {
1182 duprintf("get_entries: %u < %u\n", *len, sizeof(get));
1185 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1187 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1188 duprintf("get_entries: %u != %u\n", *len,
1189 sizeof(struct ip6t_get_entries) + get.size);
1193 t = xt_find_table_lock(AF_INET6, get.name);
1194 if (t && !IS_ERR(t)) {
1195 struct xt_table_info *private = t->private;
1196 duprintf("t->private->number = %u\n", private->number);
1197 if (get.size == private->size)
1198 ret = copy_entries_to_user(private->size,
1199 t, uptr->entrytable);
1201 duprintf("get_entries: I've got %u not %u!\n",
1202 private->size, entries->size);
1208 ret = t ? PTR_ERR(t) : -ENOENT;
/* __do_replace - swap a validated new ruleset into the named table via
 * xt_replace_table(), adjust the table module's use count, harvest the
 * old rules' counters for userspace, and tear the old ruleset down.
 * NOTE(review): lines are elided from this extract; several error
 * paths and the function tail are missing from view.
 */
1214 __do_replace(const char *name, unsigned int valid_hooks,
1215 struct xt_table_info *newinfo, unsigned int num_counters,
1216 void __user *counters_ptr)
1220 struct xt_table_info *oldinfo;
1221 struct xt_counters *counters;
1222 void *loc_cpu_old_entry;
1225 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1232 t = try_then_request_module(xt_find_table_lock(AF_INET6, name),
1233 "ip6table_%s", name);
1234 if (!t || IS_ERR(t)) {
1235 ret = t ? PTR_ERR(t) : -ENOENT;
1236 goto free_newinfo_counters_untrans;
1240 if (valid_hooks != t->valid_hooks) {
1241 duprintf("Valid hook crap: %08X vs %08X\n",
1242 valid_hooks, t->valid_hooks);
1247 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1251 /* Update module usage count based on number of rules */
1252 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1253 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1254 if ((oldinfo->number > oldinfo->initial_entries) ||
1255 (newinfo->number <= oldinfo->initial_entries))
1257 if ((oldinfo->number > oldinfo->initial_entries) &&
1258 (newinfo->number <= oldinfo->initial_entries))
1261 /* Get the old counters. */
1262 get_counters(oldinfo, counters);
1263 /* Decrease module usage counts and free resource */
1264 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1265 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1267 xt_free_table_info(oldinfo);
1268 if (copy_to_user(counters_ptr, counters,
1269 sizeof(struct xt_counters) * num_counters) != 0)
1278 free_newinfo_counters_untrans:
/* do_replace - IP6T_SO_SET_REPLACE handler: copy the ip6t_replace
 * header and entry blob from userspace, validate/translate it with
 * translate_table(), then install it via __do_replace(); on failure
 * the new table is cleaned up and freed.
 * NOTE(review): lines are elided from this extract; several error
 * returns and braces are missing from view.
 */
1285 do_replace(void __user *user, unsigned int len)
1288 struct ip6t_replace tmp;
1289 struct xt_table_info *newinfo;
1290 void *loc_cpu_entry;
1292 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1295 /* overflow check */
1296 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1299 newinfo = xt_alloc_table_info(tmp.size);
1303 /* choose the copy that is on our node/cpu */
1304 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1305 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1311 ret = translate_table(tmp.name, tmp.valid_hooks,
1312 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1313 tmp.hook_entry, tmp.underflow);
1317 duprintf("ip_tables: Translated table\n");
1319 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
1320 tmp.num_counters, tmp.counters);
1322 goto free_newinfo_untrans;
1325 free_newinfo_untrans:
1326 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1328 xt_free_table_info(newinfo);
1332 /* We're lazy, and add to the first CPU; overflow works its fey magic
1333 * and everything is OK. */
/* add_counter_to_entry - iterator callback: fold addme[*i] into one
 * entry's counters. */
1335 add_counter_to_entry(struct ip6t_entry *e,
1336 const struct xt_counters addme[],
1340 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1342 (long unsigned int)e->counters.pcnt,
1343 (long unsigned int)e->counters.bcnt,
1344 (long unsigned int)addme[*i].pcnt,
1345 (long unsigned int)addme[*i].bcnt);
1348 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
/* do_add_counters - IP6T_SO_SET_ADD_COUNTERS handler: copy the
 * (native or compat) counter header and array from userspace, check
 * the count against the table, and add them into this CPU's rule
 * counters under the table write lock.
 * NOTE(review): lines are elided from this extract; #else/#endif
 * lines, error returns and the unlock/free tail are missing from
 * view. */
1355 do_add_counters(void __user *user, unsigned int len, int compat)
1358 struct xt_counters_info tmp;
1359 struct xt_counters *paddc;
1360 unsigned int num_counters;
1364 struct xt_table_info *private;
1367 void *loc_cpu_entry;
1368 #ifdef CONFIG_COMPAT
1369 struct compat_xt_counters_info compat_tmp;
1373 size = sizeof(struct compat_xt_counters_info);
1378 size = sizeof(struct xt_counters_info);
1381 if (copy_from_user(ptmp, user, size) != 0)
1384 #ifdef CONFIG_COMPAT
1386 num_counters = compat_tmp.num_counters;
1387 name = compat_tmp.name;
1391 num_counters = tmp.num_counters;
1395 if (len != size + num_counters * sizeof(struct xt_counters))
1398 paddc = vmalloc_node(len - size, numa_node_id());
1402 if (copy_from_user(paddc, user + size, len - size) != 0) {
1407 t = xt_find_table_lock(AF_INET6, name);
1408 if (!t || IS_ERR(t)) {
1409 ret = t ? PTR_ERR(t) : -ENOENT;
1413 write_lock_bh(&t->lock);
1414 private = t->private;
1415 if (private->number != num_counters) {
1417 goto unlock_up_free;
1421 /* Choose the copy that is on our node */
1422 loc_cpu_entry = private->entries[smp_processor_id()];
1423 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1425 add_counter_to_entry,
1429 write_unlock_bh(&t->lock);
1438 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace for the compat
 * set/getsockopt path.
 * NOTE(review): lines are elided from this extract (size/entry-count
 * fields and the closing brace are missing from view). */
1439 struct compat_ip6t_replace {
1440 char name[IP6T_TABLE_MAXNAMELEN];
1444 u32 hook_entry[NF_INET_NUMHOOKS];
1445 u32 underflow[NF_INET_NUMHOOKS];
1447 compat_uptr_t counters; /* struct ip6t_counters * */
1448 struct compat_ip6t_entry entries[0];
/* compat_copy_entry_to_user - serialize one kernel entry into the
 * 32-bit layout at *dstptr: copy the header and counters, convert each
 * match and the target with the xt_compat helpers, then patch the
 * shrunken target/next offsets into the userspace copy.
 * NOTE(review): lines are elided from this extract; error returns and
 * braces are missing from view. */
1452 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1453 compat_uint_t *size, struct xt_counters *counters,
1456 struct ip6t_entry_target *t;
1457 struct compat_ip6t_entry __user *ce;
1458 u_int16_t target_offset, next_offset;
1459 compat_uint_t origsize;
1464 ce = (struct compat_ip6t_entry __user *)*dstptr;
1465 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1468 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1471 *dstptr += sizeof(struct compat_ip6t_entry);
1472 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1474 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1475 target_offset = e->target_offset - (origsize - *size);
1478 t = ip6t_get_target(e);
1479 ret = xt_compat_target_to_user(t, dstptr, size);
1483 next_offset = e->next_offset - (origsize - *size);
1484 if (put_user(target_offset, &ce->target_offset))
1486 if (put_user(next_offset, &ce->next_offset))
/* compat_find_calc_match - resolve a match by name/revision (loading
 * the module on demand) and accumulate its compat size delta. */
1496 compat_find_calc_match(struct ip6t_entry_match *m,
1498 const struct ip6t_ip6 *ipv6,
1499 unsigned int hookmask,
1502 struct xt_match *match;
1504 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1505 m->u.user.revision),
1506 "ip6t_%s", m->u.user.name);
1507 if (IS_ERR(match) || !match) {
1508 duprintf("compat_check_calc_match: `%s' not found\n",
1510 return match ? PTR_ERR(match) : -ENOENT;
1512 m->u.kernel.match = match;
1513 *size += xt_compat_match_offset(match);
1520 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1522 if (i && (*i)-- == 0)
1525 module_put(m->u.kernel.match->me);
/*
 * Release all module references held by one compat entry: every match, then
 * the target.  Honors the same bounded-unwind counter convention as
 * compat_release_match (NULL counter = release all).
 */
1530 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1532 struct ip6t_entry_target *t;
1534 if (i && (*i)-- == 0)
1537 /* Cleanup all matches */
1538 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1539 t = compat_ip6t_get_target(e);
1540 module_put(t->u.kernel.target->me);
/*
 * First-pass validation of one compat entry from the user blob:
 *  - alignment and bounds check against [base, limit),
 *  - sanity-check next_offset against the minimum entry size,
 *  - look up each match and the target (loading modules on demand) while
 *    accumulating the compat->native size delta in "off",
 *  - register that delta with xt_compat_add_offset for later offset fixups,
 *  - record which user-supplied hook_entries/underflows this entry sits at.
 * On failure the match/target module refs taken so far are dropped.
 * (Several error-return lines fall in elided source and are not visible.)
 */
1545 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1546 struct xt_table_info *newinfo,
1548 unsigned char *base,
1549 unsigned char *limit,
1550 unsigned int *hook_entries,
1551 unsigned int *underflows,
1555 struct ip6t_entry_target *t;
1556 struct xt_target *target;
1557 unsigned int entry_offset;
1560 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Reject misaligned entries and entries whose header would run past the
 * end of the user-supplied blob. */
1561 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1562 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1563 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* next_offset must at least cover the entry header plus a target. */
1567 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1568 sizeof(struct compat_xt_entry_target)) {
1569 duprintf("checking: element %p size %u\n",
1574 /* For purposes of check_entry casting the compat entry is fine */
1575 ret = check_entry((struct ip6t_entry *)e, name);
/* Base size growth when expanded to the native layout; matches/target add
 * their own deltas below. */
1579 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1580 entry_offset = (void *)e - (void *)base;
1582 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1583 &e->ipv6, e->comefrom, &off, &j);
1585 goto release_matches;
1587 t = compat_ip6t_get_target(e);
1588 target = try_then_request_module(xt_find_target(AF_INET6,
1590 t->u.user.revision),
1591 "ip6t_%s", t->u.user.name);
1592 if (IS_ERR(target) || !target) {
1593 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1595 ret = target ? PTR_ERR(target) : -ENOENT;
1596 goto release_matches;
1598 t->u.kernel.target = target;
1600 off += xt_compat_target_offset(target);
/* Remember this entry's total size delta so translated offsets can be
 * computed during the copy-from-user pass. */
1602 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1606 /* Check hooks & underflows */
1607 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1608 if ((unsigned char *)e - base == hook_entries[h])
1609 newinfo->hook_entry[h] = hook_entries[h];
1610 if ((unsigned char *)e - base == underflows[h])
1611 newinfo->underflow[h] = underflows[h];
1614 /* Clear counters and comefrom */
1615 memset(&e->counters, 0, sizeof(e->counters));
1622 module_put(t->u.kernel.target->me);
/* NOTE(review): e is a COMPAT-layout entry, but this unwind walks it with
 * the native IP6T_MATCH_ITERATE, whose per-entry offsets assume the native
 * layout.  This looks like it should be COMPAT_IP6T_MATCH_ITERATE (compare
 * compat_release_entry above) — confirm against upstream history before
 * changing. */
1624 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/*
 * Second pass of compat translation: expand one already-validated compat
 * entry into its native ip6t_entry form at *dstptr, converting matches and
 * target with the xt compat helpers, rebasing target_offset/next_offset by
 * the size growth, and shifting the recorded hook offsets accordingly.
 */
1629 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1630 unsigned int *size, const char *name,
1631 struct xt_table_info *newinfo, unsigned char *base)
1633 struct ip6t_entry_target *t;
1634 struct xt_target *target;
1635 struct ip6t_entry *de;
1636 unsigned int origsize;
1641 de = (struct ip6t_entry *)*dstptr;
/* Copy header then counters; the native header is larger than the compat
 * one, so the two memcpys together re-lay-out the front of the entry. */
1642 memcpy(de, e, sizeof(struct ip6t_entry));
1643 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1645 *dstptr += sizeof(struct ip6t_entry);
1646 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1648 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
/* (origsize - *size) is negative growth here: offsets move outward. */
1652 de->target_offset = e->target_offset - (origsize - *size);
1653 t = compat_ip6t_get_target(e);
1654 target = t->u.kernel.target;
1655 xt_compat_target_from_user(t, dstptr, size);
1657 de->next_offset = e->next_offset - (origsize - *size);
/* Any hook/underflow offset past this entry shifts by the growth too. */
1658 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1659 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1660 newinfo->hook_entry[h] -= origsize - *size;
1661 if ((unsigned char *)de - base < newinfo->underflow[h])
1662 newinfo->underflow[h] -= origsize - *size;
/*
 * Final per-entry check on a translated (now native-layout) entry: run each
 * match's checkentry hook, then the target's.  On failure, unwind only the
 * matches that already passed (j counts them).
 */
1667 static inline int compat_check_entry(struct ip6t_entry *e, const char *name,
1673 ret = IP6T_MATCH_ITERATE(e, check_match, name, &e->ipv6,
1676 goto cleanup_matches;
1678 ret = check_target(e, name);
1680 goto cleanup_matches;
1686 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * Translate a whole compat-layout table blob (entry0/total_size) into a
 * freshly allocated native xt_table_info:
 *  pass 1 (under the xt compat lock): check every compat entry and compute
 *         per-entry size deltas plus hook/underflow positions;
 *  pass 2: expand each entry into native form in newinfo's first per-CPU
 *         copy, then drop the compat offset table and lock;
 *  then verify loop-freedom (mark_source_chains), run the per-entry
 *  checkentry hooks, and replicate the translated copy to every other CPU.
 * Error paths release module refs / free both table_info structures.
 * (Labels and several returns fall in elided lines.)
 */
1691 translate_compat_table(const char *name,
1692 unsigned int valid_hooks,
1693 struct xt_table_info **pinfo,
1695 unsigned int total_size,
1696 unsigned int number,
1697 unsigned int *hook_entries,
1698 unsigned int *underflows)
1701 struct xt_table_info *newinfo, *info;
1702 void *pos, *entry0, *entry1;
1709 info->number = number;
1711 /* Init all hooks to impossible value. */
1712 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1713 info->hook_entry[i] = 0xFFFFFFFF;
1714 info->underflow[i] = 0xFFFFFFFF;
1717 duprintf("translate_compat_table: size %u\n", info->size);
/* The compat offset table (xt_compat_add_offset) is global per-AF, so the
 * whole first pass runs under the compat lock. */
1719 xt_compat_lock(AF_INET6);
1720 /* Walk through entries, checking offsets. */
1721 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1722 check_compat_entry_size_and_hooks,
1723 info, &size, entry0,
1724 entry0 + total_size,
1725 hook_entries, underflows, &j, name);
1731 duprintf("translate_compat_table: %u not %u entries\n",
1736 /* Check hooks all assigned */
1737 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1738 /* Only hooks which are valid */
1739 if (!(valid_hooks & (1 << i)))
1741 if (info->hook_entry[i] == 0xFFFFFFFF) {
1742 duprintf("Invalid hook entry %u %u\n",
1743 i, hook_entries[i]);
1746 if (info->underflow[i] == 0xFFFFFFFF) {
1747 duprintf("Invalid underflow %u %u\n",
/* "size" now includes the native-layout growth computed in pass 1. */
1754 newinfo = xt_alloc_table_info(size);
1758 newinfo->number = number;
1759 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1760 newinfo->hook_entry[i] = info->hook_entry[i];
1761 newinfo->underflow[i] = info->underflow[i];
1763 entry1 = newinfo->entries[raw_smp_processor_id()];
1766 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1767 compat_copy_entry_from_user,
1768 &pos, &size, name, newinfo, entry1);
1769 xt_compat_flush_offsets(AF_INET6);
1770 xt_compat_unlock(AF_INET6);
1775 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1779 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* NOTE(review): this unwind walks the COMPAT blob entry0 but bounds it
 * with newinfo->size (the native, larger size) — verify the intended
 * bound; upstream later reworked this cleanup path. */
1783 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1784 compat_release_entry, &j);
1785 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1786 xt_free_table_info(newinfo);
1790 /* And one copy for every other CPU */
1791 for_each_possible_cpu(i)
1792 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1793 memcpy(newinfo->entries[i], entry1, newinfo->size);
1797 xt_free_table_info(info);
1801 xt_free_table_info(newinfo);
1803 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1806 xt_compat_flush_offsets(AF_INET6);
1807 xt_compat_unlock(AF_INET6);
1812 compat_do_replace(void __user *user, unsigned int len)
1815 struct compat_ip6t_replace tmp;
1816 struct xt_table_info *newinfo;
1817 void *loc_cpu_entry;
1819 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1822 /* overflow check */
1823 if (tmp.size >= INT_MAX / num_possible_cpus())
1825 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1828 newinfo = xt_alloc_table_info(tmp.size);
1832 /* choose the copy that is our node/cpu */
1833 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1834 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1840 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1841 &newinfo, &loc_cpu_entry, tmp.size,
1842 tmp.num_entries, tmp.hook_entry,
1847 duprintf("compat_do_replace: Translated table\n");
1849 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
1850 tmp.num_counters, compat_ptr(tmp.counters));
1852 goto free_newinfo_untrans;
1855 free_newinfo_untrans:
1856 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1858 xt_free_table_info(newinfo);
/*
 * compat setsockopt dispatcher: CAP_NET_ADMIN required; routes REPLACE to
 * the compat translator and ADD_COUNTERS to do_add_counters in compat mode
 * (third argument 1).
 */
1863 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1868 if (!capable(CAP_NET_ADMIN))
1872 case IP6T_SO_SET_REPLACE:
1873 ret = compat_do_replace(user, len);
1876 case IP6T_SO_SET_ADD_COUNTERS:
1877 ret = do_add_counters(user, len, 1);
1881 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* Compat request/reply shape for IP6T_SO_GET_ENTRIES: table name plus a
 * trailing variable-length array of compat-layout entries. */
1888 struct compat_ip6t_get_entries {
1889 char name[IP6T_TABLE_MAXNAMELEN];
1891 struct compat_ip6t_entry entrytable[0];
/*
 * Dump an entire table to 32-bit userspace: snapshot the counters, then
 * iterate the local CPU's entry copy converting each rule with
 * compat_copy_entry_to_user.
 */
1895 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1896 void __user *userptr)
1898 struct xt_counters *counters;
1899 struct xt_table_info *private = table->private;
1903 void *loc_cpu_entry;
1906 counters = alloc_counters(table);
1907 if (IS_ERR(counters))
1908 return PTR_ERR(counters);
1910 /* choose the copy that is on our node/cpu, ...
1911 * This choice is lazy (because current thread is
1912 * allowed to migrate to another cpu)
1914 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1917 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1918 compat_copy_entry_to_user,
1919 &pos, &size, counters, &i);
/*
 * compat IP6T_SO_GET_ENTRIES handler: validate the user-supplied lengths
 * against the expected compat table size, then (under the compat lock, so
 * offset bookkeeping is consistent) look up the table and dump it.
 */
1926 compat_get_entries(struct compat_ip6t_get_entries __user *uptr, int *len)
1929 struct compat_ip6t_get_entries get;
1932 if (*len < sizeof(get)) {
1933 duprintf("compat_get_entries: %u < %u\n",
1934 *len, (unsigned int)sizeof(get));
1938 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
/* Caller must size the buffer for header + get.size bytes of entries. */
1941 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1942 duprintf("compat_get_entries: %u != %u\n", *len,
1943 (unsigned int)(sizeof(struct compat_ip6t_get_entries) +
1948 xt_compat_lock(AF_INET6);
1949 t = xt_find_table_lock(AF_INET6, get.name);
1950 if (t && !IS_ERR(t)) {
1951 struct xt_table_info *private = t->private;
1952 struct xt_table_info info;
1953 duprintf("t->private->number = %u\n",
/* compat_table_info computes the table's size in compat layout; only
 * dump if it matches what userspace asked for. */
1955 ret = compat_table_info(private, &info);
1956 if (!ret && get.size == info.size) {
1957 ret = compat_copy_entries_to_user(private->size,
1958 t, uptr->entrytable);
1960 duprintf("compat_get_entries: I've got %u not %u!\n",
1965 xt_compat_flush_offsets(AF_INET6);
1969 ret = t ? PTR_ERR(t) : -ENOENT;
1971 xt_compat_unlock(AF_INET6);
1975 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * compat getsockopt dispatcher: CAP_NET_ADMIN required; INFO and ENTRIES go
 * through compat-aware handlers, everything else falls back to the native
 * do_ip6t_get_ctl (forward-declared above).
 */
1978 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1982 if (!capable(CAP_NET_ADMIN))
1986 case IP6T_SO_GET_INFO:
1987 ret = get_info(user, len, 1);
1989 case IP6T_SO_GET_ENTRIES:
1990 ret = compat_get_entries(user, len);
1993 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: CAP_NET_ADMIN required; REPLACE installs a
 * new table, ADD_COUNTERS folds userspace counters in (compat flag 0).
 */
2000 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2004 if (!capable(CAP_NET_ADMIN))
2008 case IP6T_SO_SET_REPLACE:
2009 ret = do_replace(user, len);
2012 case IP6T_SO_SET_ADD_COUNTERS:
2013 ret = do_add_counters(user, len, 0);
2017 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: table info, full entry dump, and match/
 * target revision queries (which may auto-load the "ip6t_<name>" module).
 */
2025 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2029 if (!capable(CAP_NET_ADMIN))
2033 case IP6T_SO_GET_INFO:
2034 ret = get_info(user, len, 0);
2037 case IP6T_SO_GET_ENTRIES:
2038 ret = get_entries(user, len);
2041 case IP6T_SO_GET_REVISION_MATCH:
2042 case IP6T_SO_GET_REVISION_TARGET: {
2043 struct ip6t_get_revision rev;
/* Fixed-size request: reject anything but an exact-size buffer. */
2046 if (*len != sizeof(rev)) {
2050 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2055 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2060 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2063 "ip6t_%s", rev.name);
2068 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Register a new ip6tables table: allocate per-CPU table storage, copy the
 * caller's initial ruleset into the local CPU slot, translate/validate it,
 * then hand it to the x_tables core.  newinfo is freed on every failure
 * path; on success ownership passes to xt_register_table.
 */
2075 int ip6t_register_table(struct xt_table *table,
2076 const struct ip6t_replace *repl)
2079 struct xt_table_info *newinfo;
/* Empty placeholder the xt core swaps out for the real info. */
2080 struct xt_table_info bootstrap
2081 = { 0, 0, 0, { 0 }, { 0 }, { } };
2082 void *loc_cpu_entry;
2084 newinfo = xt_alloc_table_info(repl->size);
2088 /* choose the copy on our node/cpu */
2089 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2090 memcpy(loc_cpu_entry, repl->entries, repl->size);
2092 ret = translate_table(table->name, table->valid_hooks,
2093 newinfo, loc_cpu_entry, repl->size,
2098 xt_free_table_info(newinfo);
2102 ret = xt_register_table(table, &bootstrap, newinfo);
2104 xt_free_table_info(newinfo);
/*
 * Tear down a registered table: detach it from the xt core, run per-entry
 * cleanup (dropping match/target module refs) on the local CPU's copy, and
 * free the per-CPU storage.
 */
2111 void ip6t_unregister_table(struct xt_table *table)
2113 struct xt_table_info *private;
2114 void *loc_cpu_entry;
2116 private = xt_unregister_table(table);
2118 /* Decrease module usage counts and free resources */
2119 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2120 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2121 xt_free_table_info(private);
2124 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* The invert XOR of this expression falls in an elided line; only the
 * in-range test is visible here. */
2126 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2127 u_int8_t type, u_int8_t code,
2130 return (type == test_type && code >= min_code && code <= max_code)
/*
 * xt_match handler for ICMPv6: pull the icmp6 header at protoff and test
 * type/code against the rule's range, honoring the IP6T_ICMP_INV flag.
 * An unreadable header on a non-fragment packet is treated as hostile
 * (dropped, per the comment below).
 */
2135 icmp6_match(const struct sk_buff *skb,
2136 const struct net_device *in,
2137 const struct net_device *out,
2138 const struct xt_match *match,
2139 const void *matchinfo,
2141 unsigned int protoff,
2144 struct icmp6hdr _icmp, *ic;
2145 const struct ip6t_icmp *icmpinfo = matchinfo;
2147 /* Must not be a fragment. */
/* skb_header_pointer copies into _icmp if the header is non-linear;
 * returns NULL when the packet is too short. */
2151 ic = skb_header_pointer(skb, protoff, sizeof(_icmp), &_icmp);
2153 /* We've been asked to examine this packet, and we
2154 can't. Hence, no choice but to drop. */
2155 duprintf("Dropping evil ICMP tinygram.\n");
2160 return icmp6_type_code_match(icmpinfo->type,
2163 ic->icmp6_type, ic->icmp6_code,
2164 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2167 /* Called when user tries to insert an entry of this type. */
/* Only validation needed: reject any invflags bit other than the one
 * defined inversion flag. */
2169 icmp6_checkentry(const char *tablename,
2171 const struct xt_match *match,
2173 unsigned int hook_mask)
2175 const struct ip6t_icmp *icmpinfo = matchinfo;
2177 /* Must specify no unknown invflags */
2178 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2181 /* The built-in targets: standard (NULL) and error. */
/* Standard verdict target: the target data is a plain int verdict, with
 * compat conversion helpers for 32-bit userspace. */
2182 static struct xt_target ip6t_standard_target __read_mostly = {
2183 .name = IP6T_STANDARD_TARGET,
2184 .targetsize = sizeof(int),
2186 #ifdef CONFIG_COMPAT
2187 .compatsize = sizeof(compat_int_t),
2188 .compat_from_user = compat_standard_from_user,
2189 .compat_to_user = compat_standard_to_user,
/* Error target: marks chain ends / userspace-defined chain heads; its data
 * is the chain-name string, handled by ip6t_error. */
2193 static struct xt_target ip6t_error_target __read_mostly = {
2194 .name = IP6T_ERROR_TARGET,
2195 .target = ip6t_error,
2196 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
/* sockopt registration: routes the IP6T_SO_SET_*/IP6T_SO_GET_* ranges to
 * the native handlers, with compat variants when CONFIG_COMPAT is set. */
2200 static struct nf_sockopt_ops ip6t_sockopts = {
2202 .set_optmin = IP6T_BASE_CTL,
2203 .set_optmax = IP6T_SO_SET_MAX+1,
2204 .set = do_ip6t_set_ctl,
2205 #ifdef CONFIG_COMPAT
2206 .compat_set = compat_do_ip6t_set_ctl,
2208 .get_optmin = IP6T_BASE_CTL,
2209 .get_optmax = IP6T_SO_GET_MAX+1,
2210 .get = do_ip6t_get_ctl,
2211 #ifdef CONFIG_COMPAT
2212 .compat_get = compat_do_ip6t_get_ctl,
2214 .owner = THIS_MODULE,
/* Built-in ICMPv6 match, restricted to IPPROTO_ICMPV6 packets. */
2217 static struct xt_match icmp6_matchstruct __read_mostly = {
2219 .match = &icmp6_match,
2220 .matchsize = sizeof(struct ip6t_icmp),
2221 .checkentry = icmp6_checkentry,
2222 .proto = IPPROTO_ICMPV6,
/*
 * Module init: register with the xt core, then the two built-in targets,
 * the ICMPv6 match, and finally the sockopt interface.  The trailing lines
 * are the error-unwind ladder, undoing registrations in reverse order.
 */
2226 static int __init ip6_tables_init(void)
2230 ret = xt_proto_init(AF_INET6);
2234 /* Noone else will be downing sem now, so we won't sleep */
2235 ret = xt_register_target(&ip6t_standard_target);
2238 ret = xt_register_target(&ip6t_error_target);
2241 ret = xt_register_match(&icmp6_matchstruct);
2245 /* Register setsockopt */
2246 ret = nf_register_sockopt(&ip6t_sockopts);
2250 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Error unwind: reverse order of the registrations above. */
2254 xt_unregister_match(&icmp6_matchstruct);
2256 xt_unregister_target(&ip6t_error_target);
2258 xt_unregister_target(&ip6t_standard_target);
2260 xt_proto_fini(AF_INET6);
/* Module exit: unregister everything ip6_tables_init set up, in reverse. */
2265 static void __exit ip6_tables_fini(void)
2267 nf_unregister_sockopt(&ip6t_sockopts);
2268 xt_unregister_match(&icmp6_matchstruct);
2269 xt_unregister_target(&ip6t_error_target);
2270 xt_unregister_target(&ip6t_standard_target);
2271 xt_proto_fini(AF_INET6);
2275 * find the offset to specified header or the protocol number of last header
2276 * if target < 0. "last header" is transport protocol header, ESP, or
2279 * If target header is found, its offset is set in *offset and return protocol
2280 * number. Otherwise, return -1.
2282 * If the first fragment doesn't contain the final protocol header or
2283 * NEXTHDR_NONE it is considered invalid.
2285 * Note that non-1st fragment is special case that "the protocol number
2286 * of last header" is "next header" field in Fragment header. In this case,
2287 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
/*
 * Walk the IPv6 extension-header chain (contract documented in the comment
 * block above): starting just past the fixed IPv6 header, follow nexthdr
 * links until the requested protocol is found.  Fragment headers get
 * special treatment so non-first fragments can report the fragment offset
 * via *fragoff instead of a header position.
 * (Several exits and error returns fall in elided lines.)
 */
2291 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2292 int target, unsigned short *fragoff)
2294 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2295 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2296 unsigned int len = skb->len - start;
2301 while (nexthdr != target) {
2302 struct ipv6_opt_hdr _hdr, *hp;
2303 unsigned int hdrlen;
/* Chain ends at a non-extension header or the explicit terminator. */
2305 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2311 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2314 if (nexthdr == NEXTHDR_FRAGMENT) {
2315 unsigned short _frag_off;
2317 fp = skb_header_pointer(skb,
2318 start+offsetof(struct frag_hdr,
/* Mask out the reserved/M-flag bits; low 3 bits are not offset. */
2325 _frag_off = ntohs(*fp) & ~0x7;
2328 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2329 hp->nexthdr == NEXTHDR_NONE)) {
2331 *fragoff = _frag_off;
/* AUTH header length units differ from other extension headers
 * (4-byte units vs 8-byte), hence the special case. */
2337 } else if (nexthdr == NEXTHDR_AUTH)
2338 hdrlen = (hp->hdrlen + 2) << 2;
2340 hdrlen = ipv6_optlen(hp);
2342 nexthdr = hp->nexthdr;
/* Public API exported to other kernel modules (iptable_filter etc.). */
2351 EXPORT_SYMBOL(ip6t_register_table);
2352 EXPORT_SYMBOL(ip6t_unregister_table);
2353 EXPORT_SYMBOL(ip6t_do_table);
2354 EXPORT_SYMBOL(ip6t_ext_hdr);
2355 EXPORT_SYMBOL(ipv6_find_hdr);
2357 module_init(ip6_tables_init);
2358 module_exit(ip6_tables_fini);