2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12 * - increase module usage count as soon as we have rules inside
14 * 06 Jun 2002 Andras Kis-Szabo <kisza@sch.bme.hu>
15 * - new extension header parser code
16 * 15 Oct 2005 Harald Welte <laforge@netfilter.org>
17 * - Unification of {ip,ip6}_tables into x_tables
18 * - Removed tcp and udp code, since it's not ipv6 specific
21 #include <linux/capability.h>
22 #include <linux/config.h>
24 #include <linux/skbuff.h>
25 #include <linux/kmod.h>
26 #include <linux/vmalloc.h>
27 #include <linux/netdevice.h>
28 #include <linux/module.h>
29 #include <linux/icmpv6.h>
31 #include <asm/uaccess.h>
32 #include <asm/semaphore.h>
33 #include <linux/proc_fs.h>
34 #include <linux/cpumask.h>
36 #include <linux/netfilter_ipv6/ip6_tables.h>
37 #include <linux/netfilter/x_tables.h>
39 MODULE_LICENSE("GPL");
40 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
41 MODULE_DESCRIPTION("IPv6 packet filter");
43 #define IPV6_HDR_LEN (sizeof(struct ipv6hdr))
44 #define IPV6_OPTHDR_LEN (sizeof(struct ipv6_opt_hdr))
46 /*#define DEBUG_IP_FIREWALL*/
47 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
48 /*#define DEBUG_IP_FIREWALL_USER*/
/* Debug helpers: dprintf()/duprintf() compile to nothing unless the
 * DEBUG_IP_FIREWALL / DEBUG_IP_FIREWALL_USER toggles above are enabled;
 * IP_NF_ASSERT is active only under CONFIG_NETFILTER_DEBUG.
 * NOTE(review): the #else/#endif lines of these conditionals were lost in
 * extraction (the embedded original line numbers jump) -- restore from the
 * pristine source before compiling. */
50 #ifdef DEBUG_IP_FIREWALL
51 #define dprintf(format, args...) printk(format , ## args)
53 #define dprintf(format, args...)
56 #ifdef DEBUG_IP_FIREWALL_USER
57 #define duprintf(format, args...) printk(format , ## args)
59 #define duprintf(format, args...)
62 #ifdef CONFIG_NETFILTER_DEBUG
63 #define IP_NF_ASSERT(x) \
66 printk("IP_NF_ASSERT: %s:%s:%u\n", \
67 __FUNCTION__, __FILE__, __LINE__); \
70 #define IP_NF_ASSERT(x)
74 #include <linux/netfilter_ipv4/listhelp.h>
77 /* All the better to debug you with... */
83 We keep a set of rules for each CPU, so we can avoid write-locking
84 them in the softirq when updating the counters and therefore
85 only need to read-lock in the softirq; doing a write_lock_bh() in user
86 context stops packets coming through and allows user context to read
87 the counters or update the rules.
89 Hence the start of any table is given by get_table() below. */
/* Semaphore tracing wrappers: log every down()/up() together with the
 * call site's line number before delegating to the real primitive.
 * NOTE(review): presumably guarded by a debug-only #if that was lost in
 * extraction -- confirm against the pristine source before enabling. */
92 #define down(x) do { printk("DOWN:%u:" #x "\n", __LINE__); down(x); } while(0)
93 #define down_interruptible(x) ({ int __r; printk("DOWNi:%u:" #x "\n", __LINE__); __r = down_interruptible(x); if (__r != 0) printk("ABORT-DOWNi:%u\n", __LINE__); __r; })
94 #define up(x) do { printk("UP:%u:" #x "\n", __LINE__); up(x); } while(0)
/*
 * ip6_masked_addrcmp - compare two IPv6 addresses under a mask.
 * Compares (addr1 & mask) against (addr2 & mask) byte-for-byte over all
 * 16 octets.
 * NOTE(review): the return statements and braces are missing from this
 * extraction (embedded original line numbers jump); in the upstream code
 * this returns nonzero on mismatch, 0 on match -- verify against source.
 */
98 ip6_masked_addrcmp(const struct in6_addr *addr1, const struct in6_addr *mask,
99 const struct in6_addr *addr2)
102 for( i = 0; i < 16; i++){
103 if((addr1->s6_addr[i] & mask->s6_addr[i]) !=
104 (addr2->s6_addr[i] & mask->s6_addr[i]))
110 /* Check for an extension */
/* Returns nonzero when nexthdr is one of the IPv6 extension-header (or
 * terminal NONE) protocol numbers that the traversal code understands.
 * NOTE(review): return type line and braces were dropped by extraction. */
112 ip6t_ext_hdr(u8 nexthdr)
114 return ( (nexthdr == IPPROTO_HOPOPTS) ||
115 (nexthdr == IPPROTO_ROUTING) ||
116 (nexthdr == IPPROTO_FRAGMENT) ||
117 (nexthdr == IPPROTO_ESP) ||
118 (nexthdr == IPPROTO_AH) ||
119 (nexthdr == IPPROTO_NONE) ||
120 (nexthdr == IPPROTO_DSTOPTS) );
123 /* Returns whether matches rule or not. */
/* Core per-rule IPv6 header match: source/destination address under mask,
 * in/out interface names, and the transport protocol found by walking the
 * extension-header chain.  Each test can be inverted via ip6info->invflags
 * (the FWINV macro below).
 * NOTE(review): this extraction dropped many lines (full parameter list,
 * returns, braces) -- treat as a fragment, not compilable code. */
125 ip6_packet_match(const struct sk_buff *skb,
128 const struct ip6t_ip6 *ip6info,
129 unsigned int *protoff,
134 const struct ipv6hdr *ipv6 = skb->nh.ipv6h;
/* XOR the raw test result with the rule's invert flag. */
136 #define FWINV(bool,invflg) ((bool) ^ !!(ip6info->invflags & invflg))
138 if (FWINV(ip6_masked_addrcmp(&ipv6->saddr, &ip6info->smsk,
139 &ip6info->src), IP6T_INV_SRCIP)
140 || FWINV(ip6_masked_addrcmp(&ipv6->daddr, &ip6info->dmsk,
141 &ip6info->dst), IP6T_INV_DSTIP)) {
142 dprintf("Source or dest mismatch.\n");
144 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
145 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
146 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
147 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
148 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
149 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
153 /* Look for ifname matches; this should unroll nicely. */
/* Interface comparison is done one unsigned long at a time; the mask
 * allows wildcard suffixes (e.g. "eth+"). */
154 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
155 ret |= (((const unsigned long *)indev)[i]
156 ^ ((const unsigned long *)ip6info->iniface)[i])
157 & ((const unsigned long *)ip6info->iniface_mask)[i];
160 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
161 dprintf("VIA in mismatch (%s vs %s).%s\n",
162 indev, ip6info->iniface,
163 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
/* Same long-at-a-time comparison for the output device. */
167 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
168 ret |= (((const unsigned long *)outdev)[i]
169 ^ ((const unsigned long *)ip6info->outiface)[i])
170 & ((const unsigned long *)ip6info->outiface_mask)[i];
173 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
174 dprintf("VIA out mismatch (%s vs %s).%s\n",
175 outdev, ip6info->outiface,
176 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
180 /* ... might want to do something with class and flowlabel here ... */
182 /* look for the desired protocol header */
183 if((ip6info->flags & IP6T_F_PROTO)) {
185 unsigned short _frag_off;
/* Walk extension headers; -1 asks for the last (transport) header. */
187 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
191 *fragoff = _frag_off;
193 dprintf("Packet protocol %hi ?= %s%hi.\n",
195 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
198 if (ip6info->proto == protohdr) {
199 if(ip6info->invflags & IP6T_INV_PROTO) {
205 /* We need match for the '-p all', too! */
206 if ((ip6info->proto != 0) &&
207 !(ip6info->invflags & IP6T_INV_PROTO))
213 /* should be ip6 safe */
/* Validate the user-supplied IPv6 match spec: reject any flag or invert
 * bit outside the masks the kernel understands.
 * NOTE(review): return statements lost in extraction. */
215 ip6_checkentry(const struct ip6t_ip6 *ipv6)
217 if (ipv6->flags & ~IP6T_F_MASK) {
218 duprintf("Unknown flag bits set: %08X\n",
219 ipv6->flags & ~IP6T_F_MASK);
222 if (ipv6->invflags & ~IP6T_INV_MASK) {
223 duprintf("Unknown invflag bits set: %08X\n",
224 ipv6->invflags & ~IP6T_INV_MASK);
/* Target handler for the built-in ERROR target: logs the error name that
 * userspace embedded in targinfo.  Hit only on a corrupt/miscompiled
 * ruleset.  NOTE(review): return value line lost in extraction. */
231 ip6t_error(struct sk_buff **pskb,
232 const struct net_device *in,
233 const struct net_device *out,
234 unsigned int hooknum,
235 const void *targinfo,
239 printk("ip6_tables: error: `%s'\n", (char *)targinfo);
/* IP6T_MATCH_ITERATE callback: run one match extension against the packet.
 * Returning nonzero stops iteration (i.e. the rule does not match). */
245 int do_match(struct ip6t_entry_match *m,
246 const struct sk_buff *skb,
247 const struct net_device *in,
248 const struct net_device *out,
250 unsigned int protoff,
253 /* Stop iteration if it doesn't match */
254 if (!m->u.kernel.match->match(skb, in, out, m->data,
255 offset, protoff, hotdrop))
/* Translate a byte offset within a table blob into an entry pointer. */
261 static inline struct ip6t_entry *
262 get_entry(void *base, unsigned int offset)
264 return (struct ip6t_entry *)(base + offset);
267 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main packet-filter loop: walks this CPU's copy of the ruleset from the
 * hook's entry point, following jumps/returns via a back pointer chain
 * saved in the entries themselves, until a verdict is reached.
 * NOTE(review): this is a lossy extraction -- loop heads, gotos, returns
 * and braces are missing (embedded original line numbers jump). */
269 ip6t_do_table(struct sk_buff **pskb,
271 const struct net_device *in,
272 const struct net_device *out,
273 struct xt_table *table,
276 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
278 unsigned int protoff = 0;
280 /* Initializing verdict to NF_DROP keeps gcc happy. */
281 unsigned int verdict = NF_DROP;
282 const char *indev, *outdev;
284 struct ip6t_entry *e, *back;
285 struct xt_table_info *private;
288 indev = in ? in->name : nulldevname;
289 outdev = out ? out->name : nulldevname;
290 /* We handle fragments by dealing with the first fragment as
291 * if it was a normal packet. All other fragments are treated
292 * normally, except that they will NEVER match rules that ask
293 * things we don't know, ie. tcp syn flag or ports). If the
294 * rule is also a fragment-specific rule, non-fragments won't
/* Read lock suffices in softirq context: counters live in per-CPU
 * copies; writers take the write lock in user context. */
297 read_lock_bh(&table->lock);
298 private = table->private;
299 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
300 table_base = (void *)private->entries[smp_processor_id()];
301 e = get_entry(table_base, private->hook_entry[hook]);
303 #ifdef CONFIG_NETFILTER_DEBUG
304 /* Check noone else using our table */
305 if (((struct ip6t_entry *)table_base)->comefrom != 0xdead57ac
306 && ((struct ip6t_entry *)table_base)->comefrom != 0xeeeeeeec) {
307 printk("ASSERT: CPU #%u, %s comefrom(%p) = %X\n",
310 &((struct ip6t_entry *)table_base)->comefrom,
311 ((struct ip6t_entry *)table_base)->comefrom);
313 ((struct ip6t_entry *)table_base)->comefrom = 0x57acc001;
316 /* For return from builtin chain */
317 back = get_entry(table_base, private->underflow[hook]);
322 if (ip6_packet_match(*pskb, indev, outdev, &e->ipv6,
323 &protoff, &offset)) {
324 struct ip6t_entry_target *t;
326 if (IP6T_MATCH_ITERATE(e, do_match,
328 offset, protoff, &hotdrop) != 0)
/* Per-rule byte/packet accounting on this CPU's copy. */
331 ADD_COUNTER(e->counters,
332 ntohs((*pskb)->nh.ipv6h->payload_len)
336 t = ip6t_get_target(e);
337 IP_NF_ASSERT(t->u.kernel.target);
338 /* Standard target? */
339 if (!t->u.kernel.target->target) {
342 v = ((struct ip6t_standard_target *)t)->verdict;
344 /* Pop from stack? */
345 if (v != IP6T_RETURN) {
/* Negative verdicts encode NF_* values as -(verdict)-1. */
346 verdict = (unsigned)(-v) - 1;
350 back = get_entry(table_base,
354 if (table_base + v != (void *)e + e->next_offset
355 && !(e->ipv6.flags & IP6T_F_GOTO)) {
356 /* Save old back ptr in next entry */
357 struct ip6t_entry *next
358 = (void *)e + e->next_offset;
360 = (void *)back - table_base;
361 /* set back pointer to next entry */
365 e = get_entry(table_base, v);
367 /* Targets which reenter must return
369 #ifdef CONFIG_NETFILTER_DEBUG
370 ((struct ip6t_entry *)table_base)->comefrom
373 verdict = t->u.kernel.target->target(pskb,
379 #ifdef CONFIG_NETFILTER_DEBUG
380 if (((struct ip6t_entry *)table_base)->comefrom
382 && verdict == IP6T_CONTINUE) {
383 printk("Target %s reentered!\n",
384 t->u.kernel.target->name);
387 ((struct ip6t_entry *)table_base)->comefrom
390 if (verdict == IP6T_CONTINUE)
391 e = (void *)e + e->next_offset;
399 e = (void *)e + e->next_offset;
403 #ifdef CONFIG_NETFILTER_DEBUG
404 ((struct ip6t_entry *)table_base)->comefrom = 0xdead57ac;
406 read_unlock_bh(&table->lock);
408 #ifdef DEBUG_ALLOW_ALL
417 /* All zeroes == unconditional rule. */
/* True when every byte of the ip6t_ip6 match spec is zero, i.e. the rule
 * matches any packet. */
419 unconditional(const struct ip6t_ip6 *ipv6)
423 for (i = 0; i < sizeof(*ipv6); i++)
424 if (((char *)ipv6)[i])
427 return (i == sizeof(*ipv6));
430 /* Figures out from what hook each rule can be called: returns 0 if
431 there are loops. Puts hook bitmask in comefrom. */
/* Iterative chain walker: abuses e->counters.pcnt as a back-pointer stack
 * (restored to 0 on the way out) so no recursion is needed, and uses bit
 * (1 << NF_IP6_NUMHOOKS) in comefrom as the "currently on path" marker
 * for loop detection.
 * NOTE(review): lossy extraction -- loop heads, returns and braces are
 * missing here; do not treat as compilable. */
433 mark_source_chains(struct xt_table_info *newinfo,
434 unsigned int valid_hooks, void *entry0)
438 /* No recursion; use packet counter to save back ptrs (reset
439 to 0 as we leave), and comefrom to save source hook bitmask */
440 for (hook = 0; hook < NF_IP6_NUMHOOKS; hook++) {
441 unsigned int pos = newinfo->hook_entry[hook];
443 = (struct ip6t_entry *)(entry0 + pos);
445 if (!(valid_hooks & (1 << hook)))
448 /* Set initial back pointer. */
449 e->counters.pcnt = pos;
452 struct ip6t_standard_target *t
453 = (void *)ip6t_get_target(e);
/* Revisiting an entry still on the current path => loop. */
455 if (e->comefrom & (1 << NF_IP6_NUMHOOKS)) {
456 printk("iptables: loop hook %u pos %u %08X.\n",
457 hook, pos, e->comefrom);
461 |= ((1 << hook) | (1 << NF_IP6_NUMHOOKS));
463 /* Unconditional return/END. */
464 if (e->target_offset == sizeof(struct ip6t_entry)
465 && (strcmp(t->target.u.user.name,
466 IP6T_STANDARD_TARGET) == 0)
468 && unconditional(&e->ipv6)) {
469 unsigned int oldpos, size;
471 /* Return: backtrack through the last
474 e->comefrom ^= (1<<NF_IP6_NUMHOOKS);
475 #ifdef DEBUG_IP_FIREWALL_USER
477 & (1 << NF_IP6_NUMHOOKS)) {
478 duprintf("Back unset "
/* Pop the saved back pointer and clear the scratch use. */
485 pos = e->counters.pcnt;
486 e->counters.pcnt = 0;
488 /* We're at the start. */
492 e = (struct ip6t_entry *)
494 } while (oldpos == pos + e->next_offset);
497 size = e->next_offset;
498 e = (struct ip6t_entry *)
499 (entry0 + pos + size);
500 e->counters.pcnt = pos;
503 int newpos = t->verdict;
505 if (strcmp(t->target.u.user.name,
506 IP6T_STANDARD_TARGET) == 0
508 /* This a jump; chase it. */
509 duprintf("Jump rule %u -> %u\n",
512 /* ... this is a fallthru */
513 newpos = pos + e->next_offset;
515 e = (struct ip6t_entry *)
517 e->counters.pcnt = pos;
522 duprintf("Finished chain %u\n", hook);
/* IP6T_MATCH_ITERATE callback: release one match extension (optional
 * destroy hook, then drop the module reference).  When i is non-NULL the
 * countdown lets callers unwind only the first *i matches. */
528 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
530 if (i && (*i)-- == 0)
533 if (m->u.kernel.match->destroy)
534 m->u.kernel.match->destroy(m->data,
535 m->u.match_size - sizeof(*m));
536 module_put(m->u.kernel.match->me);
/* Sanity-check a standard-target entry supplied from userspace: exact
 * target size, and verdict either a jump offset within the blob or a
 * valid negative NF_* encoding.
 * NOTE(review): return statements lost in extraction. */
541 standard_check(const struct ip6t_entry_target *t,
542 unsigned int max_offset)
544 struct ip6t_standard_target *targ = (void *)t;
546 /* Check standard info. */
548 != IP6T_ALIGN(sizeof(struct ip6t_standard_target))) {
549 duprintf("standard_check: target size %u != %u\n",
551 IP6T_ALIGN(sizeof(struct ip6t_standard_target)));
555 if (targ->verdict >= 0
556 && targ->verdict > max_offset - sizeof(struct ip6t_entry)) {
557 duprintf("ip6t_standard_check: bad verdict (%i)\n",
562 if (targ->verdict < -NF_MAX_VERDICT - 1) {
563 duprintf("ip6t_standard_check: bad negative verdict (%i)\n",
/* Look up (autoloading "ip6t_<name>" if needed) and validate one match
 * extension for an entry, taking a module reference that cleanup_match()
 * later releases.  On failure the reference is dropped (module_put at the
 * error label). */
571 check_match(struct ip6t_entry_match *m,
573 const struct ip6t_ip6 *ipv6,
574 unsigned int hookmask,
577 struct ip6t_match *match;
580 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
582 "ip6t_%s", m->u.user.name);
583 if (IS_ERR(match) || !match) {
584 duprintf("check_match: `%s' not found\n", m->u.user.name);
585 return match ? PTR_ERR(match) : -ENOENT;
587 m->u.kernel.match = match;
/* Generic x_tables validation (size, hooks, proto) ... */
589 ret = xt_check_match(match, AF_INET6, m->u.match_size - sizeof(*m),
590 name, hookmask, ipv6->proto,
591 ipv6->invflags & IP6T_INV_PROTO);
/* ... then the extension's own checkentry hook, if any. */
595 if (m->u.kernel.match->checkentry
596 && !m->u.kernel.match->checkentry(name, ipv6, m->data,
597 m->u.match_size - sizeof(*m),
599 duprintf("ip_tables: check failed for `%s'.\n",
600 m->u.kernel.match->name);
608 module_put(m->u.kernel.match->me);
/* Forward declaration; definition is near the bottom of the file. */
612 static struct ip6t_target ip6t_standard_target;
/* Validate one complete rule: the ip6 match spec, every match extension
 * (check_match), and the target (lookup with autoload, xt_check_target,
 * standard_check or the target's own checkentry).  On any failure the
 * already-checked matches are unwound via cleanup_match with counter j.
 * NOTE(review): lossy extraction -- some returns/braces missing. */
615 check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
618 struct ip6t_entry_target *t;
619 struct ip6t_target *target;
623 if (!ip6_checkentry(&e->ipv6)) {
624 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
629 ret = IP6T_MATCH_ITERATE(e, check_match, name, &e->ipv6, e->comefrom, &j);
631 goto cleanup_matches;
633 t = ip6t_get_target(e);
634 target = try_then_request_module(xt_find_target(AF_INET6,
637 "ip6t_%s", t->u.user.name);
638 if (IS_ERR(target) || !target) {
639 duprintf("check_entry: `%s' not found\n", t->u.user.name);
640 ret = target ? PTR_ERR(target) : -ENOENT;
641 goto cleanup_matches;
643 t->u.kernel.target = target;
645 ret = xt_check_target(target, AF_INET6, t->u.target_size - sizeof(*t),
646 name, e->comefrom, e->ipv6.proto,
647 e->ipv6.invflags & IP6T_INV_PROTO);
/* Standard target gets the built-in verdict check; others get their
 * own checkentry callback. */
651 if (t->u.kernel.target == &ip6t_standard_target) {
652 if (!standard_check(t, size)) {
654 goto cleanup_matches;
656 } else if (t->u.kernel.target->checkentry
657 && !t->u.kernel.target->checkentry(name, e, t->data,
661 duprintf("ip_tables: check failed for `%s'.\n",
662 t->u.kernel.target->name);
670 module_put(t->u.kernel.target->me);
672 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/* First-pass walk over the userspace blob: verify alignment and minimum
 * size of each entry, record which offsets are hook entry points and
 * underflows, and zero the kernel-private counter/comefrom fields.
 * NOTE(review): lossy extraction -- returns/braces missing. */
677 check_entry_size_and_hooks(struct ip6t_entry *e,
678 struct xt_table_info *newinfo,
680 unsigned char *limit,
681 const unsigned int *hook_entries,
682 const unsigned int *underflows,
/* Entries must be properly aligned and fit before the blob's end. */
687 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
688 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
689 duprintf("Bad offset %p\n", e);
694 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
695 duprintf("checking: element %p size %u\n",
700 /* Check hooks & underflows */
701 for (h = 0; h < NF_IP6_NUMHOOKS; h++) {
702 if ((unsigned char *)e - base == hook_entries[h])
703 newinfo->hook_entry[h] = hook_entries[h];
704 if ((unsigned char *)e - base == underflows[h])
705 newinfo->underflow[h] = underflows[h];
708 /* FIXME: underflows must be unconditional, standard verdicts
709 < 0 (not IP6T_RETURN). --RR */
711 /* Clear counters and comefrom */
712 e->counters = ((struct xt_counters) { 0, 0 });
/* Release all resources held by one rule: every match extension, then the
 * target (optional destroy hook plus module reference).  i works the same
 * way as in cleanup_match: non-NULL limits the unwind depth. */
720 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
722 struct ip6t_entry_target *t;
724 if (i && (*i)-- == 0)
727 /* Cleanup all matches */
728 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
729 t = ip6t_get_target(e);
730 if (t->u.kernel.target->destroy)
731 t->u.kernel.target->destroy(t->data,
732 t->u.target_size - sizeof(*t));
733 module_put(t->u.kernel.target->me);
737 /* Checks and translates the user-supplied table segment (held in
/* Full validation pipeline for a replacement ruleset: size/hook pass
 * (check_entry_size_and_hooks), hook completeness, loop detection
 * (mark_source_chains), per-entry checks (check_entry), then replication
 * of the validated blob into every other CPU's copy.
 * NOTE(review): lossy extraction -- several returns/gotos missing. */
740 translate_table(const char *name,
741 unsigned int valid_hooks,
742 struct xt_table_info *newinfo,
746 const unsigned int *hook_entries,
747 const unsigned int *underflows)
752 newinfo->size = size;
753 newinfo->number = number;
755 /* Init all hooks to impossible value. */
756 for (i = 0; i < NF_IP6_NUMHOOKS; i++) {
757 newinfo->hook_entry[i] = 0xFFFFFFFF;
758 newinfo->underflow[i] = 0xFFFFFFFF;
761 duprintf("translate_table: size %u\n", newinfo->size);
763 /* Walk through entries, checking offsets. */
764 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
765 check_entry_size_and_hooks,
769 hook_entries, underflows, &i);
774 duprintf("translate_table: %u not %u entries\n",
779 /* Check hooks all assigned */
780 for (i = 0; i < NF_IP6_NUMHOOKS; i++) {
781 /* Only hooks which are valid */
782 if (!(valid_hooks & (1 << i)))
784 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
785 duprintf("Invalid hook entry %u %u\n",
789 if (newinfo->underflow[i] == 0xFFFFFFFF) {
790 duprintf("Invalid underflow %u %u\n",
796 if (!mark_source_chains(newinfo, valid_hooks, entry0))
799 /* Finally, each sanity check must pass */
801 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
802 check_entry, name, size, &i);
/* On failure, unwind only the i entries that passed check_entry. */
805 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
810 /* And one copy for every other CPU */
812 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
813 memcpy(newinfo->entries[i], entry0, newinfo->size);
/* IP6T_ENTRY_ITERATE callback: accumulate one entry's byte/packet counts
 * into total[*i] (the iteration index i is advanced by lines lost in this
 * extraction -- TODO confirm against pristine source). */
821 add_entry_to_counter(const struct ip6t_entry *e,
822 struct xt_counters total[],
825 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* Like add_entry_to_counter but overwrites total[*i] instead of adding --
 * used for the first CPU so a prior memset is unnecessary. */
832 set_entry_to_counter(const struct ip6t_entry *e,
833 struct ip6t_counters total[],
836 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* Sum per-CPU counters into a single array: SET from the current CPU's
 * copy first, then ADD every other CPU's copy.
 * NOTE(review): the for_each_cpu loop header around the second iterate
 * was lost in extraction. */
843 get_counters(const struct xt_table_info *t,
844 struct xt_counters counters[])
850 /* Instead of clearing (by a previous call to memset())
851 * the counters and using adds, we set the counters
852 * with data used by 'current' CPU
853 * We dont care about preemption here.
855 curcpu = raw_smp_processor_id();
858 IP6T_ENTRY_ITERATE(t->entries[curcpu],
860 set_entry_to_counter,
868 IP6T_ENTRY_ITERATE(t->entries[cpu],
870 add_entry_to_counter,
/* Copy the ruleset back to userspace: snapshot the summed counters under
 * the write lock, bulk-copy this CPU's entry blob, then patch each copied
 * entry's counters and replace kernel pointers with the match/target
 * names userspace expects.
 * NOTE(review): lossy extraction -- error paths/frees/returns missing. */
877 copy_entries_to_user(unsigned int total_size,
878 struct xt_table *table,
879 void __user *userptr)
881 unsigned int off, num, countersize;
882 struct ip6t_entry *e;
883 struct xt_counters *counters;
884 struct xt_table_info *private = table->private;
888 /* We need atomic snapshot of counters: rest doesn't change
889 (other than comefrom, which userspace doesn't care
891 countersize = sizeof(struct xt_counters) * private->number;
892 counters = vmalloc(countersize);
894 if (counters == NULL)
897 /* First, sum counters... */
898 write_lock_bh(&table->lock);
899 get_counters(private, counters);
900 write_unlock_bh(&table->lock);
902 /* choose the copy that is on ourc node/cpu */
903 loc_cpu_entry = private->entries[raw_smp_processor_id()];
904 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
909 /* FIXME: use iterator macros --RR */
910 /* ... then go back and fix counters and names */
911 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
913 struct ip6t_entry_match *m;
914 struct ip6t_entry_target *t;
916 e = (struct ip6t_entry *)(loc_cpu_entry + off);
917 if (copy_to_user(userptr + off
918 + offsetof(struct ip6t_entry, counters),
920 sizeof(counters[num])) != 0) {
/* Rewrite each match's kernel pointer as its user-visible name. */
925 for (i = sizeof(struct ip6t_entry);
926 i < e->target_offset;
927 i += m->u.match_size) {
930 if (copy_to_user(userptr + off + i
931 + offsetof(struct ip6t_entry_match,
933 m->u.kernel.match->name,
934 strlen(m->u.kernel.match->name)+1)
/* Same fixup for the target name. */
941 t = ip6t_get_target(e);
942 if (copy_to_user(userptr + off + e->target_offset
943 + offsetof(struct ip6t_entry_target,
945 t->u.kernel.target->name,
946 strlen(t->u.kernel.target->name)+1) != 0) {
/* IP6T_SO_GET_ENTRIES backend: find the named table and, if the caller's
 * size matches the kernel's, stream the entries into uptr->entrytable. */
958 get_entries(const struct ip6t_get_entries *entries,
959 struct ip6t_get_entries __user *uptr)
964 t = xt_find_table_lock(AF_INET6, entries->name);
965 if (t && !IS_ERR(t)) {
966 struct xt_table_info *private = t->private;
967 duprintf("t->private->number = %u\n", private->number);
968 if (entries->size == private->size)
969 ret = copy_entries_to_user(private->size,
970 t, uptr->entrytable);
972 duprintf("get_entries: I've got %u not %u!\n",
973 private->size, entries->size);
979 ret = t ? PTR_ERR(t) : -ENOENT;
/* IP6T_SO_SET_REPLACE backend: copy in the replacement blob, validate and
 * translate it, atomically swap it into the live table via
 * xt_replace_table(), then hand the old table's summed counters back to
 * userspace and free the old table.
 * NOTE(review): lossy extraction -- several statements (overflow returns,
 * module_get/module_put bodies for the usage-count adjustment, final
 * returns and labels) are missing; confirm against pristine source. */
985 do_replace(void __user *user, unsigned int len)
988 struct ip6t_replace tmp;
990 struct xt_table_info *newinfo, *oldinfo;
991 struct xt_counters *counters;
992 void *loc_cpu_entry, *loc_cpu_old_entry;
994 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
/* Guard the size arithmetic below against integer overflow. */
998 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1001 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1004 newinfo = xt_alloc_table_info(tmp.size);
1008 /* choose the copy that is on our node/cpu */
1009 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1010 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1016 counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters));
1022 ret = translate_table(tmp.name, tmp.valid_hooks,
1023 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1024 tmp.hook_entry, tmp.underflow);
1026 goto free_newinfo_counters;
1028 duprintf("ip_tables: Translated table\n");
1030 t = try_then_request_module(xt_find_table_lock(AF_INET6, tmp.name),
1031 "ip6table_%s", tmp.name);
1032 if (!t || IS_ERR(t)) {
1033 ret = t ? PTR_ERR(t) : -ENOENT;
1034 goto free_newinfo_counters_untrans;
1038 if (tmp.valid_hooks != t->valid_hooks) {
1039 duprintf("Valid hook crap: %08X vs %08X\n",
1040 tmp.valid_hooks, t->valid_hooks);
1045 oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret);
1049 /* Update module usage count based on number of rules */
1050 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1051 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1052 if ((oldinfo->number > oldinfo->initial_entries) ||
1053 (newinfo->number <= oldinfo->initial_entries))
1055 if ((oldinfo->number > oldinfo->initial_entries) &&
1056 (newinfo->number <= oldinfo->initial_entries))
1059 /* Get the old counters. */
1060 get_counters(oldinfo, counters);
1061 /* Decrease module usage counts and free resource */
1062 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1063 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
1064 xt_free_table_info(oldinfo);
1065 if (copy_to_user(tmp.counters, counters,
1066 sizeof(struct xt_counters) * tmp.num_counters) != 0)
1075 free_newinfo_counters_untrans:
1076 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1077 free_newinfo_counters:
1080 xt_free_table_info(newinfo);
1084 /* We're lazy, and add to the first CPU; overflow works its fey magic
1085 * and everything is OK. */
/* IP6T_ENTRY_ITERATE callback for SO_SET_ADD_COUNTERS: fold one userspace
 * counter pair into the matching entry on this CPU's copy. */
1087 add_counter_to_entry(struct ip6t_entry *e,
1088 const struct xt_counters addme[],
1092 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1094 (long unsigned int)e->counters.pcnt,
1095 (long unsigned int)e->counters.bcnt,
1096 (long unsigned int)addme[*i].pcnt,
1097 (long unsigned int)addme[*i].bcnt);
1100 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
/* IP6T_SO_SET_ADD_COUNTERS backend: copy in a counter array, verify its
 * length matches the table's entry count, and add each pair to the live
 * entries under the write lock.
 * NOTE(review): lossy extraction -- returns, vfree/unlock labels and the
 * iterate argument list are partially missing. */
1107 do_add_counters(void __user *user, unsigned int len)
1110 struct xt_counters_info tmp, *paddc;
1111 struct xt_table_info *private;
1114 void *loc_cpu_entry;
1116 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
/* The header's num_counters must agree with the total length. */
1119 if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters))
1122 paddc = vmalloc(len);
1126 if (copy_from_user(paddc, user, len) != 0) {
1131 t = xt_find_table_lock(AF_INET6, tmp.name);
1132 if (!t || IS_ERR(t)) {
1133 ret = t ? PTR_ERR(t) : -ENOENT;
1137 write_lock_bh(&t->lock);
1138 private = t->private;
1139 if (private->number != paddc->num_counters) {
1141 goto unlock_up_free;
1145 /* Choose the copy that is on our node */
1146 loc_cpu_entry = private->entries[smp_processor_id()];
1147 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1149 add_counter_to_entry,
1153 write_unlock_bh(&t->lock);
/* setsockopt() entry point: CAP_NET_ADMIN only; dispatches REPLACE and
 * ADD_COUNTERS to their backends. */
1163 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1167 if (!capable(CAP_NET_ADMIN))
1171 case IP6T_SO_SET_REPLACE:
1172 ret = do_replace(user, len);
1175 case IP6T_SO_SET_ADD_COUNTERS:
1176 ret = do_add_counters(user, len);
1180 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* getsockopt() entry point: CAP_NET_ADMIN only; serves table metadata
 * (GET_INFO), the full ruleset (GET_ENTRIES), and extension revision
 * queries (GET_REVISION_*).
 * NOTE(review): lossy extraction -- break/return statements and some
 * error assignments are missing in each case arm. */
1188 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1192 if (!capable(CAP_NET_ADMIN))
1196 case IP6T_SO_GET_INFO: {
1197 char name[IP6T_TABLE_MAXNAMELEN];
1200 if (*len != sizeof(struct ip6t_getinfo)) {
1201 duprintf("length %u != %u\n", *len,
1202 sizeof(struct ip6t_getinfo));
1207 if (copy_from_user(name, user, sizeof(name)) != 0) {
/* Force NUL termination of the user-supplied table name. */
1211 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1213 t = try_then_request_module(xt_find_table_lock(AF_INET6, name),
1214 "ip6table_%s", name);
1215 if (t && !IS_ERR(t)) {
1216 struct ip6t_getinfo info;
1217 struct xt_table_info *private = t->private;
1219 info.valid_hooks = t->valid_hooks;
1220 memcpy(info.hook_entry, private->hook_entry,
1221 sizeof(info.hook_entry));
1222 memcpy(info.underflow, private->underflow,
1223 sizeof(info.underflow));
1224 info.num_entries = private->number;
1225 info.size = private->size;
1226 memcpy(info.name, name, sizeof(info.name));
1228 if (copy_to_user(user, &info, *len) != 0)
1235 ret = t ? PTR_ERR(t) : -ENOENT;
1239 case IP6T_SO_GET_ENTRIES: {
1240 struct ip6t_get_entries get;
1242 if (*len < sizeof(get)) {
1243 duprintf("get_entries: %u < %u\n", *len, sizeof(get));
1245 } else if (copy_from_user(&get, user, sizeof(get)) != 0) {
1247 } else if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1248 duprintf("get_entries: %u != %u\n", *len,
1249 sizeof(struct ip6t_get_entries) + get.size);
1252 ret = get_entries(&get, user);
1256 case IP6T_SO_GET_REVISION_MATCH:
1257 case IP6T_SO_GET_REVISION_TARGET: {
1258 struct ip6t_get_revision rev;
1261 if (*len != sizeof(rev)) {
1265 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
1270 if (cmd == IP6T_SO_GET_REVISION_TARGET)
1275 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
1278 "ip6t_%s", rev.name);
1283 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/* Register a new ip6tables table (called by ip6table_filter/mangle/raw):
 * allocate per-CPU entry copies, translate the initial ruleset, then hand
 * the table to x_tables.  Frees newinfo on every failure path shown. */
1290 int ip6t_register_table(struct xt_table *table,
1291 const struct ip6t_replace *repl)
1294 struct xt_table_info *newinfo;
1295 static struct xt_table_info bootstrap
1296 = { 0, 0, 0, { 0 }, { 0 }, { } };
1297 void *loc_cpu_entry;
1299 newinfo = xt_alloc_table_info(repl->size);
1303 /* choose the copy on our node/cpu */
1304 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1305 memcpy(loc_cpu_entry, repl->entries, repl->size);
1307 ret = translate_table(table->name, table->valid_hooks,
1308 newinfo, loc_cpu_entry, repl->size,
1313 xt_free_table_info(newinfo);
1317 if (xt_register_table(table, &bootstrap, newinfo) != 0) {
1318 xt_free_table_info(newinfo);
/* Tear down a registered table: detach it from x_tables, release every
 * rule's matches/targets (dropping module refs), and free the info. */
1325 void ip6t_unregister_table(struct xt_table *table)
1327 struct xt_table_info *private;
1328 void *loc_cpu_entry;
1330 private = xt_unregister_table(table);
1332 /* Decrease module usage counts and free resources */
1333 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1334 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
1335 xt_free_table_info(private);
1338 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* XORed with an invert flag by the caller; the invert parameter line was
 * lost in extraction -- TODO confirm against pristine source. */
1340 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
1341 u_int8_t type, u_int8_t code,
1344 return (type == test_type && code >= min_code && code <= max_code)
/* Built-in ICMPv6 match: pull the icmp6 header at protoff and compare
 * type/code against the rule's range (with optional inversion).  A packet
 * whose header cannot be read is hot-dropped as evil.
 * NOTE(review): some lines (fragment early-exit, *hotdrop assignment,
 * braces) are missing from this extraction. */
1349 icmp6_match(const struct sk_buff *skb,
1350 const struct net_device *in,
1351 const struct net_device *out,
1352 const void *matchinfo,
1354 unsigned int protoff,
1357 struct icmp6hdr _icmp, *ic;
1358 const struct ip6t_icmp *icmpinfo = matchinfo;
1360 /* Must not be a fragment. */
1364 ic = skb_header_pointer(skb, protoff, sizeof(_icmp), &_icmp);
1366 /* We've been asked to examine this packet, and we
1367 can't. Hence, no choice but to drop. */
1368 duprintf("Dropping evil ICMP tinygram.\n");
1373 return icmp6_type_code_match(icmpinfo->type,
1376 ic->icmp6_type, ic->icmp6_code,
1377 !!(icmpinfo->invflags&IP6T_ICMP_INV));
1380 /* Called when user tries to insert an entry of this type. */
/* Validation for the built-in icmp6 match: the rule must select
 * IPPROTO_ICMPV6 (not inverted), carry exactly the expected matchinfo
 * size, and use only known invert bits. */
1382 icmp6_checkentry(const char *tablename,
1385 unsigned int matchsize,
1386 unsigned int hook_mask)
1388 const struct ip6t_ip6 *ipv6 = entry;
1389 const struct ip6t_icmp *icmpinfo = matchinfo;
1391 /* Must specify proto == ICMP, and no unknown invflags */
1392 return ipv6->proto == IPPROTO_ICMPV6
1393 && !(ipv6->invflags & IP6T_INV_PROTO)
1394 && matchsize == IP6T_ALIGN(sizeof(struct ip6t_icmp))
1395 && !(icmpinfo->invflags & ~IP6T_ICMP_INV);
1398 /* The built-in targets: standard (NULL) and error. */
1399 static struct ip6t_target ip6t_standard_target = {
1400 .name = IP6T_STANDARD_TARGET,
1403 static struct ip6t_target ip6t_error_target = {
1404 .name = IP6T_ERROR_TARGET,
1405 .target = ip6t_error,
/* Sockopt range registration: both set and get share the IP6T_BASE_CTL
 * base and dispatch through do_ip6t_{set,get}_ctl above. */
1408 static struct nf_sockopt_ops ip6t_sockopts = {
1410 .set_optmin = IP6T_BASE_CTL,
1411 .set_optmax = IP6T_SO_SET_MAX+1,
1412 .set = do_ip6t_set_ctl,
1413 .get_optmin = IP6T_BASE_CTL,
1414 .get_optmax = IP6T_SO_GET_MAX+1,
1415 .get = do_ip6t_get_ctl,
/* Built-in icmp6 match registered at init(). */
1418 static struct ip6t_match icmp6_matchstruct = {
1420 .match = &icmp6_match,
1421 .checkentry = &icmp6_checkentry,
1424 static int __init init(void)
1428 xt_proto_init(AF_INET6);
1430 /* Noone else will be downing sem now, so we won't sleep */
1431 xt_register_target(AF_INET6, &ip6t_standard_target);
1432 xt_register_target(AF_INET6, &ip6t_error_target);
1433 xt_register_match(AF_INET6, &icmp6_matchstruct);
1435 /* Register setsockopt */
1436 ret = nf_register_sockopt(&ip6t_sockopts);
1438 duprintf("Unable to register sockopts.\n");
1439 xt_proto_fini(AF_INET6);
1443 printk("ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
1447 static void __exit fini(void)
1449 nf_unregister_sockopt(&ip6t_sockopts);
1450 xt_unregister_match(AF_INET6, &icmp6_matchstruct);
1451 xt_unregister_target(AF_INET6, &ip6t_error_target);
1452 xt_unregister_target(AF_INET6, &ip6t_standard_target);
1453 xt_proto_fini(AF_INET6);
1457 * find the offset to specified header or the protocol number of last header
1458 * if target < 0. "last header" is transport protocol header, ESP, or
1461 * If target header is found, its offset is set in *offset and return protocol
1462 * number. Otherwise, return -1.
1464 * Note that non-1st fragment is special case that "the protocol number
1465 * of last header" is "next header" field in Fragment header. In this case,
1466 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
/* NOTE(review): lossy extraction -- the opening comment delimiter, several
 * error returns, hdrlen computation for the fragment case, and the final
 * *offset/return lines are missing; restore from pristine source. */
1470 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
1471 int target, unsigned short *fragoff)
1473 unsigned int start = (u8*)(skb->nh.ipv6h + 1) - skb->data;
1474 u8 nexthdr = skb->nh.ipv6h->nexthdr;
1475 unsigned int len = skb->len - start;
1480 while (nexthdr != target) {
1481 struct ipv6_opt_hdr _hdr, *hp;
1482 unsigned int hdrlen;
/* A non-extension header (or NONE) means the chain has ended. */
1484 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
1490 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
1493 if (nexthdr == NEXTHDR_FRAGMENT) {
1494 unsigned short _frag_off, *fp;
1495 fp = skb_header_pointer(skb,
1496 start+offsetof(struct frag_hdr,
/* Mask out the M-bit and reserved bits of frag_off. */
1503 _frag_off = ntohs(*fp) & ~0x7;
1506 ((!ipv6_ext_hdr(hp->nexthdr)) ||
1507 nexthdr == NEXTHDR_NONE)) {
1509 *fragoff = _frag_off;
1515 } else if (nexthdr == NEXTHDR_AUTH)
/* AH length is in 32-bit words, excluding the first two. */
1516 hdrlen = (hp->hdrlen + 2) << 2;
1518 hdrlen = ipv6_optlen(hp);
1520 nexthdr = hp->nexthdr;
/* Symbols used by the ip6table_* table modules and match/target
 * extension modules. */
1529 EXPORT_SYMBOL(ip6t_register_table);
1530 EXPORT_SYMBOL(ip6t_unregister_table);
1531 EXPORT_SYMBOL(ip6t_do_table);
1532 EXPORT_SYMBOL(ip6t_ext_hdr);
1533 EXPORT_SYMBOL(ipv6_find_hdr);
1534 EXPORT_SYMBOL(ip6_masked_addrcmp);