2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) pr_info(format , ## args)
45 #define dprintf(format, args...)
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) pr_info(format , ## args)
51 #define duprintf(format, args...)
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x) WARN_ON(!(x))
57 #define IP_NF_ASSERT(x)
61 /* All the better to debug you with... */
66 void *ip6t_alloc_initial_table(const struct xt_table *info)
68 return xt_alloc_initial_table(ip6t, IP6T);
70 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
73 We keep a set of rules for each CPU, so we can avoid write-locking
74 them in the softirq when updating the counters and therefore
75 only need to read-lock in the softirq; doing a write_lock_bh() in user
76 context stops packets coming through and allows user context to read
77 the counters or update the rules.
79 Hence the start of any table is given by get_table() below. */
81 /* Check for an extension */
83 ip6t_ext_hdr(u8 nexthdr)
85 return (nexthdr == IPPROTO_HOPOPTS) ||
86 (nexthdr == IPPROTO_ROUTING) ||
87 (nexthdr == IPPROTO_FRAGMENT) ||
88 (nexthdr == IPPROTO_ESP) ||
89 (nexthdr == IPPROTO_AH) ||
90 (nexthdr == IPPROTO_NONE) ||
91 (nexthdr == IPPROTO_DSTOPTS);
94 /* Returns whether matches rule or not. */
95 /* Performance critical - called for every packet */
97 ip6_packet_match(const struct sk_buff *skb,
100 const struct ip6t_ip6 *ip6info,
101 unsigned int *protoff,
102 int *fragoff, bool *hotdrop)
105 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
107 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
109 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
110 &ip6info->src), IP6T_INV_SRCIP) ||
111 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
112 &ip6info->dst), IP6T_INV_DSTIP)) {
113 dprintf("Source or dest mismatch.\n");
115 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
116 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
117 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
118 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
119 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
120 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
124 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
126 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
127 dprintf("VIA in mismatch (%s vs %s).%s\n",
128 indev, ip6info->iniface,
129 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
133 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
135 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
136 dprintf("VIA out mismatch (%s vs %s).%s\n",
137 outdev, ip6info->outiface,
138 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
142 /* ... might want to do something with class and flowlabel here ... */
144 /* look for the desired protocol header */
145 if((ip6info->flags & IP6T_F_PROTO)) {
147 unsigned short _frag_off;
149 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
155 *fragoff = _frag_off;
157 dprintf("Packet protocol %hi ?= %s%hi.\n",
159 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
162 if (ip6info->proto == protohdr) {
163 if(ip6info->invflags & IP6T_INV_PROTO) {
169 /* We need match for the '-p all', too! */
170 if ((ip6info->proto != 0) &&
171 !(ip6info->invflags & IP6T_INV_PROTO))
177 /* should be ip6 safe */
179 ip6_checkentry(const struct ip6t_ip6 *ipv6)
181 if (ipv6->flags & ~IP6T_F_MASK) {
182 duprintf("Unknown flag bits set: %08X\n",
183 ipv6->flags & ~IP6T_F_MASK);
186 if (ipv6->invflags & ~IP6T_INV_MASK) {
187 duprintf("Unknown invflag bits set: %08X\n",
188 ipv6->invflags & ~IP6T_INV_MASK);
195 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
198 pr_info("error: `%s'\n", (const char *)par->targinfo);
203 static inline struct ip6t_entry *
204 get_entry(const void *base, unsigned int offset)
206 return (struct ip6t_entry *)(base + offset);
209 /* All zeroes == unconditional rule. */
210 /* Mildly perf critical (only if packet tracing is on) */
211 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
213 static const struct ip6t_ip6 uncond;
215 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
218 static inline const struct xt_entry_target *
219 ip6t_get_target_c(const struct ip6t_entry *e)
221 return ip6t_get_target((struct ip6t_entry *)e);
224 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
225 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
226 /* This cries for unification! */
227 static const char *const hooknames[] = {
228 [NF_INET_PRE_ROUTING] = "PREROUTING",
229 [NF_INET_LOCAL_IN] = "INPUT",
230 [NF_INET_FORWARD] = "FORWARD",
231 [NF_INET_LOCAL_OUT] = "OUTPUT",
232 [NF_INET_POST_ROUTING] = "POSTROUTING",
235 enum nf_ip_trace_comments {
236 NF_IP6_TRACE_COMMENT_RULE,
237 NF_IP6_TRACE_COMMENT_RETURN,
238 NF_IP6_TRACE_COMMENT_POLICY,
241 static const char *const comments[] = {
242 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
243 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
244 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
247 static struct nf_loginfo trace_loginfo = {
248 .type = NF_LOG_TYPE_LOG,
252 .logflags = NF_LOG_MASK,
257 /* Mildly perf critical (only if packet tracing is on) */
259 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
260 const char *hookname, const char **chainname,
261 const char **comment, unsigned int *rulenum)
263 const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
265 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
266 /* Head of user chain: ERROR target with chainname */
267 *chainname = t->target.data;
272 if (s->target_offset == sizeof(struct ip6t_entry) &&
273 strcmp(t->target.u.kernel.target->name,
274 XT_STANDARD_TARGET) == 0 &&
276 unconditional(&s->ipv6)) {
277 /* Tail of chains: STANDARD target (return/policy) */
278 *comment = *chainname == hookname
279 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
280 : comments[NF_IP6_TRACE_COMMENT_RETURN];
289 static void trace_packet(const struct sk_buff *skb,
291 const struct net_device *in,
292 const struct net_device *out,
293 const char *tablename,
294 const struct xt_table_info *private,
295 const struct ip6t_entry *e)
297 const void *table_base;
298 const struct ip6t_entry *root;
299 const char *hookname, *chainname, *comment;
300 const struct ip6t_entry *iter;
301 unsigned int rulenum = 0;
303 table_base = private->entries[smp_processor_id()];
304 root = get_entry(table_base, private->hook_entry[hook]);
306 hookname = chainname = hooknames[hook];
307 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
309 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
310 if (get_chainname_rulenum(iter, e, hookname,
311 &chainname, &comment, &rulenum) != 0)
314 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
315 "TRACE: %s:%s:%s:%u ",
316 tablename, chainname, comment, rulenum);
320 static inline __pure struct ip6t_entry *
321 ip6t_next_entry(const struct ip6t_entry *entry)
323 return (void *)entry + entry->next_offset;
326 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
328 ip6t_do_table(struct sk_buff *skb,
330 const struct net_device *in,
331 const struct net_device *out,
332 struct xt_table *table)
334 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
335 /* Initializing verdict to NF_DROP keeps gcc happy. */
336 unsigned int verdict = NF_DROP;
337 const char *indev, *outdev;
338 const void *table_base;
339 struct ip6t_entry *e, **jumpstack;
340 unsigned int *stackptr, origptr, cpu;
341 const struct xt_table_info *private;
342 struct xt_action_param acpar;
346 indev = in ? in->name : nulldevname;
347 outdev = out ? out->name : nulldevname;
348 /* We handle fragments by dealing with the first fragment as
349 * if it was a normal packet. All other fragments are treated
350 * normally, except that they will NEVER match rules that ask
351 * things we don't know, ie. tcp syn flag or ports). If the
352 * rule is also a fragment-specific rule, non-fragments won't
354 acpar.hotdrop = false;
357 acpar.family = NFPROTO_IPV6;
358 acpar.hooknum = hook;
360 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
363 addend = xt_write_recseq_begin();
364 private = table->private;
365 cpu = smp_processor_id();
366 table_base = private->entries[cpu];
367 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
368 stackptr = per_cpu_ptr(private->stackptr, cpu);
371 e = get_entry(table_base, private->hook_entry[hook]);
374 const struct xt_entry_target *t;
375 const struct xt_entry_match *ematch;
378 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
379 &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
381 e = ip6t_next_entry(e);
385 xt_ematch_foreach(ematch, e) {
386 acpar.match = ematch->u.kernel.match;
387 acpar.matchinfo = ematch->data;
388 if (!acpar.match->match(skb, &acpar))
392 ADD_COUNTER(e->counters, skb->len, 1);
394 t = ip6t_get_target_c(e);
395 IP_NF_ASSERT(t->u.kernel.target);
397 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
398 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
399 /* The packet is traced: log it */
400 if (unlikely(skb->nf_trace))
401 trace_packet(skb, hook, in, out,
402 table->name, private, e);
404 /* Standard target? */
405 if (!t->u.kernel.target->target) {
408 v = ((struct xt_standard_target *)t)->verdict;
410 /* Pop from stack? */
411 if (v != XT_RETURN) {
412 verdict = (unsigned)(-v) - 1;
415 if (*stackptr <= origptr)
416 e = get_entry(table_base,
417 private->underflow[hook]);
419 e = ip6t_next_entry(jumpstack[--*stackptr]);
422 if (table_base + v != ip6t_next_entry(e) &&
423 !(e->ipv6.flags & IP6T_F_GOTO)) {
424 if (*stackptr >= private->stacksize) {
428 jumpstack[(*stackptr)++] = e;
431 e = get_entry(table_base, v);
435 acpar.target = t->u.kernel.target;
436 acpar.targinfo = t->data;
438 verdict = t->u.kernel.target->target(skb, &acpar);
439 if (verdict == XT_CONTINUE)
440 e = ip6t_next_entry(e);
444 } while (!acpar.hotdrop);
448 xt_write_recseq_end(addend);
451 #ifdef DEBUG_ALLOW_ALL
460 /* Figures out from what hook each rule can be called: returns 0 if
461 there are loops. Puts hook bitmask in comefrom. */
463 mark_source_chains(const struct xt_table_info *newinfo,
464 unsigned int valid_hooks, void *entry0)
468 /* No recursion; use packet counter to save back ptrs (reset
469 to 0 as we leave), and comefrom to save source hook bitmask */
470 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
471 unsigned int pos = newinfo->hook_entry[hook];
472 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
474 if (!(valid_hooks & (1 << hook)))
477 /* Set initial back pointer. */
478 e->counters.pcnt = pos;
481 const struct xt_standard_target *t
482 = (void *)ip6t_get_target_c(e);
483 int visited = e->comefrom & (1 << hook);
485 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
486 pr_err("iptables: loop hook %u pos %u %08X.\n",
487 hook, pos, e->comefrom);
490 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
492 /* Unconditional return/END. */
493 if ((e->target_offset == sizeof(struct ip6t_entry) &&
494 (strcmp(t->target.u.user.name,
495 XT_STANDARD_TARGET) == 0) &&
497 unconditional(&e->ipv6)) || visited) {
498 unsigned int oldpos, size;
500 if ((strcmp(t->target.u.user.name,
501 XT_STANDARD_TARGET) == 0) &&
502 t->verdict < -NF_MAX_VERDICT - 1) {
503 duprintf("mark_source_chains: bad "
504 "negative verdict (%i)\n",
509 /* Return: backtrack through the last
512 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
513 #ifdef DEBUG_IP_FIREWALL_USER
515 & (1 << NF_INET_NUMHOOKS)) {
516 duprintf("Back unset "
523 pos = e->counters.pcnt;
524 e->counters.pcnt = 0;
526 /* We're at the start. */
530 e = (struct ip6t_entry *)
532 } while (oldpos == pos + e->next_offset);
535 size = e->next_offset;
536 e = (struct ip6t_entry *)
537 (entry0 + pos + size);
538 e->counters.pcnt = pos;
541 int newpos = t->verdict;
543 if (strcmp(t->target.u.user.name,
544 XT_STANDARD_TARGET) == 0 &&
546 if (newpos > newinfo->size -
547 sizeof(struct ip6t_entry)) {
548 duprintf("mark_source_chains: "
549 "bad verdict (%i)\n",
553 /* This a jump; chase it. */
554 duprintf("Jump rule %u -> %u\n",
557 /* ... this is a fallthru */
558 newpos = pos + e->next_offset;
560 e = (struct ip6t_entry *)
562 e->counters.pcnt = pos;
567 duprintf("Finished chain %u\n", hook);
572 static void cleanup_match(struct xt_entry_match *m, struct net *net)
574 struct xt_mtdtor_param par;
577 par.match = m->u.kernel.match;
578 par.matchinfo = m->data;
579 par.family = NFPROTO_IPV6;
580 if (par.match->destroy != NULL)
581 par.match->destroy(&par);
582 module_put(par.match->me);
586 check_entry(const struct ip6t_entry *e)
588 const struct xt_entry_target *t;
590 if (!ip6_checkentry(&e->ipv6))
593 if (e->target_offset + sizeof(struct xt_entry_target) >
597 t = ip6t_get_target_c(e);
598 if (e->target_offset + t->u.target_size > e->next_offset)
604 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
606 const struct ip6t_ip6 *ipv6 = par->entryinfo;
609 par->match = m->u.kernel.match;
610 par->matchinfo = m->data;
612 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
613 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
615 duprintf("ip_tables: check failed for `%s'.\n",
623 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
625 struct xt_match *match;
628 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
631 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
632 return PTR_ERR(match);
634 m->u.kernel.match = match;
636 ret = check_match(m, par);
642 module_put(m->u.kernel.match->me);
646 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
648 struct xt_entry_target *t = ip6t_get_target(e);
649 struct xt_tgchk_param par = {
653 .target = t->u.kernel.target,
655 .hook_mask = e->comefrom,
656 .family = NFPROTO_IPV6,
660 t = ip6t_get_target(e);
661 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
662 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
664 duprintf("ip_tables: check failed for `%s'.\n",
665 t->u.kernel.target->name);
672 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
675 struct xt_entry_target *t;
676 struct xt_target *target;
679 struct xt_mtchk_param mtpar;
680 struct xt_entry_match *ematch;
685 mtpar.entryinfo = &e->ipv6;
686 mtpar.hook_mask = e->comefrom;
687 mtpar.family = NFPROTO_IPV6;
688 xt_ematch_foreach(ematch, e) {
689 ret = find_check_match(ematch, &mtpar);
691 goto cleanup_matches;
695 t = ip6t_get_target(e);
696 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
698 if (IS_ERR(target)) {
699 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
700 ret = PTR_ERR(target);
701 goto cleanup_matches;
703 t->u.kernel.target = target;
705 ret = check_target(e, net, name);
710 module_put(t->u.kernel.target->me);
712 xt_ematch_foreach(ematch, e) {
715 cleanup_match(ematch, net);
720 static bool check_underflow(const struct ip6t_entry *e)
722 const struct xt_entry_target *t;
723 unsigned int verdict;
725 if (!unconditional(&e->ipv6))
727 t = ip6t_get_target_c(e);
728 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
730 verdict = ((struct xt_standard_target *)t)->verdict;
731 verdict = -verdict - 1;
732 return verdict == NF_DROP || verdict == NF_ACCEPT;
736 check_entry_size_and_hooks(struct ip6t_entry *e,
737 struct xt_table_info *newinfo,
738 const unsigned char *base,
739 const unsigned char *limit,
740 const unsigned int *hook_entries,
741 const unsigned int *underflows,
742 unsigned int valid_hooks)
747 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
748 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
749 duprintf("Bad offset %p\n", e);
754 < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
755 duprintf("checking: element %p size %u\n",
760 err = check_entry(e);
764 /* Check hooks & underflows */
765 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
766 if (!(valid_hooks & (1 << h)))
768 if ((unsigned char *)e - base == hook_entries[h])
769 newinfo->hook_entry[h] = hook_entries[h];
770 if ((unsigned char *)e - base == underflows[h]) {
771 if (!check_underflow(e)) {
772 pr_err("Underflows must be unconditional and "
773 "use the STANDARD target with "
777 newinfo->underflow[h] = underflows[h];
781 /* Clear counters and comefrom */
782 e->counters = ((struct xt_counters) { 0, 0 });
787 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
789 struct xt_tgdtor_param par;
790 struct xt_entry_target *t;
791 struct xt_entry_match *ematch;
793 /* Cleanup all matches */
794 xt_ematch_foreach(ematch, e)
795 cleanup_match(ematch, net);
796 t = ip6t_get_target(e);
799 par.target = t->u.kernel.target;
800 par.targinfo = t->data;
801 par.family = NFPROTO_IPV6;
802 if (par.target->destroy != NULL)
803 par.target->destroy(&par);
804 module_put(par.target->me);
807 /* Checks and translates the user-supplied table segment (held in
810 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
811 const struct ip6t_replace *repl)
813 struct ip6t_entry *iter;
817 newinfo->size = repl->size;
818 newinfo->number = repl->num_entries;
820 /* Init all hooks to impossible value. */
821 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
822 newinfo->hook_entry[i] = 0xFFFFFFFF;
823 newinfo->underflow[i] = 0xFFFFFFFF;
826 duprintf("translate_table: size %u\n", newinfo->size);
828 /* Walk through entries, checking offsets. */
829 xt_entry_foreach(iter, entry0, newinfo->size) {
830 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
838 if (strcmp(ip6t_get_target(iter)->u.user.name,
839 XT_ERROR_TARGET) == 0)
840 ++newinfo->stacksize;
843 if (i != repl->num_entries) {
844 duprintf("translate_table: %u not %u entries\n",
845 i, repl->num_entries);
849 /* Check hooks all assigned */
850 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
851 /* Only hooks which are valid */
852 if (!(repl->valid_hooks & (1 << i)))
854 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
855 duprintf("Invalid hook entry %u %u\n",
856 i, repl->hook_entry[i]);
859 if (newinfo->underflow[i] == 0xFFFFFFFF) {
860 duprintf("Invalid underflow %u %u\n",
861 i, repl->underflow[i]);
866 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
869 /* Finally, each sanity check must pass */
871 xt_entry_foreach(iter, entry0, newinfo->size) {
872 ret = find_check_entry(iter, net, repl->name, repl->size);
879 xt_entry_foreach(iter, entry0, newinfo->size) {
882 cleanup_entry(iter, net);
887 /* And one copy for every other CPU */
888 for_each_possible_cpu(i) {
889 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
890 memcpy(newinfo->entries[i], entry0, newinfo->size);
897 get_counters(const struct xt_table_info *t,
898 struct xt_counters counters[])
900 struct ip6t_entry *iter;
904 for_each_possible_cpu(cpu) {
905 seqcount_t *s = &per_cpu(xt_recseq, cpu);
908 xt_entry_foreach(iter, t->entries[cpu], t->size) {
913 start = read_seqcount_begin(s);
914 bcnt = iter->counters.bcnt;
915 pcnt = iter->counters.pcnt;
916 } while (read_seqcount_retry(s, start));
918 ADD_COUNTER(counters[i], bcnt, pcnt);
924 static struct xt_counters *alloc_counters(const struct xt_table *table)
926 unsigned int countersize;
927 struct xt_counters *counters;
928 const struct xt_table_info *private = table->private;
930 /* We need atomic snapshot of counters: rest doesn't change
931 (other than comefrom, which userspace doesn't care
933 countersize = sizeof(struct xt_counters) * private->number;
934 counters = vzalloc(countersize);
936 if (counters == NULL)
937 return ERR_PTR(-ENOMEM);
939 get_counters(private, counters);
945 copy_entries_to_user(unsigned int total_size,
946 const struct xt_table *table,
947 void __user *userptr)
949 unsigned int off, num;
950 const struct ip6t_entry *e;
951 struct xt_counters *counters;
952 const struct xt_table_info *private = table->private;
954 const void *loc_cpu_entry;
956 counters = alloc_counters(table);
957 if (IS_ERR(counters))
958 return PTR_ERR(counters);
960 /* choose the copy that is on our node/cpu, ...
961 * This choice is lazy (because current thread is
962 * allowed to migrate to another cpu)
964 loc_cpu_entry = private->entries[raw_smp_processor_id()];
965 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
970 /* FIXME: use iterator macros --RR */
971 /* ... then go back and fix counters and names */
972 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
974 const struct xt_entry_match *m;
975 const struct xt_entry_target *t;
977 e = (struct ip6t_entry *)(loc_cpu_entry + off);
978 if (copy_to_user(userptr + off
979 + offsetof(struct ip6t_entry, counters),
981 sizeof(counters[num])) != 0) {
986 for (i = sizeof(struct ip6t_entry);
987 i < e->target_offset;
988 i += m->u.match_size) {
991 if (copy_to_user(userptr + off + i
992 + offsetof(struct xt_entry_match,
994 m->u.kernel.match->name,
995 strlen(m->u.kernel.match->name)+1)
1002 t = ip6t_get_target_c(e);
1003 if (copy_to_user(userptr + off + e->target_offset
1004 + offsetof(struct xt_entry_target,
1006 t->u.kernel.target->name,
1007 strlen(t->u.kernel.target->name)+1) != 0) {
1018 #ifdef CONFIG_COMPAT
1019 static void compat_standard_from_user(void *dst, const void *src)
1021 int v = *(compat_int_t *)src;
1024 v += xt_compat_calc_jump(AF_INET6, v);
1025 memcpy(dst, &v, sizeof(v));
1028 static int compat_standard_to_user(void __user *dst, const void *src)
1030 compat_int_t cv = *(int *)src;
1033 cv -= xt_compat_calc_jump(AF_INET6, cv);
1034 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1037 static int compat_calc_entry(const struct ip6t_entry *e,
1038 const struct xt_table_info *info,
1039 const void *base, struct xt_table_info *newinfo)
1041 const struct xt_entry_match *ematch;
1042 const struct xt_entry_target *t;
1043 unsigned int entry_offset;
1046 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1047 entry_offset = (void *)e - base;
1048 xt_ematch_foreach(ematch, e)
1049 off += xt_compat_match_offset(ematch->u.kernel.match);
1050 t = ip6t_get_target_c(e);
1051 off += xt_compat_target_offset(t->u.kernel.target);
1052 newinfo->size -= off;
1053 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1057 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1058 if (info->hook_entry[i] &&
1059 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1060 newinfo->hook_entry[i] -= off;
1061 if (info->underflow[i] &&
1062 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1063 newinfo->underflow[i] -= off;
1068 static int compat_table_info(const struct xt_table_info *info,
1069 struct xt_table_info *newinfo)
1071 struct ip6t_entry *iter;
1072 void *loc_cpu_entry;
1075 if (!newinfo || !info)
1078 /* we dont care about newinfo->entries[] */
1079 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1080 newinfo->initial_entries = 0;
1081 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1082 xt_compat_init_offsets(AF_INET6, info->number);
1083 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1084 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1092 static int get_info(struct net *net, void __user *user,
1093 const int *len, int compat)
1095 char name[XT_TABLE_MAXNAMELEN];
1099 if (*len != sizeof(struct ip6t_getinfo)) {
1100 duprintf("length %u != %zu\n", *len,
1101 sizeof(struct ip6t_getinfo));
1105 if (copy_from_user(name, user, sizeof(name)) != 0)
1108 name[XT_TABLE_MAXNAMELEN-1] = '\0';
1109 #ifdef CONFIG_COMPAT
1111 xt_compat_lock(AF_INET6);
1113 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1114 "ip6table_%s", name);
1115 if (t && !IS_ERR(t)) {
1116 struct ip6t_getinfo info;
1117 const struct xt_table_info *private = t->private;
1118 #ifdef CONFIG_COMPAT
1119 struct xt_table_info tmp;
1122 ret = compat_table_info(private, &tmp);
1123 xt_compat_flush_offsets(AF_INET6);
1127 memset(&info, 0, sizeof(info));
1128 info.valid_hooks = t->valid_hooks;
1129 memcpy(info.hook_entry, private->hook_entry,
1130 sizeof(info.hook_entry));
1131 memcpy(info.underflow, private->underflow,
1132 sizeof(info.underflow));
1133 info.num_entries = private->number;
1134 info.size = private->size;
1135 strcpy(info.name, name);
1137 if (copy_to_user(user, &info, *len) != 0)
1145 ret = t ? PTR_ERR(t) : -ENOENT;
1146 #ifdef CONFIG_COMPAT
1148 xt_compat_unlock(AF_INET6);
1154 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1158 struct ip6t_get_entries get;
1161 if (*len < sizeof(get)) {
1162 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1165 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1167 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1168 duprintf("get_entries: %u != %zu\n",
1169 *len, sizeof(get) + get.size);
1173 t = xt_find_table_lock(net, AF_INET6, get.name);
1174 if (t && !IS_ERR(t)) {
1175 struct xt_table_info *private = t->private;
1176 duprintf("t->private->number = %u\n", private->number);
1177 if (get.size == private->size)
1178 ret = copy_entries_to_user(private->size,
1179 t, uptr->entrytable);
1181 duprintf("get_entries: I've got %u not %u!\n",
1182 private->size, get.size);
1188 ret = t ? PTR_ERR(t) : -ENOENT;
1194 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1195 struct xt_table_info *newinfo, unsigned int num_counters,
1196 void __user *counters_ptr)
1200 struct xt_table_info *oldinfo;
1201 struct xt_counters *counters;
1202 const void *loc_cpu_old_entry;
1203 struct ip6t_entry *iter;
1206 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1212 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1213 "ip6table_%s", name);
1214 if (!t || IS_ERR(t)) {
1215 ret = t ? PTR_ERR(t) : -ENOENT;
1216 goto free_newinfo_counters_untrans;
1220 if (valid_hooks != t->valid_hooks) {
1221 duprintf("Valid hook crap: %08X vs %08X\n",
1222 valid_hooks, t->valid_hooks);
1227 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1231 /* Update module usage count based on number of rules */
1232 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1233 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1234 if ((oldinfo->number > oldinfo->initial_entries) ||
1235 (newinfo->number <= oldinfo->initial_entries))
1237 if ((oldinfo->number > oldinfo->initial_entries) &&
1238 (newinfo->number <= oldinfo->initial_entries))
1241 /* Get the old counters, and synchronize with replace */
1242 get_counters(oldinfo, counters);
1244 /* Decrease module usage counts and free resource */
1245 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1246 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1247 cleanup_entry(iter, net);
1249 xt_free_table_info(oldinfo);
1250 if (copy_to_user(counters_ptr, counters,
1251 sizeof(struct xt_counters) * num_counters) != 0) {
1252 /* Silent error, can't fail, new table is already in place */
1253 net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1262 free_newinfo_counters_untrans:
1269 do_replace(struct net *net, const void __user *user, unsigned int len)
1272 struct ip6t_replace tmp;
1273 struct xt_table_info *newinfo;
1274 void *loc_cpu_entry;
1275 struct ip6t_entry *iter;
1277 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1280 /* overflow check */
1281 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1283 tmp.name[sizeof(tmp.name)-1] = 0;
1285 newinfo = xt_alloc_table_info(tmp.size);
1289 /* choose the copy that is on our node/cpu */
1290 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1291 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1297 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1301 duprintf("ip_tables: Translated table\n");
1303 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1304 tmp.num_counters, tmp.counters);
1306 goto free_newinfo_untrans;
1309 free_newinfo_untrans:
1310 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1311 cleanup_entry(iter, net);
1313 xt_free_table_info(newinfo);
1318 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1321 unsigned int i, curcpu;
1322 struct xt_counters_info tmp;
1323 struct xt_counters *paddc;
1324 unsigned int num_counters;
1329 const struct xt_table_info *private;
1331 const void *loc_cpu_entry;
1332 struct ip6t_entry *iter;
1333 unsigned int addend;
1334 #ifdef CONFIG_COMPAT
1335 struct compat_xt_counters_info compat_tmp;
1339 size = sizeof(struct compat_xt_counters_info);
1344 size = sizeof(struct xt_counters_info);
1347 if (copy_from_user(ptmp, user, size) != 0)
1350 #ifdef CONFIG_COMPAT
1352 num_counters = compat_tmp.num_counters;
1353 name = compat_tmp.name;
1357 num_counters = tmp.num_counters;
1361 if (len != size + num_counters * sizeof(struct xt_counters))
1364 paddc = vmalloc(len - size);
1368 if (copy_from_user(paddc, user + size, len - size) != 0) {
1373 t = xt_find_table_lock(net, AF_INET6, name);
1374 if (!t || IS_ERR(t)) {
1375 ret = t ? PTR_ERR(t) : -ENOENT;
1381 private = t->private;
1382 if (private->number != num_counters) {
1384 goto unlock_up_free;
1388 /* Choose the copy that is on our node */
1389 curcpu = smp_processor_id();
1390 addend = xt_write_recseq_begin();
1391 loc_cpu_entry = private->entries[curcpu];
1392 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1393 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1396 xt_write_recseq_end(addend);
1408 #ifdef CONFIG_COMPAT
1409 struct compat_ip6t_replace {
1410 char name[XT_TABLE_MAXNAMELEN];
1414 u32 hook_entry[NF_INET_NUMHOOKS];
1415 u32 underflow[NF_INET_NUMHOOKS];
1417 compat_uptr_t counters; /* struct xt_counters * */
1418 struct compat_ip6t_entry entries[0];
1422 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1423 unsigned int *size, struct xt_counters *counters,
1426 struct xt_entry_target *t;
1427 struct compat_ip6t_entry __user *ce;
1428 u_int16_t target_offset, next_offset;
1429 compat_uint_t origsize;
1430 const struct xt_entry_match *ematch;
1434 ce = (struct compat_ip6t_entry __user *)*dstptr;
1435 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1436 copy_to_user(&ce->counters, &counters[i],
1437 sizeof(counters[i])) != 0)
1440 *dstptr += sizeof(struct compat_ip6t_entry);
1441 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1443 xt_ematch_foreach(ematch, e) {
1444 ret = xt_compat_match_to_user(ematch, dstptr, size);
1448 target_offset = e->target_offset - (origsize - *size);
1449 t = ip6t_get_target(e);
1450 ret = xt_compat_target_to_user(t, dstptr, size);
1453 next_offset = e->next_offset - (origsize - *size);
1454 if (put_user(target_offset, &ce->target_offset) != 0 ||
1455 put_user(next_offset, &ce->next_offset) != 0)
1461 compat_find_calc_match(struct xt_entry_match *m,
1463 const struct ip6t_ip6 *ipv6,
1464 unsigned int hookmask,
1467 struct xt_match *match;
1469 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1470 m->u.user.revision);
1471 if (IS_ERR(match)) {
1472 duprintf("compat_check_calc_match: `%s' not found\n",
1474 return PTR_ERR(match);
1476 m->u.kernel.match = match;
1477 *size += xt_compat_match_offset(match);
1481 static void compat_release_entry(struct compat_ip6t_entry *e)
1483 struct xt_entry_target *t;
1484 struct xt_entry_match *ematch;
1486 /* Cleanup all matches */
1487 xt_ematch_foreach(ematch, e)
1488 module_put(ematch->u.kernel.match->me);
1489 t = compat_ip6t_get_target(e);
1490 module_put(t->u.kernel.target->me);
1494 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1495 struct xt_table_info *newinfo,
1497 const unsigned char *base,
1498 const unsigned char *limit,
1499 const unsigned int *hook_entries,
1500 const unsigned int *underflows,
1503 struct xt_entry_match *ematch;
1504 struct xt_entry_target *t;
1505 struct xt_target *target;
1506 unsigned int entry_offset;
1510 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1511 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1512 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1513 duprintf("Bad offset %p, limit = %p\n", e, limit);
1517 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1518 sizeof(struct compat_xt_entry_target)) {
1519 duprintf("checking: element %p size %u\n",
1524 /* For purposes of check_entry casting the compat entry is fine */
1525 ret = check_entry((struct ip6t_entry *)e);
1529 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1530 entry_offset = (void *)e - (void *)base;
1532 xt_ematch_foreach(ematch, e) {
1533 ret = compat_find_calc_match(ematch, name,
1534 &e->ipv6, e->comefrom, &off);
1536 goto release_matches;
1540 t = compat_ip6t_get_target(e);
1541 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1542 t->u.user.revision);
1543 if (IS_ERR(target)) {
1544 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1546 ret = PTR_ERR(target);
1547 goto release_matches;
1549 t->u.kernel.target = target;
1551 off += xt_compat_target_offset(target);
1553 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1557 /* Check hooks & underflows */
1558 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1559 if ((unsigned char *)e - base == hook_entries[h])
1560 newinfo->hook_entry[h] = hook_entries[h];
1561 if ((unsigned char *)e - base == underflows[h])
1562 newinfo->underflow[h] = underflows[h];
1565 /* Clear counters and comefrom */
1566 memset(&e->counters, 0, sizeof(e->counters));
1571 module_put(t->u.kernel.target->me);
1573 xt_ematch_foreach(ematch, e) {
1576 module_put(ematch->u.kernel.match->me);
1582 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1583 unsigned int *size, const char *name,
1584 struct xt_table_info *newinfo, unsigned char *base)
1586 struct xt_entry_target *t;
1587 struct ip6t_entry *de;
1588 unsigned int origsize;
1590 struct xt_entry_match *ematch;
1594 de = (struct ip6t_entry *)*dstptr;
1595 memcpy(de, e, sizeof(struct ip6t_entry));
1596 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1598 *dstptr += sizeof(struct ip6t_entry);
1599 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1601 xt_ematch_foreach(ematch, e) {
1602 ret = xt_compat_match_from_user(ematch, dstptr, size);
1606 de->target_offset = e->target_offset - (origsize - *size);
1607 t = compat_ip6t_get_target(e);
1608 xt_compat_target_from_user(t, dstptr, size);
1610 de->next_offset = e->next_offset - (origsize - *size);
1611 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1612 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1613 newinfo->hook_entry[h] -= origsize - *size;
1614 if ((unsigned char *)de - base < newinfo->underflow[h])
1615 newinfo->underflow[h] -= origsize - *size;
1620 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1625 struct xt_mtchk_param mtpar;
1626 struct xt_entry_match *ematch;
1631 mtpar.entryinfo = &e->ipv6;
1632 mtpar.hook_mask = e->comefrom;
1633 mtpar.family = NFPROTO_IPV6;
1634 xt_ematch_foreach(ematch, e) {
1635 ret = check_match(ematch, &mtpar);
1637 goto cleanup_matches;
1641 ret = check_target(e, net, name);
1643 goto cleanup_matches;
1647 xt_ematch_foreach(ematch, e) {
1650 cleanup_match(ematch, net);
1656 translate_compat_table(struct net *net,
1658 unsigned int valid_hooks,
1659 struct xt_table_info **pinfo,
1661 unsigned int total_size,
1662 unsigned int number,
1663 unsigned int *hook_entries,
1664 unsigned int *underflows)
1667 struct xt_table_info *newinfo, *info;
1668 void *pos, *entry0, *entry1;
1669 struct compat_ip6t_entry *iter0;
1670 struct ip6t_entry *iter1;
1677 info->number = number;
1679 /* Init all hooks to impossible value. */
1680 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1681 info->hook_entry[i] = 0xFFFFFFFF;
1682 info->underflow[i] = 0xFFFFFFFF;
1685 duprintf("translate_compat_table: size %u\n", info->size);
1687 xt_compat_lock(AF_INET6);
1688 xt_compat_init_offsets(AF_INET6, number);
1689 /* Walk through entries, checking offsets. */
1690 xt_entry_foreach(iter0, entry0, total_size) {
1691 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1693 entry0 + total_size,
1704 duprintf("translate_compat_table: %u not %u entries\n",
1709 /* Check hooks all assigned */
1710 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1711 /* Only hooks which are valid */
1712 if (!(valid_hooks & (1 << i)))
1714 if (info->hook_entry[i] == 0xFFFFFFFF) {
1715 duprintf("Invalid hook entry %u %u\n",
1716 i, hook_entries[i]);
1719 if (info->underflow[i] == 0xFFFFFFFF) {
1720 duprintf("Invalid underflow %u %u\n",
1727 newinfo = xt_alloc_table_info(size);
1731 newinfo->number = number;
1732 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1733 newinfo->hook_entry[i] = info->hook_entry[i];
1734 newinfo->underflow[i] = info->underflow[i];
1736 entry1 = newinfo->entries[raw_smp_processor_id()];
1739 xt_entry_foreach(iter0, entry0, total_size) {
1740 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1741 name, newinfo, entry1);
1745 xt_compat_flush_offsets(AF_INET6);
1746 xt_compat_unlock(AF_INET6);
1751 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1755 xt_entry_foreach(iter1, entry1, newinfo->size) {
1756 ret = compat_check_entry(iter1, net, name);
1760 if (strcmp(ip6t_get_target(iter1)->u.user.name,
1761 XT_ERROR_TARGET) == 0)
1762 ++newinfo->stacksize;
1766 * The first i matches need cleanup_entry (calls ->destroy)
1767 * because they had called ->check already. The other j-i
1768 * entries need only release.
1772 xt_entry_foreach(iter0, entry0, newinfo->size) {
1777 compat_release_entry(iter0);
1779 xt_entry_foreach(iter1, entry1, newinfo->size) {
1782 cleanup_entry(iter1, net);
1784 xt_free_table_info(newinfo);
1788 /* And one copy for every other CPU */
1789 for_each_possible_cpu(i)
1790 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1791 memcpy(newinfo->entries[i], entry1, newinfo->size);
1795 xt_free_table_info(info);
1799 xt_free_table_info(newinfo);
1801 xt_entry_foreach(iter0, entry0, total_size) {
1804 compat_release_entry(iter0);
1808 xt_compat_flush_offsets(AF_INET6);
1809 xt_compat_unlock(AF_INET6);
1814 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1817 struct compat_ip6t_replace tmp;
1818 struct xt_table_info *newinfo;
1819 void *loc_cpu_entry;
1820 struct ip6t_entry *iter;
1822 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1825 /* overflow check */
1826 if (tmp.size >= INT_MAX / num_possible_cpus())
1828 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1830 tmp.name[sizeof(tmp.name)-1] = 0;
1832 newinfo = xt_alloc_table_info(tmp.size);
1836 /* choose the copy that is on our node/cpu */
1837 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1838 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1844 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1845 &newinfo, &loc_cpu_entry, tmp.size,
1846 tmp.num_entries, tmp.hook_entry,
1851 duprintf("compat_do_replace: Translated table\n");
1853 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1854 tmp.num_counters, compat_ptr(tmp.counters));
1856 goto free_newinfo_untrans;
1859 free_newinfo_untrans:
1860 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1861 cleanup_entry(iter, net);
1863 xt_free_table_info(newinfo);
1868 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1873 if (!capable(CAP_NET_ADMIN))
1877 case IP6T_SO_SET_REPLACE:
1878 ret = compat_do_replace(sock_net(sk), user, len);
1881 case IP6T_SO_SET_ADD_COUNTERS:
1882 ret = do_add_counters(sock_net(sk), user, len, 1);
1886 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1893 struct compat_ip6t_get_entries {
1894 char name[XT_TABLE_MAXNAMELEN];
1896 struct compat_ip6t_entry entrytable[0];
1900 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1901 void __user *userptr)
1903 struct xt_counters *counters;
1904 const struct xt_table_info *private = table->private;
1908 const void *loc_cpu_entry;
1910 struct ip6t_entry *iter;
1912 counters = alloc_counters(table);
1913 if (IS_ERR(counters))
1914 return PTR_ERR(counters);
1916 /* choose the copy that is on our node/cpu, ...
1917 * This choice is lazy (because current thread is
1918 * allowed to migrate to another cpu)
1920 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1923 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1924 ret = compat_copy_entry_to_user(iter, &pos,
1925 &size, counters, i++);
1935 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1939 struct compat_ip6t_get_entries get;
1942 if (*len < sizeof(get)) {
1943 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1947 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1950 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1951 duprintf("compat_get_entries: %u != %zu\n",
1952 *len, sizeof(get) + get.size);
1956 xt_compat_lock(AF_INET6);
1957 t = xt_find_table_lock(net, AF_INET6, get.name);
1958 if (t && !IS_ERR(t)) {
1959 const struct xt_table_info *private = t->private;
1960 struct xt_table_info info;
1961 duprintf("t->private->number = %u\n", private->number);
1962 ret = compat_table_info(private, &info);
1963 if (!ret && get.size == info.size) {
1964 ret = compat_copy_entries_to_user(private->size,
1965 t, uptr->entrytable);
1967 duprintf("compat_get_entries: I've got %u not %u!\n",
1968 private->size, get.size);
1971 xt_compat_flush_offsets(AF_INET6);
1975 ret = t ? PTR_ERR(t) : -ENOENT;
1977 xt_compat_unlock(AF_INET6);
1981 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1984 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1988 if (!capable(CAP_NET_ADMIN))
1992 case IP6T_SO_GET_INFO:
1993 ret = get_info(sock_net(sk), user, len, 1);
1995 case IP6T_SO_GET_ENTRIES:
1996 ret = compat_get_entries(sock_net(sk), user, len);
1999 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2006 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2010 if (!capable(CAP_NET_ADMIN))
2014 case IP6T_SO_SET_REPLACE:
2015 ret = do_replace(sock_net(sk), user, len);
2018 case IP6T_SO_SET_ADD_COUNTERS:
2019 ret = do_add_counters(sock_net(sk), user, len, 0);
2023 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2031 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2035 if (!capable(CAP_NET_ADMIN))
2039 case IP6T_SO_GET_INFO:
2040 ret = get_info(sock_net(sk), user, len, 0);
2043 case IP6T_SO_GET_ENTRIES:
2044 ret = get_entries(sock_net(sk), user, len);
2047 case IP6T_SO_GET_REVISION_MATCH:
2048 case IP6T_SO_GET_REVISION_TARGET: {
2049 struct xt_get_revision rev;
2052 if (*len != sizeof(rev)) {
2056 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2060 rev.name[sizeof(rev.name)-1] = 0;
2062 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2067 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2070 "ip6t_%s", rev.name);
2075 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2082 struct xt_table *ip6t_register_table(struct net *net,
2083 const struct xt_table *table,
2084 const struct ip6t_replace *repl)
2087 struct xt_table_info *newinfo;
2088 struct xt_table_info bootstrap = {0};
2089 void *loc_cpu_entry;
2090 struct xt_table *new_table;
2092 newinfo = xt_alloc_table_info(repl->size);
2098 /* choose the copy on our node/cpu, but dont care about preemption */
2099 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2100 memcpy(loc_cpu_entry, repl->entries, repl->size);
2102 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2106 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2107 if (IS_ERR(new_table)) {
2108 ret = PTR_ERR(new_table);
2114 xt_free_table_info(newinfo);
2116 return ERR_PTR(ret);
2119 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2121 struct xt_table_info *private;
2122 void *loc_cpu_entry;
2123 struct module *table_owner = table->me;
2124 struct ip6t_entry *iter;
2126 private = xt_unregister_table(table);
2128 /* Decrease module usage counts and free resources */
2129 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2130 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2131 cleanup_entry(iter, net);
2132 if (private->number > private->initial_entries)
2133 module_put(table_owner);
2134 xt_free_table_info(private);
2137 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2139 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2140 u_int8_t type, u_int8_t code,
2143 return (type == test_type && code >= min_code && code <= max_code)
2148 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
2150 const struct icmp6hdr *ic;
2151 struct icmp6hdr _icmph;
2152 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2154 /* Must not be a fragment. */
2155 if (par->fragoff != 0)
2158 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2160 /* We've been asked to examine this packet, and we
2161 * can't. Hence, no choice but to drop.
2163 duprintf("Dropping evil ICMP tinygram.\n");
2164 par->hotdrop = true;
2168 return icmp6_type_code_match(icmpinfo->type,
2171 ic->icmp6_type, ic->icmp6_code,
2172 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2175 /* Called when user tries to insert an entry of this type. */
2176 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2178 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2180 /* Must specify no unknown invflags */
2181 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2184 /* The built-in targets: standard (NULL) and error. */
2185 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
2187 .name = XT_STANDARD_TARGET,
2188 .targetsize = sizeof(int),
2189 .family = NFPROTO_IPV6,
2190 #ifdef CONFIG_COMPAT
2191 .compatsize = sizeof(compat_int_t),
2192 .compat_from_user = compat_standard_from_user,
2193 .compat_to_user = compat_standard_to_user,
2197 .name = XT_ERROR_TARGET,
2198 .target = ip6t_error,
2199 .targetsize = XT_FUNCTION_MAXNAMELEN,
2200 .family = NFPROTO_IPV6,
2204 static struct nf_sockopt_ops ip6t_sockopts = {
2206 .set_optmin = IP6T_BASE_CTL,
2207 .set_optmax = IP6T_SO_SET_MAX+1,
2208 .set = do_ip6t_set_ctl,
2209 #ifdef CONFIG_COMPAT
2210 .compat_set = compat_do_ip6t_set_ctl,
2212 .get_optmin = IP6T_BASE_CTL,
2213 .get_optmax = IP6T_SO_GET_MAX+1,
2214 .get = do_ip6t_get_ctl,
2215 #ifdef CONFIG_COMPAT
2216 .compat_get = compat_do_ip6t_get_ctl,
2218 .owner = THIS_MODULE,
2221 static struct xt_match ip6t_builtin_mt[] __read_mostly = {
2224 .match = icmp6_match,
2225 .matchsize = sizeof(struct ip6t_icmp),
2226 .checkentry = icmp6_checkentry,
2227 .proto = IPPROTO_ICMPV6,
2228 .family = NFPROTO_IPV6,
2232 static int __net_init ip6_tables_net_init(struct net *net)
2234 return xt_proto_init(net, NFPROTO_IPV6);
2237 static void __net_exit ip6_tables_net_exit(struct net *net)
2239 xt_proto_fini(net, NFPROTO_IPV6);
2242 static struct pernet_operations ip6_tables_net_ops = {
2243 .init = ip6_tables_net_init,
2244 .exit = ip6_tables_net_exit,
2247 static int __init ip6_tables_init(void)
2251 ret = register_pernet_subsys(&ip6_tables_net_ops);
2255 /* No one else will be downing sem now, so we won't sleep */
2256 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2259 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2263 /* Register setsockopt */
2264 ret = nf_register_sockopt(&ip6t_sockopts);
2268 pr_info("(C) 2000-2006 Netfilter Core Team\n");
2272 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2274 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2276 unregister_pernet_subsys(&ip6_tables_net_ops);
2281 static void __exit ip6_tables_fini(void)
2283 nf_unregister_sockopt(&ip6t_sockopts);
2285 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2286 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2287 unregister_pernet_subsys(&ip6_tables_net_ops);
2291 * find the offset to specified header or the protocol number of last header
2292 * if target < 0. "last header" is transport protocol header, ESP, or
2295 * If target header is found, its offset is set in *offset and return protocol
2296 * number. Otherwise, return -1.
2298 * If the first fragment doesn't contain the final protocol header or
2299 * NEXTHDR_NONE it is considered invalid.
2301 * Note that non-1st fragment is special case that "the protocol number
2302 * of last header" is "next header" field in Fragment header. In this case,
2303 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2307 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2308 int target, unsigned short *fragoff)
2310 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2311 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2312 unsigned int len = skb->len - start;
2317 while (nexthdr != target) {
2318 struct ipv6_opt_hdr _hdr, *hp;
2319 unsigned int hdrlen;
2321 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2327 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2330 if (nexthdr == NEXTHDR_FRAGMENT) {
2331 unsigned short _frag_off;
2333 fp = skb_header_pointer(skb,
2334 start+offsetof(struct frag_hdr,
2341 _frag_off = ntohs(*fp) & ~0x7;
2344 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2345 hp->nexthdr == NEXTHDR_NONE)) {
2347 *fragoff = _frag_off;
2353 } else if (nexthdr == NEXTHDR_AUTH)
2354 hdrlen = (hp->hdrlen + 2) << 2;
2356 hdrlen = ipv6_optlen(hp);
2358 nexthdr = hp->nexthdr;
2367 EXPORT_SYMBOL(ip6t_register_table);
2368 EXPORT_SYMBOL(ip6t_unregister_table);
2369 EXPORT_SYMBOL(ip6t_do_table);
2370 EXPORT_SYMBOL(ip6t_ext_hdr);
2371 EXPORT_SYMBOL(ipv6_find_hdr);
2373 module_init(ip6_tables_init);
2374 module_exit(ip6_tables_fini);