/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) pr_info(format , ## args)
45 #define dprintf(format, args...)
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) pr_info(format , ## args)
51 #define duprintf(format, args...)
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x) \
58 printk("IP_NF_ASSERT: %s:%s:%u\n", \
59 __func__, __FILE__, __LINE__); \
62 #define IP_NF_ASSERT(x)
66 /* All the better to debug you with... */
71 void *ip6t_alloc_initial_table(const struct xt_table *info)
73 return xt_alloc_initial_table(ip6t, IP6T);
75 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below. */
86 /* Check for an extension */
88 ip6t_ext_hdr(u8 nexthdr)
90 return ( (nexthdr == IPPROTO_HOPOPTS) ||
91 (nexthdr == IPPROTO_ROUTING) ||
92 (nexthdr == IPPROTO_FRAGMENT) ||
93 (nexthdr == IPPROTO_ESP) ||
94 (nexthdr == IPPROTO_AH) ||
95 (nexthdr == IPPROTO_NONE) ||
96 (nexthdr == IPPROTO_DSTOPTS) );
99 /* Returns whether matches rule or not. */
100 /* Performance critical - called for every packet */
/* NOTE(review): this extraction has dropped lines (the return type,
 * several closing braces, `return` statements and parts of a
 * commented-out debug block); the comments below document only the
 * logic that is visible here. */
102 ip6_packet_match(const struct sk_buff *skb,
105 const struct ip6t_ip6 *ip6info,
106 unsigned int *protoff,
107 int *fragoff, bool *hotdrop)
110 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
/* FWINV: apply the rule's invert flag to a boolean test result. */
112 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
/* Source/destination address match under the rule's masks. */
114 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
115 &ip6info->src), IP6T_INV_SRCIP) ||
116 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
117 &ip6info->dst), IP6T_INV_DSTIP)) {
118 dprintf("Source or dest mismatch.\n");
/* The dprintf lines below look like remnants of a commented-out
 * IPv4-style debug block (note the trailing "*/" on the last one and
 * the ip->/ipinfo-> names) — presumably dead text, not live code. */
120 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
121 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
122 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
123 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
124 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
125 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
/* Input/output interface name matches (mask allows "eth+"-style
 * wildcards). */
129 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
131 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
132 dprintf("VIA in mismatch (%s vs %s).%s\n",
133 indev, ip6info->iniface,
134 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
138 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
140 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
141 dprintf("VIA out mismatch (%s vs %s).%s\n",
142 outdev, ip6info->outiface,
143 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
147 /* ... might want to do something with class and flowlabel here ... */
149 /* look for the desired protocol header */
/* Only walk the extension-header chain if the rule actually filters
 * on protocol (-p was given). */
150 if((ip6info->flags & IP6T_F_PROTO)) {
152 unsigned short _frag_off;
154 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
160 *fragoff = _frag_off;
162 dprintf("Packet protocol %hi ?= %s%hi.\n",
164 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
167 if (ip6info->proto == protohdr) {
168 if(ip6info->invflags & IP6T_INV_PROTO) {
174 /* We need match for the '-p all', too! */
175 if ((ip6info->proto != 0) &&
176 !(ip6info->invflags & IP6T_INV_PROTO))
182 /* should be ip6 safe */
184 ip6_checkentry(const struct ip6t_ip6 *ipv6)
186 if (ipv6->flags & ~IP6T_F_MASK) {
187 duprintf("Unknown flag bits set: %08X\n",
188 ipv6->flags & ~IP6T_F_MASK);
191 if (ipv6->invflags & ~IP6T_INV_MASK) {
192 duprintf("Unknown invflag bits set: %08X\n",
193 ipv6->invflags & ~IP6T_INV_MASK);
200 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
203 pr_info("error: `%s'\n", (const char *)par->targinfo);
208 /* Performance critical - called for every packet */
210 do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
211 struct xt_match_param *par)
213 par->match = m->u.kernel.match;
214 par->matchinfo = m->data;
216 /* Stop iteration if it doesn't match */
217 if (!m->u.kernel.match->match(skb, par))
/*
 * Fetch the rule that lives @offset bytes into the table blob @base.
 * (Arithmetic on const void * is a GCC extension the kernel relies on.)
 */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
229 /* All zeroes == unconditional rule. */
230 /* Mildly perf critical (only if packet tracing is on) */
231 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
233 static const struct ip6t_ip6 uncond;
235 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/*
 * Const-correct wrapper around ip6t_get_target(): the underlying
 * helper takes a non-const entry, so cast the const away — safe here
 * because the result is only ever read through the const return type.
 */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
244 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
245 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
246 /* This cries for unification! */
/* Human-readable hook names used when emitting TRACE log lines. */
247 static const char *const hooknames[] = {
248 [NF_INET_PRE_ROUTING] = "PREROUTING",
249 [NF_INET_LOCAL_IN] = "INPUT",
250 [NF_INET_FORWARD] = "FORWARD",
251 [NF_INET_LOCAL_OUT] = "OUTPUT",
252 [NF_INET_POST_ROUTING] = "POSTROUTING",
/* Why a rule terminated: a normal rule hit, a chain return, or the
 * chain's policy. */
255 enum nf_ip_trace_comments {
256 NF_IP6_TRACE_COMMENT_RULE,
257 NF_IP6_TRACE_COMMENT_RETURN,
258 NF_IP6_TRACE_COMMENT_POLICY,
261 static const char *const comments[] = {
262 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
263 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
264 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
/* Log settings for TRACE output.
 * NOTE(review): the extraction appears to have dropped the nested
 * .u.log initializer lines (level field) between .type and .logflags
 * — confirm against the upstream file. */
267 static struct nf_loginfo trace_loginfo = {
268 .type = NF_LOG_TYPE_LOG,
272 .logflags = NF_LOG_MASK,
277 /* Mildly perf critical (only if packet tracing is on) */
/* Walk rules from @s toward @e, tracking which user-defined chain we
 * are in (*chainname), the rule number within it (*rulenum), and why
 * traversal would stop (*comment). NOTE(review): the extraction has
 * dropped this function's return type, braces and return statements;
 * only the visible classification logic is documented here. */
279 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
280 const char *hookname, const char **chainname,
281 const char **comment, unsigned int *rulenum)
283 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
/* An ERROR target marks the head of a user chain; its data holds the
 * chain name. */
285 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
286 /* Head of user chain: ERROR target with chainname */
287 *chainname = t->target.data;
/* An unconditional STANDARD target with no matches is a chain tail:
 * either the base chain's policy or a user chain's implicit return. */
292 if (s->target_offset == sizeof(struct ip6t_entry) &&
293 strcmp(t->target.u.kernel.target->name,
294 IP6T_STANDARD_TARGET) == 0 &&
296 unconditional(&s->ipv6)) {
297 /* Tail of chains: STANDARD target (return/policy) */
298 *comment = *chainname == hookname
299 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
300 : comments[NF_IP6_TRACE_COMMENT_RETURN];
/* Emit a "TRACE: table:chain:comment:rulenum" log line for a traced
 * packet that hit rule @e. Only compiled when the TRACE target is
 * enabled. */
309 static void trace_packet(const struct sk_buff *skb,
311 const struct net_device *in,
312 const struct net_device *out,
313 const char *tablename,
314 const struct xt_table_info *private,
315 const struct ip6t_entry *e)
317 const void *table_base;
318 const struct ip6t_entry *root;
319 const char *hookname, *chainname, *comment;
320 const struct ip6t_entry *iter;
321 unsigned int rulenum = 0;
/* Use this CPU's copy of the table and start from the hook's entry
 * point. */
323 table_base = private->entries[smp_processor_id()];
324 root = get_entry(table_base, private->hook_entry[hook]);
326 hookname = chainname = hooknames[hook];
327 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
/* Scan rules until get_chainname_rulenum() reports it reached @e. */
329 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
330 if (get_chainname_rulenum(iter, e, hookname,
331 &chainname, &comment, &rulenum) != 0)
334 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
335 "TRACE: %s:%s:%s:%u ",
336 tablename, chainname, comment, rulenum);
340 static inline __pure struct ip6t_entry *
341 ip6t_next_entry(const struct ip6t_entry *entry)
343 return (void *)entry + entry->next_offset;
346 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main rule-traversal engine: walk the table's rules for @hook on the
 * current CPU and return a netfilter verdict. NOTE(review): this
 * extraction has dropped lines (function signature opening, several
 * braces, labels, the read-lock acquisition and final returns); the
 * comments below describe only what is visible. */
348 ip6t_do_table(struct sk_buff *skb,
350 const struct net_device *in,
351 const struct net_device *out,
352 struct xt_table *table)
354 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
355 bool hotdrop = false;
356 /* Initializing verdict to NF_DROP keeps gcc happy. */
357 unsigned int verdict = NF_DROP;
358 const char *indev, *outdev;
359 const void *table_base;
360 struct ip6t_entry *e, **jumpstack;
361 unsigned int *stackptr, origptr, cpu;
362 const struct xt_table_info *private;
363 struct xt_match_param mtpar;
364 struct xt_target_param tgpar;
/* NULL devices compare as an all-zero name. */
367 indev = in ? in->name : nulldevname;
368 outdev = out ? out->name : nulldevname;
369 /* We handle fragments by dealing with the first fragment as
370 * if it was a normal packet. All other fragments are treated
371 * normally, except that they will NEVER match rules that ask
372 * things we don't know, ie. tcp syn flag or ports). If the
373 * rule is also a fragment-specific rule, non-fragments won't
375 mtpar.hotdrop = &hotdrop;
376 mtpar.in = tgpar.in = in;
377 mtpar.out = tgpar.out = out;
378 mtpar.family = tgpar.family = NFPROTO_IPV6;
379 mtpar.hooknum = tgpar.hooknum = hook;
381 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
/* Per-CPU table copy plus per-CPU jump stack for chain calls. */
384 private = table->private;
385 cpu = smp_processor_id();
386 table_base = private->entries[cpu];
387 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
388 stackptr = &private->stackptr[cpu];
391 e = get_entry(table_base, private->hook_entry[hook]);
394 const struct ip6t_entry_target *t;
395 const struct xt_entry_match *ematch;
/* Cheap per-rule header match first; on failure fall through to the
 * next rule. */
398 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
399 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
401 e = ip6t_next_entry(e);
/* Then run each match extension. */
405 xt_ematch_foreach(ematch, e)
406 if (do_match(ematch, skb, &mtpar) != 0)
409 ADD_COUNTER(e->counters,
410 ntohs(ipv6_hdr(skb)->payload_len) +
411 sizeof(struct ipv6hdr), 1);
413 t = ip6t_get_target_c(e);
414 IP_NF_ASSERT(t->u.kernel.target);
416 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
417 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
418 /* The packet is traced: log it */
419 if (unlikely(skb->nf_trace))
420 trace_packet(skb, hook, in, out,
421 table->name, private, e);
423 /* Standard target? */
/* A NULL target function marks the built-in STANDARD target: its
 * verdict is either a final verdict (< 0, encoded) or a jump offset. */
424 if (!t->u.kernel.target->target) {
427 v = ((struct ip6t_standard_target *)t)->verdict;
429 /* Pop from stack? */
430 if (v != IP6T_RETURN) {
431 verdict = (unsigned)(-v) - 1;
/* RETURN with an empty stack falls back to the hook's underflow
 * (policy) rule. */
435 e = get_entry(table_base,
436 private->underflow[hook]);
438 e = ip6t_next_entry(jumpstack[--*stackptr]);
/* A jump (not a tail-call GOTO) pushes the current rule so RETURN can
 * resume after it. */
441 if (table_base + v != ip6t_next_entry(e) &&
442 !(e->ipv6.flags & IP6T_F_GOTO)) {
443 if (*stackptr >= private->stacksize) {
447 jumpstack[(*stackptr)++] = e;
450 e = get_entry(table_base, v);
454 /* Targets which reenter must return
456 tgpar.target = t->u.kernel.target;
457 tgpar.targinfo = t->data;
459 verdict = t->u.kernel.target->target(skb, &tgpar);
460 if (verdict == IP6T_CONTINUE)
461 e = ip6t_next_entry(e);
467 xt_info_rdunlock_bh();
470 #ifdef DEBUG_ALLOW_ALL
479 /* Figures out from what hook each rule can be called: returns 0 if
480 there are loops. Puts hook bitmask in comefrom. */
/* NOTE(review): this extraction has dropped lines (the return type,
 * several braces, continue/return statements and parts of the
 * backtracking arithmetic); comments below describe only the visible
 * structure of the depth-first walk. */
482 mark_source_chains(const struct xt_table_info *newinfo,
483 unsigned int valid_hooks, void *entry0)
487 /* No recursion; use packet counter to save back ptrs (reset
488 to 0 as we leave), and comefrom to save source hook bitmask */
489 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
490 unsigned int pos = newinfo->hook_entry[hook];
491 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
493 if (!(valid_hooks & (1 << hook)))
496 /* Set initial back pointer. */
497 e->counters.pcnt = pos;
500 const struct ip6t_standard_target *t
501 = (void *)ip6t_get_target_c(e);
502 int visited = e->comefrom & (1 << hook);
/* Bit NF_INET_NUMHOOKS marks "currently on the DFS path"; seeing it
 * again means a rule loop. */
504 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
505 printk("iptables: loop hook %u pos %u %08X.\n",
506 hook, pos, e->comefrom);
509 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
511 /* Unconditional return/END. */
512 if ((e->target_offset == sizeof(struct ip6t_entry) &&
513 (strcmp(t->target.u.user.name,
514 IP6T_STANDARD_TARGET) == 0) &&
516 unconditional(&e->ipv6)) || visited) {
517 unsigned int oldpos, size;
/* Reject verdicts below the legal encoded range. */
519 if ((strcmp(t->target.u.user.name,
520 IP6T_STANDARD_TARGET) == 0) &&
521 t->verdict < -NF_MAX_VERDICT - 1) {
522 duprintf("mark_source_chains: bad "
523 "negative verdict (%i)\n",
528 /* Return: backtrack through the last
531 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
532 #ifdef DEBUG_IP_FIREWALL_USER
534 & (1 << NF_INET_NUMHOOKS)) {
535 duprintf("Back unset "
/* Follow the back pointers saved in counters.pcnt, clearing them as
 * we unwind. */
542 pos = e->counters.pcnt;
543 e->counters.pcnt = 0;
545 /* We're at the start. */
549 e = (struct ip6t_entry *)
551 } while (oldpos == pos + e->next_offset);
554 size = e->next_offset;
555 e = (struct ip6t_entry *)
556 (entry0 + pos + size);
557 e->counters.pcnt = pos;
560 int newpos = t->verdict;
/* A non-negative verdict on a STANDARD target is a jump offset;
 * bounds-check it, otherwise fall through to the next rule. */
562 if (strcmp(t->target.u.user.name,
563 IP6T_STANDARD_TARGET) == 0 &&
565 if (newpos > newinfo->size -
566 sizeof(struct ip6t_entry)) {
567 duprintf("mark_source_chains: "
568 "bad verdict (%i)\n",
572 /* This a jump; chase it. */
573 duprintf("Jump rule %u -> %u\n",
576 /* ... this is a fallthru */
577 newpos = pos + e->next_offset;
579 e = (struct ip6t_entry *)
581 e->counters.pcnt = pos;
586 duprintf("Finished chain %u\n", hook);
591 static void cleanup_match(struct ip6t_entry_match *m, struct net *net)
593 struct xt_mtdtor_param par;
596 par.match = m->u.kernel.match;
597 par.matchinfo = m->data;
598 par.family = NFPROTO_IPV6;
599 if (par.match->destroy != NULL)
600 par.match->destroy(&par);
601 module_put(par.match->me);
605 check_entry(const struct ip6t_entry *e, const char *name)
607 const struct ip6t_entry_target *t;
609 if (!ip6_checkentry(&e->ipv6)) {
610 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
614 if (e->target_offset + sizeof(struct ip6t_entry_target) >
618 t = ip6t_get_target_c(e);
619 if (e->target_offset + t->u.target_size > e->next_offset)
625 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
627 const struct ip6t_ip6 *ipv6 = par->entryinfo;
630 par->match = m->u.kernel.match;
631 par->matchinfo = m->data;
633 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
634 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
636 duprintf("ip_tables: check failed for `%s'.\n",
644 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
646 struct xt_match *match;
649 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
652 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
653 return PTR_ERR(match);
655 m->u.kernel.match = match;
657 ret = check_match(m, par);
663 module_put(m->u.kernel.match->me);
667 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
669 struct ip6t_entry_target *t = ip6t_get_target(e);
670 struct xt_tgchk_param par = {
674 .target = t->u.kernel.target,
676 .hook_mask = e->comefrom,
677 .family = NFPROTO_IPV6,
681 t = ip6t_get_target(e);
682 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
683 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
685 duprintf("ip_tables: check failed for `%s'.\n",
686 t->u.kernel.target->name);
/* Resolve and validate every match and the target of one rule,
 * taking module references; unwinds matches on failure.
 * NOTE(review): the extraction has dropped lines here (return type,
 * mtpar.net/table setup, error labels and returns); comments describe
 * only the visible flow. */
693 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
696 struct ip6t_entry_target *t;
697 struct xt_target *target;
700 struct xt_mtchk_param mtpar;
701 struct xt_entry_match *ematch;
703 ret = check_entry(e, name);
/* Common check params shared by every match of this rule. */
710 mtpar.entryinfo = &e->ipv6;
711 mtpar.hook_mask = e->comefrom;
712 mtpar.family = NFPROTO_IPV6;
713 xt_ematch_foreach(ematch, e) {
714 ret = find_check_match(ematch, &mtpar);
716 goto cleanup_matches;
/* Then resolve and validate the target. */
720 t = ip6t_get_target(e);
721 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
723 if (IS_ERR(target)) {
724 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
725 ret = PTR_ERR(target);
726 goto cleanup_matches;
728 t->u.kernel.target = target;
730 ret = check_target(e, net, name);
/* Error unwinding: drop the target ref, then clean up the matches
 * resolved so far. */
735 module_put(t->u.kernel.target->me);
737 xt_ematch_foreach(ematch, e) {
740 cleanup_match(ematch, net);
745 static bool check_underflow(const struct ip6t_entry *e)
747 const struct ip6t_entry_target *t;
748 unsigned int verdict;
750 if (!unconditional(&e->ipv6))
752 t = ip6t_get_target_c(e);
753 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
755 verdict = ((struct ip6t_standard_target *)t)->verdict;
756 verdict = -verdict - 1;
757 return verdict == NF_DROP || verdict == NF_ACCEPT;
/* Structural pass over one rule: alignment, minimum size, and
 * recording which hook entry points / underflows it coincides with.
 * NOTE(review): the extraction has dropped lines (return type, some
 * size checks and returns); comments cover only what is visible. */
761 check_entry_size_and_hooks(struct ip6t_entry *e,
762 struct xt_table_info *newinfo,
763 const unsigned char *base,
764 const unsigned char *limit,
765 const unsigned int *hook_entries,
766 const unsigned int *underflows,
767 unsigned int valid_hooks)
/* Rule must be properly aligned and leave room for at least its own
 * header before the blob's end. */
771 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
772 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
773 duprintf("Bad offset %p\n", e);
778 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
779 duprintf("checking: element %p size %u\n",
784 /* Check hooks & underflows */
785 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
786 if (!(valid_hooks & (1 << h)))
788 if ((unsigned char *)e - base == hook_entries[h])
789 newinfo->hook_entry[h] = hook_entries[h];
790 if ((unsigned char *)e - base == underflows[h]) {
/* Underflows must be valid policy rules (see check_underflow). */
791 if (!check_underflow(e)) {
792 pr_err("Underflows must be unconditional and "
793 "use the STANDARD target with "
797 newinfo->underflow[h] = underflows[h];
801 /* Clear counters and comefrom */
802 e->counters = ((struct xt_counters) { 0, 0 });
/* Tear down one rule completely: destroy all its matches, then run the
 * target destructor and drop the target's module reference.
 * NOTE(review): the extraction appears to have dropped the
 * "par.net = net;" assignment before par.target — confirm against the
 * upstream file. */
807 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
809 struct xt_tgdtor_param par;
810 struct ip6t_entry_target *t;
811 struct xt_entry_match *ematch;
813 /* Cleanup all matches */
814 xt_ematch_foreach(ematch, e)
815 cleanup_match(ematch, net);
816 t = ip6t_get_target(e);
819 par.target = t->u.kernel.target;
820 par.targinfo = t->data;
821 par.family = NFPROTO_IPV6;
822 if (par.target->destroy != NULL)
823 par.target->destroy(&par);
824 module_put(par.target->me);
827 /* Checks and translates the user-supplied table segment (held in
/* ...newinfo). Full pipeline: structural checks, hook bookkeeping,
 * loop detection, per-rule extension validation, then replication to
 * every CPU's copy. NOTE(review): the extraction has dropped lines
 * (several returns, loop increments and error-path statements). */
830 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
831 const struct ip6t_replace *repl)
833 struct ip6t_entry *iter;
837 newinfo->size = repl->size;
838 newinfo->number = repl->num_entries;
840 /* Init all hooks to impossible value. */
841 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
842 newinfo->hook_entry[i] = 0xFFFFFFFF;
843 newinfo->underflow[i] = 0xFFFFFFFF;
846 duprintf("translate_table: size %u\n", newinfo->size);
848 /* Walk through entries, checking offsets. */
849 xt_entry_foreach(iter, entry0, newinfo->size) {
850 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
/* Each user chain (ERROR target) adds a jump-stack slot. */
858 if (strcmp(ip6t_get_target(iter)->u.user.name,
859 XT_ERROR_TARGET) == 0)
860 ++newinfo->stacksize;
863 if (i != repl->num_entries) {
864 duprintf("translate_table: %u not %u entries\n",
865 i, repl->num_entries);
869 /* Check hooks all assigned */
870 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
871 /* Only hooks which are valid */
872 if (!(repl->valid_hooks & (1 << i)))
874 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
875 duprintf("Invalid hook entry %u %u\n",
876 i, repl->hook_entry[i]);
879 if (newinfo->underflow[i] == 0xFFFFFFFF) {
880 duprintf("Invalid underflow %u %u\n",
881 i, repl->underflow[i]);
/* Reject rulesets containing chain loops. */
886 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
889 /* Finally, each sanity check must pass */
891 xt_entry_foreach(iter, entry0, newinfo->size) {
892 ret = find_check_entry(iter, net, repl->name, repl->size);
/* On failure, clean up only the rules already checked. */
899 xt_entry_foreach(iter, entry0, newinfo->size) {
902 cleanup_entry(iter, net);
907 /* And one copy for every other CPU */
908 for_each_possible_cpu(i) {
909 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
910 memcpy(newinfo->entries[i], entry0, newinfo->size);
/* Snapshot all rule counters into @counters: seed from the local CPU's
 * copy, then add every other CPU's copy under its write lock.
 * NOTE(review): extraction dropped the return type, local declarations
 * (cpu, i) and the local_bh_disable/enable pairing implied by the
 * comment below. */
917 get_counters(const struct xt_table_info *t,
918 struct xt_counters counters[])
920 struct ip6t_entry *iter;
925 /* Instead of clearing (by a previous call to memset())
926 * the counters and using adds, we set the counters
927 * with data used by 'current' CPU
929 * Bottom half has to be disabled to prevent deadlock
930 * if new softirq were to run and call ipt_do_table
933 curcpu = smp_processor_id();
936 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
937 SET_COUNTER(counters[i], iter->counters.bcnt,
938 iter->counters.pcnt);
/* Remaining CPUs are accumulated, not overwritten. */
942 for_each_possible_cpu(cpu) {
947 xt_entry_foreach(iter, t->entries[cpu], t->size) {
948 ADD_COUNTER(counters[i], iter->counters.bcnt,
949 iter->counters.pcnt);
952 xt_info_wrunlock(cpu);
957 static struct xt_counters *alloc_counters(const struct xt_table *table)
959 unsigned int countersize;
960 struct xt_counters *counters;
961 const struct xt_table_info *private = table->private;
963 /* We need atomic snapshot of counters: rest doesn't change
964 (other than comefrom, which userspace doesn't care
966 countersize = sizeof(struct xt_counters) * private->number;
967 counters = vmalloc_node(countersize, numa_node_id());
969 if (counters == NULL)
970 return ERR_PTR(-ENOMEM);
972 get_counters(private, counters);
/* Copy the table blob to userspace, then patch in the counter
 * snapshot and replace kernel match/target pointers with their names.
 * NOTE(review): extraction dropped the return type, several error
 * branches (ret = -EFAULT; goto free_counters;) and the trailing
 * cleanup label. */
978 copy_entries_to_user(unsigned int total_size,
979 const struct xt_table *table,
980 void __user *userptr)
982 unsigned int off, num;
983 const struct ip6t_entry *e;
984 struct xt_counters *counters;
985 const struct xt_table_info *private = table->private;
987 const void *loc_cpu_entry;
989 counters = alloc_counters(table);
990 if (IS_ERR(counters))
991 return PTR_ERR(counters);
993 /* choose the copy that is on our node/cpu, ...
994 * This choice is lazy (because current thread is
995 * allowed to migrate to another cpu)
997 loc_cpu_entry = private->entries[raw_smp_processor_id()];
998 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1003 /* FIXME: use iterator macros --RR */
1004 /* ... then go back and fix counters and names */
1005 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1007 const struct ip6t_entry_match *m;
1008 const struct ip6t_entry_target *t;
1010 e = (struct ip6t_entry *)(loc_cpu_entry + off);
/* Overwrite the counters field with the snapshot taken above. */
1011 if (copy_to_user(userptr + off
1012 + offsetof(struct ip6t_entry, counters),
1014 sizeof(counters[num])) != 0) {
/* For each match, write the resolved extension's name over the
 * kernel-pointer union. */
1019 for (i = sizeof(struct ip6t_entry);
1020 i < e->target_offset;
1021 i += m->u.match_size) {
1024 if (copy_to_user(userptr + off + i
1025 + offsetof(struct ip6t_entry_match,
1027 m->u.kernel.match->name,
1028 strlen(m->u.kernel.match->name)+1)
1035 t = ip6t_get_target_c(e);
1036 if (copy_to_user(userptr + off + e->target_offset
1037 + offsetof(struct ip6t_entry_target,
1039 t->u.kernel.target->name,
1040 strlen(t->u.kernel.target->name)+1) != 0) {
1051 #ifdef CONFIG_COMPAT
1052 static void compat_standard_from_user(void *dst, const void *src)
1054 int v = *(compat_int_t *)src;
1057 v += xt_compat_calc_jump(AF_INET6, v);
1058 memcpy(dst, &v, sizeof(v));
1061 static int compat_standard_to_user(void __user *dst, const void *src)
1063 compat_int_t cv = *(int *)src;
1066 cv -= xt_compat_calc_jump(AF_INET6, cv);
1067 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* Compute how much smaller one rule becomes in compat layout and
 * record the per-entry offset delta; also shrink any hook entry /
 * underflow offsets that lie beyond this rule. */
1070 static int compat_calc_entry(const struct ip6t_entry *e,
1071 const struct xt_table_info *info,
1072 const void *base, struct xt_table_info *newinfo)
1074 const struct xt_entry_match *ematch;
1075 const struct ip6t_entry_target *t;
1076 unsigned int entry_offset;
/* Start with the header-size difference, then add each extension's
 * compat size difference. */
1079 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1080 entry_offset = (void *)e - base;
1081 xt_ematch_foreach(ematch, e)
1082 off += xt_compat_match_offset(ematch->u.kernel.match);
1083 t = ip6t_get_target_c(e);
1084 off += xt_compat_target_offset(t->u.kernel.target);
1085 newinfo->size -= off;
1086 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
/* Any hook/underflow offset past this entry shifts down by `off`. */
1090 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1091 if (info->hook_entry[i] &&
1092 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1093 newinfo->hook_entry[i] -= off;
1094 if (info->underflow[i] &&
1095 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1096 newinfo->underflow[i] -= off;
/* Build a compat-sized view of @info in @newinfo by running
 * compat_calc_entry() over every rule of the local CPU's copy. */
1101 static int compat_table_info(const struct xt_table_info *info,
1102 struct xt_table_info *newinfo)
1104 struct ip6t_entry *iter;
1105 void *loc_cpu_entry;
1108 if (!newinfo || !info)
1111 /* we dont care about newinfo->entries[] */
1112 memcpy(newinfo, info, offsetof(struct xt_table_info, entries))
1113 newinfo->initial_entries = 0;
1114 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1115 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1116 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
/* IP6T_SO_GET_INFO handler: look up the named table (loading its
 * module on demand) and copy its hook offsets, sizes and entry count
 * to userspace; under CONFIG_COMPAT, report compat-adjusted sizes.
 * NOTE(review): extraction dropped several error returns and the
 * compat-mode conditionals around the xt_compat_* calls. */
1124 static int get_info(struct net *net, void __user *user,
1125 const int *len, int compat)
1127 char name[IP6T_TABLE_MAXNAMELEN];
1131 if (*len != sizeof(struct ip6t_getinfo)) {
1132 duprintf("length %u != %zu\n", *len,
1133 sizeof(struct ip6t_getinfo));
1137 if (copy_from_user(name, user, sizeof(name)) != 0)
/* Force NUL termination of the userspace-supplied name. */
1140 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1141 #ifdef CONFIG_COMPAT
1143 xt_compat_lock(AF_INET6);
1145 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1146 "ip6table_%s", name);
1147 if (t && !IS_ERR(t)) {
1148 struct ip6t_getinfo info;
1149 const struct xt_table_info *private = t->private;
1150 #ifdef CONFIG_COMPAT
1151 struct xt_table_info tmp;
/* In compat mode, report the 32-bit layout's sizes/offsets. */
1154 ret = compat_table_info(private, &tmp);
1155 xt_compat_flush_offsets(AF_INET6);
1159 info.valid_hooks = t->valid_hooks;
1160 memcpy(info.hook_entry, private->hook_entry,
1161 sizeof(info.hook_entry));
1162 memcpy(info.underflow, private->underflow,
1163 sizeof(info.underflow));
1164 info.num_entries = private->number;
1165 info.size = private->size;
1166 strcpy(info.name, name);
1168 if (copy_to_user(user, &info, *len) != 0)
1176 ret = t ? PTR_ERR(t) : -ENOENT;
1177 #ifdef CONFIG_COMPAT
1179 xt_compat_unlock(AF_INET6);
/* IP6T_SO_GET_ENTRIES handler: validate the requested size against
 * the live table, then dump the ruleset via copy_entries_to_user().
 * NOTE(review): extraction dropped the return type, some -EINVAL /
 * -EFAULT returns and the unlock/module_put tail. */
1185 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1189 struct ip6t_get_entries get;
1192 if (*len < sizeof(get)) {
1193 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1196 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1198 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1199 duprintf("get_entries: %u != %zu\n",
1200 *len, sizeof(get) + get.size);
1204 t = xt_find_table_lock(net, AF_INET6, get.name);
1205 if (t && !IS_ERR(t)) {
1206 struct xt_table_info *private = t->private;
1207 duprintf("t->private->number = %u\n", private->number);
/* Userspace must ask for exactly the current table size. */
1208 if (get.size == private->size)
1209 ret = copy_entries_to_user(private->size,
1210 t, uptr->entrytable);
1212 duprintf("get_entries: I've got %u not %u!\n",
1213 private->size, get.size);
1219 ret = t ? PTR_ERR(t) : -ENOENT;
/* Swap @newinfo in as the named table's ruleset: allocate the counter
 * buffer first, atomically replace via xt_replace_table(), adjust
 * module refcounts, snapshot and return the old counters, then free
 * the old table. NOTE(review): extraction dropped the return type,
 * the ENOMEM check, the module_put/module counts bodies and the
 * final error-path frees. */
1225 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1226 struct xt_table_info *newinfo, unsigned int num_counters,
1227 void __user *counters_ptr)
1231 struct xt_table_info *oldinfo;
1232 struct xt_counters *counters;
1233 const void *loc_cpu_old_entry;
1234 struct ip6t_entry *iter;
1237 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1244 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1245 "ip6table_%s", name);
1246 if (!t || IS_ERR(t)) {
1247 ret = t ? PTR_ERR(t) : -ENOENT;
1248 goto free_newinfo_counters_untrans;
/* Replacement must cover exactly the same hooks as the old table. */
1252 if (valid_hooks != t->valid_hooks) {
1253 duprintf("Valid hook crap: %08X vs %08X\n",
1254 valid_hooks, t->valid_hooks);
1259 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1263 /* Update module usage count based on number of rules */
1264 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1265 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1266 if ((oldinfo->number > oldinfo->initial_entries) ||
1267 (newinfo->number <= oldinfo->initial_entries))
1269 if ((oldinfo->number > oldinfo->initial_entries) &&
1270 (newinfo->number <= oldinfo->initial_entries))
1273 /* Get the old counters, and synchronize with replace */
1274 get_counters(oldinfo, counters);
1276 /* Decrease module usage counts and free resource */
1277 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1278 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1279 cleanup_entry(iter, net);
1281 xt_free_table_info(oldinfo);
1282 if (copy_to_user(counters_ptr, counters,
1283 sizeof(struct xt_counters) * num_counters) != 0)
1292 free_newinfo_counters_untrans:
/* IP6T_SO_SET_REPLACE handler: copy the replacement blob from
 * userspace, translate/validate it, then install it via
 * __do_replace(); on failure, tear the translated entries back down.
 * NOTE(review): extraction dropped the return type, some error
 * returns and the success-path return. */
1299 do_replace(struct net *net, const void __user *user, unsigned int len)
1302 struct ip6t_replace tmp;
1303 struct xt_table_info *newinfo;
1304 void *loc_cpu_entry;
1305 struct ip6t_entry *iter;
1307 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1310 /* overflow check */
1311 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1314 newinfo = xt_alloc_table_info(tmp.size);
1318 /* choose the copy that is on our node/cpu */
1319 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1320 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1326 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1330 duprintf("ip_tables: Translated table\n");
1332 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1333 tmp.num_counters, tmp.counters);
1335 goto free_newinfo_untrans;
/* Unwind: destroy every translated entry, then free the table info. */
1338 free_newinfo_untrans:
1339 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1340 cleanup_entry(iter, net);
1342 xt_free_table_info(newinfo);
/* IP6T_SO_SET_ADD_COUNTERS handler: copy a counter array from
 * userspace (compat or native layout) and add it into the live
 * table's counters under the per-CPU write lock.
 * NOTE(review): extraction dropped the return type, the `compat`
 * parameter name, ptmp setup, several error returns and the unlock/
 * free tail. */
1347 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1350 unsigned int i, curcpu;
1351 struct xt_counters_info tmp;
1352 struct xt_counters *paddc;
1353 unsigned int num_counters;
1358 const struct xt_table_info *private;
1360 const void *loc_cpu_entry;
1361 struct ip6t_entry *iter;
1362 #ifdef CONFIG_COMPAT
1363 struct compat_xt_counters_info compat_tmp;
/* Header size differs between compat and native callers. */
1367 size = sizeof(struct compat_xt_counters_info);
1372 size = sizeof(struct xt_counters_info);
1375 if (copy_from_user(ptmp, user, size) != 0)
1378 #ifdef CONFIG_COMPAT
1380 num_counters = compat_tmp.num_counters;
1381 name = compat_tmp.name;
1385 num_counters = tmp.num_counters;
/* Total length must match header + declared counter array. */
1389 if (len != size + num_counters * sizeof(struct xt_counters))
1392 paddc = vmalloc_node(len - size, numa_node_id());
1396 if (copy_from_user(paddc, user + size, len - size) != 0) {
1401 t = xt_find_table_lock(net, AF_INET6, name);
1402 if (!t || IS_ERR(t)) {
1403 ret = t ? PTR_ERR(t) : -ENOENT;
1409 private = t->private;
1410 if (private->number != num_counters) {
1412 goto unlock_up_free;
1416 /* Choose the copy that is on our node */
1417 curcpu = smp_processor_id();
1418 xt_info_wrlock(curcpu);
1419 loc_cpu_entry = private->entries[curcpu];
1420 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1421 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1424 xt_info_wrunlock(curcpu);
1436 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace; pointers become
 * compat_uptr_t. NOTE(review): extraction dropped several members
 * (valid_hooks, num_entries, size, num_counters) between name and
 * hook_entry — confirm against the upstream definition. */
1437 struct compat_ip6t_replace {
1438 char name[IP6T_TABLE_MAXNAMELEN];
1442 u32 hook_entry[NF_INET_NUMHOOKS];
1443 u32 underflow[NF_INET_NUMHOOKS];
1445 compat_uptr_t counters; /* struct ip6t_counters * */
1446 struct compat_ip6t_entry entries[0];
/* Serialize one native rule into compat layout at *dstptr, patching
 * counters and shrinking target/next offsets by the accumulated size
 * difference. NOTE(review): extraction dropped the return type, the
 * `i` parameter line, origsize initialization, -EFAULT returns and
 * the final return. */
1450 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1451 unsigned int *size, struct xt_counters *counters,
1454 struct ip6t_entry_target *t;
1455 struct compat_ip6t_entry __user *ce;
1456 u_int16_t target_offset, next_offset;
1457 compat_uint_t origsize;
1458 const struct xt_entry_match *ematch;
1462 ce = (struct compat_ip6t_entry __user *)*dstptr;
1463 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1464 copy_to_user(&ce->counters, &counters[i],
1465 sizeof(counters[i])) != 0)
1468 *dstptr += sizeof(struct compat_ip6t_entry);
1469 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
/* Each match/target converts itself and advances dstptr/size. */
1471 xt_ematch_foreach(ematch, e) {
1472 ret = xt_compat_match_to_user(ematch, dstptr, size);
1476 target_offset = e->target_offset - (origsize - *size);
1477 t = ip6t_get_target(e);
1478 ret = xt_compat_target_to_user(t, dstptr, size);
1481 next_offset = e->next_offset - (origsize - *size);
/* Fix up the offsets in the already-copied compat header. */
1482 if (put_user(target_offset, &ce->target_offset) != 0 ||
1483 put_user(next_offset, &ce->next_offset) != 0)
/* Resolve a match by name/revision for a compat entry and accumulate the
 * kernel-vs-compat size delta into *size.  On success the match module's
 * refcount is held via m->u.kernel.match; the caller releases it on error
 * paths (see compat_release_entry).
 */
1489 compat_find_calc_match(struct ip6t_entry_match *m,
1491 const struct ip6t_ip6 *ipv6,
1492 unsigned int hookmask,
1495 struct xt_match *match;
/* May load a module via request_module under the hood. */
1497 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1498 m->u.user.revision);
1499 if (IS_ERR(match)) {
1500 duprintf("compat_check_calc_match: `%s' not found\n",
1502 return PTR_ERR(match);
1504 m->u.kernel.match = match;
/* Grow *size by how much bigger the 64-bit form of this match is. */
1505 *size += xt_compat_match_offset(match);
/* Drop the module references taken on every match and on the target of a
 * compat entry.  Used on error unwind after check_compat_entry_size_and_hooks
 * succeeded (i.e. after refs were acquired) but before ->check ran.
 */
1509 static void compat_release_entry(struct compat_ip6t_entry *e)
1511 struct ip6t_entry_target *t;
1512 struct xt_entry_match *ematch;
1514 /* Cleanup all matches */
1515 xt_ematch_foreach(ematch, e)
1516 module_put(ematch->u.kernel.match->me);
1517 t = compat_ip6t_get_target(e);
1518 module_put(t->u.kernel.target->me);
/* Validate one compat entry: alignment, bounds against [base, limit),
 * minimum size, match/target resolution (taking module refs), and record
 * the compat->kernel size delta with xt_compat_add_offset.  Also latches
 * hook_entry/underflow positions into newinfo when this entry sits at one.
 * On failure, unwinds the module refs taken so far (release_matches path).
 * NOTE(review): the bounds check below only verifies the entry header fits
 * below limit; later upstream kernels also validate e->next_offset against
 * the remaining space -- confirm whether this version needs that hardening.
 * NOTE(review): many lines are elided in this excerpt (ret/off/h decls,
 * returns, labels) -- verify against the full source.
 */
1522 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1523 struct xt_table_info *newinfo,
1525 const unsigned char *base,
1526 const unsigned char *limit,
1527 const unsigned int *hook_entries,
1528 const unsigned int *underflows,
1531 struct xt_entry_match *ematch;
1532 struct ip6t_entry_target *t;
1533 struct xt_target *target;
1534 unsigned int entry_offset;
1538 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1539 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1540 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1541 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* Entry must at least hold its header plus a target header. */
1545 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1546 sizeof(struct compat_xt_entry_target)) {
1547 duprintf("checking: element %p size %u\n",
1552 /* For purposes of check_entry casting the compat entry is fine */
1553 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much larger the 64-bit form of this entry will be. */
1557 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1558 entry_offset = (void *)e - (void *)base;
1560 xt_ematch_foreach(ematch, e) {
1561 ret = compat_find_calc_match(ematch, name,
1562 &e->ipv6, e->comefrom, &off);
1564 goto release_matches;
1568 t = compat_ip6t_get_target(e);
1569 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1570 t->u.user.revision);
1571 if (IS_ERR(target)) {
1572 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1574 ret = PTR_ERR(target);
1575 goto release_matches;
1577 t->u.kernel.target = target;
1579 off += xt_compat_target_offset(target);
/* Remember this entry's offset delta for the from_user translation pass. */
1581 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1585 /* Check hooks & underflows */
1586 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1587 if ((unsigned char *)e - base == hook_entries[h])
1588 newinfo->hook_entry[h] = hook_entries[h];
1589 if ((unsigned char *)e - base == underflows[h])
1590 newinfo->underflow[h] = underflows[h];
1593 /* Clear counters and comefrom */
1594 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: drop target ref, then refs of matches resolved so far. */
1599 module_put(t->u.kernel.target->me);
1601 xt_ematch_foreach(ematch, e) {
1604 module_put(ematch->u.kernel.match->me);
/* Expand one validated compat entry into its native kernel layout at
 * *dstptr.  Matches and the target convert themselves (growing *size);
 * target_offset/next_offset are rewritten for the grown layout, and any
 * hook_entry/underflow markers that lie past this entry are shifted by the
 * same delta.  Runs after check_compat_entry_size_and_hooks, so sizes are
 * already trusted here.
 * NOTE(review): ret/h declarations and the tail of the function are elided
 * in this excerpt -- verify against the full source.
 */
1610 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1611 unsigned int *size, const char *name,
1612 struct xt_table_info *newinfo, unsigned char *base)
1614 struct ip6t_entry_target *t;
1615 struct xt_target *target;
1616 struct ip6t_entry *de;
1617 unsigned int origsize;
1619 struct xt_entry_match *ematch;
1623 de = (struct ip6t_entry *)*dstptr;
1624 memcpy(de, e, sizeof(struct ip6t_entry));
1625 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1627 *dstptr += sizeof(struct ip6t_entry);
1628 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1630 xt_ematch_foreach(ematch, e) {
1631 ret = xt_compat_match_from_user(ematch, dstptr, size);
/* (origsize - *size) is negative growth here: offsets move outward. */
1635 de->target_offset = e->target_offset - (origsize - *size);
1636 t = compat_ip6t_get_target(e);
1637 target = t->u.kernel.target;
1638 xt_compat_target_from_user(t, dstptr, size);
1640 de->next_offset = e->next_offset - (origsize - *size);
/* Shift hook/underflow offsets that point beyond this entry. */
1641 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1642 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1643 newinfo->hook_entry[h] -= origsize - *size;
1644 if ((unsigned char *)de - base < newinfo->underflow[h])
1645 newinfo->underflow[h] -= origsize - *size;
/* Run the ->checkentry hooks (via check_match/check_target) on a native
 * entry produced from a compat one.  On a mid-loop match failure, unwinds
 * only the matches already checked (cleanup_match calls ->destroy).
 * NOTE(review): mtpar.net/ret initialization and the function tail are
 * elided in this excerpt -- verify against the full source.
 */
1650 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1655 struct xt_mtchk_param mtpar;
1656 struct xt_entry_match *ematch;
1661 mtpar.entryinfo = &e->ipv6;
1662 mtpar.hook_mask = e->comefrom;
1663 mtpar.family = NFPROTO_IPV6;
1664 xt_ematch_foreach(ematch, e) {
1665 ret = check_match(ematch, &mtpar);
1667 goto cleanup_matches;
1671 ret = check_target(e, net, name);
1673 goto cleanup_matches;
/* Destroy only the matches whose ->check already ran. */
1677 xt_ematch_foreach(ematch, e) {
1680 cleanup_match(ematch, net);
/* Translate a whole 32-bit ruleset into the native layout:
 *   1) under xt_compat_lock, size-check every compat entry and record
 *      per-entry offset deltas;
 *   2) verify entry count and that every valid hook/underflow was seen;
 *   3) allocate the (larger) native table and expand entries into it;
 *   4) drop compat offsets/lock, mark chains, run ->check on each entry;
 *   5) replicate the translated blob to every other possible CPU.
 * The two-phase unwind distinguishes entries that only hold module refs
 * (compat_release_entry) from those whose ->check ran (cleanup_entry).
 * NOTE(review): numerous lines (i/j/size/ret decls, error labels, returns)
 * are elided in this excerpt -- verify against the full source.
 */
1686 translate_compat_table(struct net *net,
1688 unsigned int valid_hooks,
1689 struct xt_table_info **pinfo,
1691 unsigned int total_size,
1692 unsigned int number,
1693 unsigned int *hook_entries,
1694 unsigned int *underflows)
1697 struct xt_table_info *newinfo, *info;
1698 void *pos, *entry0, *entry1;
1699 struct compat_ip6t_entry *iter0;
1700 struct ip6t_entry *iter1;
1707 info->number = number;
1709 /* Init all hooks to impossible value. */
1710 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1711 info->hook_entry[i] = 0xFFFFFFFF;
1712 info->underflow[i] = 0xFFFFFFFF;
1715 duprintf("translate_compat_table: size %u\n", info->size);
/* Phase 1: offset bookkeeping must happen under the compat lock. */
1717 xt_compat_lock(AF_INET6);
1718 /* Walk through entries, checking offsets. */
1719 xt_entry_foreach(iter0, entry0, total_size) {
1720 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1722 entry0 + total_size,
1733 duprintf("translate_compat_table: %u not %u entries\n",
1738 /* Check hooks all assigned */
1739 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1740 /* Only hooks which are valid */
1741 if (!(valid_hooks & (1 << i)))
1743 if (info->hook_entry[i] == 0xFFFFFFFF) {
1744 duprintf("Invalid hook entry %u %u\n",
1745 i, hook_entries[i]);
1748 if (info->underflow[i] == 0xFFFFFFFF) {
1749 duprintf("Invalid underflow %u %u\n",
/* Phase 3: native table is larger (size includes the compat offsets). */
1756 newinfo = xt_alloc_table_info(size);
1760 newinfo->number = number;
1761 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1762 newinfo->hook_entry[i] = info->hook_entry[i];
1763 newinfo->underflow[i] = info->underflow[i];
1765 entry1 = newinfo->entries[raw_smp_processor_id()];
1768 xt_entry_foreach(iter0, entry0, total_size) {
1769 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1770 name, newinfo, entry1);
1774 xt_compat_flush_offsets(AF_INET6);
1775 xt_compat_unlock(AF_INET6);
1780 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1784 xt_entry_foreach(iter1, entry1, newinfo->size) {
1785 ret = compat_check_entry(iter1, net, name);
1792 * The first i matches need cleanup_entry (calls ->destroy)
1793 * because they had called ->check already. The other j-i
1794 * entries need only release.
1798 xt_entry_foreach(iter0, entry0, newinfo->size) {
1803 compat_release_entry(iter0);
1805 xt_entry_foreach(iter1, entry1, newinfo->size) {
1808 cleanup_entry(iter1, net);
1810 xt_free_table_info(newinfo);
1814 /* And one copy for every other CPU */
1815 for_each_possible_cpu(i)
1816 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1817 memcpy(newinfo->entries[i], entry1, newinfo->size);
1821 xt_free_table_info(info);
/* Early-failure unwind: free native table, release refs, drop lock. */
1825 xt_free_table_info(newinfo);
1827 xt_entry_foreach(iter0, entry0, total_size) {
1830 compat_release_entry(iter0);
1834 xt_compat_flush_offsets(AF_INET6);
1835 xt_compat_unlock(AF_INET6);
/* IP6T_SO_SET_REPLACE handler for 32-bit callers: copy in the compat
 * replace header, sanity-check sizes against overflow, copy in the entry
 * blob, translate it to native layout, then hand off to __do_replace.
 * On __do_replace failure the translated entries must be cleaned up here.
 * NOTE(review): ret declaration, error returns and the final return are
 * elided in this excerpt -- verify against the full source.
 */
1840 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1843 struct compat_ip6t_replace tmp;
1844 struct xt_table_info *newinfo;
1845 void *loc_cpu_entry;
1846 struct ip6t_entry *iter;
1848 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1851 /* overflow check */
1852 if (tmp.size >= INT_MAX / num_possible_cpus())
1854 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1857 newinfo = xt_alloc_table_info(tmp.size)
1861 /* choose the copy that is on our node/cpu */
1862 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1863 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1869 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1870 &newinfo, &loc_cpu_entry, tmp.size,
1871 tmp.num_entries, tmp.hook_entry,
1876 duprintf("compat_do_replace: Translated table\n");
/* compat_ptr() widens the 32-bit user counters pointer. */
1878 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1879 tmp.num_counters, compat_ptr(tmp.counters));
1881 goto free_newinfo_untrans;
1884 free_newinfo_untrans:
1885 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1886 cleanup_entry(iter, net);
1888 xt_free_table_info(newinfo);
/* Compat setsockopt entry point: requires CAP_NET_ADMIN, then dispatches
 * REPLACE to the compat path and ADD_COUNTERS with compat=1.
 */
1893 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1898 if (!capable(CAP_NET_ADMIN))
1902 case IP6T_SO_SET_REPLACE:
1903 ret = compat_do_replace(sock_net(sk), user, len);
1906 case IP6T_SO_SET_ADD_COUNTERS:
/* Final argument 1 = caller is a 32-bit (compat) process. */
1907 ret = do_add_counters(sock_net(sk), user, len, 1);
1911 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit layout of struct ip6t_get_entries for IP6T_SO_GET_ENTRIES.
 * NOTE(review): the size field appears elided from this excerpt.
 */
1918 struct compat_ip6t_get_entries {
1919 	char name[IP6T_TABLE_MAXNAMELEN];
1921 	struct compat_ip6t_entry entrytable[0];
/* Dump a table's entries to a 32-bit user buffer: snapshot the counters,
 * then convert each entry with compat_copy_entry_to_user.
 * NOTE(review): pos/size/ret/i declarations and the counter-free/return
 * tail are elided in this excerpt -- verify against the full source.
 */
1925 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1926 void __user *userptr)
1928 struct xt_counters *counters;
1929 const struct xt_table_info *private = table->private;
1933 const void *loc_cpu_entry;
1935 struct ip6t_entry *iter;
/* alloc_counters() aggregates per-cpu counters into one vmalloc'd array. */
1937 counters = alloc_counters(table);
1938 if (IS_ERR(counters))
1939 return PTR_ERR(counters);
1941 /* choose the copy that is on our node/cpu, ...
1942 * This choice is lazy (because current thread is
1943 * allowed to migrate to another cpu)
1945 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1948 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1949 ret = compat_copy_entry_to_user(iter, &pos,
1950 &size, counters, i++);
/* IP6T_SO_GET_ENTRIES handler for 32-bit callers: validate the request
 * length in two stages (header, then header+get.size), look up the table
 * under the compat lock, and dump it in compat layout.  get.size must
 * equal the table's compat size computed by compat_table_info.
 * NOTE(review): ret declaration, early returns, and the module_put/unlock
 * tail are elided in this excerpt -- verify against the full source.
 */
1960 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1964 struct compat_ip6t_get_entries get;
1967 if (*len < sizeof(get)) {
1968 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1972 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1975 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1976 duprintf("compat_get_entries: %u != %zu\n",
1977 *len, sizeof(get) + get.size);
/* Compat lock guards the per-family offset table used during the dump. */
1981 xt_compat_lock(AF_INET6);
1982 t = xt_find_table_lock(net, AF_INET6, get.name);
1983 if (t && !IS_ERR(t)) {
1984 const struct xt_table_info *private = t->private;
1985 struct xt_table_info info;
1986 duprintf("t->private->number = %u\n", private->number);
1987 ret = compat_table_info(private, &info);
1988 if (!ret && get.size == info.size) {
1989 ret = compat_copy_entries_to_user(private->size,
1990 t, uptr->entrytable);
1992 duprintf("compat_get_entries: I've got %u not %u!\n",
1993 private->size, get.size);
1996 xt_compat_flush_offsets(AF_INET6);
2000 ret = t ? PTR_ERR(t) : -ENOENT;
2002 xt_compat_unlock(AF_INET6);
/* Forward declaration: the compat get handler falls through to the native
 * handler for commands that need no translation. */
2006 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/* Compat getsockopt entry point: CAP_NET_ADMIN required; GET_INFO runs
 * with compat=1, GET_ENTRIES uses the compat dump, everything else is
 * delegated to do_ip6t_get_ctl unchanged.
 */
2009 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2013 if (!capable(CAP_NET_ADMIN))
2017 case IP6T_SO_GET_INFO:
2018 ret = get_info(sock_net(sk), user, len, 1);
2020 case IP6T_SO_GET_ENTRIES:
2021 ret = compat_get_entries(sock_net(sk), user, len);
2024 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/* Native setsockopt entry point: CAP_NET_ADMIN required; dispatches
 * REPLACE and ADD_COUNTERS (compat=0), rejecting unknown commands.
 */
2031 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2035 if (!capable(CAP_NET_ADMIN))
2039 case IP6T_SO_SET_REPLACE:
2040 ret = do_replace(sock_net(sk), user, len);
2043 case IP6T_SO_SET_ADD_COUNTERS:
2044 ret = do_add_counters(sock_net(sk), user, len, 0);
2048 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* Native getsockopt entry point: CAP_NET_ADMIN required.  Handles
 * GET_INFO/GET_ENTRIES (compat=0) plus match/target revision queries,
 * which may trigger an "ip6t_<name>" module load on a miss.
 * NOTE(review): the rev.target/ret handling around the revision query is
 * partially elided in this excerpt -- verify against the full source.
 */
2056 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2060 if (!capable(CAP_NET_ADMIN))
2064 case IP6T_SO_GET_INFO:
2065 ret = get_info(sock_net(sk), user, len, 0);
2068 case IP6T_SO_GET_ENTRIES:
2069 ret = get_entries(sock_net(sk), user, len);
2072 case IP6T_SO_GET_REVISION_MATCH:
2073 case IP6T_SO_GET_REVISION_TARGET: {
2074 struct ip6t_get_revision rev;
2077 if (*len != sizeof(rev)) {
2081 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2086 if (cmd == IP6T_SO_GET_REVISION_TARGET)
/* Retry the revision lookup once after requesting the module. */
2091 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2094 "ip6t_%s", rev.name);
2099 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/* Register a built-in table (filter/mangle/raw...): allocate an
 * xt_table_info sized from the replace template, copy the template rules
 * into the local CPU's slot, translate/validate them, then register with
 * the x_tables core.  Returns the registered table or ERR_PTR on failure;
 * newinfo is freed on every error path.
 */
2106 struct xt_table *ip6t_register_table(struct net *net,
2107 const struct xt_table *table,
2108 const struct ip6t_replace *repl)
2111 struct xt_table_info *newinfo;
2112 struct xt_table_info bootstrap = {0};
2113 void *loc_cpu_entry;
2114 struct xt_table *new_table;
2116 newinfo = xt_alloc_table_info(repl->size);
2122 /* choose the copy on our node/cpu, but dont care about preemption */
2123 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2124 memcpy(loc_cpu_entry, repl->entries, repl->size);
2126 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2130 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2131 if (IS_ERR(new_table)) {
2132 ret = PTR_ERR(new_table);
2138 xt_free_table_info(newinfo);
2140 return ERR_PTR(ret);
/* Tear down a table: unregister from the x_tables core, run cleanup_entry
 * (which drops match/target module refs) on every rule, release the extra
 * table-module reference if user rules were ever added beyond the initial
 * set, and free the table info.  table->me is cached before unregister
 * because the table structure may not be touched afterwards.
 */
2143 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2145 struct xt_table_info *private;
2146 void *loc_cpu_entry;
2147 struct module *table_owner = table->me;
2148 struct ip6t_entry *iter;
2150 private = xt_unregister_table(table);
2152 /* Decrease module usage counts and free resources */
2153 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2154 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2155 cleanup_entry(iter, net);
2156 if (private->number > private->initial_entries)
2157 module_put(table_owner);
2158 xt_free_table_info(private);
2161 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2163 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2164 u_int8_t type, u_int8_t code,
/* Result is XOR'd with the invert flag in the (elided) tail of this
 * expression -- NOTE(review): the "^ invert" part is not visible here. */
2167 return (type == test_type && code >= min_code && code <= max_code)
/* xt_match ->match hook for "-p icmpv6 --icmpv6-type": matches the ICMPv6
 * type/code range from the rule.  Non-first fragments never match; a packet
 * too short to carry an ICMPv6 header is hot-dropped (deemed malicious).
 */
2172 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2174 const struct icmp6hdr *ic;
2175 struct icmp6hdr _icmph;
2176 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2178 /* Must not be a fragment. */
2179 if (par->fragoff != 0)
/* Linearize-safe header fetch into the on-stack _icmph if needed. */
2182 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2184 /* We've been asked to examine this packet, and we
2185 * can't. Hence, no choice but to drop.
2187 duprintf("Dropping evil ICMP tinygram.\n");
2188 *par->hotdrop = true;
2192 return icmp6_type_code_match(icmpinfo->type,
2195 ic->icmp6_type, ic->icmp6_code,
2196 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2199 /* Called when user tries to insert an entry of this type. */
2200 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2202 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2204 /* Must specify no unknown invflags */
/* Only IP6T_ICMP_INV may be set; anything else is a malformed rule. */
2205 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2208 /* The built-in targets: standard (NULL) and error. */
/* Standard verdict target: targetsize is a bare int verdict; compat hooks
 * translate the 32-bit verdict representation. */
2209 static struct xt_target ip6t_standard_target __read_mostly = {
2210 	.name = IP6T_STANDARD_TARGET,
2211 	.targetsize = sizeof(int),
2212 	.family = NFPROTO_IPV6,
2213 #ifdef CONFIG_COMPAT
2214 	.compatsize = sizeof(compat_int_t),
2215 	.compat_from_user = compat_standard_from_user,
2216 	.compat_to_user = compat_standard_to_user,
/* ERROR target: terminates built-in chains / marks user-chain names;
 * its ->target (ip6t_error) logs and drops if ever actually hit. */
2220 static struct xt_target ip6t_error_target __read_mostly = {
2221 	.name = IP6T_ERROR_TARGET,
2222 	.target = ip6t_error,
2223 	.targetsize = IP6T_FUNCTION_MAXNAMELEN,
2224 	.family = NFPROTO_IPV6,
/* Socket-option registration: wires the set/get (and 32-bit compat)
 * handlers above into the IP6T_BASE_CTL..MAX sockopt range. */
2227 static struct nf_sockopt_ops ip6t_sockopts = {
2229 	.set_optmin = IP6T_BASE_CTL,
2230 	.set_optmax = IP6T_SO_SET_MAX+1,
2231 	.set = do_ip6t_set_ctl,
2232 #ifdef CONFIG_COMPAT
2233 	.compat_set = compat_do_ip6t_set_ctl,
2235 	.get_optmin = IP6T_BASE_CTL,
2236 	.get_optmax = IP6T_SO_GET_MAX+1,
2237 	.get = do_ip6t_get_ctl,
2238 #ifdef CONFIG_COMPAT
2239 	.compat_get = compat_do_ip6t_get_ctl,
2241 	.owner = THIS_MODULE,
/* Built-in ICMPv6 match registration (proto restricted to ICMPV6). */
2244 static struct xt_match icmp6_matchstruct __read_mostly = {
2246 	.match = icmp6_match,
2247 	.matchsize = sizeof(struct ip6t_icmp),
2248 	.checkentry = icmp6_checkentry,
2249 	.proto = IPPROTO_ICMPV6,
2250 	.family = NFPROTO_IPV6,
/* Per-netns init: create the /proc and xt bookkeeping for IPv6 tables. */
2253 static int __net_init ip6_tables_net_init(struct net *net)
2255 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown: mirror of ip6_tables_net_init. */
2258 static void __net_exit ip6_tables_net_exit(struct net *net)
2260 xt_proto_fini(net, NFPROTO_IPV6);
/* pernet_operations glue for network-namespace lifecycle. */
2263 static struct pernet_operations ip6_tables_net_ops = {
2264 	.init = ip6_tables_net_init,
2265 	.exit = ip6_tables_net_exit,
/* Module init: register pernet ops, the two built-in targets, the ICMPv6
 * match, and finally the sockopt interface.  The tail labels (partially
 * elided here) unwind in strict reverse order on failure.
 */
2268 static int __init ip6_tables_init(void)
2272 ret = register_pernet_subsys(&ip6_tables_net_ops);
2276 /* Noone else will be downing sem now, so we won't sleep */
2277 ret = xt_register_target(&ip6t_standard_target);
2280 ret = xt_register_target(&ip6t_error_target);
2283 ret = xt_register_match(&icmp6_matchstruct);
2287 /* Register setsockopt */
2288 ret = nf_register_sockopt(&ip6t_sockopts);
2292 pr_info("(C) 2000-2006 Netfilter Core Team\n");
/* Error unwind: reverse order of registration. */
2296 xt_unregister_match(&icmp6_matchstruct);
2298 xt_unregister_target(&ip6t_error_target);
2300 xt_unregister_target(&ip6t_standard_target);
2302 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: exact reverse of ip6_tables_init's success path. */
2307 static void __exit ip6_tables_fini(void)
2309 nf_unregister_sockopt(&ip6t_sockopts);
2311 xt_unregister_match(&icmp6_matchstruct);
2312 xt_unregister_target(&ip6t_error_target);
2313 xt_unregister_target(&ip6t_standard_target);
2315 unregister_pernet_subsys(&ip6_tables_net_ops);
2319 * find the offset to specified header or the protocol number of last header
2320 * if target < 0. "last header" is transport protocol header, ESP, or
2323 * If target header is found, its offset is set in *offset and return protocol
2324 * number. Otherwise, return -1.
2326 * If the first fragment doesn't contain the final protocol header or
2327 * NEXTHDR_NONE it is considered invalid.
2329 * Note that non-1st fragment is special case that "the protocol number
2330 * of last header" is "next header" field in Fragment header. In this case,
2331 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
/* Walks the IPv6 extension-header chain starting right after the fixed
 * header.  AUTH headers use 32-bit length units ((hdrlen+2)<<2); other
 * extension headers use ipv6_optlen.  Fragment headers are probed for
 * their offset so non-first fragments can be reported via *fragoff.
 * NOTE(review): several lines (len/start updates, error returns, target
 * hit handling) are elided in this excerpt -- verify the loop against the
 * full source before reasoning about its termination conditions.
 */
2335 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2336 int target, unsigned short *fragoff)
2338 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2339 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2341 unsigned int len = skb->len - start;
2345 while (nexthdr != target) {
2346 struct ipv6_opt_hdr _hdr, *hp;
2347 unsigned int hdrlen;
/* Chain ends at a non-extension header or NEXTHDR_NONE. */
2349 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2355 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2358 if (nexthdr == NEXTHDR_FRAGMENT) {
2359 unsigned short _frag_off;
2361 fp = skb_header_pointer(skb,
2362 start+offsetof(struct frag_hdr,
/* Low 3 bits of frag_off are reserved/M flag; mask them out. */
2369 _frag_off = ntohs(*fp) & ~0x7;
2372 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2373 hp->nexthdr == NEXTHDR_NONE)) {
2375 *fragoff = _frag_off;
2381 } else if (nexthdr == NEXTHDR_AUTH)
2382 hdrlen = (hp->hdrlen + 2) << 2;
2384 hdrlen = ipv6_optlen(hp);
2386 nexthdr = hp->nexthdr;
/* Public symbols consumed by the per-table modules (ip6table_filter etc.)
 * and other IPv6 netfilter users; plus module entry/exit registration. */
2395 EXPORT_SYMBOL(ip6t_register_table);
2396 EXPORT_SYMBOL(ip6t_unregister_table);
2397 EXPORT_SYMBOL(ip6t_do_table);
2398 EXPORT_SYMBOL(ip6t_ext_hdr);
2399 EXPORT_SYMBOL(ipv6_find_hdr);
2401 module_init(ip6_tables_init);
2402 module_exit(ip6_tables_fini);