/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
/*
 * Compile-time debug switches: uncomment to enable verbose rule-matching
 * (dprintf), user-interface (duprintf) or accept-everything debugging.
 * NOTE(review): this listing is truncated — the #else/#endif lines pairing
 * each #ifdef with its empty fallback definition are missing.
 */
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
/* dprintf: packet-path debug output, compiled out unless DEBUG_IP_FIREWALL. */
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) pr_info(format , ## args)
45 #define dprintf(format, args...)
/* duprintf: user/config-path debug output. */
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) pr_info(format , ## args)
51 #define duprintf(format, args...)
/* IP_NF_ASSERT: runtime assertion that logs its location when it fires. */
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x) \
58 printk("IP_NF_ASSERT: %s:%s:%u\n", \
59 __func__, __FILE__, __LINE__); \
62 #define IP_NF_ASSERT(x)
66 /* All the better to debug you with... */
/*
 * ip6t_alloc_initial_table - allocate and populate the initial (built-in
 * chains only) ruleset blob for @info by delegating to the generic
 * xt_alloc_initial_table() helper from xt_repldata.h.
 * NOTE(review): truncated listing — braces and intervening lines missing;
 * comments only, code left byte-identical.
 */
71 void *ip6t_alloc_initial_table(const struct xt_table *info)
73 return xt_alloc_initial_table(ip6t, IP6T);
75 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
78 We keep a set of rules for each CPU, so we can avoid write-locking
79 them in the softirq when updating the counters and therefore
80 only need to read-lock in the softirq; doing a write_lock_bh() in user
81 context stops packets coming through and allows user context to read
82 the counters or update the rules.
84 Hence the start of any table is given by get_table() below. */
86 /* Check for an extension */
88 ip6t_ext_hdr(u8 nexthdr)
90 return ( (nexthdr == IPPROTO_HOPOPTS) ||
91 (nexthdr == IPPROTO_ROUTING) ||
92 (nexthdr == IPPROTO_FRAGMENT) ||
93 (nexthdr == IPPROTO_ESP) ||
94 (nexthdr == IPPROTO_AH) ||
95 (nexthdr == IPPROTO_NONE) ||
96 (nexthdr == IPPROTO_DSTOPTS) );
62 99 /* Returns whether matches rule or not. */
63 100 /* Performance critical - called for every packet */
/*
 * ip6_packet_match - test one rule's IPv6 header conditions against @skb:
 * source/destination address (masked), in/out interface names, and the
 * upper-layer protocol found by ipv6_find_hdr().  On a protocol match it
 * also reports the transport offset (*protoff) and fragment offset
 * (*fragoff) back to the caller.
 * NOTE(review): truncated listing — braces, early `return false` lines and
 * parts of the protocol branch are missing; comments only, code unchanged.
 */
102 ip6_packet_match(const struct sk_buff *skb,
105 const struct ip6t_ip6 *ip6info,
106 unsigned int *protoff,
107 int *fragoff, bool *hotdrop)
110 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
/* FWINV: XOR the raw test with the rule's inversion flag. */
112 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
114 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
115 &ip6info->src), IP6T_INV_SRCIP) ||
116 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
117 &ip6info->dst), IP6T_INV_DSTIP)) {
118 dprintf("Source or dest mismatch.\n");
/* The dprintf lines below are remnants of an IPv4-style debug block
 * (note the ip->saddr/ipinfo names and the stray trailing "*" + "/"); they
 * appear to have been commented out in the original. */
120 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
121 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
122 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
123 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
124 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
125 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
/* Interface name comparisons honour the rule's wildcard masks. */
129 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
131 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
132 dprintf("VIA in mismatch (%s vs %s).%s\n",
133 indev, ip6info->iniface,
134 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
138 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
140 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
141 dprintf("VIA out mismatch (%s vs %s).%s\n",
142 outdev, ip6info->outiface,
143 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
147 /* ... might want to do something with class and flowlabel here ... */
149 /* look for the desired protocol header */
150 if((ip6info->flags & IP6T_F_PROTO)) {
152 unsigned short _frag_off;
154 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
160 *fragoff = _frag_off;
162 dprintf("Packet protocol %hi ?= %s%hi.\n",
164 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
167 if (ip6info->proto == protohdr) {
168 if(ip6info->invflags & IP6T_INV_PROTO) {
174 /* We need match for the '-p all', too! */
175 if ((ip6info->proto != 0) &&
176 !(ip6info->invflags & IP6T_INV_PROTO))
182 /* should be ip6 safe */
184 ip6_checkentry(const struct ip6t_ip6 *ipv6)
186 if (ipv6->flags & ~IP6T_F_MASK) {
187 duprintf("Unknown flag bits set: %08X\n",
188 ipv6->flags & ~IP6T_F_MASK);
191 if (ipv6->invflags & ~IP6T_INV_MASK) {
192 duprintf("Unknown invflag bits set: %08X\n",
193 ipv6->invflags & ~IP6T_INV_MASK);
/*
 * ip6t_error - target handler for the built-in ERROR target; logs the
 * embedded message carried in par->targinfo.
 * NOTE(review): truncated — the return type, braces, the (likely
 * rate-limited) log guard and the return value are missing from this
 * listing; comments only, code unchanged.
 */
200 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
203 pr_info("error: `%s'\n", (const char *)par->targinfo);
114 208 /* Performance critical - called for every packet */
/*
 * do_match - run one extension match against @skb after loading the
 * match's callbacks and private data into @par.
 * NOTE(review): truncated — return type, braces and the return statements
 * after the match call are missing; comments only, code unchanged.
 */
210 do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
211 struct xt_match_param *par)
213 par->match = m->u.kernel.match;
214 par->matchinfo = m->data;
216 /* Stop iteration if it doesn't match */
217 if (!m->u.kernel.match->match(skb, par))
/*
 * get_entry - translate a byte @offset within a table blob into a pointer
 * to the ip6t_entry stored there.
 * NOTE(review): braces restored; they were lost to truncation.
 */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
229 /* All zeroes == unconditional rule. */
230 /* Mildly perf critical (only if packet tracing is on) */
231 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
233 static const struct ip6t_ip6 uncond;
235 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/*
 * ip6t_get_target_c - const-correct wrapper around ip6t_get_target():
 * casts away const only to reuse the non-const accessor, then returns a
 * const pointer to the entry's target.
 * NOTE(review): braces restored; they were lost to truncation.
 */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
132 244 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
133 245 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
134 246 /* This cries for unification! */
/*
 * Static data used by packet tracing: hook-index -> chain name,
 * trace-comment strings, and the nf_loginfo passed to nf_log_packet().
 * NOTE(review): truncated — closing braces/semicolons and some loginfo
 * fields are missing from this listing; comments only, code unchanged.
 */
247 static const char *const hooknames[] = {
248 [NF_INET_PRE_ROUTING] = "PREROUTING",
249 [NF_INET_LOCAL_IN] = "INPUT",
250 [NF_INET_FORWARD] = "FORWARD",
251 [NF_INET_LOCAL_OUT] = "OUTPUT",
252 [NF_INET_POST_ROUTING] = "POSTROUTING",
/* Why a rule matched, for the TRACE log line. */
255 enum nf_ip_trace_comments {
256 NF_IP6_TRACE_COMMENT_RULE,
257 NF_IP6_TRACE_COMMENT_RETURN,
258 NF_IP6_TRACE_COMMENT_POLICY,
261 static const char *const comments[] = {
262 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
263 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
264 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
267 static struct nf_loginfo trace_loginfo = {
268 .type = NF_LOG_TYPE_LOG,
272 .logflags = NF_LOG_MASK,
152 277 /* Mildly perf critical (only if packet tracing is on) */
/*
 * get_chainname_rulenum - walk helper for trace_packet(): as entries are
 * scanned it records the current chain name (from ERROR targets that head
 * user chains), classifies chain tails as policy/return, and counts the
 * rule number; presumably returns nonzero once @s reaches the matched
 * entry @e — TODO confirm, the return paths are missing from this listing.
 * NOTE(review): truncated; comments only, code unchanged.
 */
279 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
280 const char *hookname, const char **chainname,
281 const char **comment, unsigned int *rulenum)
283 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
285 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
286 /* Head of user chain: ERROR target with chainname */
287 *chainname = t->target.data;
292 if (s->target_offset == sizeof(struct ip6t_entry) &&
293 strcmp(t->target.u.kernel.target->name,
294 IP6T_STANDARD_TARGET) == 0 &&
296 unconditional(&s->ipv6)) {
297 /* Tail of chains: STANDARD target (return/policy) */
298 *comment = *chainname == hookname
299 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
300 : comments[NF_IP6_TRACE_COMMENT_RETURN];
/*
 * trace_packet - emit a "TRACE: table:chain:comment:rulenum" log line for a
 * traced packet by re-walking the per-CPU rule blob from the hook's entry
 * point until the matched entry @e is located.
 * NOTE(review): truncated — braces and some parameters/statements are
 * missing from this listing; comments only, code unchanged.
 */
309 static void trace_packet(const struct sk_buff *skb,
311 const struct net_device *in,
312 const struct net_device *out,
313 const char *tablename,
314 const struct xt_table_info *private,
315 const struct ip6t_entry *e)
317 const void *table_base;
318 const struct ip6t_entry *root;
319 const char *hookname, *chainname, *comment;
320 const struct ip6t_entry *iter;
321 unsigned int rulenum = 0;
/* Use this CPU's copy of the table; start at the hook's entry point. */
323 table_base = private->entries[smp_processor_id()];
324 root = get_entry(table_base, private->hook_entry[hook]);
326 hookname = chainname = hooknames[hook];
327 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
329 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
330 if (get_chainname_rulenum(iter, e, hookname,
331 &chainname, &comment, &rulenum) != 0)
334 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
335 "TRACE: %s:%s:%s:%u ",
336 tablename, chainname, comment, rulenum);
340 static inline __pure struct ip6t_entry *
341 ip6t_next_entry(const struct ip6t_entry *entry)
343 return (void *)entry + entry->next_offset;
192 346 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * ip6t_do_table - the main packet-filtering loop.  Walks the per-CPU copy
 * of @table's ruleset starting at the current hook's entry point, matching
 * each rule's IPv6 header conditions and extension matches, maintaining a
 * per-CPU jump stack for chain jumps/returns, updating byte/packet
 * counters, and returning an NF_* verdict.
 * NOTE(review): this extract is heavily truncated — the hook parameter,
 * locking (the surviving xt_info_rdunlock_bh() implies an rdlock), the main
 * do/while loop framing, counter bump labels and return paths are missing.
 * Comments only; code left byte-identical.
 */
348 ip6t_do_table(struct sk_buff *skb,
350 const struct net_device *in,
351 const struct net_device *out,
352 struct xt_table *table)
354 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
355 bool hotdrop = false;
356 /* Initializing verdict to NF_DROP keeps gcc happy. */
357 unsigned int verdict = NF_DROP;
358 const char *indev, *outdev;
359 const void *table_base;
360 struct ip6t_entry *e, **jumpstack;
361 unsigned int *stackptr, origptr, cpu;
362 const struct xt_table_info *private;
363 struct xt_match_param mtpar;
364 struct xt_target_param tgpar;
/* NULL devices compare as the empty (aligned) name. */
367 indev = in ? in->name : nulldevname;
368 outdev = out ? out->name : nulldevname;
369 /* We handle fragments by dealing with the first fragment as
370 * if it was a normal packet. All other fragments are treated
371 * normally, except that they will NEVER match rules that ask
372 * things we don't know, ie. tcp syn flag or ports). If the
373 * rule is also a fragment-specific rule, non-fragments won't
375 mtpar.hotdrop = &hotdrop;
376 mtpar.in = tgpar.in = in;
377 mtpar.out = tgpar.out = out;
378 mtpar.family = tgpar.family = NFPROTO_IPV6;
379 mtpar.hooknum = tgpar.hooknum = hook;
381 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
/* Per-CPU table copy plus per-CPU jump stack for chain traversal. */
384 private = table->private;
385 cpu = smp_processor_id();
386 table_base = private->entries[cpu];
387 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
388 stackptr = &private->stackptr[cpu];
391 e = get_entry(table_base, private->hook_entry[hook]);
394 const struct ip6t_entry_target *t;
395 const struct xt_entry_match *ematch;
398 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
399 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
401 e = ip6t_next_entry(e);
405 xt_ematch_foreach(ematch, e)
406 if (do_match(ematch, skb, &mtpar) != 0)
/* Rule matched: account IPv6 header + payload bytes and one packet. */
409 ADD_COUNTER(e->counters,
410 ntohs(ipv6_hdr(skb)->payload_len) +
411 sizeof(struct ipv6hdr), 1);
413 t = ip6t_get_target_c(e);
414 IP_NF_ASSERT(t->u.kernel.target);
416 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
417 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
418 /* The packet is traced: log it */
419 if (unlikely(skb->nf_trace))
420 trace_packet(skb, hook, in, out,
421 table->name, private, e);
423 /* Standard target? */
424 if (!t->u.kernel.target->target) {
427 v = ((struct ip6t_standard_target *)t)->verdict;
429 /* Pop from stack? */
430 if (v != IP6T_RETURN) {
431 verdict = (unsigned)(-v) - 1;
/* Stack empty: fall back to the hook's underflow (policy) entry. */
435 e = get_entry(table_base,
436 private->underflow[hook]);
438 e = ip6t_next_entry(jumpstack[--*stackptr]);
/* Plain jump (not GOTO, not fall-through): push return address. */
441 if (table_base + v != ip6t_next_entry(e) &&
442 !(e->ipv6.flags & IP6T_F_GOTO)) {
443 if (*stackptr >= private->stacksize) {
447 jumpstack[(*stackptr)++] = e;
450 e = get_entry(table_base, v);
/* Non-standard target: invoke its handler for a verdict. */
454 tgpar.target = t->u.kernel.target;
455 tgpar.targinfo = t->data;
457 verdict = t->u.kernel.target->target(skb, &tgpar);
458 if (verdict == IP6T_CONTINUE)
459 e = ip6t_next_entry(e);
465 xt_info_rdunlock_bh();
468 #ifdef DEBUG_ALLOW_ALL
266 477 /* Figures out from what hook each rule can be called: returns 0 if
267 478 there are loops. Puts hook bitmask in comefrom. */
/*
 * mark_source_chains - depth-first walk of the ruleset from every valid
 * hook entry point, without recursion: back-pointers are stashed in each
 * entry's counters.pcnt (restored to 0 on the way back), and the bit
 * (1 << NF_INET_NUMHOOKS) in comefrom marks "currently on the walk stack"
 * so revisiting it detects a loop.  Also validates standard-target
 * verdicts and jump destinations.
 * NOTE(review): heavily truncated — loop framing (for(;;)/do-while),
 * several returns, #endif lines and the fallthru/jump else-arms are
 * missing.  Comments only; code left byte-identical.
 */
480 mark_source_chains(const struct xt_table_info *newinfo,
481 unsigned int valid_hooks, void *entry0)
485 /* No recursion; use packet counter to save back ptrs (reset
486 to 0 as we leave), and comefrom to save source hook bitmask */
487 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
488 unsigned int pos = newinfo->hook_entry[hook];
489 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
491 if (!(valid_hooks & (1 << hook)))
494 /* Set initial back pointer. */
495 e->counters.pcnt = pos;
498 const struct ip6t_standard_target *t
499 = (void *)ip6t_get_target_c(e);
500 int visited = e->comefrom & (1 << hook);
/* Entry already on the current walk path => rule loop. */
502 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
503 printk("iptables: loop hook %u pos %u %08X.\n",
504 hook, pos, e->comefrom);
507 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
509 /* Unconditional return/END. */
510 if ((e->target_offset == sizeof(struct ip6t_entry) &&
511 (strcmp(t->target.u.user.name,
512 IP6T_STANDARD_TARGET) == 0) &&
514 unconditional(&e->ipv6)) || visited) {
515 unsigned int oldpos, size;
517 if ((strcmp(t->target.u.user.name,
518 IP6T_STANDARD_TARGET) == 0) &&
519 t->verdict < -NF_MAX_VERDICT - 1) {
520 duprintf("mark_source_chains: bad "
521 "negative verdict (%i)\n",
526 /* Return: backtrack through the last
529 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
530 #ifdef DEBUG_IP_FIREWALL_USER
532 & (1 << NF_INET_NUMHOOKS)) {
533 duprintf("Back unset "
/* Follow the stored back-pointer chain, clearing as we go. */
540 pos = e->counters.pcnt;
541 e->counters.pcnt = 0;
543 /* We're at the start. */
547 e = (struct ip6t_entry *)
549 } while (oldpos == pos + e->next_offset);
552 size = e->next_offset;
553 e = (struct ip6t_entry *)
554 (entry0 + pos + size);
555 e->counters.pcnt = pos;
/* Non-terminal entry: either chase a jump or fall through. */
558 int newpos = t->verdict;
560 if (strcmp(t->target.u.user.name,
561 IP6T_STANDARD_TARGET) == 0 &&
563 if (newpos > newinfo->size -
564 sizeof(struct ip6t_entry)) {
565 duprintf("mark_source_chains: "
566 "bad verdict (%i)\n",
570 /* This a jump; chase it. */
571 duprintf("Jump rule %u -> %u\n",
574 /* ... this is a fallthru */
575 newpos = pos + e->next_offset;
577 e = (struct ip6t_entry *)
579 e->counters.pcnt = pos;
584 duprintf("Finished chain %u\n", hook);
589 static void cleanup_match(struct ip6t_entry_match *m, struct net *net)
591 struct xt_mtdtor_param par;
594 par.match = m->u.kernel.match;
595 par.matchinfo = m->data;
596 par.family = NFPROTO_IPV6;
597 if (par.match->destroy != NULL)
598 par.match->destroy(&par);
599 module_put(par.match->me);
/*
 * check_entry - structural sanity check of one rule: valid ip6t_ip6 header
 * bits, target_offset leaving room for a target header, and the target
 * fitting inside next_offset.
 * NOTE(review): truncated — return type, braces and the error/success
 * returns are missing; comments only, code unchanged.
 */
603 check_entry(const struct ip6t_entry *e, const char *name)
605 const struct ip6t_entry_target *t;
607 if (!ip6_checkentry(&e->ipv6)) {
608 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
612 if (e->target_offset + sizeof(struct ip6t_entry_target) >
616 t = ip6t_get_target_c(e);
617 if (e->target_offset + t->u.target_size > e->next_offset)
/*
 * check_match - validate one match extension against the rule's protocol
 * and inversion flags via the generic xt_check_match() (which also calls
 * the extension's own checkentry hook).
 * NOTE(review): truncated — braces, the `ret` declaration and the return
 * statements are missing; comments only, code unchanged.
 */
623 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
625 const struct ip6t_ip6 *ipv6 = par->entryinfo;
628 par->match = m->u.kernel.match;
629 par->matchinfo = m->data;
631 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
632 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
634 duprintf("ip_tables: check failed for `%s'.\n",
/*
 * find_check_match - look up (and, via xt_request_find_match, possibly
 * modprobe) the match extension named in the rule, then validate it with
 * check_match().  On validation failure the module reference is dropped.
 * NOTE(review): truncated — return type, braces, the revision argument
 * line and the error-label framing are missing; comments only.
 */
642 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
644 struct xt_match *match;
647 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
650 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
651 return PTR_ERR(match);
653 m->u.kernel.match = match;
655 ret = check_match(m, par);
661 module_put(m->u.kernel.match->me);
/*
 * check_target - validate the rule's target extension with the generic
 * xt_check_target(), passing the hook mask accumulated in e->comefrom and
 * the rule's protocol/inversion context.
 * NOTE(review): truncated — braces, some designated initializers of `par`
 * and the return statements are missing; comments only, code unchanged.
 */
665 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
667 struct ip6t_entry_target *t = ip6t_get_target(e);
668 struct xt_tgchk_param par = {
672 .target = t->u.kernel.target,
674 .hook_mask = e->comefrom,
675 .family = NFPROTO_IPV6,
679 t = ip6t_get_target(e);
680 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
681 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
683 duprintf("ip_tables: check failed for `%s'.\n",
684 t->u.kernel.target->name);
/*
 * find_check_entry - fully validate one rule: structural check, then look
 * up and check every match extension, then look up and check the target.
 * On any failure it unwinds: drops the target module ref and cleans up
 * every match that was already checked (the second xt_ematch_foreach is
 * the cleanup_matches error path).
 * NOTE(review): truncated — return type, braces, `ret` declaration,
 * revision arguments and label lines are missing; comments only.
 */
691 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
694 struct ip6t_entry_target *t;
695 struct xt_target *target;
698 struct xt_mtchk_param mtpar;
699 struct xt_entry_match *ematch;
701 ret = check_entry(e, name);
708 mtpar.entryinfo = &e->ipv6;
709 mtpar.hook_mask = e->comefrom;
710 mtpar.family = NFPROTO_IPV6;
711 xt_ematch_foreach(ematch, e) {
712 ret = find_check_match(ematch, &mtpar);
714 goto cleanup_matches;
718 t = ip6t_get_target(e);
719 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
721 if (IS_ERR(target)) {
722 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
723 ret = PTR_ERR(target);
724 goto cleanup_matches;
726 t->u.kernel.target = target;
728 ret = check_target(e, net, name);
/* Error unwind: release target module ref, then checked matches. */
733 module_put(t->u.kernel.target->me);
735 xt_ematch_foreach(ematch, e) {
738 cleanup_match(ematch, net);
743 static bool check_underflow(const struct ip6t_entry *e)
745 const struct ip6t_entry_target *t;
746 unsigned int verdict;
748 if (!unconditional(&e->ipv6))
750 t = ip6t_get_target_c(e);
751 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
753 verdict = ((struct ip6t_standard_target *)t)->verdict;
754 verdict = -verdict - 1;
755 return verdict == NF_DROP || verdict == NF_ACCEPT;
/*
 * check_entry_size_and_hooks - first-pass validation while walking the
 * user-supplied blob: alignment and bounds of each entry, minimum entry
 * size, and recording of hook entry points and (policy-validated)
 * underflows whose offsets match this entry's position.
 * NOTE(review): truncated — return type, braces, several error returns and
 * the comefrom reset are missing; comments only, code unchanged.
 */
759 check_entry_size_and_hooks(struct ip6t_entry *e,
760 struct xt_table_info *newinfo,
761 const unsigned char *base,
762 const unsigned char *limit,
763 const unsigned int *hook_entries,
764 const unsigned int *underflows,
765 unsigned int valid_hooks)
/* Entry must be properly aligned and leave room before @limit. */
769 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
770 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
771 duprintf("Bad offset %p\n", e);
776 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
777 duprintf("checking: element %p size %u\n",
782 /* Check hooks & underflows */
783 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
784 if (!(valid_hooks & (1 << h)))
786 if ((unsigned char *)e - base == hook_entries[h])
787 newinfo->hook_entry[h] = hook_entries[h];
788 if ((unsigned char *)e - base == underflows[h]) {
789 if (!check_underflow(e)) {
790 pr_err("Underflows must be unconditional and "
791 "use the STANDARD target with "
795 newinfo->underflow[h] = underflows[h];
799 /* Clear counters and comefrom */
800 e->counters = ((struct xt_counters) { 0, 0 });
805 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
807 struct xt_tgdtor_param par;
808 struct ip6t_entry_target *t;
809 struct xt_entry_match *ematch;
811 /* Cleanup all matches */
812 xt_ematch_foreach(ematch, e)
813 cleanup_match(ematch, net);
814 t = ip6t_get_target(e);
817 par.target = t->u.kernel.target;
818 par.targinfo = t->data;
819 par.family = NFPROTO_IPV6;
820 if (par.target->destroy != NULL)
821 par.target->destroy(&par);
822 module_put(par.target->me);
435 825 /* Checks and translates the user-supplied table segment (held in
/*
 * translate_table - validate a complete user-supplied ruleset blob and
 * prepare @newinfo for installation: size/offset checks per entry, all
 * valid hooks and underflows assigned, loop detection via
 * mark_source_chains(), per-entry extension checking, and finally
 * replication of the blob to every other possible CPU.  On a mid-walk
 * check failure, the entries already checked are cleaned up.
 * NOTE(review): truncated — `i`/`ret` declarations, error returns and the
 * partial-cleanup framing are missing; comments only, code unchanged.
 */
828 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
829 const struct ip6t_replace *repl)
831 struct ip6t_entry *iter;
835 newinfo->size = repl->size;
836 newinfo->number = repl->num_entries;
838 /* Init all hooks to impossible value. */
839 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
840 newinfo->hook_entry[i] = 0xFFFFFFFF;
841 newinfo->underflow[i] = 0xFFFFFFFF;
844 duprintf("translate_table: size %u\n", newinfo->size);
846 /* Walk through entries, checking offsets. */
847 xt_entry_foreach(iter, entry0, newinfo->size) {
848 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
/* Each ERROR target (user-chain head) deepens the jump stack by one. */
856 if (strcmp(ip6t_get_target(iter)->u.user.name,
857 XT_ERROR_TARGET) == 0)
858 ++newinfo->stacksize;
861 if (i != repl->num_entries) {
862 duprintf("translate_table: %u not %u entries\n",
863 i, repl->num_entries);
867 /* Check hooks all assigned */
868 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
869 /* Only hooks which are valid */
870 if (!(repl->valid_hooks & (1 << i)))
872 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
873 duprintf("Invalid hook entry %u %u\n",
874 i, repl->hook_entry[i]);
877 if (newinfo->underflow[i] == 0xFFFFFFFF) {
878 duprintf("Invalid underflow %u %u\n",
879 i, repl->underflow[i]);
884 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
887 /* Finally, each sanity check must pass */
889 xt_entry_foreach(iter, entry0, newinfo->size) {
890 ret = find_check_entry(iter, net, repl->name, repl->size);
/* Failure path: undo the entries that were already checked. */
897 xt_entry_foreach(iter, entry0, newinfo->size) {
900 cleanup_entry(iter, net);
905 /* And one copy for every other CPU */
906 for_each_possible_cpu(i) {
907 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
908 memcpy(newinfo->entries[i], entry0, newinfo->size);
/*
 * get_counters - snapshot the per-CPU rule counters into @counters:
 * the current CPU's values are SET (seeding the array without a memset),
 * every other CPU's values are ADDed under that CPU's xt_info write lock.
 * NOTE(review): truncated — braces, local declarations (i/cpu/curcpu),
 * the local_bh_disable/enable and xt_info_wrlock calls implied by the
 * surviving unlock, and loop-index increments are missing; comments only.
 */
915 get_counters(const struct xt_table_info *t,
916 struct xt_counters counters[])
918 struct ip6t_entry *iter;
923 /* Instead of clearing (by a previous call to memset())
924 * the counters and using adds, we set the counters
925 * with data used by 'current' CPU
927 * Bottom half has to be disabled to prevent deadlock
928 * if new softirq were to run and call ipt_do_table
931 curcpu = smp_processor_id();
934 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
935 SET_COUNTER(counters[i], iter->counters.bcnt,
936 iter->counters.pcnt);
940 for_each_possible_cpu(cpu) {
945 xt_entry_foreach(iter, t->entries[cpu], t->size) {
946 ADD_COUNTER(counters[i], iter->counters.bcnt,
947 iter->counters.pcnt);
950 xt_info_wrunlock(cpu);
/*
 * alloc_counters - vmalloc an array sized for one xt_counters per rule and
 * fill it with a get_counters() snapshot.  Returns the array or an
 * ERR_PTR(-ENOMEM); caller owns (and must vfree) the result.
 * NOTE(review): truncated — braces and the final `return counters;` are
 * missing; comments only, code unchanged.
 */
955 static struct xt_counters *alloc_counters(const struct xt_table *table)
957 unsigned int countersize;
958 struct xt_counters *counters;
959 const struct xt_table_info *private = table->private;
961 /* We need atomic snapshot of counters: rest doesn't change
962 (other than comefrom, which userspace doesn't care
964 countersize = sizeof(struct xt_counters) * private->number;
965 counters = vmalloc_node(countersize, numa_node_id());
967 if (counters == NULL)
968 return ERR_PTR(-ENOMEM);
970 get_counters(private, counters);
/*
 * copy_entries_to_user - export the ruleset blob to userspace: bulk-copy
 * this CPU's table copy, then patch each exported entry in place with the
 * snapshotted counters and with user-visible match/target names (kernel
 * pointers in the blob are not meaningful to userspace).
 * NOTE(review): truncated — return type, braces, `ret`/`i` declarations,
 * the free_counters error path and several copy_to_user argument lines are
 * missing; comments only, code unchanged.
 */
976 copy_entries_to_user(unsigned int total_size,
977 const struct xt_table *table,
978 void __user *userptr)
980 unsigned int off, num;
981 const struct ip6t_entry *e;
982 struct xt_counters *counters;
983 const struct xt_table_info *private = table->private;
985 const void *loc_cpu_entry;
987 counters = alloc_counters(table);
988 if (IS_ERR(counters))
989 return PTR_ERR(counters);
991 /* choose the copy that is on our node/cpu, ...
992 * This choice is lazy (because current thread is
993 * allowed to migrate to another cpu)
995 loc_cpu_entry = private->entries[raw_smp_processor_id()];
996 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1001 /* FIXME: use iterator macros --RR */
1002 /* ... then go back and fix counters and names */
1003 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1005 const struct ip6t_entry_match *m;
1006 const struct ip6t_entry_target *t;
1008 e = (struct ip6t_entry *)(loc_cpu_entry + off);
/* Overwrite the exported entry's counters with the snapshot. */
1009 if (copy_to_user(userptr + off
1010 + offsetof(struct ip6t_entry, counters),
1012 sizeof(counters[num])) != 0) {
/* Walk this entry's matches, writing each match's name out. */
1017 for (i = sizeof(struct ip6t_entry);
1018 i < e->target_offset;
1019 i += m->u.match_size) {
1022 if (copy_to_user(userptr + off + i
1023 + offsetof(struct ip6t_entry_match,
1025 m->u.kernel.match->name,
1026 strlen(m->u.kernel.match->name)+1)
1033 t = ip6t_get_target_c(e);
1034 if (copy_to_user(userptr + off + e->target_offset
1035 + offsetof(struct ip6t_entry_target,
1037 t->u.kernel.target->name,
1038 strlen(t->u.kernel.target->name)+1) != 0) {
1049 #ifdef CONFIG_COMPAT
1050 static void compat_standard_from_user(void *dst, const void *src)
1052 int v = *(compat_int_t *)src;
1055 v += xt_compat_calc_jump(AF_INET6, v);
1056 memcpy(dst, &v, sizeof(v));
1059 static int compat_standard_to_user(void __user *dst, const void *src)
1061 compat_int_t cv = *(int *)src;
1064 cv -= xt_compat_calc_jump(AF_INET6, cv);
1065 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/*
 * compat_calc_entry - for one native entry, compute how much smaller its
 * 32-bit (compat) representation is (entry header plus every match and the
 * target), record that delta with xt_compat_add_offset(), shrink
 * newinfo->size accordingly, and shift any hook/underflow offsets that lie
 * beyond this entry.
 * NOTE(review): truncated — braces, `off`/`i`/`ret` declarations, an early
 * error return and the final return are missing; comments only.
 */
1068 static int compat_calc_entry(const struct ip6t_entry *e,
1069 const struct xt_table_info *info,
1070 const void *base, struct xt_table_info *newinfo)
1072 const struct xt_entry_match *ematch;
1073 const struct ip6t_entry_target *t;
1074 unsigned int entry_offset;
1077 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1078 entry_offset = (void *)e - base;
1079 xt_ematch_foreach(ematch, e)
1080 off += xt_compat_match_offset(ematch->u.kernel.match);
1081 t = ip6t_get_target_c(e);
1082 off += xt_compat_target_offset(t->u.kernel.target);
1083 newinfo->size -= off;
1084 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
/* Hooks/underflows located after this entry move down by `off`. */
1088 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1089 if (info->hook_entry[i] &&
1090 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1091 newinfo->hook_entry[i] -= off;
1092 if (info->underflow[i] &&
1093 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1094 newinfo->underflow[i] -= off;
/*
 * compat_table_info - build a compat-sized xt_table_info describing @info:
 * copy the header portion, then run compat_calc_entry() over every entry
 * of this CPU's blob to accumulate the size/offset adjustments.
 * NOTE(review): truncated — braces, `ret` declaration, the -EINVAL return
 * for NULL arguments and the final return are missing; comments only.
 */
1099 static int compat_table_info(const struct xt_table_info *info,
1100 struct xt_table_info *newinfo)
1102 struct ip6t_entry *iter;
1103 void *loc_cpu_entry;
1106 if (!newinfo || !info)
1109 /* we dont care about newinfo->entries[] */
1110 memcpy(newinfo, info, offsetof(struct xt_table_info, entries))
;
1111 newinfo->initial_entries = 0;
1112 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1113 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1114 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
/*
 * get_info - IP6T_SO_GET_INFO handler: look up (auto-loading the
 * "ip6table_<name>" module if needed) the named table and copy its hook
 * entry points, underflows, entry count and blob size to userspace.  In
 * @compat mode the sizes/offsets are first converted via
 * compat_table_info() under the compat lock.
 * NOTE(review): truncated — braces, permission/length error returns,
 * #endif lines and the table unlock/module_put are missing; comments only.
 */
1122 static int get_info(struct net *net, void __user *user,
1123 const int *len, int compat)
1125 char name[IP6T_TABLE_MAXNAMELEN];
1129 if (*len != sizeof(struct ip6t_getinfo)) {
1130 duprintf("length %u != %zu\n", *len,
1131 sizeof(struct ip6t_getinfo));
1135 if (copy_from_user(name, user, sizeof(name)) != 0)
/* Force NUL termination of the user-supplied table name. */
1138 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1139 #ifdef CONFIG_COMPAT
1141 xt_compat_lock(AF_INET6);
1143 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1144 "ip6table_%s", name);
1145 if (t && !IS_ERR(t)) {
1146 struct ip6t_getinfo info;
1147 const struct xt_table_info *private = t->private;
1148 #ifdef CONFIG_COMPAT
1149 struct xt_table_info tmp;
1152 ret = compat_table_info(private, &tmp);
1153 xt_compat_flush_offsets(AF_INET6);
1157 info.valid_hooks = t->valid_hooks;
1158 memcpy(info.hook_entry, private->hook_entry,
1159 sizeof(info.hook_entry));
1160 memcpy(info.underflow, private->underflow,
1161 sizeof(info.underflow));
1162 info.num_entries = private->number;
1163 info.size = private->size;
1164 strcpy(info.name, name);
1166 if (copy_to_user(user, &info, *len) != 0)
1174 ret = t ? PTR_ERR(t) : -ENOENT;
1175 #ifdef CONFIG_COMPAT
1177 xt_compat_unlock(AF_INET6);
/*
 * get_entries - IP6T_SO_GET_ENTRIES handler: validate the userspace
 * length against the table's actual blob size, then export the entries
 * via copy_entries_to_user().
 * NOTE(review): truncated — return type, braces, `ret`/`t` declarations,
 * error returns and the unlock/module_put tail are missing; comments only.
 */
1183 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1187 struct ip6t_get_entries get;
1190 if (*len < sizeof(get)) {
1191 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1194 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1196 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1197 duprintf("get_entries: %u != %zu\n",
1198 *len, sizeof(get) + get.size);
1202 t = xt_find_table_lock(net, AF_INET6, get.name);
1203 if (t && !IS_ERR(t)) {
1204 struct xt_table_info *private = t->private;
1205 duprintf("t->private->number = %u\n", private->number);
1206 if (get.size == private->size)
1207 ret = copy_entries_to_user(private->size,
1208 t, uptr->entrytable);
1210 duprintf("get_entries: I've got %u not %u!\n",
1211 private->size, get.size);
1217 ret = t ? PTR_ERR(t) : -ENOENT;
/*
 * __do_replace - install @newinfo as the named table's ruleset: look up
 * (and possibly modprobe) the table, verify the valid-hooks mask matches,
 * atomically swap in the new blob with xt_replace_table(), adjust the
 * module refcount by the rule-count delta, snapshot the old counters to
 * userspace, clean up every old entry and free the old table info.
 * NOTE(review): truncated — return type, braces, module_put/try_module_get
 * lines between the refcount conditionals, error labels and returns are
 * missing; comments only, code unchanged.
 */
1223 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1224 struct xt_table_info *newinfo, unsigned int num_counters,
1225 void __user *counters_ptr)
1229 struct xt_table_info *oldinfo;
1230 struct xt_counters *counters;
1231 const void *loc_cpu_old_entry;
1232 struct ip6t_entry *iter;
1235 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1242 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1243 "ip6table_%s", name);
1244 if (!t || IS_ERR(t)) {
1245 ret = t ? PTR_ERR(t) : -ENOENT;
1246 goto free_newinfo_counters_untrans;
1250 if (valid_hooks != t->valid_hooks) {
1251 duprintf("Valid hook crap: %08X vs %08X\n",
1252 valid_hooks, t->valid_hooks);
1257 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1261 /* Update module usage count based on number of rules */
1262 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1263 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1264 if ((oldinfo->number > oldinfo->initial_entries) ||
1265 (newinfo->number <= oldinfo->initial_entries))
1267 if ((oldinfo->number > oldinfo->initial_entries) &&
1268 (newinfo->number <= oldinfo->initial_entries))
1271 /* Get the old counters, and synchronize with replace */
1272 get_counters(oldinfo, counters);
1274 /* Decrease module usage counts and free resource */
1275 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1276 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1277 cleanup_entry(iter, net);
1279 xt_free_table_info(oldinfo);
1280 if (copy_to_user(counters_ptr, counters,
1281 sizeof(struct xt_counters) * num_counters) != 0)
1290 free_newinfo_counters_untrans:
/*
 * do_replace - IP6T_SO_SET_REPLACE handler: copy the ip6t_replace header
 * and rule blob from userspace (with an overflow check on the counter
 * allocation), run translate_table() over it, and hand the validated
 * table to __do_replace().  On failure after translation, every checked
 * entry is cleaned up and the new table info freed.
 * NOTE(review): truncated — return type, braces, `ret` declaration,
 * -EFAULT/-ENOMEM returns and the free_newinfo label are missing;
 * comments only, code unchanged.
 */
1297 do_replace(struct net *net, const void __user *user, unsigned int len)
1300 struct ip6t_replace tmp;
1301 struct xt_table_info *newinfo;
1302 void *loc_cpu_entry;
1303 struct ip6t_entry *iter;
1305 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1308 /* overflow check */
1309 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1312 newinfo = xt_alloc_table_info(tmp.size);
1316 /* choose the copy that is on our node/cpu */
1317 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1318 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1324 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1328 duprintf("ip_tables: Translated table\n");
1330 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1331 tmp.num_counters, tmp.counters);
1333 goto free_newinfo_untrans;
1336 free_newinfo_untrans:
1337 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1338 cleanup_entry(iter, net);
1340 xt_free_table_info(newinfo);
/*
 * do_add_counters - IP6T_SO_SET_ADD_COUNTERS handler: read a counters
 * header (native or 32-bit compat layout) plus the counter array from
 * userspace, verify the count matches the table's rule count, and add the
 * values into this CPU's live counters under the per-CPU write lock.
 * NOTE(review): truncated — return type, braces, several declarations
 * (size/name/ptmp/t/ret), #else/#endif lines, error labels and the
 * unlock/free tail are missing; comments only, code unchanged.
 */
1345 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1348 unsigned int i, curcpu;
1349 struct xt_counters_info tmp;
1350 struct xt_counters *paddc;
1351 unsigned int num_counters;
1356 const struct xt_table_info *private;
1358 const void *loc_cpu_entry;
1359 struct ip6t_entry *iter;
1360 #ifdef CONFIG_COMPAT
1361 struct compat_xt_counters_info compat_tmp;
1365 size = sizeof(struct compat_xt_counters_info);
1370 size = sizeof(struct xt_counters_info);
1373 if (copy_from_user(ptmp, user, size) != 0)
1376 #ifdef CONFIG_COMPAT
1378 num_counters = compat_tmp.num_counters;
1379 name = compat_tmp.name;
1383 num_counters = tmp.num_counters;
/* Total length must be exactly header + counter array. */
1387 if (len != size + num_counters * sizeof(struct xt_counters))
1390 paddc = vmalloc_node(len - size, numa_node_id());
1394 if (copy_from_user(paddc, user + size, len - size) != 0) {
1399 t = xt_find_table_lock(net, AF_INET6, name);
1400 if (!t || IS_ERR(t)) {
1401 ret = t ? PTR_ERR(t) : -ENOENT;
1407 private = t->private;
1408 if (private->number != num_counters) {
1410 goto unlock_up_free;
1414 /* Choose the copy that is on our node */
1415 curcpu = smp_processor_id();
1416 xt_info_wrlock(curcpu);
1417 loc_cpu_entry = private->entries[curcpu];
1418 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1419 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1422 xt_info_wrunlock(curcpu);
1434 #ifdef CONFIG_COMPAT
/*
 * 32-bit userspace layout of struct ip6t_replace: u32 offsets instead of
 * native pointers/sizes, a compat_uptr_t for the counters pointer, and a
 * trailing flexible array of compat entries.
 * NOTE(review): truncated — the valid_hooks/num_entries/size/num_counters
 * fields and the closing brace are missing from this listing; comments
 * only, code unchanged.
 */
1435 struct compat_ip6t_replace {
1436 char name[IP6T_TABLE_MAXNAMELEN];
1440 u32 hook_entry[NF_INET_NUMHOOKS];
1441 u32 underflow[NF_INET_NUMHOOKS];
1443 compat_uptr_t counters; /* struct ip6t_counters * */
1444 struct compat_ip6t_entry entries[0];
/*
 * compat_copy_entry_to_user - export one native entry in 32-bit layout:
 * copy the (larger) native header then shrink *dstptr/*size by the layout
 * delta, convert each match and the target via the xt_compat_*_to_user
 * helpers, and finally patch the compat entry's target_offset/next_offset
 * to account for all accumulated shrinkage.
 * NOTE(review): truncated — return type, braces, `ret`/`i` parameters or
 * declarations, the origsize assignment and -EFAULT returns are missing;
 * comments only, code unchanged.
 */
1448 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1449 unsigned int *size, struct xt_counters *counters,
1452 struct ip6t_entry_target *t;
1453 struct compat_ip6t_entry __user *ce;
1454 u_int16_t target_offset, next_offset;
1455 compat_uint_t origsize;
1456 const struct xt_entry_match *ematch;
1460 ce = (struct compat_ip6t_entry __user *)*dstptr;
1461 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1462 copy_to_user(&ce->counters, &counters[i],
1463 sizeof(counters[i])) != 0)
1466 *dstptr += sizeof(struct compat_ip6t_entry);
1467 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1469 xt_ematch_foreach(ematch, e) {
1470 ret = xt_compat_match_to_user(ematch, dstptr, size);
1474 target_offset = e->target_offset - (origsize - *size);
1475 t = ip6t_get_target(e);
1476 ret = xt_compat_target_to_user(t, dstptr, size);
1479 next_offset = e->next_offset - (origsize - *size);
1480 if (put_user(target_offset, &ce->target_offset) != 0 ||
1481 put_user(next_offset, &ce->next_offset) != 0)
/* Resolve (and pin, via module refcount) the match extension named in a
 * compat entry, and add its native-vs-compat size delta to *size.
 * Returns 0 on success or a negative errno from the lookup. */
1487 compat_find_calc_match(struct ip6t_entry_match *m,
1489 const struct ip6t_ip6 *ipv6,
1490 unsigned int hookmask,
1493 struct xt_match *match;
/* May auto-load the extension module (xt_request_find_match). */
1495 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1496 m->u.user.revision);
1497 if (IS_ERR(match)) {
1498 duprintf("compat_check_calc_match: `%s' not found\n",
1500 return PTR_ERR(match);
/* Success: stash the resolved match and grow the size estimate. */
1502 m->u.kernel.match = match;
1503 *size += xt_compat_match_offset(match);
/* Drop the module references taken for every match and for the target
 * of a compat entry — the undo of check_compat_entry_size_and_hooks(). */
1507 static void compat_release_entry(struct compat_ip6t_entry *e)
1509 struct ip6t_entry_target *t;
1510 struct xt_entry_match *ematch;
1512 /* Cleanup all matches */
1513 xt_ematch_foreach(ematch, e)
1514 module_put(ematch->u.kernel.match->me);
1515 t = compat_ip6t_get_target(e);
1516 module_put(t->u.kernel.target->me);
/* Validate one compat entry: alignment, bounds against 'limit', and a
 * sane next_offset; resolve and pin its matches and target; record the
 * entry's native-vs-compat size delta via xt_compat_add_offset(); and
 * note whether this entry sits at a hook entry point or underflow.
 * The error path releases any module references already taken. */
1520 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1521 struct xt_table_info *newinfo,
1523 const unsigned char *base,
1524 const unsigned char *limit,
1525 const unsigned int *hook_entries,
1526 const unsigned int *underflows,
1529 struct xt_entry_match *ematch;
1530 struct ip6t_entry_target *t;
1531 struct xt_target *target;
1532 unsigned int entry_offset;
1536 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Entry must be properly aligned and leave room below 'limit'. */
1537 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1538 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1539 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* next_offset must at least cover the entry plus a minimal target. */
1543 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1544 sizeof(struct compat_xt_entry_target)) {
1545 duprintf("checking: element %p size %u\n",
1550 /* For purposes of check_entry casting the compat entry is fine */
1551 ret = check_entry((struct ip6t_entry *)e, name);
/* 'off' accumulates how much bigger the native form of this entry is. */
1555 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1556 entry_offset = (void *)e - (void *)base;
1558 xt_ematch_foreach(ematch, e) {
1559 ret = compat_find_calc_match(ematch, name,
1560 &e->ipv6, e->comefrom, &off);
1562 goto release_matches;
/* Resolve and pin the target extension (may auto-load its module). */
1566 t = compat_ip6t_get_target(e);
1567 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1568 t->u.user.revision);
1569 if (IS_ERR(target)) {
1570 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1572 ret = PTR_ERR(target);
1573 goto release_matches;
1575 t->u.kernel.target = target;
1577 off += xt_compat_target_offset(target);
/* Remember this entry's compat->native offset adjustment for later. */
1579 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1583 /* Check hooks & underflows */
1584 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1585 if ((unsigned char *)e - base == hook_entries[h])
1586 newinfo->hook_entry[h] = hook_entries[h];
1587 if ((unsigned char *)e - base == underflows[h])
1588 newinfo->underflow[h] = underflows[h];
1591 /* Clear counters and comefrom */
1592 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: drop the target ref, then each match ref taken above. */
1597 module_put(t->u.kernel.target->me);
1599 xt_ematch_foreach(ematch, e) {
1602 module_put(ematch->u.kernel.match->me);
/* Expand one already-validated compat entry into native layout at
 * *dstptr, converting each match and the target, fixing up the entry's
 * target/next offsets, and adjusting any table hook entry/underflow
 * offsets affected by the size change. */
1608 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1609 unsigned int *size, const char *name,
1610 struct xt_table_info *newinfo, unsigned char *base)
1612 struct ip6t_entry_target *t;
1613 struct xt_target *target;
1614 struct ip6t_entry *de;
1615 unsigned int origsize;
1617 struct xt_entry_match *ematch;
1621 de = (struct ip6t_entry *)*dstptr;
1622 memcpy(de, e, sizeof(struct ip6t_entry));
1623 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1625 *dstptr += sizeof(struct ip6t_entry);
1626 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
/* Convert each match, then the target, to the native in-kernel form. */
1628 xt_ematch_foreach(ematch, e) {
1629 ret = xt_compat_match_from_user(ematch, dstptr, size);
/* (origsize - *size) is the cumulative size delta; apply it to offsets. */
1633 de->target_offset = e->target_offset - (origsize - *size);
1634 t = compat_ip6t_get_target(e);
1635 target = t->u.kernel.target;
1636 xt_compat_target_from_user(t, dstptr, size);
1638 de->next_offset = e->next_offset - (origsize - *size);
/* Shift table-level hook/underflow offsets that lie beyond this entry. */
1639 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1640 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1641 newinfo->hook_entry[h] -= origsize - *size;
1642 if ((unsigned char *)de - base < newinfo->underflow[h])
1643 newinfo->underflow[h] -= origsize - *size;
/* Run the full extension validation (->checkentry) on an entry that has
 * already been translated to native layout: every match first, then the
 * target.  On failure only the matches validated so far are cleaned up. */
1648 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1653 struct xt_mtchk_param mtpar;
1654 struct xt_entry_match *ematch;
/* Fill the match-check parameter block from this entry's context. */
1659 mtpar.entryinfo = &e->ipv6;
1660 mtpar.hook_mask = e->comefrom;
1661 mtpar.family = NFPROTO_IPV6;
1662 xt_ematch_foreach(ematch, e) {
1663 ret = check_match(ematch, &mtpar);
1665 goto cleanup_matches;
1669 ret = check_target(e, net, name);
1671 goto cleanup_matches;
/* Error path: undo ->checkentry for matches already validated. */
1675 xt_ematch_foreach(ematch, e) {
1678 cleanup_match(ematch, net);
/* Convert a complete compat (32-bit) ruleset into native layout:
 *  1) under xt_compat_lock, walk the compat entries validating sizes and
 *     hooks and accumulating per-entry size deltas;
 *  2) allocate a native xt_table_info and expand every entry into it;
 *  3) mark_source_chains() for loop detection, then run the full
 *     per-entry checks (compat_check_entry).
 * On success *pinfo/*pentry0 are swapped to the native table and the
 * temporary info is freed; all error paths unwind module refs and
 * allocations.  NOTE(review): several statements (returns, labels) are
 * elided in this excerpt. */
1684 translate_compat_table(struct net *net,
1686 unsigned int valid_hooks,
1687 struct xt_table_info **pinfo,
1689 unsigned int total_size,
1690 unsigned int number,
1691 unsigned int *hook_entries,
1692 unsigned int *underflows)
1695 struct xt_table_info *newinfo, *info;
1696 void *pos, *entry0, *entry1;
1697 struct compat_ip6t_entry *iter0;
1698 struct ip6t_entry *iter1;
1705 info->number = number;
1707 /* Init all hooks to impossible value. */
1708 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1709 info->hook_entry[i] = 0xFFFFFFFF;
1710 info->underflow[i] = 0xFFFFFFFF;
1713 duprintf("translate_compat_table: size %u\n", info->size);
/* Phase 1: validate compat entries and record size deltas. */
1715 xt_compat_lock(AF_INET6);
1716 /* Walk through entries, checking offsets. */
1717 xt_entry_foreach(iter0, entry0, total_size) {
1718 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1720 entry0 + total_size,
1731 duprintf("translate_compat_table: %u not %u entries\n",
1736 /* Check hooks all assigned */
1737 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1738 /* Only hooks which are valid */
1739 if (!(valid_hooks & (1 << i)))
1741 if (info->hook_entry[i] == 0xFFFFFFFF) {
1742 duprintf("Invalid hook entry %u %u\n",
1743 i, hook_entries[i]);
1746 if (info->underflow[i] == 0xFFFFFFFF) {
1747 duprintf("Invalid underflow %u %u\n",
/* Phase 2: build the native table and expand every entry into it. */
1754 newinfo = xt_alloc_table_info(size);
1758 newinfo->number = number;
1759 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1760 newinfo->hook_entry[i] = info->hook_entry[i];
1761 newinfo->underflow[i] = info->underflow[i];
1763 entry1 = newinfo->entries[raw_smp_processor_id()];
1766 xt_entry_foreach(iter0, entry0, total_size) {
1767 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1768 name, newinfo, entry1);
/* Offset bookkeeping is no longer needed once entries are expanded. */
1772 xt_compat_flush_offsets(AF_INET6);
1773 xt_compat_unlock(AF_INET6);
1778 if (!mark_source_chains(newinfo, valid_hooks, entry1))
/* Phase 3: run full ->checkentry validation on the native entries. */
1782 xt_entry_foreach(iter1, entry1, newinfo->size) {
1783 ret = compat_check_entry(iter1, net, name);
1790 * The first i matches need cleanup_entry (calls ->destroy)
1791 * because they had called ->check already. The other j-i
1792 * entries need only release.
1796 xt_entry_foreach(iter0, entry0, newinfo->size) {
1801 compat_release_entry(iter0);
1803 xt_entry_foreach(iter1, entry1, newinfo->size) {
1806 cleanup_entry(iter1, net);
1808 xt_free_table_info(newinfo);
1812 /* And one copy for every other CPU */
1813 for_each_possible_cpu(i)
1814 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1815 memcpy(newinfo->entries[i], entry1, newinfo->size);
1819 xt_free_table_info(info);
/* Error unwind: free the half-built table and release pinned modules. */
1823 xt_free_table_info(newinfo);
1825 xt_entry_foreach(iter0, entry0, total_size) {
1828 compat_release_entry(iter0);
1832 xt_compat_flush_offsets(AF_INET6);
1833 xt_compat_unlock(AF_INET6);
/* IP6T_SO_SET_REPLACE handler for 32-bit callers: copy in the compat
 * replace header, sanity-check sizes against overflow, copy in the rule
 * blob, translate it to native layout, then hand off to __do_replace()
 * (shared with the native path). */
1838 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1841 struct compat_ip6t_replace tmp;
1842 struct xt_table_info *newinfo;
1843 void *loc_cpu_entry;
1844 struct ip6t_entry *iter;
1846 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1849 /* overflow check */
1850 if (tmp.size >= INT_MAX / num_possible_cpus())
1852 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1855 newinfo = xt_alloc_table_info(tmp.size)
1859 /* choose the copy that is on our node/cpu */
1860 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1861 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1867 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1868 &newinfo, &loc_cpu_entry, tmp.size,
1869 tmp.num_entries, tmp.hook_entry,
1874 duprintf("compat_do_replace: Translated table\n");
1876 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1877 tmp.num_counters, compat_ptr(tmp.counters));
1879 goto free_newinfo_untrans;
/* __do_replace failed after translation: destroy entries, free table. */
1882 free_newinfo_untrans:
1883 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1884 cleanup_entry(iter, net);
1886 xt_free_table_info(newinfo);
/* Compat setsockopt dispatcher: requires CAP_NET_ADMIN, then routes
 * REPLACE to the compat translator and ADD_COUNTERS to the shared
 * handler with compat=1. */
1891 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1896 if (!capable(CAP_NET_ADMIN))
1900 case IP6T_SO_SET_REPLACE:
1901 ret = compat_do_replace(sock_net(sk), user, len);
1904 case IP6T_SO_SET_ADD_COUNTERS:
1905 ret = do_add_counters(sock_net(sk), user, len, 1);
1909 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit layout of struct ip6t_get_entries (IP6T_SO_GET_ENTRIES reply). */
1916 struct compat_ip6t_get_entries {
1917 char name[IP6T_TABLE_MAXNAMELEN];
/* Variable-length array of compat entries follows the header. */
1919 struct compat_ip6t_entry entrytable[0];
/* Dump a whole table to a 32-bit caller: snapshot the counters, then
 * emit each rule via compat_copy_entry_to_user().  'i' indexes the
 * counters array in rule order. */
1923 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1924 void __user *userptr)
1926 struct xt_counters *counters;
1927 const struct xt_table_info *private = table->private;
1931 const void *loc_cpu_entry;
1933 struct ip6t_entry *iter;
/* alloc_counters() aggregates per-CPU counters into one array. */
1935 counters = alloc_counters(table);
1936 if (IS_ERR(counters))
1937 return PTR_ERR(counters);
1939 /* choose the copy that is on our node/cpu, ...
1940 * This choice is lazy (because current thread is
1941 * allowed to migrate to another cpu)
1943 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1946 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1947 ret = compat_copy_entry_to_user(iter, &pos,
1948 &size, counters, i++);
/* IP6T_SO_GET_ENTRIES handler for 32-bit callers: validate the request
 * length against the compat-translated table size, then copy the whole
 * ruleset out under xt_compat_lock. */
1958 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1962 struct compat_ip6t_get_entries get;
1965 if (*len < sizeof(get)) {
1966 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1970 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
/* Caller's buffer must exactly fit header + advertised table size. */
1973 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1974 duprintf("compat_get_entries: %u != %zu\n",
1975 *len, sizeof(get) + get.size);
1979 xt_compat_lock(AF_INET6);
1980 t = xt_find_table_lock(net, AF_INET6, get.name);
1981 if (t && !IS_ERR(t)) {
1982 const struct xt_table_info *private = t->private;
1983 struct xt_table_info info;
1984 duprintf("t->private->number = %u\n", private->number);
/* compat_table_info computes the size the table has in compat layout. */
1985 ret = compat_table_info(private, &info);
1986 if (!ret && get.size == info.size) {
1987 ret = compat_copy_entries_to_user(private->size,
1988 t, uptr->entrytable);
1990 duprintf("compat_get_entries: I've got %u not %u!\n",
1991 private->size, get.size);
1994 xt_compat_flush_offsets(AF_INET6);
1998 ret = t ? PTR_ERR(t) : -ENOENT;
2000 xt_compat_unlock(AF_INET6);
/* Forward declaration: unknown compat GET commands fall through to the
 * native handler below. */
2004 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/* Compat getsockopt dispatcher: CAP_NET_ADMIN required; INFO/ENTRIES go
 * through compat-aware paths, everything else to the native handler. */
2007 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2011 if (!capable(CAP_NET_ADMIN))
2015 case IP6T_SO_GET_INFO:
2016 ret = get_info(sock_net(sk), user, len, 1);
2018 case IP6T_SO_GET_ENTRIES:
2019 ret = compat_get_entries(sock_net(sk), user, len);
2022 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/* Native setsockopt dispatcher: CAP_NET_ADMIN required; REPLACE swaps a
 * ruleset in, ADD_COUNTERS adds to per-rule counters (compat=0). */
2029 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2033 if (!capable(CAP_NET_ADMIN))
2037 case IP6T_SO_SET_REPLACE:
2038 ret = do_replace(sock_net(sk), user, len);
2041 case IP6T_SO_SET_ADD_COUNTERS:
2042 ret = do_add_counters(sock_net(sk), user, len, 0);
2046 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* Native getsockopt dispatcher: table info, full entry dump, and
 * match/target revision queries.  CAP_NET_ADMIN required. */
2054 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2058 if (!capable(CAP_NET_ADMIN))
2062 case IP6T_SO_GET_INFO:
2063 ret = get_info(sock_net(sk), user, len, 0);
2066 case IP6T_SO_GET_ENTRIES:
2067 ret = get_entries(sock_net(sk), user, len);
/* Revision queries share one path; 'target' flag picks the table. */
2070 case IP6T_SO_GET_REVISION_MATCH:
2071 case IP6T_SO_GET_REVISION_TARGET: {
2072 struct ip6t_get_revision rev;
2075 if (*len != sizeof(rev)) {
2079 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2084 if (cmd == IP6T_SO_GET_REVISION_TARGET)
/* May trigger a modprobe of "ip6t_<name>" if not yet loaded. */
2089 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2092 "ip6t_%s", rev.name);
2097 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/* Register an ip6tables table for namespace 'net': allocate table info
 * sized from the replacement blob, copy the initial entries in,
 * translate/validate them, then hand the table to x_tables.  Returns
 * the registered xt_table or an ERR_PTR on failure. */
2104 struct xt_table *ip6t_register_table(struct net *net,
2105 const struct xt_table *table,
2106 const struct ip6t_replace *repl)
2109 struct xt_table_info *newinfo;
2110 struct xt_table_info bootstrap = {0};
2111 void *loc_cpu_entry;
2112 struct xt_table *new_table;
2114 newinfo = xt_alloc_table_info(repl->size);
2120 /* choose the copy on our node/cpu, but dont care about preemption */
2121 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2122 memcpy(loc_cpu_entry, repl->entries, repl->size);
2124 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2128 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2129 if (IS_ERR(new_table)) {
2130 ret = PTR_ERR(new_table);
/* Error path: free the table info and propagate the error. */
2136 xt_free_table_info(newinfo);
2138 return ERR_PTR(ret);
/* Unregister a table and free all of its resources: run cleanup_entry()
 * on every rule (releases extension modules), drop the table owner's
 * module ref if rules beyond the built-ins were present, free the info. */
2141 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2143 struct xt_table_info *private;
2144 void *loc_cpu_entry;
/* Cache the owner before the table struct may become invalid. */
2145 struct module *table_owner = table->me;
2146 struct ip6t_entry *iter;
2148 private = xt_unregister_table(table);
2150 /* Decrease module usage counts and free resources */
2151 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2152 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2153 cleanup_entry(iter, net);
2154 if (private->number > private->initial_entries)
2155 module_put(table_owner);
2156 xt_free_table_info(private);
2159 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* Pure predicate: exact ICMPv6 type plus inclusive code range.
 * NOTE(review): the trailing invert-xor part of the expression is elided
 * in this excerpt. */
2161 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2162 u_int8_t type, u_int8_t code,
2165 return (type == test_type && code >= min_code && code <= max_code)
/* 'icmp6' match: pull the ICMPv6 header from the packet and test its
 * type/code against the rule's configured range.  Truncated packets
 * that should carry the header are hot-dropped. */
2170 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2172 const struct icmp6hdr *ic;
2173 struct icmp6hdr _icmph;
2174 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2176 /* Must not be a fragment. */
2177 if (par->fragoff != 0)
/* May copy into _icmph if the header spans skb fragments. */
2180 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2182 /* We've been asked to examine this packet, and we
2183 * can't. Hence, no choice but to drop.
2185 duprintf("Dropping evil ICMP tinygram.\n");
2186 *par->hotdrop = true;
2190 return icmp6_type_code_match(icmpinfo->type,
2193 ic->icmp6_type, ic->icmp6_code,
2194 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2197 /* Called when user tries to insert an entry of this type. */
2198 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2200 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2202 /* Must specify no unknown invflags */
2203 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2206 /* The built-in targets: standard (NULL) and error. */
/* Standard target: verdict is just an int; compat hooks translate the
 * verdict between 32/64-bit userland representations. */
2207 static struct xt_target ip6t_standard_target __read_mostly = {
2208 .name = IP6T_STANDARD_TARGET,
2209 .targetsize = sizeof(int),
2210 .family = NFPROTO_IPV6,
2211 #ifdef CONFIG_COMPAT
2212 .compatsize = sizeof(compat_int_t),
2213 .compat_from_user = compat_standard_from_user,
2214 .compat_to_user = compat_standard_to_user,
/* Error target: terminates built-in chains that fall off the end. */
2218 static struct xt_target ip6t_error_target __read_mostly = {
2219 .name = IP6T_ERROR_TARGET,
2220 .target = ip6t_error,
2221 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2222 .family = NFPROTO_IPV6,
/* get/setsockopt registration: wires the IP6T_SO_* option range to the
 * dispatchers above, with separate entry points for compat callers. */
2225 static struct nf_sockopt_ops ip6t_sockopts = {
2227 .set_optmin = IP6T_BASE_CTL,
2228 .set_optmax = IP6T_SO_SET_MAX+1,
2229 .set = do_ip6t_set_ctl,
2230 #ifdef CONFIG_COMPAT
2231 .compat_set = compat_do_ip6t_set_ctl,
2233 .get_optmin = IP6T_BASE_CTL,
2234 .get_optmax = IP6T_SO_GET_MAX+1,
2235 .get = do_ip6t_get_ctl,
2236 #ifdef CONFIG_COMPAT
2237 .compat_get = compat_do_ip6t_get_ctl,
2239 .owner = THIS_MODULE,
/* Built-in 'icmp6' match registration (proto-restricted to ICMPv6). */
2242 static struct xt_match icmp6_matchstruct __read_mostly = {
2244 .match = icmp6_match,
2245 .matchsize = sizeof(struct ip6t_icmp),
2246 .checkentry = icmp6_checkentry,
2247 .proto = IPPROTO_ICMPV6,
2248 .family = NFPROTO_IPV6,
/* Per-netns init: set up x_tables state for the IPv6 protocol family. */
2251 static int __net_init ip6_tables_net_init(struct net *net)
2253 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown: release the IPv6 x_tables state. */
2256 static void __net_exit ip6_tables_net_exit(struct net *net)
2258 xt_proto_fini(net, NFPROTO_IPV6);
/* Hook the per-namespace init/exit into the pernet framework. */
2261 static struct pernet_operations ip6_tables_net_ops = {
2262 .init = ip6_tables_net_init,
2263 .exit = ip6_tables_net_exit,
/* Module init: register pernet ops, the two built-in targets, the
 * icmp6 match, and finally the sockopt interface.  Each failure path
 * unwinds everything registered before it (labels elided in excerpt). */
2266 static int __init ip6_tables_init(void)
2270 ret = register_pernet_subsys(&ip6_tables_net_ops);
2274 /* Noone else will be downing sem now, so we won't sleep */
2275 ret = xt_register_target(&ip6t_standard_target);
2278 ret = xt_register_target(&ip6t_error_target);
2281 ret = xt_register_match(&icmp6_matchstruct);
2285 /* Register setsockopt */
2286 ret = nf_register_sockopt(&ip6t_sockopts);
2290 pr_info("(C) 2000-2006 Netfilter Core Team\n");
/* Error unwind, in reverse order of registration. */
2294 xt_unregister_match(&icmp6_matchstruct);
2296 xt_unregister_target(&ip6t_error_target);
2298 xt_unregister_target(&ip6t_standard_target);
2300 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything in reverse order of init. */
2305 static void __exit ip6_tables_fini(void)
2307 nf_unregister_sockopt(&ip6t_sockopts);
2309 xt_unregister_match(&icmp6_matchstruct);
2310 xt_unregister_target(&ip6t_error_target);
2311 xt_unregister_target(&ip6t_standard_target);
2313 unregister_pernet_subsys(&ip6_tables_net_ops);
2317 * find the offset to specified header or the protocol number of last header
2318 * if target < 0. "last header" is transport protocol header, ESP, or
2321 * If target header is found, its offset is set in *offset and return protocol
2322 * number. Otherwise, return -1.
2324 * If the first fragment doesn't contain the final protocol header or
2325 * NEXTHDR_NONE it is considered invalid.
2327 * Note that non-1st fragment is special case that "the protocol number
2328 * of last header" is "next header" field in Fragment header. In this case,
2329 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
/* Walk the IPv6 extension-header chain looking for 'target' (see the
 * block comment above for the full contract).  Fragment headers get
 * special handling: the fragment offset is extracted and, for non-first
 * fragments, reported via *fragoff. */
2333 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2334 int target, unsigned short *fragoff)
2336 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2337 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2338 unsigned int len = skb->len - start;
2343 while (nexthdr != target) {
2344 struct ipv6_opt_hdr _hdr, *hp;
2345 unsigned int hdrlen;
/* Hit a non-extension or NONE header: the chain ends here. */
2347 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2353 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2356 if (nexthdr == NEXTHDR_FRAGMENT) {
2357 unsigned short _frag_off;
2359 fp = skb_header_pointer(skb,
2360 start+offsetof(struct frag_hdr,
/* Low 3 bits of frag_off are flags; mask to get the offset. */
2367 _frag_off = ntohs(*fp) & ~0x7;
2370 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2371 hp->nexthdr == NEXTHDR_NONE)) {
2373 *fragoff = _frag_off;
/* AH length is counted in 32-bit words, others in 64-bit units. */
2379 } else if (nexthdr == NEXTHDR_AUTH)
2380 hdrlen = (hp->hdrlen + 2) << 2;
2382 hdrlen = ipv6_optlen(hp);
2384 nexthdr = hp->nexthdr;
/* Public API exported to other modules (ip6table_filter etc.). */
2393 EXPORT_SYMBOL(ip6t_register_table);
2394 EXPORT_SYMBOL(ip6t_unregister_table);
2395 EXPORT_SYMBOL(ip6t_do_table);
2396 EXPORT_SYMBOL(ip6t_ext_hdr);
2397 EXPORT_SYMBOL(ipv6_find_hdr);
2399 module_init(ip6_tables_init);
2400 module_exit(ip6_tables_fini);