/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

/* dprintf: datapath debug output, compiled out unless DEBUG_IP_FIREWALL. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

/* duprintf: userspace-interface debug output, compiled out unless
 * DEBUG_IP_FIREWALL_USER.
 */
#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

/* IP_NF_ASSERT: log (do not panic) when an internal invariant fails;
 * no-op unless CONFIG_NETFILTER_DEBUG.
 */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
66 /* All the better to debug you with... */
71 void *ip6t_alloc_initial_table(const struct xt_table *info)
73 return xt_alloc_initial_table(ip6t, IP6T);
75 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
86 /* Check for an extension */
88 ip6t_ext_hdr(u8 nexthdr)
90 return ( (nexthdr == IPPROTO_HOPOPTS) ||
91 (nexthdr == IPPROTO_ROUTING) ||
92 (nexthdr == IPPROTO_FRAGMENT) ||
93 (nexthdr == IPPROTO_ESP) ||
94 (nexthdr == IPPROTO_AH) ||
95 (nexthdr == IPPROTO_NONE) ||
96 (nexthdr == IPPROTO_DSTOPTS) );
99 /* Returns whether matches rule or not. */
100 /* Performance critical - called for every packet */
102 ip6_packet_match(const struct sk_buff *skb,
105 const struct ip6t_ip6 *ip6info,
106 unsigned int *protoff,
107 int *fragoff, bool *hotdrop)
110 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
112 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
114 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
115 &ip6info->src), IP6T_INV_SRCIP) ||
116 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
117 &ip6info->dst), IP6T_INV_DSTIP)) {
118 dprintf("Source or dest mismatch.\n");
120 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
121 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
122 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
123 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
124 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
125 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
129 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
131 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
132 dprintf("VIA in mismatch (%s vs %s).%s\n",
133 indev, ip6info->iniface,
134 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
138 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
140 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
141 dprintf("VIA out mismatch (%s vs %s).%s\n",
142 outdev, ip6info->outiface,
143 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
147 /* ... might want to do something with class and flowlabel here ... */
149 /* look for the desired protocol header */
150 if((ip6info->flags & IP6T_F_PROTO)) {
152 unsigned short _frag_off;
154 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
160 *fragoff = _frag_off;
162 dprintf("Packet protocol %hi ?= %s%hi.\n",
164 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
167 if (ip6info->proto == protohdr) {
168 if(ip6info->invflags & IP6T_INV_PROTO) {
174 /* We need match for the '-p all', too! */
175 if ((ip6info->proto != 0) &&
176 !(ip6info->invflags & IP6T_INV_PROTO))
182 /* should be ip6 safe */
184 ip6_checkentry(const struct ip6t_ip6 *ipv6)
186 if (ipv6->flags & ~IP6T_F_MASK) {
187 duprintf("Unknown flag bits set: %08X\n",
188 ipv6->flags & ~IP6T_F_MASK);
191 if (ipv6->invflags & ~IP6T_INV_MASK) {
192 duprintf("Unknown invflag bits set: %08X\n",
193 ipv6->invflags & ~IP6T_INV_MASK);
200 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
203 pr_info("error: `%s'\n", (const char *)par->targinfo);
208 /* Performance critical - called for every packet */
210 do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
211 struct xt_match_param *par)
213 par->match = m->u.kernel.match;
214 par->matchinfo = m->data;
216 /* Stop iteration if it doesn't match */
217 if (!m->u.kernel.match->match(skb, par))
/* Return the rule entry at byte @offset from the table blob @base. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
229 /* All zeroes == unconditional rule. */
230 /* Mildly perf critical (only if packet tracing is on) */
231 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
233 static const struct ip6t_ip6 uncond;
235 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* const-correct wrapper around ip6t_get_target(). */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
244 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
245 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
246 /* This cries for unification! */
247 static const char *const hooknames[] = {
248 [NF_INET_PRE_ROUTING] = "PREROUTING",
249 [NF_INET_LOCAL_IN] = "INPUT",
250 [NF_INET_FORWARD] = "FORWARD",
251 [NF_INET_LOCAL_OUT] = "OUTPUT",
252 [NF_INET_POST_ROUTING] = "POSTROUTING",
255 enum nf_ip_trace_comments {
256 NF_IP6_TRACE_COMMENT_RULE,
257 NF_IP6_TRACE_COMMENT_RETURN,
258 NF_IP6_TRACE_COMMENT_POLICY,
261 static const char *const comments[] = {
262 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
263 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
264 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
267 static struct nf_loginfo trace_loginfo = {
268 .type = NF_LOG_TYPE_LOG,
272 .logflags = NF_LOG_MASK,
277 /* Mildly perf critical (only if packet tracing is on) */
279 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
280 const char *hookname, const char **chainname,
281 const char **comment, unsigned int *rulenum)
283 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
285 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
286 /* Head of user chain: ERROR target with chainname */
287 *chainname = t->target.data;
292 if (s->target_offset == sizeof(struct ip6t_entry) &&
293 strcmp(t->target.u.kernel.target->name,
294 IP6T_STANDARD_TARGET) == 0 &&
296 unconditional(&s->ipv6)) {
297 /* Tail of chains: STANDARD target (return/policy) */
298 *comment = *chainname == hookname
299 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
300 : comments[NF_IP6_TRACE_COMMENT_RETURN];
309 static void trace_packet(const struct sk_buff *skb,
311 const struct net_device *in,
312 const struct net_device *out,
313 const char *tablename,
314 const struct xt_table_info *private,
315 const struct ip6t_entry *e)
317 const void *table_base;
318 const struct ip6t_entry *root;
319 const char *hookname, *chainname, *comment;
320 const struct ip6t_entry *iter;
321 unsigned int rulenum = 0;
323 table_base = private->entries[smp_processor_id()];
324 root = get_entry(table_base, private->hook_entry[hook]);
326 hookname = chainname = hooknames[hook];
327 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
329 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
330 if (get_chainname_rulenum(iter, e, hookname,
331 &chainname, &comment, &rulenum) != 0)
334 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
335 "TRACE: %s:%s:%s:%u ",
336 tablename, chainname, comment, rulenum);
340 static inline __pure struct ip6t_entry *
341 ip6t_next_entry(const struct ip6t_entry *entry)
343 return (void *)entry + entry->next_offset;
346 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
348 ip6t_do_table(struct sk_buff *skb,
350 const struct net_device *in,
351 const struct net_device *out,
352 struct xt_table *table)
354 #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
356 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
357 bool hotdrop = false;
358 /* Initializing verdict to NF_DROP keeps gcc happy. */
359 unsigned int verdict = NF_DROP;
360 const char *indev, *outdev;
361 const void *table_base;
362 struct ip6t_entry *e, *back;
363 const struct xt_table_info *private;
364 struct xt_match_param mtpar;
365 struct xt_target_param tgpar;
368 indev = in ? in->name : nulldevname;
369 outdev = out ? out->name : nulldevname;
370 /* We handle fragments by dealing with the first fragment as
371 * if it was a normal packet. All other fragments are treated
372 * normally, except that they will NEVER match rules that ask
373 * things we don't know, ie. tcp syn flag or ports). If the
374 * rule is also a fragment-specific rule, non-fragments won't
376 mtpar.hotdrop = &hotdrop;
377 mtpar.in = tgpar.in = in;
378 mtpar.out = tgpar.out = out;
379 mtpar.family = tgpar.family = NFPROTO_IPV6;
380 mtpar.hooknum = tgpar.hooknum = hook;
382 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
385 private = table->private;
386 table_base = private->entries[smp_processor_id()];
388 e = get_entry(table_base, private->hook_entry[hook]);
390 /* For return from builtin chain */
391 back = get_entry(table_base, private->underflow[hook]);
394 const struct ip6t_entry_target *t;
395 const struct xt_entry_match *ematch;
399 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
400 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
402 e = ip6t_next_entry(e);
406 xt_ematch_foreach(ematch, e)
407 if (do_match(ematch, skb, &mtpar) != 0)
410 ADD_COUNTER(e->counters,
411 ntohs(ipv6_hdr(skb)->payload_len) +
412 sizeof(struct ipv6hdr), 1);
414 t = ip6t_get_target_c(e);
415 IP_NF_ASSERT(t->u.kernel.target);
417 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
418 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
419 /* The packet is traced: log it */
420 if (unlikely(skb->nf_trace))
421 trace_packet(skb, hook, in, out,
422 table->name, private, e);
424 /* Standard target? */
425 if (!t->u.kernel.target->target) {
428 v = ((struct ip6t_standard_target *)t)->verdict;
430 /* Pop from stack? */
431 if (v != IP6T_RETURN) {
432 verdict = (unsigned)(-v) - 1;
436 back = get_entry(table_base, back->comefrom);
439 if (table_base + v != ip6t_next_entry(e) &&
440 !(e->ipv6.flags & IP6T_F_GOTO)) {
441 /* Save old back ptr in next entry */
442 struct ip6t_entry *next = ip6t_next_entry(e);
443 next->comefrom = (void *)back - table_base;
444 /* set back pointer to next entry */
448 e = get_entry(table_base, v);
452 /* Targets which reenter must return
454 tgpar.target = t->u.kernel.target;
455 tgpar.targinfo = t->data;
457 #ifdef CONFIG_NETFILTER_DEBUG
458 tb_comefrom = 0xeeeeeeec;
460 verdict = t->u.kernel.target->target(skb, &tgpar);
462 #ifdef CONFIG_NETFILTER_DEBUG
463 if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
464 printk("Target %s reentered!\n",
465 t->u.kernel.target->name);
468 tb_comefrom = 0x57acc001;
470 if (verdict == IP6T_CONTINUE)
471 e = ip6t_next_entry(e);
477 #ifdef CONFIG_NETFILTER_DEBUG
478 tb_comefrom = NETFILTER_LINK_POISON;
480 xt_info_rdunlock_bh();
482 #ifdef DEBUG_ALLOW_ALL
493 /* Figures out from what hook each rule can be called: returns 0 if
494 there are loops. Puts hook bitmask in comefrom. */
496 mark_source_chains(const struct xt_table_info *newinfo,
497 unsigned int valid_hooks, void *entry0)
501 /* No recursion; use packet counter to save back ptrs (reset
502 to 0 as we leave), and comefrom to save source hook bitmask */
503 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
504 unsigned int pos = newinfo->hook_entry[hook];
505 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
507 if (!(valid_hooks & (1 << hook)))
510 /* Set initial back pointer. */
511 e->counters.pcnt = pos;
514 const struct ip6t_standard_target *t
515 = (void *)ip6t_get_target_c(e);
516 int visited = e->comefrom & (1 << hook);
518 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
519 printk("iptables: loop hook %u pos %u %08X.\n",
520 hook, pos, e->comefrom);
523 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
525 /* Unconditional return/END. */
526 if ((e->target_offset == sizeof(struct ip6t_entry) &&
527 (strcmp(t->target.u.user.name,
528 IP6T_STANDARD_TARGET) == 0) &&
530 unconditional(&e->ipv6)) || visited) {
531 unsigned int oldpos, size;
533 if ((strcmp(t->target.u.user.name,
534 IP6T_STANDARD_TARGET) == 0) &&
535 t->verdict < -NF_MAX_VERDICT - 1) {
536 duprintf("mark_source_chains: bad "
537 "negative verdict (%i)\n",
542 /* Return: backtrack through the last
545 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
546 #ifdef DEBUG_IP_FIREWALL_USER
548 & (1 << NF_INET_NUMHOOKS)) {
549 duprintf("Back unset "
556 pos = e->counters.pcnt;
557 e->counters.pcnt = 0;
559 /* We're at the start. */
563 e = (struct ip6t_entry *)
565 } while (oldpos == pos + e->next_offset);
568 size = e->next_offset;
569 e = (struct ip6t_entry *)
570 (entry0 + pos + size);
571 e->counters.pcnt = pos;
574 int newpos = t->verdict;
576 if (strcmp(t->target.u.user.name,
577 IP6T_STANDARD_TARGET) == 0 &&
579 if (newpos > newinfo->size -
580 sizeof(struct ip6t_entry)) {
581 duprintf("mark_source_chains: "
582 "bad verdict (%i)\n",
586 /* This a jump; chase it. */
587 duprintf("Jump rule %u -> %u\n",
590 /* ... this is a fallthru */
591 newpos = pos + e->next_offset;
593 e = (struct ip6t_entry *)
595 e->counters.pcnt = pos;
600 duprintf("Finished chain %u\n", hook);
605 static void cleanup_match(struct ip6t_entry_match *m, struct net *net)
607 struct xt_mtdtor_param par;
610 par.match = m->u.kernel.match;
611 par.matchinfo = m->data;
612 par.family = NFPROTO_IPV6;
613 if (par.match->destroy != NULL)
614 par.match->destroy(&par);
615 module_put(par.match->me);
619 check_entry(const struct ip6t_entry *e, const char *name)
621 const struct ip6t_entry_target *t;
623 if (!ip6_checkentry(&e->ipv6)) {
624 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
628 if (e->target_offset + sizeof(struct ip6t_entry_target) >
632 t = ip6t_get_target_c(e);
633 if (e->target_offset + t->u.target_size > e->next_offset)
639 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
641 const struct ip6t_ip6 *ipv6 = par->entryinfo;
644 par->match = m->u.kernel.match;
645 par->matchinfo = m->data;
647 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
648 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
650 duprintf("ip_tables: check failed for `%s'.\n",
658 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
660 struct xt_match *match;
663 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
666 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
667 return PTR_ERR(match);
669 m->u.kernel.match = match;
671 ret = check_match(m, par);
677 module_put(m->u.kernel.match->me);
681 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
683 struct ip6t_entry_target *t = ip6t_get_target(e);
684 struct xt_tgchk_param par = {
688 .target = t->u.kernel.target,
690 .hook_mask = e->comefrom,
691 .family = NFPROTO_IPV6,
695 t = ip6t_get_target(e);
696 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
697 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
699 duprintf("ip_tables: check failed for `%s'.\n",
700 t->u.kernel.target->name);
707 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
710 struct ip6t_entry_target *t;
711 struct xt_target *target;
714 struct xt_mtchk_param mtpar;
715 struct xt_entry_match *ematch;
717 ret = check_entry(e, name);
724 mtpar.entryinfo = &e->ipv6;
725 mtpar.hook_mask = e->comefrom;
726 mtpar.family = NFPROTO_IPV6;
727 xt_ematch_foreach(ematch, e) {
728 ret = find_check_match(ematch, &mtpar);
730 goto cleanup_matches;
734 t = ip6t_get_target(e);
735 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
737 if (IS_ERR(target)) {
738 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
739 ret = PTR_ERR(target);
740 goto cleanup_matches;
742 t->u.kernel.target = target;
744 ret = check_target(e, net, name);
749 module_put(t->u.kernel.target->me);
751 xt_ematch_foreach(ematch, e) {
754 cleanup_match(ematch, net);
759 static bool check_underflow(const struct ip6t_entry *e)
761 const struct ip6t_entry_target *t;
762 unsigned int verdict;
764 if (!unconditional(&e->ipv6))
766 t = ip6t_get_target_c(e);
767 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
769 verdict = ((struct ip6t_standard_target *)t)->verdict;
770 verdict = -verdict - 1;
771 return verdict == NF_DROP || verdict == NF_ACCEPT;
775 check_entry_size_and_hooks(struct ip6t_entry *e,
776 struct xt_table_info *newinfo,
777 const unsigned char *base,
778 const unsigned char *limit,
779 const unsigned int *hook_entries,
780 const unsigned int *underflows,
781 unsigned int valid_hooks)
785 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
786 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
787 duprintf("Bad offset %p\n", e);
792 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
793 duprintf("checking: element %p size %u\n",
798 /* Check hooks & underflows */
799 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
800 if (!(valid_hooks & (1 << h)))
802 if ((unsigned char *)e - base == hook_entries[h])
803 newinfo->hook_entry[h] = hook_entries[h];
804 if ((unsigned char *)e - base == underflows[h]) {
805 if (!check_underflow(e)) {
806 pr_err("Underflows must be unconditional and "
807 "use the STANDARD target with "
811 newinfo->underflow[h] = underflows[h];
815 /* Clear counters and comefrom */
816 e->counters = ((struct xt_counters) { 0, 0 });
821 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
823 struct xt_tgdtor_param par;
824 struct ip6t_entry_target *t;
825 struct xt_entry_match *ematch;
827 /* Cleanup all matches */
828 xt_ematch_foreach(ematch, e)
829 cleanup_match(ematch, net);
830 t = ip6t_get_target(e);
833 par.target = t->u.kernel.target;
834 par.targinfo = t->data;
835 par.family = NFPROTO_IPV6;
836 if (par.target->destroy != NULL)
837 par.target->destroy(&par);
838 module_put(par.target->me);
841 /* Checks and translates the user-supplied table segment (held in
844 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
845 const struct ip6t_replace *repl)
847 struct ip6t_entry *iter;
851 newinfo->size = repl->size;
852 newinfo->number = repl->num_entries;
854 /* Init all hooks to impossible value. */
855 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
856 newinfo->hook_entry[i] = 0xFFFFFFFF;
857 newinfo->underflow[i] = 0xFFFFFFFF;
860 duprintf("translate_table: size %u\n", newinfo->size);
862 /* Walk through entries, checking offsets. */
863 xt_entry_foreach(iter, entry0, newinfo->size) {
864 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
874 if (i != repl->num_entries) {
875 duprintf("translate_table: %u not %u entries\n",
876 i, repl->num_entries);
880 /* Check hooks all assigned */
881 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
882 /* Only hooks which are valid */
883 if (!(repl->valid_hooks & (1 << i)))
885 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
886 duprintf("Invalid hook entry %u %u\n",
887 i, repl->hook_entry[i]);
890 if (newinfo->underflow[i] == 0xFFFFFFFF) {
891 duprintf("Invalid underflow %u %u\n",
892 i, repl->underflow[i]);
897 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
900 /* Finally, each sanity check must pass */
902 xt_entry_foreach(iter, entry0, newinfo->size) {
903 ret = find_check_entry(iter, net, repl->name, repl->size);
910 xt_entry_foreach(iter, entry0, newinfo->size) {
913 cleanup_entry(iter, net);
918 /* And one copy for every other CPU */
919 for_each_possible_cpu(i) {
920 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
921 memcpy(newinfo->entries[i], entry0, newinfo->size);
928 get_counters(const struct xt_table_info *t,
929 struct xt_counters counters[])
931 struct ip6t_entry *iter;
936 /* Instead of clearing (by a previous call to memset())
937 * the counters and using adds, we set the counters
938 * with data used by 'current' CPU
940 * Bottom half has to be disabled to prevent deadlock
941 * if new softirq were to run and call ipt_do_table
944 curcpu = smp_processor_id();
947 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
948 SET_COUNTER(counters[i], iter->counters.bcnt,
949 iter->counters.pcnt);
953 for_each_possible_cpu(cpu) {
958 xt_entry_foreach(iter, t->entries[cpu], t->size) {
959 ADD_COUNTER(counters[i], iter->counters.bcnt,
960 iter->counters.pcnt);
963 xt_info_wrunlock(cpu);
968 static struct xt_counters *alloc_counters(const struct xt_table *table)
970 unsigned int countersize;
971 struct xt_counters *counters;
972 const struct xt_table_info *private = table->private;
974 /* We need atomic snapshot of counters: rest doesn't change
975 (other than comefrom, which userspace doesn't care
977 countersize = sizeof(struct xt_counters) * private->number;
978 counters = vmalloc_node(countersize, numa_node_id());
980 if (counters == NULL)
981 return ERR_PTR(-ENOMEM);
983 get_counters(private, counters);
989 copy_entries_to_user(unsigned int total_size,
990 const struct xt_table *table,
991 void __user *userptr)
993 unsigned int off, num;
994 const struct ip6t_entry *e;
995 struct xt_counters *counters;
996 const struct xt_table_info *private = table->private;
998 const void *loc_cpu_entry;
1000 counters = alloc_counters(table);
1001 if (IS_ERR(counters))
1002 return PTR_ERR(counters);
1004 /* choose the copy that is on our node/cpu, ...
1005 * This choice is lazy (because current thread is
1006 * allowed to migrate to another cpu)
1008 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1009 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1014 /* FIXME: use iterator macros --RR */
1015 /* ... then go back and fix counters and names */
1016 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1018 const struct ip6t_entry_match *m;
1019 const struct ip6t_entry_target *t;
1021 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1022 if (copy_to_user(userptr + off
1023 + offsetof(struct ip6t_entry, counters),
1025 sizeof(counters[num])) != 0) {
1030 for (i = sizeof(struct ip6t_entry);
1031 i < e->target_offset;
1032 i += m->u.match_size) {
1035 if (copy_to_user(userptr + off + i
1036 + offsetof(struct ip6t_entry_match,
1038 m->u.kernel.match->name,
1039 strlen(m->u.kernel.match->name)+1)
1046 t = ip6t_get_target_c(e);
1047 if (copy_to_user(userptr + off + e->target_offset
1048 + offsetof(struct ip6t_entry_target,
1050 t->u.kernel.target->name,
1051 strlen(t->u.kernel.target->name)+1) != 0) {
1062 #ifdef CONFIG_COMPAT
1063 static void compat_standard_from_user(void *dst, const void *src)
1065 int v = *(compat_int_t *)src;
1068 v += xt_compat_calc_jump(AF_INET6, v);
1069 memcpy(dst, &v, sizeof(v));
1072 static int compat_standard_to_user(void __user *dst, const void *src)
1074 compat_int_t cv = *(int *)src;
1077 cv -= xt_compat_calc_jump(AF_INET6, cv);
1078 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1081 static int compat_calc_entry(const struct ip6t_entry *e,
1082 const struct xt_table_info *info,
1083 const void *base, struct xt_table_info *newinfo)
1085 const struct xt_entry_match *ematch;
1086 const struct ip6t_entry_target *t;
1087 unsigned int entry_offset;
1090 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1091 entry_offset = (void *)e - base;
1092 xt_ematch_foreach(ematch, e)
1093 off += xt_compat_match_offset(ematch->u.kernel.match);
1094 t = ip6t_get_target_c(e);
1095 off += xt_compat_target_offset(t->u.kernel.target);
1096 newinfo->size -= off;
1097 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1101 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1102 if (info->hook_entry[i] &&
1103 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1104 newinfo->hook_entry[i] -= off;
1105 if (info->underflow[i] &&
1106 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1107 newinfo->underflow[i] -= off;
1112 static int compat_table_info(const struct xt_table_info *info,
1113 struct xt_table_info *newinfo)
1115 struct ip6t_entry *iter;
1116 void *loc_cpu_entry;
1119 if (!newinfo || !info)
1122 /* we dont care about newinfo->entries[] */
1123 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1124 newinfo->initial_entries = 0;
1125 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1126 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1127 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1135 static int get_info(struct net *net, void __user *user,
1136 const int *len, int compat)
1138 char name[IP6T_TABLE_MAXNAMELEN];
1142 if (*len != sizeof(struct ip6t_getinfo)) {
1143 duprintf("length %u != %zu\n", *len,
1144 sizeof(struct ip6t_getinfo));
1148 if (copy_from_user(name, user, sizeof(name)) != 0)
1151 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1152 #ifdef CONFIG_COMPAT
1154 xt_compat_lock(AF_INET6);
1156 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1157 "ip6table_%s", name);
1158 if (t && !IS_ERR(t)) {
1159 struct ip6t_getinfo info;
1160 const struct xt_table_info *private = t->private;
1161 #ifdef CONFIG_COMPAT
1162 struct xt_table_info tmp;
1165 ret = compat_table_info(private, &tmp);
1166 xt_compat_flush_offsets(AF_INET6);
1170 info.valid_hooks = t->valid_hooks;
1171 memcpy(info.hook_entry, private->hook_entry,
1172 sizeof(info.hook_entry));
1173 memcpy(info.underflow, private->underflow,
1174 sizeof(info.underflow));
1175 info.num_entries = private->number;
1176 info.size = private->size;
1177 strcpy(info.name, name);
1179 if (copy_to_user(user, &info, *len) != 0)
1187 ret = t ? PTR_ERR(t) : -ENOENT;
1188 #ifdef CONFIG_COMPAT
1190 xt_compat_unlock(AF_INET6);
1196 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1200 struct ip6t_get_entries get;
1203 if (*len < sizeof(get)) {
1204 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1207 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1209 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1210 duprintf("get_entries: %u != %zu\n",
1211 *len, sizeof(get) + get.size);
1215 t = xt_find_table_lock(net, AF_INET6, get.name);
1216 if (t && !IS_ERR(t)) {
1217 struct xt_table_info *private = t->private;
1218 duprintf("t->private->number = %u\n", private->number);
1219 if (get.size == private->size)
1220 ret = copy_entries_to_user(private->size,
1221 t, uptr->entrytable);
1223 duprintf("get_entries: I've got %u not %u!\n",
1224 private->size, get.size);
1230 ret = t ? PTR_ERR(t) : -ENOENT;
1236 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1237 struct xt_table_info *newinfo, unsigned int num_counters,
1238 void __user *counters_ptr)
1242 struct xt_table_info *oldinfo;
1243 struct xt_counters *counters;
1244 const void *loc_cpu_old_entry;
1245 struct ip6t_entry *iter;
1248 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1255 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1256 "ip6table_%s", name);
1257 if (!t || IS_ERR(t)) {
1258 ret = t ? PTR_ERR(t) : -ENOENT;
1259 goto free_newinfo_counters_untrans;
1263 if (valid_hooks != t->valid_hooks) {
1264 duprintf("Valid hook crap: %08X vs %08X\n",
1265 valid_hooks, t->valid_hooks);
1270 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1274 /* Update module usage count based on number of rules */
1275 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1276 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1277 if ((oldinfo->number > oldinfo->initial_entries) ||
1278 (newinfo->number <= oldinfo->initial_entries))
1280 if ((oldinfo->number > oldinfo->initial_entries) &&
1281 (newinfo->number <= oldinfo->initial_entries))
1284 /* Get the old counters, and synchronize with replace */
1285 get_counters(oldinfo, counters);
1287 /* Decrease module usage counts and free resource */
1288 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1289 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1290 cleanup_entry(iter, net);
1292 xt_free_table_info(oldinfo);
1293 if (copy_to_user(counters_ptr, counters,
1294 sizeof(struct xt_counters) * num_counters) != 0)
1303 free_newinfo_counters_untrans:
1310 do_replace(struct net *net, const void __user *user, unsigned int len)
1313 struct ip6t_replace tmp;
1314 struct xt_table_info *newinfo;
1315 void *loc_cpu_entry;
1316 struct ip6t_entry *iter;
1318 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1321 /* overflow check */
1322 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1325 newinfo = xt_alloc_table_info(tmp.size);
1329 /* choose the copy that is on our node/cpu */
1330 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1331 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1337 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1341 duprintf("ip_tables: Translated table\n");
1343 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1344 tmp.num_counters, tmp.counters);
1346 goto free_newinfo_untrans;
1349 free_newinfo_untrans:
1350 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1351 cleanup_entry(iter, net);
1353 xt_free_table_info(newinfo);
1358 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1361 unsigned int i, curcpu;
1362 struct xt_counters_info tmp;
1363 struct xt_counters *paddc;
1364 unsigned int num_counters;
1369 const struct xt_table_info *private;
1371 const void *loc_cpu_entry;
1372 struct ip6t_entry *iter;
1373 #ifdef CONFIG_COMPAT
1374 struct compat_xt_counters_info compat_tmp;
1378 size = sizeof(struct compat_xt_counters_info);
1383 size = sizeof(struct xt_counters_info);
1386 if (copy_from_user(ptmp, user, size) != 0)
1389 #ifdef CONFIG_COMPAT
1391 num_counters = compat_tmp.num_counters;
1392 name = compat_tmp.name;
1396 num_counters = tmp.num_counters;
1400 if (len != size + num_counters * sizeof(struct xt_counters))
1403 paddc = vmalloc_node(len - size, numa_node_id());
1407 if (copy_from_user(paddc, user + size, len - size) != 0) {
1412 t = xt_find_table_lock(net, AF_INET6, name);
1413 if (!t || IS_ERR(t)) {
1414 ret = t ? PTR_ERR(t) : -ENOENT;
1420 private = t->private;
1421 if (private->number != num_counters) {
1423 goto unlock_up_free;
1427 /* Choose the copy that is on our node */
1428 curcpu = smp_processor_id();
1429 xt_info_wrlock(curcpu);
1430 loc_cpu_entry = private->entries[curcpu];
1431 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1432 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1435 xt_info_wrunlock(curcpu);
1447 #ifdef CONFIG_COMPAT
1448 struct compat_ip6t_replace {
1449 char name[IP6T_TABLE_MAXNAMELEN];
1453 u32 hook_entry[NF_INET_NUMHOOKS];
1454 u32 underflow[NF_INET_NUMHOOKS];
1456 compat_uptr_t counters; /* struct ip6t_counters * */
1457 struct compat_ip6t_entry entries[0];
/*
 * compat_copy_entry_to_user - serialize one kernel ip6t_entry into the
 * smaller 32-bit compat layout at *dstptr, fixing up target_offset and
 * next_offset to account for the shrunken match/target blobs.
 * NOTE(review): error-return lines are elided in this extract.
 */
1461 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1462 unsigned int *size, struct xt_counters *counters,
1465 struct ip6t_entry_target *t;
1466 struct compat_ip6t_entry __user *ce;
1467 u_int16_t target_offset, next_offset;
1468 compat_uint_t origsize;
1469 const struct xt_entry_match *ematch;
/* Copy the fixed header, then overlay the i-th counter pair. */
1473 ce = (struct compat_ip6t_entry __user *)*dstptr;
1474 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1475 copy_to_user(&ce->counters, &counters[i],
1476 sizeof(counters[i])) != 0)
1479 *dstptr += sizeof(struct compat_ip6t_entry);
1480 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
/* Each match/target converts itself and advances dstptr/size. */
1482 xt_ematch_foreach(ematch, e) {
1483 ret = xt_compat_match_to_user(ematch, dstptr, size);
/* Offsets shrink by however much the entry got smaller so far. */
1487 target_offset = e->target_offset - (origsize - *size);
1488 t = ip6t_get_target(e);
1489 ret = xt_compat_target_to_user(t, dstptr, size);
1492 next_offset = e->next_offset - (origsize - *size);
1493 if (put_user(target_offset, &ce->target_offset) != 0 ||
1494 put_user(next_offset, &ce->next_offset) != 0)
/*
 * compat_find_calc_match - look up (and pin a module reference on) the
 * xt_match named in a compat entry, and accumulate into *size the extra
 * room the native representation needs over the compat one.
 */
1500 compat_find_calc_match(struct ip6t_entry_match *m,
1502 const struct ip6t_ip6 *ipv6,
1503 unsigned int hookmask,
1506 struct xt_match *match;
1508 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1509 m->u.user.revision);
1510 if (IS_ERR(match)) {
1511 duprintf("compat_check_calc_match: `%s' not found\n",
1513 return PTR_ERR(match);
/* Success: cache the resolved match and grow the size delta. */
1515 m->u.kernel.match = match;
1516 *size += xt_compat_match_offset(match);
/*
 * compat_release_entry - drop the module references taken on every
 * match and on the target of one compat entry (undo of the lookups in
 * check_compat_entry_size_and_hooks). Does not call ->destroy.
 */
1520 static void compat_release_entry(struct compat_ip6t_entry *e)
1522 struct ip6t_entry_target *t;
1523 struct xt_entry_match *ematch;
1525 /* Cleanup all matches */
1526 xt_ematch_foreach(ematch, e)
1527 module_put(ematch->u.kernel.match->me);
1528 t = compat_ip6t_get_target(e);
1529 module_put(t->u.kernel.target->me);
/*
 * check_compat_entry_size_and_hooks - validate one compat entry:
 * alignment, bounds, minimum size, matches/target resolvable; record
 * the compat->native size delta and any hook/underflow offsets hit.
 * On match/target failure, unwinds module refs taken so far.
 * NOTE(review): several returns/labels are elided in this extract.
 */
1533 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1534 struct xt_table_info *newinfo,
1536 const unsigned char *base,
1537 const unsigned char *limit,
1538 const unsigned int *hook_entries,
1539 const unsigned int *underflows,
1542 struct xt_entry_match *ematch;
1543 struct ip6t_entry_target *t;
1544 struct xt_target *target;
1545 unsigned int entry_offset;
1549 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Entry must be properly aligned and fit inside the user blob. */
1550 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1551 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1552 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* next_offset must at least cover the entry plus a target header. */
1556 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1557 sizeof(struct compat_xt_entry_target)) {
1558 duprintf("checking: element %p size %u\n",
1563 /* For purposes of check_entry casting the compat entry is fine */
1564 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much bigger the native entry will be. */
1568 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1569 entry_offset = (void *)e - (void *)base;
1571 xt_ematch_foreach(ematch, e) {
1572 ret = compat_find_calc_match(ematch, name,
1573 &e->ipv6, e->comefrom, &off);
1575 goto release_matches;
1579 t = compat_ip6t_get_target(e);
1580 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1581 t->u.user.revision);
1582 if (IS_ERR(target)) {
1583 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1585 ret = PTR_ERR(target);
1586 goto release_matches;
1588 t->u.kernel.target = target;
1590 off += xt_compat_target_offset(target);
/* Remember this entry's size delta for later offset translation. */
1592 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1596 /* Check hooks & underflows */
1597 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1598 if ((unsigned char *)e - base == hook_entries[h])
1599 newinfo->hook_entry[h] = hook_entries[h];
1600 if ((unsigned char *)e - base == underflows[h])
1601 newinfo->underflow[h] = underflows[h];
1604 /* Clear counters and comefrom */
1605 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: drop target ref, then refs on matches checked so far. */
1610 module_put(t->u.kernel.target->me);
1612 xt_ematch_foreach(ematch, e) {
1615 module_put(ematch->u.kernel.match->me);
/*
 * compat_copy_entry_from_user - expand one compat entry into its native
 * ip6t_entry form at *dstptr, converting matches/target and adjusting
 * offsets (including the table-wide hook_entry/underflow positions)
 * for the size growth.
 */
1621 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1622 unsigned int *size, const char *name,
1623 struct xt_table_info *newinfo, unsigned char *base)
1625 struct ip6t_entry_target *t;
1626 struct xt_target *target;
1627 struct ip6t_entry *de;
1628 unsigned int origsize;
1630 struct xt_entry_match *ematch;
/* Copy fixed header; compat and native entry headers share a prefix. */
1634 de = (struct ip6t_entry *)*dstptr;
1635 memcpy(de, e, sizeof(struct ip6t_entry));
1636 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1638 *dstptr += sizeof(struct ip6t_entry);
1639 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1641 xt_ematch_foreach(ematch, e) {
1642 ret = xt_compat_match_from_user(ematch, dstptr, size);
/* Offsets grow by however much the entry expanded (*size - origsize
 * tracked via origsize - *size, which is negative growth here). */
1646 de->target_offset = e->target_offset - (origsize - *size);
1647 t = compat_ip6t_get_target(e);
1648 target = t->u.kernel.target;
1649 xt_compat_target_from_user(t, dstptr, size);
1651 de->next_offset = e->next_offset - (origsize - *size);
/* Shift any hook/underflow offset that lies beyond this entry. */
1652 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1653 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1654 newinfo->hook_entry[h] -= origsize - *size;
1655 if ((unsigned char *)de - base < newinfo->underflow[h])
1656 newinfo->underflow[h] -= origsize - *size;
/*
 * compat_check_entry - run the ->checkentry hooks (check_match /
 * check_target) on an already-translated native entry; on failure,
 * cleans up the matches that passed before the failing one.
 */
1661 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1666 struct xt_mtchk_param mtpar;
1667 struct xt_entry_match *ematch;
1672 mtpar.entryinfo = &e->ipv6;
1673 mtpar.hook_mask = e->comefrom;
1674 mtpar.family = NFPROTO_IPV6;
1675 xt_ematch_foreach(ematch, e) {
1676 ret = check_match(ematch, &mtpar);
1678 goto cleanup_matches;
1682 ret = check_target(e, net, name);
1684 goto cleanup_matches;
/* Unwind path: destroy only the matches already checked. */
1688 xt_ematch_foreach(ematch, e) {
1691 cleanup_match(ematch, net);
/*
 * translate_compat_table - convert an entire 32-bit compat ruleset into
 * the native in-kernel representation: size/hook validation pass over
 * the compat entries, allocation of a native-sized table, per-entry
 * expansion, chain-loop marking, and final ->checkentry pass.
 * On success replaces *pinfo/*pentry0 with the new table.
 * NOTE(review): error labels and some cleanup lines are elided here.
 */
1697 translate_compat_table(struct net *net,
1699 unsigned int valid_hooks,
1700 struct xt_table_info **pinfo,
1702 unsigned int total_size,
1703 unsigned int number,
1704 unsigned int *hook_entries,
1705 unsigned int *underflows)
1708 struct xt_table_info *newinfo, *info;
1709 void *pos, *entry0, *entry1;
1710 struct compat_ip6t_entry *iter0;
1711 struct ip6t_entry *iter1;
1718 info->number = number;
1720 /* Init all hooks to impossible value. */
1721 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1722 info->hook_entry[i] = 0xFFFFFFFF;
1723 info->underflow[i] = 0xFFFFFFFF;
1726 duprintf("translate_compat_table: size %u\n", info->size);
/* The xt compat offset table is global per-family: lock around use. */
1728 xt_compat_lock(AF_INET6);
1729 /* Walk through entries, checking offsets. */
1730 xt_entry_foreach(iter0, entry0, total_size) {
1731 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1733 entry0 + total_size,
1744 duprintf("translate_compat_table: %u not %u entries\n",
1749 /* Check hooks all assigned */
1750 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1751 /* Only hooks which are valid */
1752 if (!(valid_hooks & (1 << i)))
1754 if (info->hook_entry[i] == 0xFFFFFFFF) {
1755 duprintf("Invalid hook entry %u %u\n",
1756 i, hook_entries[i]);
1759 if (info->underflow[i] == 0xFFFFFFFF) {
1760 duprintf("Invalid underflow %u %u\n",
/* Allocate the native-sized table and copy hook offsets over. */
1767 newinfo = xt_alloc_table_info(size);
1771 newinfo->number = number;
1772 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1773 newinfo->hook_entry[i] = info->hook_entry[i];
1774 newinfo->underflow[i] = info->underflow[i];
1776 entry1 = newinfo->entries[raw_smp_processor_id()];
1779 xt_entry_foreach(iter0, entry0, total_size) {
1780 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1781 name, newinfo, entry1);
1785 xt_compat_flush_offsets(AF_INET6);
1786 xt_compat_unlock(AF_INET6);
/* Detect rule-chain loops and set entry back-pointers. */
1791 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1795 xt_entry_foreach(iter1, entry1, newinfo->size) {
1796 ret = compat_check_entry(iter1, net, name);
1803 * The first i matches need cleanup_entry (calls ->destroy)
1804 * because they had called ->check already. The other j-i
1805 * entries need only release.
1809 xt_entry_foreach(iter0, entry0, newinfo->size) {
1814 compat_release_entry(iter0);
1816 xt_entry_foreach(iter1, entry1, newinfo->size) {
1819 cleanup_entry(iter1, net);
1821 xt_free_table_info(newinfo);
1825 /* And one copy for every other CPU */
1826 for_each_possible_cpu(i)
1827 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1828 memcpy(newinfo->entries[i], entry1, newinfo->size);
1832 xt_free_table_info(info);
/* Error unwind: free new table, release compat entry module refs. */
1836 xt_free_table_info(newinfo);
1838 xt_entry_foreach(iter0, entry0, total_size) {
1841 compat_release_entry(iter0);
1845 xt_compat_flush_offsets(AF_INET6);
1846 xt_compat_unlock(AF_INET6);
/*
 * compat_do_replace - IP6T_SO_SET_REPLACE for 32-bit callers: copy in
 * the compat replace header and ruleset, translate to native form, and
 * hand off to __do_replace. Mirrors do_replace().
 */
1851 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1854 struct compat_ip6t_replace tmp;
1855 struct xt_table_info *newinfo;
1856 void *loc_cpu_entry;
1857 struct ip6t_entry *iter;
1859 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1862 /* overflow check */
1863 if (tmp.size >= INT_MAX / num_possible_cpus())
1865 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1868 newinfo = xt_alloc_table_info(tmp.size);
1872 /* choose the copy that is on our node/cpu */
1873 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1874 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1880 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1881 &newinfo, &loc_cpu_entry, tmp.size,
1882 tmp.num_entries, tmp.hook_entry,
1887 duprintf("compat_do_replace: Translated table\n");
/* compat_ptr() widens the 32-bit user counters pointer. */
1889 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1890 tmp.num_counters, compat_ptr(tmp.counters));
1892 goto free_newinfo_untrans;
/* Translated-but-unregistered table: destroy entries, then free. */
1895 free_newinfo_untrans:
1896 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1897 cleanup_entry(iter, net);
1899 xt_free_table_info(newinfo);
/*
 * compat_do_ip6t_set_ctl - setsockopt dispatcher for 32-bit callers.
 * Requires CAP_NET_ADMIN; routes REPLACE to the compat path and
 * ADD_COUNTERS to do_add_counters with compat=1.
 */
1904 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1909 if (!capable(CAP_NET_ADMIN))
1913 case IP6T_SO_SET_REPLACE:
1914 ret = compat_do_replace(sock_net(sk), user, len);
1917 case IP6T_SO_SET_ADD_COUNTERS:
1918 ret = do_add_counters(sock_net(sk), user, len, 1);
1922 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit layout of struct ip6t_get_entries for compat getsockopt. */
1929 struct compat_ip6t_get_entries {
1930 char name[IP6T_TABLE_MAXNAMELEN];
1932 struct compat_ip6t_entry entrytable[0]; /* flexible trailing rules */
/*
 * compat_copy_entries_to_user - dump the whole table to a 32-bit caller
 * by converting each entry with compat_copy_entry_to_user, attaching a
 * snapshot of the per-entry counters.
 */
1936 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1937 void __user *userptr)
1939 struct xt_counters *counters;
1940 const struct xt_table_info *private = table->private;
1944 const void *loc_cpu_entry;
1946 struct ip6t_entry *iter;
/* alloc_counters() sums per-cpu counters into one array. */
1948 counters = alloc_counters(table);
1949 if (IS_ERR(counters))
1950 return PTR_ERR(counters);
1952 /* choose the copy that is on our node/cpu, ...
1953 * This choice is lazy (because current thread is
1954 * allowed to migrate to another cpu)
1956 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1959 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1960 ret = compat_copy_entry_to_user(iter, &pos,
1961 &size, counters, i++);
/*
 * compat_get_entries - IP6T_SO_GET_ENTRIES for 32-bit callers: validate
 * the requested length against the table's compat size, then dump the
 * ruleset. Holds xt_compat_lock because compat offset bookkeeping is
 * shared per address family.
 */
1971 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1975 struct compat_ip6t_get_entries get;
1978 if (*len < sizeof(get)) {
1979 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1983 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1986 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1987 duprintf("compat_get_entries: %u != %zu\n",
1988 *len, sizeof(get) + get.size);
1992 xt_compat_lock(AF_INET6);
1993 t = xt_find_table_lock(net, AF_INET6, get.name);
1994 if (t && !IS_ERR(t)) {
1995 const struct xt_table_info *private = t->private;
1996 struct xt_table_info info;
1997 duprintf("t->private->number = %u\n", private->number);
/* compat_table_info() computes the table's size in compat layout. */
1998 ret = compat_table_info(private, &info);
1999 if (!ret && get.size == info.size) {
2000 ret = compat_copy_entries_to_user(private->size,
2001 t, uptr->entrytable);
2003 duprintf("compat_get_entries: I've got %u not %u!\n",
2004 private->size, get.size);
2007 xt_compat_flush_offsets(AF_INET6);
2011 ret = t ? PTR_ERR(t) : -ENOENT;
2013 xt_compat_unlock(AF_INET6);
/* Forward declaration: the native handler is the compat fallback below. */
2017 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * compat_do_ip6t_get_ctl - getsockopt dispatcher for 32-bit callers.
 * INFO/ENTRIES take compat paths; anything else falls through to the
 * native do_ip6t_get_ctl (revision queries are layout-identical).
 */
2020 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2024 if (!capable(CAP_NET_ADMIN))
2028 case IP6T_SO_GET_INFO:
2029 ret = get_info(sock_net(sk), user, len, 1);
2031 case IP6T_SO_GET_ENTRIES:
2032 ret = compat_get_entries(sock_net(sk), user, len);
2035 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * do_ip6t_set_ctl - native setsockopt dispatcher. Requires
 * CAP_NET_ADMIN; handles table replacement and counter addition.
 */
2042 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2046 if (!capable(CAP_NET_ADMIN))
2050 case IP6T_SO_SET_REPLACE:
2051 ret = do_replace(sock_net(sk), user, len);
2054 case IP6T_SO_SET_ADD_COUNTERS:
2055 ret = do_add_counters(sock_net(sk), user, len, 0);
2059 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * do_ip6t_get_ctl - native getsockopt dispatcher: table info, entry
 * dump, and match/target revision queries (the latter may trigger a
 * module autoload via try_then_request_module).
 */
2067 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2071 if (!capable(CAP_NET_ADMIN))
2075 case IP6T_SO_GET_INFO:
2076 ret = get_info(sock_net(sk), user, len, 0);
2079 case IP6T_SO_GET_ENTRIES:
2080 ret = get_entries(sock_net(sk), user, len);
2083 case IP6T_SO_GET_REVISION_MATCH:
2084 case IP6T_SO_GET_REVISION_TARGET: {
2085 struct ip6t_get_revision rev;
2088 if (*len != sizeof(rev)) {
2092 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2097 if (cmd == IP6T_SO_GET_REVISION_TARGET)
/* Look up revision; on miss, request module "ip6t_<name>" and retry. */
2102 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2105 "ip6t_%s", rev.name);
2110 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * ip6t_register_table - allocate a table_info sized for the initial
 * ruleset, copy and translate the replacement entries, and register the
 * table with the xtables core. Returns the new xt_table or ERR_PTR.
 */
2117 struct xt_table *ip6t_register_table(struct net *net,
2118 const struct xt_table *table,
2119 const struct ip6t_replace *repl)
2122 struct xt_table_info *newinfo;
/* Empty bootstrap info handed to xt_register_table as the old table. */
2123 struct xt_table_info bootstrap
2124 = { 0, 0, 0, { 0 }, { 0 }, { } };
2125 void *loc_cpu_entry;
2126 struct xt_table *new_table;
2128 newinfo = xt_alloc_table_info(repl->size);
2134 /* choose the copy on our node/cpu, but dont care about preemption */
2135 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2136 memcpy(loc_cpu_entry, repl->entries, repl->size);
2138 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2142 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2143 if (IS_ERR(new_table)) {
2144 ret = PTR_ERR(new_table);
2150 xt_free_table_info(newinfo);
2152 return ERR_PTR(ret);
/*
 * ip6t_unregister_table - detach the table from the xtables core,
 * destroy every entry (dropping match/target module refs), release the
 * table owner's module ref if user rules were loaded, and free it.
 */
2155 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2157 struct xt_table_info *private;
2158 void *loc_cpu_entry;
2159 struct module *table_owner = table->me;
2160 struct ip6t_entry *iter;
2162 private = xt_unregister_table(table);
2164 /* Decrease module usage counts and free resources */
2165 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2166 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2167 cleanup_entry(iter, net);
/* Extra user rules beyond the built-ins held a ref on the owner. */
2168 if (private->number > private->initial_entries)
2169 module_put(table_owner);
2170 xt_free_table_info(private);
2173 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2175 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2176 u_int8_t type, u_int8_t code,
/* type must equal test_type and code fall in [min_code, max_code];
 * the (elided) tail XORs with the invert flag. */
2179 return (type == test_type && code >= min_code && code <= max_code)
/*
 * icmp6_match - built-in "icmp6" match: compare the packet's ICMPv6
 * type/code against the rule's configured range. Non-first fragments
 * never match; a truncated header hot-drops the packet.
 */
2184 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2186 const struct icmp6hdr *ic;
2187 struct icmp6hdr _icmph;
2188 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2190 /* Must not be a fragment. */
2191 if (par->fragoff != 0)
2194 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2196 /* We've been asked to examine this packet, and we
2197 * can't. Hence, no choice but to drop.
2199 duprintf("Dropping evil ICMP tinygram.\n");
2200 *par->hotdrop = true;
2204 return icmp6_type_code_match(icmpinfo->type,
2207 ic->icmp6_type, ic->icmp6_code,
2208 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2211 /* Called when user tries to insert an entry of this type. */
2212 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2214 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2216 /* Must specify no unknown invflags */
2217 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2220 /* The built-in targets: standard (NULL) and error. */
/* Standard verdict target: targetsize is the verdict int itself;
 * compat hooks translate the 32/64-bit int representation. */
2221 static struct xt_target ip6t_standard_target __read_mostly = {
2222 .name = IP6T_STANDARD_TARGET,
2223 .targetsize = sizeof(int),
2224 .family = NFPROTO_IPV6,
2225 #ifdef CONFIG_COMPAT
2226 .compatsize = sizeof(compat_int_t),
2227 .compat_from_user = compat_standard_from_user,
2228 .compat_to_user = compat_standard_to_user,
/* Error target: placed at the end of built-in chains; its handler
 * (ip6t_error) fires only if a packet falls off the ruleset. */
2232 static struct xt_target ip6t_error_target __read_mostly = {
2233 .name = IP6T_ERROR_TARGET,
2234 .target = ip6t_error,
2235 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2236 .family = NFPROTO_IPV6,
/* sockopt registration: wires the IP6T_SO_* get/set ranges to the
 * dispatchers above, with compat variants for 32-bit processes. */
2239 static struct nf_sockopt_ops ip6t_sockopts = {
2241 .set_optmin = IP6T_BASE_CTL,
2242 .set_optmax = IP6T_SO_SET_MAX+1,
2243 .set = do_ip6t_set_ctl,
2244 #ifdef CONFIG_COMPAT
2245 .compat_set = compat_do_ip6t_set_ctl,
2247 .get_optmin = IP6T_BASE_CTL,
2248 .get_optmax = IP6T_SO_GET_MAX+1,
2249 .get = do_ip6t_get_ctl,
2250 #ifdef CONFIG_COMPAT
2251 .compat_get = compat_do_ip6t_get_ctl,
2253 .owner = THIS_MODULE,
/* Built-in ICMPv6 match registration (see icmp6_match above). */
2256 static struct xt_match icmp6_matchstruct __read_mostly = {
2258 .match = icmp6_match,
2259 .matchsize = sizeof(struct ip6t_icmp),
2260 .checkentry = icmp6_checkentry,
2261 .proto = IPPROTO_ICMPV6,
2262 .family = NFPROTO_IPV6,
/* Per-netns init: set up the IPv6 xtables state (proc entries etc.). */
2265 static int __net_init ip6_tables_net_init(struct net *net)
2267 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown: undo ip6_tables_net_init. */
2270 static void __net_exit ip6_tables_net_exit(struct net *net)
2272 xt_proto_fini(net, NFPROTO_IPV6);
/* Network-namespace lifecycle hooks for this module. */
2275 static struct pernet_operations ip6_tables_net_ops = {
2276 .init = ip6_tables_net_init,
2277 .exit = ip6_tables_net_exit,
/*
 * ip6_tables_init - module init: register pernet ops, the two built-in
 * targets, the icmp6 match, and finally the sockopt interface. The
 * trailing lines are the error-unwind ladder in reverse order.
 */
2280 static int __init ip6_tables_init(void)
2284 ret = register_pernet_subsys(&ip6_tables_net_ops);
2288 /* Noone else will be downing sem now, so we won't sleep */
2289 ret = xt_register_target(&ip6t_standard_target);
2292 ret = xt_register_target(&ip6t_error_target);
2295 ret = xt_register_match(&icmp6_matchstruct);
2299 /* Register setsockopt */
2300 ret = nf_register_sockopt(&ip6t_sockopts);
2304 pr_info("(C) 2000-2006 Netfilter Core Team\n");
/* Unwind path: undo registrations in reverse on failure. */
2308 xt_unregister_match(&icmp6_matchstruct);
2310 xt_unregister_target(&ip6t_error_target);
2312 xt_unregister_target(&ip6t_standard_target);
2314 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything in strict reverse of init order. */
2319 static void __exit ip6_tables_fini(void)
2321 nf_unregister_sockopt(&ip6t_sockopts);
2323 xt_unregister_match(&icmp6_matchstruct);
2324 xt_unregister_target(&ip6t_error_target);
2325 xt_unregister_target(&ip6t_standard_target);
2327 unregister_pernet_subsys(&ip6_tables_net_ops);
2331 * find the offset to specified header or the protocol number of last header
2332 * if target < 0. "last header" is transport protocol header, ESP, or
2335 * If target header is found, its offset is set in *offset and return protocol
2336 * number. Otherwise, return -1.
2338 * If the first fragment doesn't contain the final protocol header or
2339 * NEXTHDR_NONE it is considered invalid.
2341 * Note that non-1st fragment is special case that "the protocol number
2342 * of last header" is "next header" field in Fragment header. In this case,
2343 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
/*
 * ipv6_find_hdr - walk the IPv6 extension-header chain looking for the
 * header numbered 'target' (see the block comment above for the full
 * contract). NOTE(review): several lines (fragment bookkeeping, final
 * return, loop bounds checks) are elided in this extract.
 */
2347 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2348 int target, unsigned short *fragoff)
2350 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2351 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2352 unsigned int len = skb->len - start;
2357 while (nexthdr != target) {
2358 struct ipv6_opt_hdr _hdr, *hp;
2359 unsigned int hdrlen;
/* NEXTHDR_NONE or a non-extension header ends the chain. */
2361 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2367 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2370 if (nexthdr == NEXTHDR_FRAGMENT) {
2371 unsigned short _frag_off;
2373 fp = skb_header_pointer(skb,
2374 start+offsetof(struct frag_hdr,
/* Low 3 bits of the frag field are flags; mask to get the offset. */
2381 _frag_off = ntohs(*fp) & ~0x7;
2384 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2385 hp->nexthdr == NEXTHDR_NONE)) {
2387 *fragoff = _frag_off;
/* AUTH header length is in 32-bit words; others use ipv6_optlen. */
2393 } else if (nexthdr == NEXTHDR_AUTH)
2394 hdrlen = (hp->hdrlen + 2) << 2;
2396 hdrlen = ipv6_optlen(hp);
2398 nexthdr = hp->nexthdr;
/* Public symbols for table-providing modules (ip6table_filter etc.)
 * and the module entry/exit points. */
2407 EXPORT_SYMBOL(ip6t_register_table);
2408 EXPORT_SYMBOL(ip6t_unregister_table);
2409 EXPORT_SYMBOL(ip6t_do_table);
2410 EXPORT_SYMBOL(ip6t_ext_hdr);
2411 EXPORT_SYMBOL(ipv6_find_hdr);
2413 module_init(ip6_tables_init);
2414 module_exit(ip6_tables_fini);