/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
 *	- increase module usage count as soon as we have rules inside
 *	  a table
 * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
 *	- Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
 */
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...)  printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __FUNCTION__, __FILE__, __LINE__);	\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
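/* Editor's sketch (not part of the original file) of the locking
 * pattern described above.  Packet processing takes only the read
 * lock, so softirqs on several CPUs may traverse their per-CPU rule
 * copies concurrently; the rare user-context operations take the
 * write lock and therefore see a quiescent table:
 *
 *	read_lock_bh(&table->lock);	// softirq: match packets and
 *	...				// bump this CPU's counters
 *	read_unlock_bh(&table->lock);
 *
 *	write_lock_bh(&table->lock);	// user context: replace rules
 *	...				// or snapshot all counters
 *	write_unlock_bh(&table->lock);
 */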
/* Returns whether matches rule or not. */
static inline int
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	size_t i;
	unsigned long ret;

#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
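/* Editor's note, a worked example of FWINV: the macro XORs a raw
 * "this check failed" boolean with the corresponding inversion flag,
 * so a set flag flips the sense of the check.  For a "! -s" rule
 * (IPT_INV_SRCIP set):
 *
 *	mismatch = (ip->saddr & smsk) != src;	// raw test
 *	FWINV(mismatch, IPT_INV_SRCIP)		// == !mismatch here
 *
 * and the rule is rejected exactly when FWINV() evaluates true. */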
	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->saddr),
			NIPQUAD(ipinfo->smsk.s_addr),
			NIPQUAD(ipinfo->src.s_addr),
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->daddr),
			NIPQUAD(ipinfo->dmsk.s_addr),
			NIPQUAD(ipinfo->dst.s_addr),
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return 0;
	}
	/* Look for ifname matches; this should unroll nicely. */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ipinfo->iniface)[i])
			& ((const unsigned long *)ipinfo->iniface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return 0;
	}

	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ipinfo->outiface)[i])
			& ((const unsigned long *)ipinfo->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return 0;
	}
	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return 0;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return 0;
	}

	return 1;
}
static inline int
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return 0;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return 0;
	}
	return 1;
}
static unsigned int
ipt_error(struct sk_buff **pskb,
	  const struct net_device *in,
	  const struct net_device *out,
	  unsigned int hooknum,
	  const struct xt_target *target,
	  const void *targinfo)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n", (char *)targinfo);

	return NF_DROP;
}
static inline
int do_match(struct ipt_entry_match *m,
	     const struct sk_buff *skb,
	     const struct net_device *in,
	     const struct net_device *out,
	     int offset,
	     int *hotdrop)
{
	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
				      offset, skb->nh.iph->ihl*4, hotdrop))
		return 1;
	else
		return 0;
}
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff **pskb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct ipt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	u_int16_t offset;
	struct iphdr *ip;
	u_int16_t datalen;
	int hotdrop = 0;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private;

	/* Initialization */
	ip = (*pskb)->nh.iph;
	datalen = (*pskb)->len - ip->ihl * 4;
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know (ie. the tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	offset = ntohs(ip->frag_off) & IP_OFFSET;
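	/* Editor's sketch of the fragment logic above: ip->frag_off
	 * packs the flag bits and the 13-bit fragment offset into one
	 * 16-bit field, so after
	 *
	 *	offset = ntohs(ip->frag_off) & IP_OFFSET;
	 *
	 * offset == 0 for the first fragment (and for unfragmented
	 * packets) and non-zero for every later fragment.  Transport
	 * matches such as port checks bail out when offset != 0,
	 * because the tcp/udp header is only present in the first
	 * fragment. */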
	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	private = table->private;
	table_base = (void *)private->entries[smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);
	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
			struct ipt_entry_target *t;

			if (IPT_MATCH_ITERATE(e, do_match,
					      *pskb, in, out,
					      offset, &hotdrop) != 0)
				goto no_match;

			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

			t = ipt_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ipt_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IPT_RETURN) {
						verdict = (unsigned)(-v) - 1;
						break;
					}
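					/* Editor's note on the decode
					 * above: standard verdicts are
					 * stored as negative numbers so
					 * that positive values can act
					 * as jump offsets; the stored
					 * value is -verdict - 1, e.g.
					 *
					 *  v == -NF_ACCEPT - 1 == -2
					 *  (unsigned)(-v) - 1 == 1
					 *		     == NF_ACCEPT
					 */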
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ip.flags & IPT_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ipt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ipt_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(pskb,
								     in, out,
								     hook,
								     t->u.kernel.target,
								     t->data);

#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ipt_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IPT_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ipt_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				/* Target might have changed stuff. */
				ip = (*pskb)->nh.iph;
				datalen = (*pskb)->len - ip->ihl * 4;

				if (verdict == IPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);

	read_unlock_bh(&table->lock);
#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
/* All zeroes == unconditional rule. */
static inline int
unconditional(const struct ipt_ip *ip)
{
	unsigned int i;

	for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
		if (((__u32 *)ip)[i])
			return 0;

	return 1;
}
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
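	/* Editor's sketch of the bookkeeping below (not part of the
	 * original file): while walking a chain there is no explicit
	 * stack, so the otherwise-unused counters.pcnt field of each
	 * visited entry stores the position we came from, forming a
	 * chain of back pointers:
	 *
	 *	e->counters.pcnt = pos;	// push: remember predecessor
	 *	...
	 *	pos = e->counters.pcnt;	// pop: backtrack on RETURN
	 *	e->counters.pcnt = 0;	// reset before leaving
	 *
	 * comefrom meanwhile accumulates the hook bitmask, and bit
	 * NF_IP_NUMHOOKS marks "currently on the walk path" for loop
	 * detection. */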
	for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e
			= (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);

			if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_IP_NUMHOOKS));

			/* Unconditional return/END. */
			if (e->target_offset == sizeof(struct ipt_entry)
			    && (strcmp(t->target.u.user.name,
				       IPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ip)) {
				unsigned int oldpos, size;

				if (t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						 "negative verdict (%i)\n",
						 t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_IP_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_IP_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							 "bad verdict (%i)\n",
							 newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
static inline int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	if (m->u.kernel.match->destroy)
		m->u.kernel.match->destroy(m->u.kernel.match, m->data);
	module_put(m->u.kernel.match->me);
	return 0;
}
static inline int
check_entry(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
		return -EINVAL;

	t = ipt_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static inline int check_match(struct ipt_entry_match *m, const char *name,
			      const struct ipt_ip *ip, unsigned int hookmask)
{
	struct ipt_match *match;
	int ret;

	match = m->u.kernel.match;
	ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
			     name, hookmask, ip->proto,
			     ip->invflags & IPT_INV_PROTO);
	if (!ret && m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ip, match, m->data,
					      hookmask)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		ret = -EINVAL;
	}
	return ret;
}
static inline int
find_check_match(struct ipt_entry_match *m,
		 const char *name,
		 const struct ipt_ip *ip,
		 unsigned int hookmask,
		 unsigned int *i)
{
	struct ipt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = check_match(m, name, ip, hookmask);
	if (ret)
		goto err;

	(*i)++;
	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
static inline int check_target(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;
	struct ipt_target *target;
	int ret;

	t = ipt_get_target(e);
	target = t->u.kernel.target;
	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
			      name, e->comefrom, e->ip.proto,
			      e->ip.invflags & IPT_INV_PROTO);
	if (!ret && t->u.kernel.target->checkentry
	    && !t->u.kernel.target->checkentry(name, e, target,
					       t->data, e->comefrom)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		ret = -EINVAL;
	}
	return ret;
}
static inline int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ipt_entry_target *t;
	struct ipt_target *target;
	int ret;
	unsigned int j;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
				e->comefrom, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
static inline int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);
	if (t->u.kernel.target->destroy)
		t->u.kernel.target->destroy(t->u.kernel.target, t->data);
	module_put(t->u.kernel.target->me);
	return 0;
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				find_check_entry, name, size, &i);

	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 * We don't care about preemption here.
	 */
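	/* Editor's illustration of the scheme described above: one
	 * SET_COUNTER pass seeds the output from the current CPU's
	 * copy, then ADD_COUNTER folds in every other CPU, avoiding
	 * a separate memset:
	 *
	 *	counters[i]  = cpu_counters[curcpu][i];	// set pass
	 *	counters[i] += cpu_counters[cpu][i];	// add passes
	 *
	 * (array notation is schematic; the real code iterates the
	 * rule entries via IPT_ENTRY_ITERATE). */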
	curcpu = raw_smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
	}
}
static inline struct xt_counters *alloc_counters(struct ipt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	/* First, sum counters... */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     struct ipt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++) {
		unsigned int i;
		struct ipt_entry_match *m;
		struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_delta {
	struct compat_delta *next;
	u_int16_t offset;
	short delta;
};

static struct compat_delta *compat_offsets = NULL;

static int compat_add_offset(u_int16_t offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	tmp->offset = offset;
	tmp->delta = delta;
	if (compat_offsets) {
		tmp->next = compat_offsets->next;
		compat_offsets->next = tmp;
	} else {
		compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}

static void compat_flush_offsets(void)
{
	struct compat_delta *tmp, *next;

	if (compat_offsets) {
		for (tmp = compat_offsets; tmp; tmp = next) {
			next = tmp->next;
			kfree(tmp);
		}
		compat_offsets = NULL;
	}
}
static short compat_calc_jump(u_int16_t offset)
{
	struct compat_delta *tmp;
	short delta;

	for (tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
		if (tmp->offset < offset)
			delta += tmp->delta;
	return delta;
}
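/* Editor's worked example for compat_calc_jump (assuming entries
 * shrink when converted from the native to the compat layout): if the
 * entries at offsets 100 and 300 each recorded a delta of 8, then a
 * jump target must be pulled back by every delta recorded before it:
 *
 *	compat_calc_jump(400) == 16;	// both entries precede 400
 *	compat_calc_jump(200) == 8;	// only the entry at 100 does
 */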
static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += compat_calc_jump(v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= compat_calc_jump(cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}
static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	u_int16_t entry_offset;
	int off, i, ret;

	off = 0;
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		if (info->hook_entry[i] && (e < (struct ipt_entry *)
				(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] && (e < (struct ipt_entry *)
				(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
static int compat_table_info(struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;
	int i;

	if (!newinfo || !info)
		return -EINVAL;

	memset(newinfo, 0, sizeof(struct xt_table_info));
	newinfo->size = info->size;
	newinfo->number = info->number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				 compat_calc_entry, info, loc_cpu_entry,
				 newinfo);
}
#endif
static int get_info(void __user *user, int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct ipt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %u\n", *len,
			 (unsigned int)sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			compat_flush_offsets();
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct ipt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %u\n", *len,
			 (unsigned int)sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct ipt_get_entries) +
			 get.size));
		return -EINVAL;
	}

	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n",
			 private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size,
				 get.size);
			ret = -EINVAL;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
static int
__do_replace(const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct ipt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
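	/* Editor's note on the bound above (an assumption about the
	 * allocator's layout): xt_alloc_table_info() allocates roughly
	 * (size + sizeof(struct xt_table_info)) per possible CPU, each
	 * rounded up to SMP_CACHE_BYTES, so rejecting
	 *
	 *	tmp.size >= (INT_MAX - sizeof(struct xt_table_info))
	 *			/ NR_CPUS - SMP_CACHE_BYTES
	 *
	 * keeps the total allocation request below INT_MAX even on the
	 * largest configured machine. */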
	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static inline int
add_counter_to_entry(struct ipt_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
#if 0
	duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
		 *i,
		 (long unsigned int)e->counters.pcnt,
		 (long unsigned int)e->counters.bcnt,
		 (long unsigned int)addme[*i].pcnt,
		 (long unsigned int)addme[*i].bcnt);
#endif

	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}
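/* Editor's note on the "lazy" add above: the delta from userspace is
 * folded into a single CPU's copy only.  Since get_counters() reports
 * the sum of the per-CPU copies, e.g.
 *
 *	total = copy[cpu0] + copy[cpu1] + ...;
 *
 * it does not matter which copy absorbed the addition; unsigned
 * wrap-around behaves consistently across the sum, which is the
 * "fey magic" the comment alludes to. */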
static int
do_add_counters(void __user *user, unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	char *name;
	int size;
	void *ptmp;
	struct ipt_table *t;
	struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc,
			  &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_IP_NUMHOOKS];
	u32			underflow[NF_IP_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};
static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
		void __user **dstptr, compat_uint_t *size)
{
	return xt_compat_match_to_user(m, dstptr, size);
}
static int compat_copy_entry_to_user(struct ipt_entry *e,
		void __user **dstptr, compat_uint_t *size)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;
	return 0;
out:
	return ret;
}
static inline int
compat_check_calc_match(struct ipt_entry_match *m,
			const char *name,
			const struct ipt_ip *ip,
			unsigned int hookmask,
			int *size, int *i)
{
	struct ipt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}
static inline int
check_compat_entry_size_and_hooks(struct ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct ipt_entry_target *t;
	struct ipt_target *target;
	u_int16_t entry_offset;
	int ret, off, h, j;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	ret = check_entry(e, name);
	if (ret)
		return ret;

	off = 0;
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
				e->comefrom, &off, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	e->counters = ((struct ipt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
	void **dstptr, compat_uint_t *size, const char *name,
	const struct ipt_ip *ip, unsigned int hookmask)
{
	xt_compat_match_from_user(m, dstptr, size);
	return 0;
}
static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
	unsigned int *size, const char *name,
	struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct ipt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
				name, &de->ip, de->comefrom);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
static inline int compat_check_entry(struct ipt_entry *e, const char *name)
{
	int ret;

	ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom);
	if (ret)
		return ret;

	return check_target(e, name);
}
static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				check_compat_entry_size_and_hooks,
				info, &size, entry0,
				entry0 + total_size,
				hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				compat_copy_entry_from_user, &pos, &size,
				name, newinfo, entry1);
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				name);
	if (ret)
		goto free_newinfo;

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	IPT_ENTRY_ITERATE(entry0, total_size, cleanup_entry, &j);
	return ret;
out_unlock:
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	goto out;
}
static int
compat_do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct compat_ipt_get_entries
{
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int compat_copy_entries_to_user(unsigned int total_size,
		struct ipt_table *table, void __user *userptr)
{
	unsigned int off, num;
	struct compat_ipt_entry e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user, &pos, &size);
	if (ret)
		goto free_counters;

	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
		unsigned int i;
		struct ipt_entry_match m;
		struct ipt_entry_target t;

		ret = -EFAULT;
		if (copy_from_user(&e, userptr + off,
				   sizeof(struct compat_ipt_entry)))
			goto free_counters;
		if (copy_to_user(userptr + off +
				 offsetof(struct compat_ipt_entry, counters),
				 &counters[num], sizeof(counters[num])))
			goto free_counters;

		for (i = sizeof(struct compat_ipt_entry);
		     i < e.target_offset; i += m.u.match_size) {
			if (copy_from_user(&m, userptr + off + i,
					   sizeof(struct ipt_entry_match)))
				goto free_counters;
			if (copy_to_user(userptr + off + i +
				offsetof(struct ipt_entry_match, u.user.name),
				m.u.kernel.match->name,
				strlen(m.u.kernel.match->name) + 1))
				goto free_counters;
		}

		if (copy_from_user(&t, userptr + off + e.target_offset,
				   sizeof(struct ipt_entry_target)))
			goto free_counters;
		if (copy_to_user(userptr + off + e.target_offset +
			offsetof(struct ipt_entry_target, u.user.name),
			t.u.kernel.target->name,
			strlen(t.u.kernel.target->name) + 1))
			goto free_counters;
	}
	ret = 0;
free_counters:
	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct ipt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %u\n",
			 *len, (unsigned int)sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
			 get.size));
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n",
			 private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size,
				 get.size);
			ret = -EINVAL;
		}
		compat_flush_offsets();
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		ret = try_then_request_module(xt_find_revision(AF_INET,
							       rev.name,
							       rev.revision,
							       target, &ret),
					      "ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	static struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy on our node/cpu
	 * but don't care about preemption
	 */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	ret = xt_register_table(table, &bootstrap, newinfo);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	return 0;
}
void ipt_unregister_table(struct ipt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     int invert)
{
	return ((test_type == 0xFF)
		|| (type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
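/* Editor's worked example for the helper above: a rule written for
 * echo-request uses type 8 with the full code range, so an incoming
 * echo request (type 8, code 0) matches:
 *
 *	icmp_type_code_match(8, 0, 0xFF, 8, 0, 0) == 1
 *
 * test_type 0xFF acts as a wildcard that matches any type, and
 * invert flips the final result. */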
static int
icmp_match(const struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   const struct xt_match *match,
	   const void *matchinfo,
	   int offset,
	   unsigned int protoff,
	   int *hotdrop)
{
	struct icmphdr _icmph, *ic;
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must not be a fragment. */
	if (offset)
		return 0;

	ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*hotdrop = 1;
		return 0;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
/* Called when user tries to insert an entry of this type. */
static int
icmp_checkentry(const char *tablename,
		const void *info,
		const struct xt_match *match,
		void *matchinfo,
		unsigned int hook_mask)
{
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}
/* The built-in targets: standard (NULL) and error. */
static struct ipt_target ipt_standard_target = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};
static struct ipt_target ipt_error_target = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= AF_INET,
};
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
};
static struct ipt_match icmp_matchstruct = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.proto		= IPPROTO_ICMP,
	.family		= AF_INET,
	.checkentry	= icmp_checkentry,
};
static int __init ip_tables_init(void)
{
	int ret;

	ret = xt_proto_init(AF_INET);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	xt_proto_fini(AF_INET);
err1:
	return ret;
}
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	xt_proto_fini(AF_INET);
}
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);