2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12 * - increase module usage count as soon as we have rules inside
14 * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
15 * - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
17 #include <linux/cache.h>
18 #include <linux/capability.h>
19 #include <linux/skbuff.h>
20 #include <linux/kmod.h>
21 #include <linux/vmalloc.h>
22 #include <linux/netdevice.h>
23 #include <linux/module.h>
24 #include <linux/icmp.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
33 #include <linux/netfilter/x_tables.h>
34 #include <linux/netfilter_ipv4/ip_tables.h>
36 MODULE_LICENSE("GPL");
37 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
38 MODULE_DESCRIPTION("IPv4 packet filter");
40 /*#define DEBUG_IP_FIREWALL*/
41 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
42 /*#define DEBUG_IP_FIREWALL_USER*/
44 #ifdef DEBUG_IP_FIREWALL
45 #define dprintf(format, args...) printk(format , ## args)
47 #define dprintf(format, args...)
50 #ifdef DEBUG_IP_FIREWALL_USER
51 #define duprintf(format, args...) printk(format , ## args)
53 #define duprintf(format, args...)
56 #ifdef CONFIG_NETFILTER_DEBUG
57 #define IP_NF_ASSERT(x) \
60 printk("IP_NF_ASSERT: %s:%s:%u\n", \
61 __FUNCTION__, __FILE__, __LINE__); \
64 #define IP_NF_ASSERT(x)
68 /* All the better to debug you with... */
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
80 Hence the start of any table's per-CPU copy is found with get_entry() below. */
82 /* Returns whether the packet matches the rule or not. */
84 ip_packet_match(const struct iphdr *ip,
87 const struct ipt_ip *ipinfo,
93 #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
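/* FWINV(test, flag): XOR the raw test result with the rule's IPT_INV_* bit,
 * so a set inversion flag flips the sense of the check without extra branches. */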
95 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
97 || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
99 dprintf("Source or dest mismatch.\n");
101 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
103 NIPQUAD(ipinfo->smsk.s_addr),
104 NIPQUAD(ipinfo->src.s_addr),
105 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
106 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
108 NIPQUAD(ipinfo->dmsk.s_addr),
109 NIPQUAD(ipinfo->dst.s_addr),
110 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
114 /* Look for ifname matches; this should unroll nicely. */
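/* The names are compared one unsigned long at a time under iniface_mask,
 * so a '+' wildcard (mask covering only the prefix) matches any interface
 * whose name starts with that prefix. */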
115 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
116 ret |= (((const unsigned long *)indev)[i]
117 ^ ((const unsigned long *)ipinfo->iniface)[i])
118 & ((const unsigned long *)ipinfo->iniface_mask)[i];
121 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
122 dprintf("VIA in mismatch (%s vs %s).%s\n",
123 indev, ipinfo->iniface,
124 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
128 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
129 ret |= (((const unsigned long *)outdev)[i]
130 ^ ((const unsigned long *)ipinfo->outiface)[i])
131 & ((const unsigned long *)ipinfo->outiface_mask)[i];
134 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
135 dprintf("VIA out mismatch (%s vs %s).%s\n",
136 outdev, ipinfo->outiface,
137 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
141 /* Check specific protocol */
143 && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
144 dprintf("Packet protocol %hi does not match %hi.%s\n",
145 ip->protocol, ipinfo->proto,
146 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
150 /* If we have a fragment rule but the packet is not a fragment
151 * then we return zero */
152 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
153 dprintf("Fragment rule but not fragment.%s\n",
154 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
162 ip_checkentry(const struct ipt_ip *ip)
164 if (ip->flags & ~IPT_F_MASK) {
165 duprintf("Unknown flag bits set: %08X\n",
166 ip->flags & ~IPT_F_MASK);
169 if (ip->invflags & ~IPT_INV_MASK) {
170 duprintf("Unknown invflag bits set: %08X\n",
171 ip->invflags & ~IPT_INV_MASK);
178 ipt_error(struct sk_buff **pskb,
179 const struct net_device *in,
180 const struct net_device *out,
181 unsigned int hooknum,
182 const struct xt_target *target,
183 const void *targinfo)
186 printk("ip_tables: error: `%s'\n", (char *)targinfo);
192 int do_match(struct ipt_entry_match *m,
193 const struct sk_buff *skb,
194 const struct net_device *in,
195 const struct net_device *out,
199 /* Stop iteration if it doesn't match */
200 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
201 offset, skb->nh.iph->ihl*4, hotdrop))
207 static inline struct ipt_entry *
208 get_entry(void *base, unsigned int offset)
210 return (struct ipt_entry *)(base + offset);
213 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
215 ipt_do_table(struct sk_buff **pskb,
217 const struct net_device *in,
218 const struct net_device *out,
219 struct ipt_table *table)
221 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
226 /* Initializing verdict to NF_DROP keeps gcc happy. */
227 unsigned int verdict = NF_DROP;
228 const char *indev, *outdev;
230 struct ipt_entry *e, *back;
231 struct xt_table_info *private;
234 ip = (*pskb)->nh.iph;
235 datalen = (*pskb)->len - ip->ihl * 4;
236 indev = in ? in->name : nulldevname;
237 outdev = out ? out->name : nulldevname;
238 /* We handle fragments by dealing with the first fragment as
239 * if it was a normal packet. All other fragments are treated
240 * normally, except that they will NEVER match rules that ask
241 * things we don't know (i.e. tcp syn flag or ports). If the
242 * rule is also a fragment-specific rule, non-fragments won't
244 offset = ntohs(ip->frag_off) & IP_OFFSET;
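/* Non-zero only for the second and later fragments; the first fragment
 * (offset 0) carries the full headers and is matched like a normal packet. */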
246 read_lock_bh(&table->lock);
247 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
248 private = table->private;
249 table_base = (void *)private->entries[smp_processor_id()];
250 e = get_entry(table_base, private->hook_entry[hook]);
252 /* For return from builtin chain */
253 back = get_entry(table_base, private->underflow[hook]);
258 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
259 struct ipt_entry_target *t;
261 if (IPT_MATCH_ITERATE(e, do_match,
263 offset, &hotdrop) != 0)
266 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
268 t = ipt_get_target(e);
269 IP_NF_ASSERT(t->u.kernel.target);
270 /* Standard target? */
271 if (!t->u.kernel.target->target) {
274 v = ((struct ipt_standard_target *)t)->verdict;
276 /* Pop from stack? */
277 if (v != IPT_RETURN) {
278 verdict = (unsigned)(-v) - 1;
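/* Standard verdicts < 0 encode netfilter verdicts as -NF_x - 1
 * (e.g. -j ACCEPT is stored as -NF_ACCEPT - 1 == -2); decode it here. */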
282 back = get_entry(table_base,
286 if (table_base + v != (void *)e + e->next_offset
287 && !(e->ip.flags & IPT_F_GOTO)) {
288 /* Save old back ptr in next entry */
289 struct ipt_entry *next
290 = (void *)e + e->next_offset;
292 = (void *)back - table_base;
293 /* set back pointer to next entry */
297 e = get_entry(table_base, v);
299 /* Targets which reenter must return
301 #ifdef CONFIG_NETFILTER_DEBUG
302 ((struct ipt_entry *)table_base)->comefrom
305 verdict = t->u.kernel.target->target(pskb,
311 #ifdef CONFIG_NETFILTER_DEBUG
312 if (((struct ipt_entry *)table_base)->comefrom
314 && verdict == IPT_CONTINUE) {
315 printk("Target %s reentered!\n",
316 t->u.kernel.target->name);
319 ((struct ipt_entry *)table_base)->comefrom
322 /* Target might have changed stuff. */
323 ip = (*pskb)->nh.iph;
324 datalen = (*pskb)->len - ip->ihl * 4;
326 if (verdict == IPT_CONTINUE)
327 e = (void *)e + e->next_offset;
335 e = (void *)e + e->next_offset;
339 read_unlock_bh(&table->lock);
341 #ifdef DEBUG_ALLOW_ALL
350 /* All zeroes == unconditional rule. */
352 unconditional(const struct ipt_ip *ip)
356 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
357 if (((__u32 *)ip)[i])
363 /* Figures out from what hook each rule can be called: returns 0 if
364 there are loops. Puts hook bitmask in comefrom. */
366 mark_source_chains(struct xt_table_info *newinfo,
367 unsigned int valid_hooks, void *entry0)
371 /* No recursion; use packet counter to save back ptrs (reset
372 to 0 as we leave), and comefrom to save source hook bitmask */
373 for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
374 unsigned int pos = newinfo->hook_entry[hook];
376 = (struct ipt_entry *)(entry0 + pos);
378 if (!(valid_hooks & (1 << hook)))
381 /* Set initial back pointer. */
382 e->counters.pcnt = pos;
385 struct ipt_standard_target *t
386 = (void *)ipt_get_target(e);
388 if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
389 printk("iptables: loop hook %u pos %u %08X.\n",
390 hook, pos, e->comefrom);
394 |= ((1 << hook) | (1 << NF_IP_NUMHOOKS));
396 /* Unconditional return/END. */
397 if (e->target_offset == sizeof(struct ipt_entry)
398 && (strcmp(t->target.u.user.name,
399 IPT_STANDARD_TARGET) == 0)
401 && unconditional(&e->ip)) {
402 unsigned int oldpos, size;
404 /* Return: backtrack through the last
407 e->comefrom ^= (1<<NF_IP_NUMHOOKS);
408 #ifdef DEBUG_IP_FIREWALL_USER
410 & (1 << NF_IP_NUMHOOKS)) {
411 duprintf("Back unset "
418 pos = e->counters.pcnt;
419 e->counters.pcnt = 0;
421 /* We're at the start. */
425 e = (struct ipt_entry *)
427 } while (oldpos == pos + e->next_offset);
430 size = e->next_offset;
431 e = (struct ipt_entry *)
432 (entry0 + pos + size);
433 e->counters.pcnt = pos;
436 int newpos = t->verdict;
438 if (strcmp(t->target.u.user.name,
439 IPT_STANDARD_TARGET) == 0
441 /* This is a jump; chase it. */
442 duprintf("Jump rule %u -> %u\n",
445 /* ... this is a fallthru */
446 newpos = pos + e->next_offset;
448 e = (struct ipt_entry *)
450 e->counters.pcnt = pos;
455 duprintf("Finished chain %u\n", hook);
461 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
463 if (i && (*i)-- == 0)
466 if (m->u.kernel.match->destroy)
467 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
468 module_put(m->u.kernel.match->me);
473 standard_check(const struct ipt_entry_target *t,
474 unsigned int max_offset)
476 struct ipt_standard_target *targ = (void *)t;
478 /* Check standard info. */
479 if (targ->verdict >= 0
480 && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
481 duprintf("ipt_standard_check: bad verdict (%i)\n",
485 if (targ->verdict < -NF_MAX_VERDICT - 1) {
486 duprintf("ipt_standard_check: bad negative verdict (%i)\n",
494 check_match(struct ipt_entry_match *m,
496 const struct ipt_ip *ip,
497 unsigned int hookmask,
500 struct ipt_match *match;
503 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
505 "ipt_%s", m->u.user.name);
506 if (IS_ERR(match) || !match) {
507 duprintf("check_match: `%s' not found\n", m->u.user.name);
508 return match ? PTR_ERR(match) : -ENOENT;
510 m->u.kernel.match = match;
512 ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
513 name, hookmask, ip->proto,
514 ip->invflags & IPT_INV_PROTO);
518 if (m->u.kernel.match->checkentry
519 && !m->u.kernel.match->checkentry(name, ip, match, m->data,
521 duprintf("ip_tables: check failed for `%s'.\n",
522 m->u.kernel.match->name);
530 module_put(m->u.kernel.match->me);
534 static struct ipt_target ipt_standard_target;
537 check_entry(struct ipt_entry *e, const char *name, unsigned int size,
540 struct ipt_entry_target *t;
541 struct ipt_target *target;
545 if (!ip_checkentry(&e->ip)) {
546 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
550 if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
554 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
556 goto cleanup_matches;
558 t = ipt_get_target(e);
560 if (e->target_offset + t->u.target_size > e->next_offset)
561 goto cleanup_matches;
562 target = try_then_request_module(xt_find_target(AF_INET,
565 "ipt_%s", t->u.user.name);
566 if (IS_ERR(target) || !target) {
567 duprintf("check_entry: `%s' not found\n", t->u.user.name);
568 ret = target ? PTR_ERR(target) : -ENOENT;
569 goto cleanup_matches;
571 t->u.kernel.target = target;
573 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
574 name, e->comefrom, e->ip.proto,
575 e->ip.invflags & IPT_INV_PROTO);
579 if (t->u.kernel.target == &ipt_standard_target) {
580 if (!standard_check(t, size)) {
584 } else if (t->u.kernel.target->checkentry
585 && !t->u.kernel.target->checkentry(name, e, target, t->data,
587 duprintf("ip_tables: check failed for `%s'.\n",
588 t->u.kernel.target->name);
596 module_put(t->u.kernel.target->me);
598 IPT_MATCH_ITERATE(e, cleanup_match, &j);
603 check_entry_size_and_hooks(struct ipt_entry *e,
604 struct xt_table_info *newinfo,
606 unsigned char *limit,
607 const unsigned int *hook_entries,
608 const unsigned int *underflows,
613 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
614 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
615 duprintf("Bad offset %p\n", e);
620 < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
621 duprintf("checking: element %p size %u\n",
626 /* Check hooks & underflows */
627 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
628 if ((unsigned char *)e - base == hook_entries[h])
629 newinfo->hook_entry[h] = hook_entries[h];
630 if ((unsigned char *)e - base == underflows[h])
631 newinfo->underflow[h] = underflows[h];
634 /* FIXME: underflows must be unconditional, standard verdicts
635 < 0 (not IPT_RETURN). --RR */
637 /* Clear counters and comefrom */
638 e->counters = ((struct xt_counters) { 0, 0 });
646 cleanup_entry(struct ipt_entry *e, unsigned int *i)
648 struct ipt_entry_target *t;
650 if (i && (*i)-- == 0)
653 /* Cleanup all matches */
654 IPT_MATCH_ITERATE(e, cleanup_match, NULL);
655 t = ipt_get_target(e);
656 if (t->u.kernel.target->destroy)
657 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
658 module_put(t->u.kernel.target->me);
662 /* Checks and translates the user-supplied table segment (held in
665 translate_table(const char *name,
666 unsigned int valid_hooks,
667 struct xt_table_info *newinfo,
671 const unsigned int *hook_entries,
672 const unsigned int *underflows)
677 newinfo->size = size;
678 newinfo->number = number;
680 /* Init all hooks to impossible value. */
681 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
682 newinfo->hook_entry[i] = 0xFFFFFFFF;
683 newinfo->underflow[i] = 0xFFFFFFFF;
686 duprintf("translate_table: size %u\n", newinfo->size);
688 /* Walk through entries, checking offsets. */
689 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
690 check_entry_size_and_hooks,
694 hook_entries, underflows, &i);
699 duprintf("translate_table: %u not %u entries\n",
704 /* Check hooks all assigned */
705 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
706 /* Only hooks which are valid */
707 if (!(valid_hooks & (1 << i)))
709 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
710 duprintf("Invalid hook entry %u %u\n",
714 if (newinfo->underflow[i] == 0xFFFFFFFF) {
715 duprintf("Invalid underflow %u %u\n",
721 /* Finally, each sanity check must pass */
723 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
724 check_entry, name, size, &i);
730 if (!mark_source_chains(newinfo, valid_hooks, entry0))
733 /* And one copy for every other CPU */
734 for_each_possible_cpu(i) {
735 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
736 memcpy(newinfo->entries[i], entry0, newinfo->size);
741 IPT_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i);
747 add_entry_to_counter(const struct ipt_entry *e,
748 struct xt_counters total[],
751 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
758 set_entry_to_counter(const struct ipt_entry *e,
759 struct ipt_counters total[],
762 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
769 get_counters(const struct xt_table_info *t,
770 struct xt_counters counters[])
776 /* Instead of clearing (by a previous call to memset())
777 * the counters and using adds, we set the counters
778 * with data used by the current CPU.
779 * We don't care about preemption here.
781 curcpu = raw_smp_processor_id();
784 IPT_ENTRY_ITERATE(t->entries[curcpu],
786 set_entry_to_counter,
790 for_each_possible_cpu(cpu) {
794 IPT_ENTRY_ITERATE(t->entries[cpu],
796 add_entry_to_counter,
802 static inline struct xt_counters * alloc_counters(struct ipt_table *table)
804 unsigned int countersize;
805 struct xt_counters *counters;
806 struct xt_table_info *private = table->private;
808 /* We need an atomic snapshot of the counters: the rest doesn't change
809 (other than comefrom, which userspace doesn't care
811 countersize = sizeof(struct xt_counters) * private->number;
812 counters = vmalloc_node(countersize, numa_node_id());
814 if (counters == NULL)
815 return ERR_PTR(-ENOMEM);
817 /* First, sum counters... */
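/* Taking the write lock stops the packet path (which only read-locks in
 * softirq context), so the per-CPU counters can be summed consistently. */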
818 write_lock_bh(&table->lock);
819 get_counters(private, counters);
820 write_unlock_bh(&table->lock);
826 copy_entries_to_user(unsigned int total_size,
827 struct ipt_table *table,
828 void __user *userptr)
830 unsigned int off, num;
832 struct xt_counters *counters;
833 struct xt_table_info *private = table->private;
837 counters = alloc_counters(table);
838 if (IS_ERR(counters))
839 return PTR_ERR(counters);
841 /* choose the copy that is on our node/cpu, ...
842 * This choice is lazy (because current thread is
843 * allowed to migrate to another cpu)
845 loc_cpu_entry = private->entries[raw_smp_processor_id()];
846 /* ... then copy entire thing ... */
847 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
852 /* FIXME: use iterator macros --RR */
853 /* ... then go back and fix counters and names */
854 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
856 struct ipt_entry_match *m;
857 struct ipt_entry_target *t;
859 e = (struct ipt_entry *)(loc_cpu_entry + off);
860 if (copy_to_user(userptr + off
861 + offsetof(struct ipt_entry, counters),
863 sizeof(counters[num])) != 0) {
868 for (i = sizeof(struct ipt_entry);
869 i < e->target_offset;
870 i += m->u.match_size) {
873 if (copy_to_user(userptr + off + i
874 + offsetof(struct ipt_entry_match,
876 m->u.kernel.match->name,
877 strlen(m->u.kernel.match->name)+1)
884 t = ipt_get_target(e);
885 if (copy_to_user(userptr + off + e->target_offset
886 + offsetof(struct ipt_entry_target,
888 t->u.kernel.target->name,
889 strlen(t->u.kernel.target->name)+1) != 0) {
901 struct compat_delta {
902 struct compat_delta *next;
907 static struct compat_delta *compat_offsets = NULL;
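/* Compat (32-bit) entries are smaller than native ones.  Each node records
 * the size delta introduced at one entry offset; compat_calc_jump() sums the
 * deltas of all entries before a given offset so verdict jump targets can be
 * adjusted when converting between the two layouts. */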
909 static int compat_add_offset(u_int16_t offset, short delta)
911 struct compat_delta *tmp;
913 tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
916 tmp->offset = offset;
918 if (compat_offsets) {
919 tmp->next = compat_offsets->next;
920 compat_offsets->next = tmp;
922 compat_offsets = tmp;
928 static void compat_flush_offsets(void)
930 struct compat_delta *tmp, *next;
932 if (compat_offsets) {
933 for(tmp = compat_offsets; tmp; tmp = next) {
937 compat_offsets = NULL;
941 static short compat_calc_jump(u_int16_t offset)
943 struct compat_delta *tmp;
946 for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
947 if (tmp->offset < offset)
952 static void compat_standard_from_user(void *dst, void *src)
954 int v = *(compat_int_t *)src;
957 v += compat_calc_jump(v);
958 memcpy(dst, &v, sizeof(v));
961 static int compat_standard_to_user(void __user *dst, void *src)
963 compat_int_t cv = *(int *)src;
966 cv -= compat_calc_jump(cv);
967 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
971 compat_calc_match(struct ipt_entry_match *m, int * size)
973 *size += xt_compat_match_offset(m->u.kernel.match);
977 static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
978 void *base, struct xt_table_info *newinfo)
980 struct ipt_entry_target *t;
981 u_int16_t entry_offset;
985 entry_offset = (void *)e - base;
986 IPT_MATCH_ITERATE(e, compat_calc_match, &off);
987 t = ipt_get_target(e);
988 off += xt_compat_target_offset(t->u.kernel.target);
989 newinfo->size -= off;
990 ret = compat_add_offset(entry_offset, off);
994 for (i = 0; i< NF_IP_NUMHOOKS; i++) {
995 if (info->hook_entry[i] && (e < (struct ipt_entry *)
996 (base + info->hook_entry[i])))
997 newinfo->hook_entry[i] -= off;
998 if (info->underflow[i] && (e < (struct ipt_entry *)
999 (base + info->underflow[i])))
1000 newinfo->underflow[i] -= off;
1005 static int compat_table_info(struct xt_table_info *info,
1006 struct xt_table_info *newinfo)
1008 void *loc_cpu_entry;
1011 if (!newinfo || !info)
1014 memset(newinfo, 0, sizeof(struct xt_table_info));
1015 newinfo->size = info->size;
1016 newinfo->number = info->number;
1017 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1018 newinfo->hook_entry[i] = info->hook_entry[i];
1019 newinfo->underflow[i] = info->underflow[i];
1021 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1022 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
1023 compat_calc_entry, info, loc_cpu_entry, newinfo);
1027 static int get_info(void __user *user, int *len, int compat)
1029 char name[IPT_TABLE_MAXNAMELEN];
1030 struct ipt_table *t;
1033 if (*len != sizeof(struct ipt_getinfo)) {
1034 duprintf("length %u != %u\n", *len,
1035 (unsigned int)sizeof(struct ipt_getinfo));
1039 if (copy_from_user(name, user, sizeof(name)) != 0)
1042 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1043 #ifdef CONFIG_COMPAT
1045 xt_compat_lock(AF_INET);
1047 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1048 "iptable_%s", name);
1049 if (t && !IS_ERR(t)) {
1050 struct ipt_getinfo info;
1051 struct xt_table_info *private = t->private;
1053 #ifdef CONFIG_COMPAT
1055 struct xt_table_info tmp;
1056 ret = compat_table_info(private, &tmp);
1057 compat_flush_offsets();
1061 info.valid_hooks = t->valid_hooks;
1062 memcpy(info.hook_entry, private->hook_entry,
1063 sizeof(info.hook_entry));
1064 memcpy(info.underflow, private->underflow,
1065 sizeof(info.underflow));
1066 info.num_entries = private->number;
1067 info.size = private->size;
1068 strcpy(info.name, name);
1070 if (copy_to_user(user, &info, *len) != 0)
1078 ret = t ? PTR_ERR(t) : -ENOENT;
1079 #ifdef CONFIG_COMPAT
1081 xt_compat_unlock(AF_INET);
1087 get_entries(struct ipt_get_entries __user *uptr, int *len)
1090 struct ipt_get_entries get;
1091 struct ipt_table *t;
1093 if (*len < sizeof(get)) {
1094 duprintf("get_entries: %u < %d\n", *len,
1095 (unsigned int)sizeof(get));
1098 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1100 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1101 duprintf("get_entries: %u != %u\n", *len,
1102 (unsigned int)(sizeof(struct ipt_get_entries) +
1107 t = xt_find_table_lock(AF_INET, get.name);
1108 if (t && !IS_ERR(t)) {
1109 struct xt_table_info *private = t->private;
1110 duprintf("t->private->number = %u\n",
1112 if (get.size == private->size)
1113 ret = copy_entries_to_user(private->size,
1114 t, uptr->entrytable);
1116 duprintf("get_entries: I've got %u not %u!\n",
1124 ret = t ? PTR_ERR(t) : -ENOENT;
1130 __do_replace(const char *name, unsigned int valid_hooks,
1131 struct xt_table_info *newinfo, unsigned int num_counters,
1132 void __user *counters_ptr)
1135 struct ipt_table *t;
1136 struct xt_table_info *oldinfo;
1137 struct xt_counters *counters;
1138 void *loc_cpu_old_entry;
1141 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1147 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1148 "iptable_%s", name);
1149 if (!t || IS_ERR(t)) {
1150 ret = t ? PTR_ERR(t) : -ENOENT;
1151 goto free_newinfo_counters_untrans;
1155 if (valid_hooks != t->valid_hooks) {
1156 duprintf("Valid hook crap: %08X vs %08X\n",
1157 valid_hooks, t->valid_hooks);
1162 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1166 /* Update module usage count based on number of rules */
1167 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1168 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1169 if ((oldinfo->number > oldinfo->initial_entries) ||
1170 (newinfo->number <= oldinfo->initial_entries))
1172 if ((oldinfo->number > oldinfo->initial_entries) &&
1173 (newinfo->number <= oldinfo->initial_entries))
1176 /* Get the old counters. */
1177 get_counters(oldinfo, counters);
1178 /* Decrease module usage counts and free resources */
1179 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1180 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
1181 xt_free_table_info(oldinfo);
1182 if (copy_to_user(counters_ptr, counters,
1183 sizeof(struct xt_counters) * num_counters) != 0)
1192 free_newinfo_counters_untrans:
1199 do_replace(void __user *user, unsigned int len)
1202 struct ipt_replace tmp;
1203 struct xt_table_info *newinfo;
1204 void *loc_cpu_entry;
1206 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1209 /* Hack: Causes ipchains to give correct error msg --RR */
1210 if (len != sizeof(tmp) + tmp.size)
1211 return -ENOPROTOOPT;
1213 /* overflow check */
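/* xt_alloc_table_info() replicates the table once per possible CPU, so make
 * sure neither the blob size nor the counter array can overflow that
 * allocation arithmetic. */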
1214 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1217 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1220 newinfo = xt_alloc_table_info(tmp.size);
1224 /* choose the copy that is on our node/cpu */
1225 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1226 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1232 ret = translate_table(tmp.name, tmp.valid_hooks,
1233 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1234 tmp.hook_entry, tmp.underflow);
1238 duprintf("ip_tables: Translated table\n");
1240 ret = __do_replace(tmp.name, tmp.valid_hooks,
1241 newinfo, tmp.num_counters,
1244 goto free_newinfo_untrans;
1247 free_newinfo_untrans:
1248 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1250 xt_free_table_info(newinfo);
1254 /* We're lazy, and add to the first CPU; overflow works its fey magic
1255 * and everything is OK. */
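/* In this version the increments land on the copy of whichever CPU runs
 * do_add_counters(); get_counters() sums every per-CPU copy when reading,
 * so the totals still come out right. */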
1257 add_counter_to_entry(struct ipt_entry *e,
1258 const struct xt_counters addme[],
1262 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1264 (long unsigned int)e->counters.pcnt,
1265 (long unsigned int)e->counters.bcnt,
1266 (long unsigned int)addme[*i].pcnt,
1267 (long unsigned int)addme[*i].bcnt);
1270 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1277 do_add_counters(void __user *user, unsigned int len, int compat)
1280 struct xt_counters_info tmp;
1281 struct xt_counters *paddc;
1282 unsigned int num_counters;
1286 struct ipt_table *t;
1287 struct xt_table_info *private;
1289 void *loc_cpu_entry;
1290 #ifdef CONFIG_COMPAT
1291 struct compat_xt_counters_info compat_tmp;
1295 size = sizeof(struct compat_xt_counters_info);
1300 size = sizeof(struct xt_counters_info);
1303 if (copy_from_user(ptmp, user, size) != 0)
1306 #ifdef CONFIG_COMPAT
1308 num_counters = compat_tmp.num_counters;
1309 name = compat_tmp.name;
1313 num_counters = tmp.num_counters;
1317 if (len != size + num_counters * sizeof(struct xt_counters))
1320 paddc = vmalloc_node(len - size, numa_node_id());
1324 if (copy_from_user(paddc, user + size, len - size) != 0) {
1329 t = xt_find_table_lock(AF_INET, name);
1330 if (!t || IS_ERR(t)) {
1331 ret = t ? PTR_ERR(t) : -ENOENT;
1335 write_lock_bh(&t->lock);
1336 private = t->private;
1337 if (private->number != num_counters) {
1339 goto unlock_up_free;
1343 /* Choose the copy that is on our node */
1344 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1345 IPT_ENTRY_ITERATE(loc_cpu_entry,
1347 add_counter_to_entry,
1351 write_unlock_bh(&t->lock);
1360 #ifdef CONFIG_COMPAT
1361 struct compat_ipt_replace {
1362 char name[IPT_TABLE_MAXNAMELEN];
1366 u32 hook_entry[NF_IP_NUMHOOKS];
1367 u32 underflow[NF_IP_NUMHOOKS];
1369 compat_uptr_t counters; /* struct ipt_counters * */
1370 struct compat_ipt_entry entries[0];
1373 static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
1374 void __user **dstptr, compat_uint_t *size)
1376 return xt_compat_match_to_user(m, dstptr, size);
1379 static int compat_copy_entry_to_user(struct ipt_entry *e,
1380 void __user **dstptr, compat_uint_t *size)
1382 struct ipt_entry_target *t;
1383 struct compat_ipt_entry __user *ce;
1384 u_int16_t target_offset, next_offset;
1385 compat_uint_t origsize;
1390 ce = (struct compat_ipt_entry __user *)*dstptr;
1391 if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
1394 *dstptr += sizeof(struct compat_ipt_entry);
1395 ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
1396 target_offset = e->target_offset - (origsize - *size);
1399 t = ipt_get_target(e);
1400 ret = xt_compat_target_to_user(t, dstptr, size);
1404 next_offset = e->next_offset - (origsize - *size);
1405 if (put_user(target_offset, &ce->target_offset))
1407 if (put_user(next_offset, &ce->next_offset))
1415 compat_check_calc_match(struct ipt_entry_match *m,
1417 const struct ipt_ip *ip,
1418 unsigned int hookmask,
1421 struct ipt_match *match;
1423 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1424 m->u.user.revision),
1425 "ipt_%s", m->u.user.name);
1426 if (IS_ERR(match) || !match) {
1427 duprintf("compat_check_calc_match: `%s' not found\n",
1429 return match ? PTR_ERR(match) : -ENOENT;
1431 m->u.kernel.match = match;
1432 *size += xt_compat_match_offset(match);
1439 check_compat_entry_size_and_hooks(struct ipt_entry *e,
1440 struct xt_table_info *newinfo,
1442 unsigned char *base,
1443 unsigned char *limit,
1444 unsigned int *hook_entries,
1445 unsigned int *underflows,
1449 struct ipt_entry_target *t;
1450 struct ipt_target *target;
1451 u_int16_t entry_offset;
1454 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1455 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1456 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1457 duprintf("Bad offset %p, limit = %p\n", e, limit);
1461 if (e->next_offset < sizeof(struct compat_ipt_entry) +
1462 sizeof(struct compat_xt_entry_target)) {
1463 duprintf("checking: element %p size %u\n",
1468 if (!ip_checkentry(&e->ip)) {
1469 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
1473 if (e->target_offset + sizeof(struct compat_xt_entry_target) >
1478 entry_offset = (void *)e - (void *)base;
1480 ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
1481 e->comefrom, &off, &j);
1483 goto cleanup_matches;
1485 t = ipt_get_target(e);
1487 if (e->target_offset + t->u.target_size > e->next_offset)
1488 goto cleanup_matches;
1489 target = try_then_request_module(xt_find_target(AF_INET,
1491 t->u.user.revision),
1492 "ipt_%s", t->u.user.name);
1493 if (IS_ERR(target) || !target) {
1494 duprintf("check_entry: `%s' not found\n", t->u.user.name);
1495 ret = target ? PTR_ERR(target) : -ENOENT;
1496 goto cleanup_matches;
1498 t->u.kernel.target = target;
1500 off += xt_compat_target_offset(target);
1502 ret = compat_add_offset(entry_offset, off);
1506 /* Check hooks & underflows */
1507 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1508 if ((unsigned char *)e - base == hook_entries[h])
1509 newinfo->hook_entry[h] = hook_entries[h];
1510 if ((unsigned char *)e - base == underflows[h])
1511 newinfo->underflow[h] = underflows[h];
1514 /* Clear counters and comefrom */
1515 e->counters = ((struct ipt_counters) { 0, 0 });
1522 module_put(t->u.kernel.target->me);
1524 IPT_MATCH_ITERATE(e, cleanup_match, &j);
1528 static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
1529 void **dstptr, compat_uint_t *size, const char *name,
1530 const struct ipt_ip *ip, unsigned int hookmask)
1532 struct ipt_entry_match *dm;
1533 struct ipt_match *match;
1536 dm = (struct ipt_entry_match *)*dstptr;
1537 match = m->u.kernel.match;
1538 xt_compat_match_from_user(m, dstptr, size);
1540 ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
1541 name, hookmask, ip->proto,
1542 ip->invflags & IPT_INV_PROTO);
1543 if (!ret && m->u.kernel.match->checkentry
1544 && !m->u.kernel.match->checkentry(name, ip, match, dm->data,
1546 duprintf("ip_tables: check failed for `%s'.\n",
1547 m->u.kernel.match->name);
1553 static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
1554 unsigned int *size, const char *name,
1555 struct xt_table_info *newinfo, unsigned char *base)
1557 struct ipt_entry_target *t;
1558 struct ipt_target *target;
1559 struct ipt_entry *de;
1560 unsigned int origsize;
1565 de = (struct ipt_entry *)*dstptr;
1566 memcpy(de, e, sizeof(struct ipt_entry));
1568 *dstptr += sizeof(struct compat_ipt_entry);
1569 ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
1570 name, &de->ip, de->comefrom);
1573 de->target_offset = e->target_offset - (origsize - *size);
1574 t = ipt_get_target(e);
1575 target = t->u.kernel.target;
1576 xt_compat_target_from_user(t, dstptr, size);
1578 de->next_offset = e->next_offset - (origsize - *size);
1579 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1580 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1581 newinfo->hook_entry[h] -= origsize - *size;
1582 if ((unsigned char *)de - base < newinfo->underflow[h])
1583 newinfo->underflow[h] -= origsize - *size;
1586 t = ipt_get_target(de);
1587 target = t->u.kernel.target;
1588 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
1589 name, e->comefrom, e->ip.proto,
1590 e->ip.invflags & IPT_INV_PROTO);
1595 if (t->u.kernel.target == &ipt_standard_target) {
1596 if (!standard_check(t, *size))
1598 } else if (t->u.kernel.target->checkentry
1599 && !t->u.kernel.target->checkentry(name, de, target,
1600 t->data, de->comefrom)) {
1601 duprintf("ip_tables: compat: check failed for `%s'.\n",
1602 t->u.kernel.target->name);
1611 translate_compat_table(const char *name,
1612 unsigned int valid_hooks,
1613 struct xt_table_info **pinfo,
1615 unsigned int total_size,
1616 unsigned int number,
1617 unsigned int *hook_entries,
1618 unsigned int *underflows)
1621 struct xt_table_info *newinfo, *info;
1622 void *pos, *entry0, *entry1;
1629 info->number = number;
1631 /* Init all hooks to impossible value. */
1632 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1633 info->hook_entry[i] = 0xFFFFFFFF;
1634 info->underflow[i] = 0xFFFFFFFF;
1637 duprintf("translate_compat_table: size %u\n", info->size);
1639 xt_compat_lock(AF_INET);
1640 /* Walk through entries, checking offsets. */
1641 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1642 check_compat_entry_size_and_hooks,
1643 info, &size, entry0,
1644 entry0 + total_size,
1645 hook_entries, underflows, &j, name);
1651 duprintf("translate_compat_table: %u not %u entries\n",
1656 /* Check hooks all assigned */
1657 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1658 /* Only hooks which are valid */
1659 if (!(valid_hooks & (1 << i)))
1661 if (info->hook_entry[i] == 0xFFFFFFFF) {
1662 duprintf("Invalid hook entry %u %u\n",
1663 i, hook_entries[i]);
1666 if (info->underflow[i] == 0xFFFFFFFF) {
1667 duprintf("Invalid underflow %u %u\n",
1674 newinfo = xt_alloc_table_info(size);
1678 newinfo->number = number;
1679 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1680 newinfo->hook_entry[i] = info->hook_entry[i];
1681 newinfo->underflow[i] = info->underflow[i];
1683 entry1 = newinfo->entries[raw_smp_processor_id()];
1686 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1687 compat_copy_entry_from_user, &pos, &size,
1688 name, newinfo, entry1);
1689 compat_flush_offsets();
1690 xt_compat_unlock(AF_INET);
1695 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1698 /* And one copy for every other CPU */
1699 for_each_possible_cpu(i)
1700 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1701 memcpy(newinfo->entries[i], entry1, newinfo->size);
1705 xt_free_table_info(info);
1709 xt_free_table_info(newinfo);
1711 IPT_ENTRY_ITERATE(entry0, total_size, cleanup_entry, &j);
1714 compat_flush_offsets();
1715 xt_compat_unlock(AF_INET);
1720 compat_do_replace(void __user *user, unsigned int len)
1723 struct compat_ipt_replace tmp;
1724 struct xt_table_info *newinfo;
1725 void *loc_cpu_entry;
1727 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1730 /* Hack: Causes ipchains to give correct error msg --RR */
1731 if (len != sizeof(tmp) + tmp.size)
1732 return -ENOPROTOOPT;
1734 /* overflow check */
1735 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1738 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1741 newinfo = xt_alloc_table_info(tmp.size);
1745 /* choose the copy that is on our node/cpu */
1746 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1747 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1753 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1754 &newinfo, &loc_cpu_entry, tmp.size,
1755 tmp.num_entries, tmp.hook_entry, tmp.underflow);
1759 duprintf("compat_do_replace: Translated table\n");
1761 ret = __do_replace(tmp.name, tmp.valid_hooks,
1762 newinfo, tmp.num_counters,
1763 compat_ptr(tmp.counters));
1765 goto free_newinfo_untrans;
1768 free_newinfo_untrans:
1769 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1771 xt_free_table_info(newinfo);
1776 compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1781 if (!capable(CAP_NET_ADMIN))
1785 case IPT_SO_SET_REPLACE:
1786 ret = compat_do_replace(user, len);
1789 case IPT_SO_SET_ADD_COUNTERS:
1790 ret = do_add_counters(user, len, 1);
1794 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1801 struct compat_ipt_get_entries
1803 char name[IPT_TABLE_MAXNAMELEN];
1805 struct compat_ipt_entry entrytable[0];
1808 static int compat_copy_entries_to_user(unsigned int total_size,
1809 struct ipt_table *table, void __user *userptr)
1811 unsigned int off, num;
1812 struct compat_ipt_entry e;
1813 struct xt_counters *counters;
1814 struct xt_table_info *private = table->private;
1818 void *loc_cpu_entry;
1820 counters = alloc_counters(table);
1821 if (IS_ERR(counters))
1822 return PTR_ERR(counters);
1824 /* choose the copy that is on our node/cpu, ...
1825 * This choice is lazy (because current thread is
1826 * allowed to migrate to another cpu)
1828 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1831 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1832 compat_copy_entry_to_user, &pos, &size);
1836 /* ... then go back and fix counters and names */
1837 for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
1839 struct ipt_entry_match m;
1840 struct ipt_entry_target t;
1843 if (copy_from_user(&e, userptr + off,
1844 sizeof(struct compat_ipt_entry)))
1846 if (copy_to_user(userptr + off +
1847 offsetof(struct compat_ipt_entry, counters),
1848 &counters[num], sizeof(counters[num])))
1851 for (i = sizeof(struct compat_ipt_entry);
1852 i < e.target_offset; i += m.u.match_size) {
1853 if (copy_from_user(&m, userptr + off + i,
1854 sizeof(struct ipt_entry_match)))
1856 if (copy_to_user(userptr + off + i +
1857 offsetof(struct ipt_entry_match, u.user.name),
1858 m.u.kernel.match->name,
1859 strlen(m.u.kernel.match->name) + 1))
1863 if (copy_from_user(&t, userptr + off + e.target_offset,
1864 sizeof(struct ipt_entry_target)))
1866 if (copy_to_user(userptr + off + e.target_offset +
1867 offsetof(struct ipt_entry_target, u.user.name),
1868 t.u.kernel.target->name,
1869 strlen(t.u.kernel.target->name) + 1))
1879 compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1882 struct compat_ipt_get_entries get;
1883 struct ipt_table *t;
1886 if (*len < sizeof(get)) {
1887 duprintf("compat_get_entries: %u < %u\n",
1888 *len, (unsigned int)sizeof(get));
1892 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1895 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1896 duprintf("compat_get_entries: %u != %u\n", *len,
1897 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
1902 xt_compat_lock(AF_INET);
1903 t = xt_find_table_lock(AF_INET, get.name);
1904 if (t && !IS_ERR(t)) {
1905 struct xt_table_info *private = t->private;
1906 struct xt_table_info info;
1907 duprintf("t->private->number = %u\n",
1909 ret = compat_table_info(private, &info);
1910 if (!ret && get.size == info.size) {
1911 ret = compat_copy_entries_to_user(private->size,
1912 t, uptr->entrytable);
1914 duprintf("compat_get_entries: I've got %u not %u!\n",
1919 compat_flush_offsets();
1923 ret = t ? PTR_ERR(t) : -ENOENT;
1925 xt_compat_unlock(AF_INET);
1929 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
1932 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1936 if (!capable(CAP_NET_ADMIN))
1940 case IPT_SO_GET_INFO:
1941 ret = get_info(user, len, 1);
1943 case IPT_SO_GET_ENTRIES:
1944 ret = compat_get_entries(user, len);
1947 ret = do_ipt_get_ctl(sk, cmd, user, len);
1954 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1958 if (!capable(CAP_NET_ADMIN))
1962 case IPT_SO_SET_REPLACE:
1963 ret = do_replace(user, len);
1966 case IPT_SO_SET_ADD_COUNTERS:
1967 ret = do_add_counters(user, len, 0);
1971 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1979 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1983 if (!capable(CAP_NET_ADMIN))
1987 case IPT_SO_GET_INFO:
1988 ret = get_info(user, len, 0);
1991 case IPT_SO_GET_ENTRIES:
1992 ret = get_entries(user, len);
1995 case IPT_SO_GET_REVISION_MATCH:
1996 case IPT_SO_GET_REVISION_TARGET: {
1997 struct ipt_get_revision rev;
2000 if (*len != sizeof(rev)) {
2004 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2009 if (cmd == IPT_SO_GET_REVISION_TARGET)
2014 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2017 "ipt_%s", rev.name);
2022 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
2029 int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
2032 struct xt_table_info *newinfo;
2033 static struct xt_table_info bootstrap
2034 = { 0, 0, 0, { 0 }, { 0 }, { } };
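/* bootstrap is just an empty placeholder; xt_register_table() installs
 * newinfo as the real private data straight away. */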
2035 void *loc_cpu_entry;
2037 newinfo = xt_alloc_table_info(repl->size);
2041 /* choose the copy on our node/cpu
2042 * but don't care about preemption
2044 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2045 memcpy(loc_cpu_entry, repl->entries, repl->size);
2047 ret = translate_table(table->name, table->valid_hooks,
2048 newinfo, loc_cpu_entry, repl->size,
2053 xt_free_table_info(newinfo);
2057 ret = xt_register_table(table, &bootstrap, newinfo);
2059 xt_free_table_info(newinfo);
2066 void ipt_unregister_table(struct ipt_table *table)
2068 struct xt_table_info *private;
2069 void *loc_cpu_entry;
2071 private = xt_unregister_table(table);
2073 /* Decrease module usage counts and free resources */
2074 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2075 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2076 xt_free_table_info(private);
2079 /* Returns 1 if the type and code are matched by the range, 0 otherwise */
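/* A test_type of 0xFF acts as a wildcard and matches any ICMP type. */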
2081 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2082 u_int8_t type, u_int8_t code,
2085 return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code))
2090 icmp_match(const struct sk_buff *skb,
2091 const struct net_device *in,
2092 const struct net_device *out,
2093 const struct xt_match *match,
2094 const void *matchinfo,
2096 unsigned int protoff,
2099 struct icmphdr _icmph, *ic;
2100 const struct ipt_icmp *icmpinfo = matchinfo;
2102 /* Must not be a fragment. */
2106 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
2108 /* We've been asked to examine this packet, and we
2109 * can't. Hence, no choice but to drop.
2111 duprintf("Dropping evil ICMP tinygram.\n");
2116 return icmp_type_code_match(icmpinfo->type,
2120 !!(icmpinfo->invflags&IPT_ICMP_INV));
2123 /* Called when user tries to insert an entry of this type. */
2125 icmp_checkentry(const char *tablename,
2127 const struct xt_match *match,
2129 unsigned int hook_mask)
2131 const struct ipt_icmp *icmpinfo = matchinfo;
2133 /* Must specify no unknown invflags */
2134 return !(icmpinfo->invflags & ~IPT_ICMP_INV);
2137 /* The built-in targets: standard (NULL) and error. */
2138 static struct ipt_target ipt_standard_target = {
2139 .name = IPT_STANDARD_TARGET,
2140 .targetsize = sizeof(int),
2142 #ifdef CONFIG_COMPAT
2143 .compatsize = sizeof(compat_int_t),
2144 .compat_from_user = compat_standard_from_user,
2145 .compat_to_user = compat_standard_to_user,
2149 static struct ipt_target ipt_error_target = {
2150 .name = IPT_ERROR_TARGET,
2151 .target = ipt_error,
2152 .targetsize = IPT_FUNCTION_MAXNAMELEN,
2156 static struct nf_sockopt_ops ipt_sockopts = {
2158 .set_optmin = IPT_BASE_CTL,
2159 .set_optmax = IPT_SO_SET_MAX+1,
2160 .set = do_ipt_set_ctl,
2161 #ifdef CONFIG_COMPAT
2162 .compat_set = compat_do_ipt_set_ctl,
2164 .get_optmin = IPT_BASE_CTL,
2165 .get_optmax = IPT_SO_GET_MAX+1,
2166 .get = do_ipt_get_ctl,
2167 #ifdef CONFIG_COMPAT
2168 .compat_get = compat_do_ipt_get_ctl,
2172 static struct ipt_match icmp_matchstruct = {
2174 .match = icmp_match,
2175 .matchsize = sizeof(struct ipt_icmp),
2176 .proto = IPPROTO_ICMP,
2178 .checkentry = icmp_checkentry,
2181 static int __init ip_tables_init(void)
2185 ret = xt_proto_init(AF_INET);
2189 /* No one else will be downing the sem now, so we won't sleep */
2190 ret = xt_register_target(&ipt_standard_target);
2193 ret = xt_register_target(&ipt_error_target);
2196 ret = xt_register_match(&icmp_matchstruct);
2200 /* Register setsockopt */
2201 ret = nf_register_sockopt(&ipt_sockopts);
2205 printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
2209 xt_unregister_match(&icmp_matchstruct);
2211 xt_unregister_target(&ipt_error_target);
2213 xt_unregister_target(&ipt_standard_target);
2215 xt_proto_fini(AF_INET);
2220 static void __exit ip_tables_fini(void)
2222 nf_unregister_sockopt(&ipt_sockopts);
2224 xt_unregister_match(&icmp_matchstruct);
2225 xt_unregister_target(&ipt_error_target);
2226 xt_unregister_target(&ipt_standard_target);
2228 xt_proto_fini(AF_INET);
2231 EXPORT_SYMBOL(ipt_register_table);
2232 EXPORT_SYMBOL(ipt_unregister_table);
2233 EXPORT_SYMBOL(ipt_do_table);
2234 module_init(ip_tables_init);
2235 module_exit(ip_tables_fini);