[NETFILTER]: ip_tables: ipt and ipt_compat checks unification
net/ipv4/netfilter/ip_tables.c (pandora-kernel.git)
1 /*
2  * Packet matching code.
3  *
4  * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5  * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12  *      - increase module usage count as soon as we have rules inside
13  *        a table
14  * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
15  *      - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
16  */
17 #include <linux/cache.h>
18 #include <linux/capability.h>
19 #include <linux/skbuff.h>
20 #include <linux/kmod.h>
21 #include <linux/vmalloc.h>
22 #include <linux/netdevice.h>
23 #include <linux/module.h>
24 #include <linux/icmp.h>
25 #include <net/ip.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
32
33 #include <linux/netfilter/x_tables.h>
34 #include <linux/netfilter_ipv4/ip_tables.h>
35
36 MODULE_LICENSE("GPL");
37 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
38 MODULE_DESCRIPTION("IPv4 packet filter");
39
40 /*#define DEBUG_IP_FIREWALL*/
41 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
42 /*#define DEBUG_IP_FIREWALL_USER*/
43
44 #ifdef DEBUG_IP_FIREWALL
45 #define dprintf(format, args...)  printk(format , ## args)
46 #else
47 #define dprintf(format, args...)
48 #endif
49
50 #ifdef DEBUG_IP_FIREWALL_USER
51 #define duprintf(format, args...) printk(format , ## args)
52 #else
53 #define duprintf(format, args...)
54 #endif
55
56 #ifdef CONFIG_NETFILTER_DEBUG
57 #define IP_NF_ASSERT(x)                                         \
58 do {                                                            \
59         if (!(x))                                               \
60                 printk("IP_NF_ASSERT: %s:%s:%u\n",              \
61                        __FUNCTION__, __FILE__, __LINE__);       \
62 } while(0)
63 #else
64 #define IP_NF_ASSERT(x)
65 #endif
66
67 #if 0
68 /* All the better to debug you with... */
69 #define static
70 #define inline
71 #endif
72
73 /*
74    We keep a set of rules for each CPU, so we can avoid write-locking
75    them in the softirq when updating the counters and therefore
76    only need to read-lock in the softirq; doing a write_lock_bh() in user
77    context stops packets coming through and allows user context to read
78    the counters or update the rules.
79
80    Hence the start of any table is given by get_entry() below.  */
81
82 /* Returns whether the packet matches the rule or not. */
83 static inline int
84 ip_packet_match(const struct iphdr *ip,
85                 const char *indev,
86                 const char *outdev,
87                 const struct ipt_ip *ipinfo,
88                 int isfrag)
89 {
90         size_t i;
91         unsigned long ret;
92
93 #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
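/* Editorial note: FWINV(cond, flag) XORs cond with whether the matching
 * IPT_INV_* bit is set in ipinfo->invflags.  Every caller passes
 * cond == "this field does NOT match", so with the flag clear a mismatch
 * makes the rule fail, and with the flag set the test is inverted: the
 * rule then only matches packets where the field does not match. */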
94
95         if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
96                   IPT_INV_SRCIP)
97             || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
98                      IPT_INV_DSTIP)) {
99                 dprintf("Source or dest mismatch.\n");
100
101                 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
102                         NIPQUAD(ip->saddr),
103                         NIPQUAD(ipinfo->smsk.s_addr),
104                         NIPQUAD(ipinfo->src.s_addr),
105                         ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
106                 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
107                         NIPQUAD(ip->daddr),
108                         NIPQUAD(ipinfo->dmsk.s_addr),
109                         NIPQUAD(ipinfo->dst.s_addr),
110                         ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
111                 return 0;
112         }
113
114         /* Look for ifname matches; this should unroll nicely. */
115         for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
116                 ret |= (((const unsigned long *)indev)[i]
117                         ^ ((const unsigned long *)ipinfo->iniface)[i])
118                         & ((const unsigned long *)ipinfo->iniface_mask)[i];
119         }
120
121         if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
122                 dprintf("VIA in mismatch (%s vs %s).%s\n",
123                         indev, ipinfo->iniface,
124                         ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
125                 return 0;
126         }
127
128         for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
129                 ret |= (((const unsigned long *)outdev)[i]
130                         ^ ((const unsigned long *)ipinfo->outiface)[i])
131                         & ((const unsigned long *)ipinfo->outiface_mask)[i];
132         }
133
134         if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
135                 dprintf("VIA out mismatch (%s vs %s).%s\n",
136                         outdev, ipinfo->outiface,
137                         ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
138                 return 0;
139         }
140
141         /* Check specific protocol */
142         if (ipinfo->proto
143             && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
144                 dprintf("Packet protocol %hi does not match %hi.%s\n",
145                         ip->protocol, ipinfo->proto,
146                         ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
147                 return 0;
148         }
149
150         /* If we have a fragment rule but the packet is not a fragment
151          * then we return zero */
152         if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
153                 dprintf("Fragment rule but not fragment.%s\n",
154                         ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
155                 return 0;
156         }
157
158         return 1;
159 }
160
161 static inline int
162 ip_checkentry(const struct ipt_ip *ip)
163 {
164         if (ip->flags & ~IPT_F_MASK) {
165                 duprintf("Unknown flag bits set: %08X\n",
166                          ip->flags & ~IPT_F_MASK);
167                 return 0;
168         }
169         if (ip->invflags & ~IPT_INV_MASK) {
170                 duprintf("Unknown invflag bits set: %08X\n",
171                          ip->invflags & ~IPT_INV_MASK);
172                 return 0;
173         }
174         return 1;
175 }
176
177 static unsigned int
178 ipt_error(struct sk_buff **pskb,
179           const struct net_device *in,
180           const struct net_device *out,
181           unsigned int hooknum,
182           const struct xt_target *target,
183           const void *targinfo)
184 {
185         if (net_ratelimit())
186                 printk("ip_tables: error: `%s'\n", (char *)targinfo);
187
188         return NF_DROP;
189 }
190
191 static inline
192 int do_match(struct ipt_entry_match *m,
193              const struct sk_buff *skb,
194              const struct net_device *in,
195              const struct net_device *out,
196              int offset,
197              int *hotdrop)
198 {
199         /* Stop iteration if it doesn't match */
200         if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
201                                       offset, skb->nh.iph->ihl*4, hotdrop))
202                 return 1;
203         else
204                 return 0;
205 }
206
207 static inline struct ipt_entry *
208 get_entry(void *base, unsigned int offset)
209 {
210         return (struct ipt_entry *)(base + offset);
211 }
212
213 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
214 unsigned int
215 ipt_do_table(struct sk_buff **pskb,
216              unsigned int hook,
217              const struct net_device *in,
218              const struct net_device *out,
219              struct ipt_table *table)
220 {
221         static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
222         u_int16_t offset;
223         struct iphdr *ip;
224         u_int16_t datalen;
225         int hotdrop = 0;
226         /* Initializing verdict to NF_DROP keeps gcc happy. */
227         unsigned int verdict = NF_DROP;
228         const char *indev, *outdev;
229         void *table_base;
230         struct ipt_entry *e, *back;
231         struct xt_table_info *private;
232
233         /* Initialization */
234         ip = (*pskb)->nh.iph;
235         datalen = (*pskb)->len - ip->ihl * 4;
236         indev = in ? in->name : nulldevname;
237         outdev = out ? out->name : nulldevname;
238         /* We handle fragments by dealing with the first fragment as
239          * if it was a normal packet.  All other fragments are treated
240          * normally, except that they will NEVER match rules that ask
241          * things we don't know (ie. tcp syn flag or ports).  If the
242          * rule is also a fragment-specific rule, non-fragments won't
243          * match it. */
244         offset = ntohs(ip->frag_off) & IP_OFFSET;
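        /* offset is non-zero only for the second and later fragments of a
         * datagram; it is handed to ip_packet_match() (as its "isfrag"
         * argument) and to every match, so tests that need transport-layer
         * data never succeed on non-first fragments. */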
245
246         read_lock_bh(&table->lock);
247         IP_NF_ASSERT(table->valid_hooks & (1 << hook));
248         private = table->private;
249         table_base = (void *)private->entries[smp_processor_id()];
250         e = get_entry(table_base, private->hook_entry[hook]);
251
252         /* For return from builtin chain */
253         back = get_entry(table_base, private->underflow[hook]);
254
255         do {
256                 IP_NF_ASSERT(e);
257                 IP_NF_ASSERT(back);
258                 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
259                         struct ipt_entry_target *t;
260
261                         if (IPT_MATCH_ITERATE(e, do_match,
262                                               *pskb, in, out,
263                                               offset, &hotdrop) != 0)
264                                 goto no_match;
265
266                         ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
267
268                         t = ipt_get_target(e);
269                         IP_NF_ASSERT(t->u.kernel.target);
270                         /* Standard target? */
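                        /* A standard target's verdict is used directly:
                         * v >= 0 is a byte offset into the table to jump to,
                         * v == IPT_RETURN pops back to the entry saved in
                         * 'back', and any other negative value encodes an
                         * absolute verdict (NF_ACCEPT, NF_DROP, ...) as
                         * -verdict - 1, undone by (unsigned)(-v) - 1 below. */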
271                         if (!t->u.kernel.target->target) {
272                                 int v;
273
274                                 v = ((struct ipt_standard_target *)t)->verdict;
275                                 if (v < 0) {
276                                         /* Pop from stack? */
277                                         if (v != IPT_RETURN) {
278                                                 verdict = (unsigned)(-v) - 1;
279                                                 break;
280                                         }
281                                         e = back;
282                                         back = get_entry(table_base,
283                                                          back->comefrom);
284                                         continue;
285                                 }
286                                 if (table_base + v != (void *)e + e->next_offset
287                                     && !(e->ip.flags & IPT_F_GOTO)) {
288                                         /* Save old back ptr in next entry */
289                                         struct ipt_entry *next
290                                                 = (void *)e + e->next_offset;
291                                         next->comefrom
292                                                 = (void *)back - table_base;
293                                         /* set back pointer to next entry */
294                                         back = next;
295                                 }
296
297                                 e = get_entry(table_base, v);
298                         } else {
299                                 /* Targets which reenter must return
300                                    abs. verdicts */
301 #ifdef CONFIG_NETFILTER_DEBUG
302                                 ((struct ipt_entry *)table_base)->comefrom
303                                         = 0xeeeeeeec;
304 #endif
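                                /* comefrom of the first entry in this CPU's
                                 * copy is reused as scratch space: a sentinel
                                 * is written before calling the target and
                                 * checked afterwards, so a target that
                                 * re-entered the table and then returned
                                 * IPT_CONTINUE is caught and dropped. */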
305                                 verdict = t->u.kernel.target->target(pskb,
306                                                                      in, out,
307                                                                      hook,
308                                                                      t->u.kernel.target,
309                                                                      t->data);
310
311 #ifdef CONFIG_NETFILTER_DEBUG
312                                 if (((struct ipt_entry *)table_base)->comefrom
313                                     != 0xeeeeeeec
314                                     && verdict == IPT_CONTINUE) {
315                                         printk("Target %s reentered!\n",
316                                                t->u.kernel.target->name);
317                                         verdict = NF_DROP;
318                                 }
319                                 ((struct ipt_entry *)table_base)->comefrom
320                                         = 0x57acc001;
321 #endif
322                                 /* Target might have changed stuff. */
323                                 ip = (*pskb)->nh.iph;
324                                 datalen = (*pskb)->len - ip->ihl * 4;
325
326                                 if (verdict == IPT_CONTINUE)
327                                         e = (void *)e + e->next_offset;
328                                 else
329                                         /* Verdict */
330                                         break;
331                         }
332                 } else {
333
334                 no_match:
335                         e = (void *)e + e->next_offset;
336                 }
337         } while (!hotdrop);
338
339         read_unlock_bh(&table->lock);
340
341 #ifdef DEBUG_ALLOW_ALL
342         return NF_ACCEPT;
343 #else
344         if (hotdrop)
345                 return NF_DROP;
346         else return verdict;
347 #endif
348 }
349
350 /* All zeroes == unconditional rule. */
351 static inline int
352 unconditional(const struct ipt_ip *ip)
353 {
354         unsigned int i;
355
356         for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
357                 if (((__u32 *)ip)[i])
358                         return 0;
359
360         return 1;
361 }
362
363 /* Figures out from what hook each rule can be called: returns 0 if
364    there are loops.  Puts hook bitmask in comefrom. */
365 static int
366 mark_source_chains(struct xt_table_info *newinfo,
367                    unsigned int valid_hooks, void *entry0)
368 {
369         unsigned int hook;
370
371         /* No recursion; use packet counter to save back ptrs (reset
372            to 0 as we leave), and comefrom to save source hook bitmask */
373         for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
374                 unsigned int pos = newinfo->hook_entry[hook];
375                 struct ipt_entry *e
376                         = (struct ipt_entry *)(entry0 + pos);
377
378                 if (!(valid_hooks & (1 << hook)))
379                         continue;
380
381                 /* Set initial back pointer. */
382                 e->counters.pcnt = pos;
383
384                 for (;;) {
385                         struct ipt_standard_target *t
386                                 = (void *)ipt_get_target(e);
387
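                        /* The (1 << NF_IP_NUMHOOKS) bit flags entries already
                         * visited during this hook's walk (it is cleared again
                         * while backtracking through returns below); reaching
                         * an entry that still has it set means the jump
                         * structure loops back on itself. */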
388                         if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
389                                 printk("iptables: loop hook %u pos %u %08X.\n",
390                                        hook, pos, e->comefrom);
391                                 return 0;
392                         }
393                         e->comefrom
394                                 |= ((1 << hook) | (1 << NF_IP_NUMHOOKS));
395
396                         /* Unconditional return/END. */
397                         if (e->target_offset == sizeof(struct ipt_entry)
398                             && (strcmp(t->target.u.user.name,
399                                        IPT_STANDARD_TARGET) == 0)
400                             && t->verdict < 0
401                             && unconditional(&e->ip)) {
402                                 unsigned int oldpos, size;
403
404                                 if (t->verdict < -NF_MAX_VERDICT - 1) {
405                                         duprintf("mark_source_chains: bad "
406                                                 "negative verdict (%i)\n",
407                                                                 t->verdict);
408                                         return 0;
409                                 }
410
411                                 /* Return: backtrack through the last
412                                    big jump. */
413                                 do {
414                                         e->comefrom ^= (1<<NF_IP_NUMHOOKS);
415 #ifdef DEBUG_IP_FIREWALL_USER
416                                         if (e->comefrom
417                                             & (1 << NF_IP_NUMHOOKS)) {
418                                                 duprintf("Back unset "
419                                                          "on hook %u "
420                                                          "rule %u\n",
421                                                          hook, pos);
422                                         }
423 #endif
424                                         oldpos = pos;
425                                         pos = e->counters.pcnt;
426                                         e->counters.pcnt = 0;
427
428                                         /* We're at the start. */
429                                         if (pos == oldpos)
430                                                 goto next;
431
432                                         e = (struct ipt_entry *)
433                                                 (entry0 + pos);
434                                 } while (oldpos == pos + e->next_offset);
435
436                                 /* Move along one */
437                                 size = e->next_offset;
438                                 e = (struct ipt_entry *)
439                                         (entry0 + pos + size);
440                                 e->counters.pcnt = pos;
441                                 pos += size;
442                         } else {
443                                 int newpos = t->verdict;
444
445                                 if (strcmp(t->target.u.user.name,
446                                            IPT_STANDARD_TARGET) == 0
447                                     && newpos >= 0) {
448                                         if (newpos > newinfo->size -
449                                                 sizeof(struct ipt_entry)) {
450                                                 duprintf("mark_source_chains: "
451                                                         "bad verdict (%i)\n",
452                                                                 newpos);
453                                                 return 0;
454                                         }
455                                         /* This is a jump; chase it. */
456                                         duprintf("Jump rule %u -> %u\n",
457                                                  pos, newpos);
458                                 } else {
459                                         /* ... this is a fallthru */
460                                         newpos = pos + e->next_offset;
461                                 }
462                                 e = (struct ipt_entry *)
463                                         (entry0 + newpos);
464                                 e->counters.pcnt = pos;
465                                 pos = newpos;
466                         }
467                 }
468                 next:
469                 duprintf("Finished chain %u\n", hook);
470         }
471         return 1;
472 }
473
474 static inline int
475 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
476 {
477         if (i && (*i)-- == 0)
478                 return 1;
479
480         if (m->u.kernel.match->destroy)
481                 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
482         module_put(m->u.kernel.match->me);
483         return 0;
484 }
485
486 static inline int
487 check_entry(struct ipt_entry *e, const char *name)
488 {
489         struct ipt_entry_target *t;
490
491         if (!ip_checkentry(&e->ip)) {
492                 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
493                 return -EINVAL;
494         }
495
496         if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
497                 return -EINVAL;
498
499         t = ipt_get_target(e);
500         if (e->target_offset + t->u.target_size > e->next_offset)
501                 return -EINVAL;
502
503         return 0;
504 }
505
506 static inline int check_match(struct ipt_entry_match *m, const char *name,
507                                 const struct ipt_ip *ip, unsigned int hookmask)
508 {
509         struct ipt_match *match;
510         int ret;
511
512         match = m->u.kernel.match;
513         ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
514                              name, hookmask, ip->proto,
515                              ip->invflags & IPT_INV_PROTO);
516         if (!ret && m->u.kernel.match->checkentry
517             && !m->u.kernel.match->checkentry(name, ip, match, m->data,
518                                               hookmask)) {
519                 duprintf("ip_tables: check failed for `%s'.\n",
520                          m->u.kernel.match->name);
521                 ret = -EINVAL;
522         }
523         return ret;
524 }
525
526 static inline int
527 find_check_match(struct ipt_entry_match *m,
528             const char *name,
529             const struct ipt_ip *ip,
530             unsigned int hookmask,
531             unsigned int *i)
532 {
533         struct ipt_match *match;
534         int ret;
535
536         match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
537                                                    m->u.user.revision),
538                                         "ipt_%s", m->u.user.name);
539         if (IS_ERR(match) || !match) {
540                 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
541                 return match ? PTR_ERR(match) : -ENOENT;
542         }
543         m->u.kernel.match = match;
544
545         ret = check_match(m, name, ip, hookmask);
546         if (ret)
547                 goto err;
548
549         (*i)++;
550         return 0;
551 err:
552         module_put(m->u.kernel.match->me);
553         return ret;
554 }
555
556 static inline int check_target(struct ipt_entry *e, const char *name)
557 {
558         struct ipt_entry_target *t;
559         struct ipt_target *target;
560         int ret;
561
562         t = ipt_get_target(e);
563         target = t->u.kernel.target;
564         ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
565                               name, e->comefrom, e->ip.proto,
566                               e->ip.invflags & IPT_INV_PROTO);
567         if (!ret && t->u.kernel.target->checkentry
568                    && !t->u.kernel.target->checkentry(name, e, target,
569                                                       t->data, e->comefrom)) {
570                 duprintf("ip_tables: check failed for `%s'.\n",
571                          t->u.kernel.target->name);
572                 ret = -EINVAL;
573         }
574         return ret;
575 }
576
577 static inline int
578 find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
579             unsigned int *i)
580 {
581         struct ipt_entry_target *t;
582         struct ipt_target *target;
583         int ret;
584         unsigned int j;
585
586         ret = check_entry(e, name);
587         if (ret)
588                 return ret;
589
590         j = 0;
591         ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
592                                                         e->comefrom, &j);
593         if (ret != 0)
594                 goto cleanup_matches;
595
596         t = ipt_get_target(e);
597         target = try_then_request_module(xt_find_target(AF_INET,
598                                                      t->u.user.name,
599                                                      t->u.user.revision),
600                                          "ipt_%s", t->u.user.name);
601         if (IS_ERR(target) || !target) {
602                 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
603                 ret = target ? PTR_ERR(target) : -ENOENT;
604                 goto cleanup_matches;
605         }
606         t->u.kernel.target = target;
607
608         ret = check_target(e, name);
609         if (ret)
610                 goto err;
611
612         (*i)++;
613         return 0;
614  err:
615         module_put(t->u.kernel.target->me);
616  cleanup_matches:
617         IPT_MATCH_ITERATE(e, cleanup_match, &j);
618         return ret;
619 }
620
621 static inline int
622 check_entry_size_and_hooks(struct ipt_entry *e,
623                            struct xt_table_info *newinfo,
624                            unsigned char *base,
625                            unsigned char *limit,
626                            const unsigned int *hook_entries,
627                            const unsigned int *underflows,
628                            unsigned int *i)
629 {
630         unsigned int h;
631
632         if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
633             || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
634                 duprintf("Bad offset %p\n", e);
635                 return -EINVAL;
636         }
637
638         if (e->next_offset
639             < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
640                 duprintf("checking: element %p size %u\n",
641                          e, e->next_offset);
642                 return -EINVAL;
643         }
644
645         /* Check hooks & underflows */
646         for (h = 0; h < NF_IP_NUMHOOKS; h++) {
647                 if ((unsigned char *)e - base == hook_entries[h])
648                         newinfo->hook_entry[h] = hook_entries[h];
649                 if ((unsigned char *)e - base == underflows[h])
650                         newinfo->underflow[h] = underflows[h];
651         }
652
653         /* FIXME: underflows must be unconditional, standard verdicts
654            < 0 (not IPT_RETURN). --RR */
655
656         /* Clear counters and comefrom */
657         e->counters = ((struct xt_counters) { 0, 0 });
658         e->comefrom = 0;
659
660         (*i)++;
661         return 0;
662 }
663
664 static inline int
665 cleanup_entry(struct ipt_entry *e, unsigned int *i)
666 {
667         struct ipt_entry_target *t;
668
669         if (i && (*i)-- == 0)
670                 return 1;
671
672         /* Cleanup all matches */
673         IPT_MATCH_ITERATE(e, cleanup_match, NULL);
674         t = ipt_get_target(e);
675         if (t->u.kernel.target->destroy)
676                 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
677         module_put(t->u.kernel.target->me);
678         return 0;
679 }
680
681 /* Checks and translates the user-supplied table segment (held in
682    newinfo) */
683 static int
684 translate_table(const char *name,
685                 unsigned int valid_hooks,
686                 struct xt_table_info *newinfo,
687                 void *entry0,
688                 unsigned int size,
689                 unsigned int number,
690                 const unsigned int *hook_entries,
691                 const unsigned int *underflows)
692 {
693         unsigned int i;
694         int ret;
695
696         newinfo->size = size;
697         newinfo->number = number;
698
699         /* Init all hooks to impossible value. */
700         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
701                 newinfo->hook_entry[i] = 0xFFFFFFFF;
702                 newinfo->underflow[i] = 0xFFFFFFFF;
703         }
704
705         duprintf("translate_table: size %u\n", newinfo->size);
706         i = 0;
707         /* Walk through entries, checking offsets. */
708         ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
709                                 check_entry_size_and_hooks,
710                                 newinfo,
711                                 entry0,
712                                 entry0 + size,
713                                 hook_entries, underflows, &i);
714         if (ret != 0)
715                 return ret;
716
717         if (i != number) {
718                 duprintf("translate_table: %u not %u entries\n",
719                          i, number);
720                 return -EINVAL;
721         }
722
723         /* Check hooks all assigned */
724         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
725                 /* Only hooks which are valid */
726                 if (!(valid_hooks & (1 << i)))
727                         continue;
728                 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
729                         duprintf("Invalid hook entry %u %u\n",
730                                  i, hook_entries[i]);
731                         return -EINVAL;
732                 }
733                 if (newinfo->underflow[i] == 0xFFFFFFFF) {
734                         duprintf("Invalid underflow %u %u\n",
735                                  i, underflows[i]);
736                         return -EINVAL;
737                 }
738         }
739
740         if (!mark_source_chains(newinfo, valid_hooks, entry0))
741                 return -ELOOP;
742
743         /* Finally, each sanity check must pass */
744         i = 0;
745         ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
746                                 find_check_entry, name, size, &i);
747
748         if (ret != 0) {
749                 IPT_ENTRY_ITERATE(entry0, newinfo->size,
750                                 cleanup_entry, &i);
751                 return ret;
752         }
753
754         /* And one copy for every other CPU */
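        /* Each possible CPU gets a byte-identical duplicate of the rules, so
         * the counters embedded in each entry can be bumped from the packet
         * path without atomics or cross-CPU cacheline bouncing; entry0 is
         * already the copy belonging to the current CPU. */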
755         for_each_possible_cpu(i) {
756                 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
757                         memcpy(newinfo->entries[i], entry0, newinfo->size);
758         }
759
760         return ret;
761 }
762
763 /* Gets counters. */
764 static inline int
765 add_entry_to_counter(const struct ipt_entry *e,
766                      struct xt_counters total[],
767                      unsigned int *i)
768 {
769         ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
770
771         (*i)++;
772         return 0;
773 }
774
775 static inline int
776 set_entry_to_counter(const struct ipt_entry *e,
777                      struct ipt_counters total[],
778                      unsigned int *i)
779 {
780         SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
781
782         (*i)++;
783         return 0;
784 }
785
786 static void
787 get_counters(const struct xt_table_info *t,
788              struct xt_counters counters[])
789 {
790         unsigned int cpu;
791         unsigned int i;
792         unsigned int curcpu;
793
794         /* Instead of clearing (by a previous call to memset())
795          * the counters and using adds, we set the counters
796          * with data used by 'current' CPU
797          * We don't care about preemption here.
798          */
799         curcpu = raw_smp_processor_id();
800
801         i = 0;
802         IPT_ENTRY_ITERATE(t->entries[curcpu],
803                           t->size,
804                           set_entry_to_counter,
805                           counters,
806                           &i);
807
808         for_each_possible_cpu(cpu) {
809                 if (cpu == curcpu)
810                         continue;
811                 i = 0;
812                 IPT_ENTRY_ITERATE(t->entries[cpu],
813                                   t->size,
814                                   add_entry_to_counter,
815                                   counters,
816                                   &i);
817         }
818 }
819
820 static inline struct xt_counters * alloc_counters(struct ipt_table *table)
821 {
822         unsigned int countersize;
823         struct xt_counters *counters;
824         struct xt_table_info *private = table->private;
825
826         /* We need an atomic snapshot of the counters: the rest doesn't change
827            (other than comefrom, which userspace doesn't care
828            about). */
829         countersize = sizeof(struct xt_counters) * private->number;
830         counters = vmalloc_node(countersize, numa_node_id());
831
832         if (counters == NULL)
833                 return ERR_PTR(-ENOMEM);
834
835         /* First, sum counters... */
836         write_lock_bh(&table->lock);
837         get_counters(private, counters);
838         write_unlock_bh(&table->lock);
839
840         return counters;
841 }
842
843 static int
844 copy_entries_to_user(unsigned int total_size,
845                      struct ipt_table *table,
846                      void __user *userptr)
847 {
848         unsigned int off, num;
849         struct ipt_entry *e;
850         struct xt_counters *counters;
851         struct xt_table_info *private = table->private;
852         int ret = 0;
853         void *loc_cpu_entry;
854
855         counters = alloc_counters(table);
856         if (IS_ERR(counters))
857                 return PTR_ERR(counters);
858
859         /* choose the copy that is on our node/cpu, ...
860          * This choice is lazy (because current thread is
861          * allowed to migrate to another cpu)
862          */
863         loc_cpu_entry = private->entries[raw_smp_processor_id()];
864         /* ... then copy entire thing ... */
865         if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
866                 ret = -EFAULT;
867                 goto free_counters;
868         }
869
870         /* FIXME: use iterator macros --RR */
871         /* ... then go back and fix counters and names */
872         for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
873                 unsigned int i;
874                 struct ipt_entry_match *m;
875                 struct ipt_entry_target *t;
876
877                 e = (struct ipt_entry *)(loc_cpu_entry + off);
878                 if (copy_to_user(userptr + off
879                                  + offsetof(struct ipt_entry, counters),
880                                  &counters[num],
881                                  sizeof(counters[num])) != 0) {
882                         ret = -EFAULT;
883                         goto free_counters;
884                 }
885
886                 for (i = sizeof(struct ipt_entry);
887                      i < e->target_offset;
888                      i += m->u.match_size) {
889                         m = (void *)e + i;
890
891                         if (copy_to_user(userptr + off + i
892                                          + offsetof(struct ipt_entry_match,
893                                                     u.user.name),
894                                          m->u.kernel.match->name,
895                                          strlen(m->u.kernel.match->name)+1)
896                             != 0) {
897                                 ret = -EFAULT;
898                                 goto free_counters;
899                         }
900                 }
901
902                 t = ipt_get_target(e);
903                 if (copy_to_user(userptr + off + e->target_offset
904                                  + offsetof(struct ipt_entry_target,
905                                             u.user.name),
906                                  t->u.kernel.target->name,
907                                  strlen(t->u.kernel.target->name)+1) != 0) {
908                         ret = -EFAULT;
909                         goto free_counters;
910                 }
911         }
912
913  free_counters:
914         vfree(counters);
915         return ret;
916 }
917
918 #ifdef CONFIG_COMPAT
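/* The 32-bit (compat) rule layout can be smaller than the native one, so for
 * each translated entry we record its offset and how much larger its native
 * representation is than its compat one.  compat_calc_jump() sums the deltas
 * of all entries located before a given offset, which lets jump verdicts and
 * hook offsets be translated between the two layouts. */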
919 struct compat_delta {
920         struct compat_delta *next;
921         u_int16_t offset;
922         short delta;
923 };
924
925 static struct compat_delta *compat_offsets = NULL;
926
927 static int compat_add_offset(u_int16_t offset, short delta)
928 {
929         struct compat_delta *tmp;
930
931         tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
932         if (!tmp)
933                 return -ENOMEM;
934         tmp->offset = offset;
935         tmp->delta = delta;
936         if (compat_offsets) {
937                 tmp->next = compat_offsets->next;
938                 compat_offsets->next = tmp;
939         } else {
940                 compat_offsets = tmp;
941                 tmp->next = NULL;
942         }
943         return 0;
944 }
945
946 static void compat_flush_offsets(void)
947 {
948         struct compat_delta *tmp, *next;
949
950         if (compat_offsets) {
951                 for(tmp = compat_offsets; tmp; tmp = next) {
952                         next = tmp->next;
953                         kfree(tmp);
954                 }
955                 compat_offsets = NULL;
956         }
957 }
958
959 static short compat_calc_jump(u_int16_t offset)
960 {
961         struct compat_delta *tmp;
962         short delta;
963
964         for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
965                 if (tmp->offset < offset)
966                         delta += tmp->delta;
967         return delta;
968 }
969
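/* A positive standard-target verdict is a byte offset (jump target) inside the
 * rule blob, so it has to be shifted by the accumulated layout delta of the
 * entries in front of it when moving between compat and native form.  Negative
 * verdicts (absolute ACCEPT/DROP/RETURN encodings) pass through unchanged. */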
970 static void compat_standard_from_user(void *dst, void *src)
971 {
972         int v = *(compat_int_t *)src;
973
974         if (v > 0)
975                 v += compat_calc_jump(v);
976         memcpy(dst, &v, sizeof(v));
977 }
978
979 static int compat_standard_to_user(void __user *dst, void *src)
980 {
981         compat_int_t cv = *(int *)src;
982
983         if (cv > 0)
984                 cv -= compat_calc_jump(cv);
985         return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
986 }
987
988 static inline int
989 compat_calc_match(struct ipt_entry_match *m, int * size)
990 {
991         *size += xt_compat_match_offset(m->u.kernel.match);
992         return 0;
993 }
994
995 static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
996                 void *base, struct xt_table_info *newinfo)
997 {
998         struct ipt_entry_target *t;
999         u_int16_t entry_offset;
1000         int off, i, ret;
1001
1002         off = 0;
1003         entry_offset = (void *)e - base;
1004         IPT_MATCH_ITERATE(e, compat_calc_match, &off);
1005         t = ipt_get_target(e);
1006         off += xt_compat_target_offset(t->u.kernel.target);
1007         newinfo->size -= off;
1008         ret = compat_add_offset(entry_offset, off);
1009         if (ret)
1010                 return ret;
1011
1012         for (i = 0; i< NF_IP_NUMHOOKS; i++) {
1013                 if (info->hook_entry[i] && (e < (struct ipt_entry *)
1014                                 (base + info->hook_entry[i])))
1015                         newinfo->hook_entry[i] -= off;
1016                 if (info->underflow[i] && (e < (struct ipt_entry *)
1017                                 (base + info->underflow[i])))
1018                         newinfo->underflow[i] -= off;
1019         }
1020         return 0;
1021 }
1022
1023 static int compat_table_info(struct xt_table_info *info,
1024                 struct xt_table_info *newinfo)
1025 {
1026         void *loc_cpu_entry;
1027         int i;
1028
1029         if (!newinfo || !info)
1030                 return -EINVAL;
1031
1032         memset(newinfo, 0, sizeof(struct xt_table_info));
1033         newinfo->size = info->size;
1034         newinfo->number = info->number;
1035         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1036                 newinfo->hook_entry[i] = info->hook_entry[i];
1037                 newinfo->underflow[i] = info->underflow[i];
1038         }
1039         loc_cpu_entry = info->entries[raw_smp_processor_id()];
1040         return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
1041                         compat_calc_entry, info, loc_cpu_entry, newinfo);
1042 }
1043 #endif
1044
1045 static int get_info(void __user *user, int *len, int compat)
1046 {
1047         char name[IPT_TABLE_MAXNAMELEN];
1048         struct ipt_table *t;
1049         int ret;
1050
1051         if (*len != sizeof(struct ipt_getinfo)) {
1052                 duprintf("length %u != %u\n", *len,
1053                         (unsigned int)sizeof(struct ipt_getinfo));
1054                 return -EINVAL;
1055         }
1056
1057         if (copy_from_user(name, user, sizeof(name)) != 0)
1058                 return -EFAULT;
1059
1060         name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1061 #ifdef CONFIG_COMPAT
1062         if (compat)
1063                 xt_compat_lock(AF_INET);
1064 #endif
1065         t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1066                         "iptable_%s", name);
1067         if (t && !IS_ERR(t)) {
1068                 struct ipt_getinfo info;
1069                 struct xt_table_info *private = t->private;
1070
1071 #ifdef CONFIG_COMPAT
1072                 if (compat) {
1073                         struct xt_table_info tmp;
1074                         ret = compat_table_info(private, &tmp);
1075                         compat_flush_offsets();
1076                         private =  &tmp;
1077                 }
1078 #endif
1079                 info.valid_hooks = t->valid_hooks;
1080                 memcpy(info.hook_entry, private->hook_entry,
1081                                 sizeof(info.hook_entry));
1082                 memcpy(info.underflow, private->underflow,
1083                                 sizeof(info.underflow));
1084                 info.num_entries = private->number;
1085                 info.size = private->size;
1086                 strcpy(info.name, name);
1087
1088                 if (copy_to_user(user, &info, *len) != 0)
1089                         ret = -EFAULT;
1090                 else
1091                         ret = 0;
1092
1093                 xt_table_unlock(t);
1094                 module_put(t->me);
1095         } else
1096                 ret = t ? PTR_ERR(t) : -ENOENT;
1097 #ifdef CONFIG_COMPAT
1098         if (compat)
1099                 xt_compat_unlock(AF_INET);
1100 #endif
1101         return ret;
1102 }
1103
1104 static int
1105 get_entries(struct ipt_get_entries __user *uptr, int *len)
1106 {
1107         int ret;
1108         struct ipt_get_entries get;
1109         struct ipt_table *t;
1110
1111         if (*len < sizeof(get)) {
1112                 duprintf("get_entries: %u < %d\n", *len,
1113                                 (unsigned int)sizeof(get));
1114                 return -EINVAL;
1115         }
1116         if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1117                 return -EFAULT;
1118         if (*len != sizeof(struct ipt_get_entries) + get.size) {
1119                 duprintf("get_entries: %u != %u\n", *len,
1120                                 (unsigned int)(sizeof(struct ipt_get_entries) +
1121                                 get.size));
1122                 return -EINVAL;
1123         }
1124
1125         t = xt_find_table_lock(AF_INET, get.name);
1126         if (t && !IS_ERR(t)) {
1127                 struct xt_table_info *private = t->private;
1128                 duprintf("t->private->number = %u\n",
1129                          private->number);
1130                 if (get.size == private->size)
1131                         ret = copy_entries_to_user(private->size,
1132                                                    t, uptr->entrytable);
1133                 else {
1134                         duprintf("get_entries: I've got %u not %u!\n",
1135                                  private->size,
1136                                  get.size);
1137                         ret = -EINVAL;
1138                 }
1139                 module_put(t->me);
1140                 xt_table_unlock(t);
1141         } else
1142                 ret = t ? PTR_ERR(t) : -ENOENT;
1143
1144         return ret;
1145 }
1146
1147 static int
1148 __do_replace(const char *name, unsigned int valid_hooks,
1149                 struct xt_table_info *newinfo, unsigned int num_counters,
1150                 void __user *counters_ptr)
1151 {
1152         int ret;
1153         struct ipt_table *t;
1154         struct xt_table_info *oldinfo;
1155         struct xt_counters *counters;
1156         void *loc_cpu_old_entry;
1157
1158         ret = 0;
1159         counters = vmalloc(num_counters * sizeof(struct xt_counters));
1160         if (!counters) {
1161                 ret = -ENOMEM;
1162                 goto out;
1163         }
1164
1165         t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1166                                     "iptable_%s", name);
1167         if (!t || IS_ERR(t)) {
1168                 ret = t ? PTR_ERR(t) : -ENOENT;
1169                 goto free_newinfo_counters_untrans;
1170         }
1171
1172         /* You lied! */
1173         if (valid_hooks != t->valid_hooks) {
1174                 duprintf("Valid hook crap: %08X vs %08X\n",
1175                          valid_hooks, t->valid_hooks);
1176                 ret = -EINVAL;
1177                 goto put_module;
1178         }
1179
1180         oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1181         if (!oldinfo)
1182                 goto put_module;
1183
1184         /* Update module usage count based on number of rules */
1185         duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1186                 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1187         if ((oldinfo->number > oldinfo->initial_entries) ||
1188             (newinfo->number <= oldinfo->initial_entries))
1189                 module_put(t->me);
1190         if ((oldinfo->number > oldinfo->initial_entries) &&
1191             (newinfo->number <= oldinfo->initial_entries))
1192                 module_put(t->me);
1193
1194         /* Get the old counters. */
1195         get_counters(oldinfo, counters);
1196         /* Decrease module usage counts and free resource */
1197         loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1198         IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
1199         xt_free_table_info(oldinfo);
1200         if (copy_to_user(counters_ptr, counters,
1201                          sizeof(struct xt_counters) * num_counters) != 0)
1202                 ret = -EFAULT;
1203         vfree(counters);
1204         xt_table_unlock(t);
1205         return ret;
1206
1207  put_module:
1208         module_put(t->me);
1209         xt_table_unlock(t);
1210  free_newinfo_counters_untrans:
1211         vfree(counters);
1212  out:
1213         return ret;
1214 }
1215
1216 static int
1217 do_replace(void __user *user, unsigned int len)
1218 {
1219         int ret;
1220         struct ipt_replace tmp;
1221         struct xt_table_info *newinfo;
1222         void *loc_cpu_entry;
1223
1224         if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1225                 return -EFAULT;
1226
1227         /* Hack: Causes ipchains to give correct error msg --RR */
1228         if (len != sizeof(tmp) + tmp.size)
1229                 return -ENOPROTOOPT;
1230
1231         /* overflow check */
1232         if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1233                         SMP_CACHE_BYTES)
1234                 return -ENOMEM;
1235         if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1236                 return -ENOMEM;
1237
1238         newinfo = xt_alloc_table_info(tmp.size);
1239         if (!newinfo)
1240                 return -ENOMEM;
1241
1242         /* choose the copy that is on our node/cpu */
1243         loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1244         if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1245                            tmp.size) != 0) {
1246                 ret = -EFAULT;
1247                 goto free_newinfo;
1248         }
1249
1250         ret = translate_table(tmp.name, tmp.valid_hooks,
1251                               newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1252                               tmp.hook_entry, tmp.underflow);
1253         if (ret != 0)
1254                 goto free_newinfo;
1255
1256         duprintf("ip_tables: Translated table\n");
1257
1258         ret = __do_replace(tmp.name, tmp.valid_hooks,
1259                               newinfo, tmp.num_counters,
1260                               tmp.counters);
1261         if (ret)
1262                 goto free_newinfo_untrans;
1263         return 0;
1264
1265  free_newinfo_untrans:
1266         IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1267  free_newinfo:
1268         xt_free_table_info(newinfo);
1269         return ret;
1270 }
1271
1272 /* We're lazy, and add to the first CPU; overflow works its fey magic
1273  * and everything is OK. */
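/* The increments from userspace land in whichever CPU's copy this syscall
 * happens to run on; since reads (get_counters) sum every per-CPU copy, the
 * totals still come out right. */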
1274 static inline int
1275 add_counter_to_entry(struct ipt_entry *e,
1276                      const struct xt_counters addme[],
1277                      unsigned int *i)
1278 {
1279 #if 0
1280         duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1281                  *i,
1282                  (long unsigned int)e->counters.pcnt,
1283                  (long unsigned int)e->counters.bcnt,
1284                  (long unsigned int)addme[*i].pcnt,
1285                  (long unsigned int)addme[*i].bcnt);
1286 #endif
1287
1288         ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1289
1290         (*i)++;
1291         return 0;
1292 }
1293
1294 static int
1295 do_add_counters(void __user *user, unsigned int len, int compat)
1296 {
1297         unsigned int i;
1298         struct xt_counters_info tmp;
1299         struct xt_counters *paddc;
1300         unsigned int num_counters;
1301         char *name;
1302         int size;
1303         void *ptmp;
1304         struct ipt_table *t;
1305         struct xt_table_info *private;
1306         int ret = 0;
1307         void *loc_cpu_entry;
1308 #ifdef CONFIG_COMPAT
1309         struct compat_xt_counters_info compat_tmp;
1310
1311         if (compat) {
1312                 ptmp = &compat_tmp;
1313                 size = sizeof(struct compat_xt_counters_info);
1314         } else
1315 #endif
1316         {
1317                 ptmp = &tmp;
1318                 size = sizeof(struct xt_counters_info);
1319         }
1320
1321         if (copy_from_user(ptmp, user, size) != 0)
1322                 return -EFAULT;
1323
1324 #ifdef CONFIG_COMPAT
1325         if (compat) {
1326                 num_counters = compat_tmp.num_counters;
1327                 name = compat_tmp.name;
1328         } else
1329 #endif
1330         {
1331                 num_counters = tmp.num_counters;
1332                 name = tmp.name;
1333         }
1334
1335         if (len != size + num_counters * sizeof(struct xt_counters))
1336                 return -EINVAL;
1337
1338         paddc = vmalloc_node(len - size, numa_node_id());
1339         if (!paddc)
1340                 return -ENOMEM;
1341
1342         if (copy_from_user(paddc, user + size, len - size) != 0) {
1343                 ret = -EFAULT;
1344                 goto free;
1345         }
1346
1347         t = xt_find_table_lock(AF_INET, name);
1348         if (!t || IS_ERR(t)) {
1349                 ret = t ? PTR_ERR(t) : -ENOENT;
1350                 goto free;
1351         }
1352
1353         write_lock_bh(&t->lock);
1354         private = t->private;
1355         if (private->number != num_counters) {
1356                 ret = -EINVAL;
1357                 goto unlock_up_free;
1358         }
1359
1360         i = 0;
1361         /* Choose the copy that is on our node */
1362         loc_cpu_entry = private->entries[raw_smp_processor_id()];
1363         IPT_ENTRY_ITERATE(loc_cpu_entry,
1364                           private->size,
1365                           add_counter_to_entry,
1366                           paddc,
1367                           &i);
1368  unlock_up_free:
1369         write_unlock_bh(&t->lock);
1370         xt_table_unlock(t);
1371         module_put(t->me);
1372  free:
1373         vfree(paddc);
1374
1375         return ret;
1376 }
1377
1378 #ifdef CONFIG_COMPAT
1379 struct compat_ipt_replace {
1380         char                    name[IPT_TABLE_MAXNAMELEN];
1381         u32                     valid_hooks;
1382         u32                     num_entries;
1383         u32                     size;
1384         u32                     hook_entry[NF_IP_NUMHOOKS];
1385         u32                     underflow[NF_IP_NUMHOOKS];
1386         u32                     num_counters;
1387         compat_uptr_t           counters;       /* struct ipt_counters * */
1388         struct compat_ipt_entry entries[0];
1389 };
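/* Same layout as struct ipt_replace, but with fixed u32 fields and a
 * compat_uptr_t for the userspace counters pointer, so a 32-bit iptables
 * binary can replace tables on a 64-bit kernel; entries[] holds rules in the
 * compat format. */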
1390
1391 static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
1392                 void __user **dstptr, compat_uint_t *size)
1393 {
1394         return xt_compat_match_to_user(m, dstptr, size);
1395 }
1396
1397 static int compat_copy_entry_to_user(struct ipt_entry *e,
1398                 void __user **dstptr, compat_uint_t *size)
1399 {
1400         struct ipt_entry_target *t;
1401         struct compat_ipt_entry __user *ce;
1402         u_int16_t target_offset, next_offset;
1403         compat_uint_t origsize;
1404         int ret;
1405
1406         ret = -EFAULT;
1407         origsize = *size;
1408         ce = (struct compat_ipt_entry __user *)*dstptr;
1409         if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
1410                 goto out;
1411
1412         *dstptr += sizeof(struct compat_ipt_entry);
1413         ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
1414         target_offset = e->target_offset - (origsize - *size);
1415         if (ret)
1416                 goto out;
1417         t = ipt_get_target(e);
1418         ret = xt_compat_target_to_user(t, dstptr, size);
1419         if (ret)
1420                 goto out;
1421         ret = -EFAULT;
1422         next_offset = e->next_offset - (origsize - *size);
1423         if (put_user(target_offset, &ce->target_offset))
1424                 goto out;
1425         if (put_user(next_offset, &ce->next_offset))
1426                 goto out;
1427         return 0;
1428 out:
1429         return ret;
1430 }
1431
1432 static inline int
1433 compat_check_calc_match(struct ipt_entry_match *m,
1434             const char *name,
1435             const struct ipt_ip *ip,
1436             unsigned int hookmask,
1437             int *size, int *i)
1438 {
1439         struct ipt_match *match;
1440
1441         match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1442                                                    m->u.user.revision),
1443                                         "ipt_%s", m->u.user.name);
1444         if (IS_ERR(match) || !match) {
1445                 duprintf("compat_check_calc_match: `%s' not found\n",
1446                                 m->u.user.name);
1447                 return match ? PTR_ERR(match) : -ENOENT;
1448         }
1449         m->u.kernel.match = match;
1450         *size += xt_compat_match_offset(match);
1451
1452         (*i)++;
1453         return 0;
1454 }
1455
1456 static inline int
1457 check_compat_entry_size_and_hooks(struct ipt_entry *e,
1458                            struct xt_table_info *newinfo,
1459                            unsigned int *size,
1460                            unsigned char *base,
1461                            unsigned char *limit,
1462                            unsigned int *hook_entries,
1463                            unsigned int *underflows,
1464                            unsigned int *i,
1465                            const char *name)
1466 {
1467         struct ipt_entry_target *t;
1468         struct ipt_target *target;
1469         u_int16_t entry_offset;
1470         int ret, off, h, j;
1471
1472         duprintf("check_compat_entry_size_and_hooks %p\n", e);
1473         if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1474             || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1475                 duprintf("Bad offset %p, limit = %p\n", e, limit);
1476                 return -EINVAL;
1477         }
1478
1479         if (e->next_offset < sizeof(struct compat_ipt_entry) +
1480                         sizeof(struct compat_xt_entry_target)) {
1481                 duprintf("checking: element %p size %u\n",
1482                          e, e->next_offset);
1483                 return -EINVAL;
1484         }
1485
1486         ret = check_entry(e, name);
1487         if (ret)
1488                 return ret;
1489
1490         off = 0;
1491         entry_offset = (void *)e - (void *)base;
1492         j = 0;
1493         ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
1494                         e->comefrom, &off, &j);
1495         if (ret != 0)
1496                 goto cleanup_matches;
1497
1498         t = ipt_get_target(e);
1499         target = try_then_request_module(xt_find_target(AF_INET,
1500                                                      t->u.user.name,
1501                                                      t->u.user.revision),
1502                                          "ipt_%s", t->u.user.name);
1503         if (IS_ERR(target) || !target) {
1504                 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1505                                                         t->u.user.name);
1506                 ret = target ? PTR_ERR(target) : -ENOENT;
1507                 goto cleanup_matches;
1508         }
1509         t->u.kernel.target = target;
1510
1511         off += xt_compat_target_offset(target);
1512         *size += off;
1513         ret = compat_add_offset(entry_offset, off);
1514         if (ret)
1515                 goto out;
1516
1517         /* Check hooks & underflows */
1518         for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1519                 if ((unsigned char *)e - base == hook_entries[h])
1520                         newinfo->hook_entry[h] = hook_entries[h];
1521                 if ((unsigned char *)e - base == underflows[h])
1522                         newinfo->underflow[h] = underflows[h];
1523         }
1524
1525         /* Clear counters and comefrom */
1526         e->counters = ((struct ipt_counters) { 0, 0 });
1527         e->comefrom = 0;
1528
1529         (*i)++;
1530         return 0;
1531
1532 out:
1533         module_put(t->u.kernel.target->me);
1534 cleanup_matches:
1535         IPT_MATCH_ITERATE(e, cleanup_match, &j);
1536         return ret;
1537 }
1538
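/* IPT_MATCH_ITERATE() helper for the second pass: expand one compat
 * match into its native in-kernel layout. */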
1539 static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
1540         void **dstptr, compat_uint_t *size, const char *name,
1541         const struct ipt_ip *ip, unsigned int hookmask)
1542 {
1543         xt_compat_match_from_user(m, dstptr, size);
1544         return 0;
1545 }
1546
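/* Second pass: rebuild one entry at *dstptr in native layout, expand
 * its matches and target, fix up target_offset/next_offset and shift
 * any hook_entry/underflow offsets beyond this entry by the amount it
 * grew. */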
1547 static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
1548         unsigned int *size, const char *name,
1549         struct xt_table_info *newinfo, unsigned char *base)
1550 {
1551         struct ipt_entry_target *t;
1552         struct ipt_target *target;
1553         struct ipt_entry *de;
1554         unsigned int origsize;
1555         int ret, h;
1556
1557         ret = 0;
1558         origsize = *size;
1559         de = (struct ipt_entry *)*dstptr;
1560         memcpy(de, e, sizeof(struct ipt_entry));
1561
1562         *dstptr += sizeof(struct compat_ipt_entry);
1563         ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
1564                         name, &de->ip, de->comefrom);
1565         if (ret)
1566                 return ret;
1567         de->target_offset = e->target_offset - (origsize - *size);
1568         t = ipt_get_target(e);
1569         target = t->u.kernel.target;
1570         xt_compat_target_from_user(t, dstptr, size);
1571
1572         de->next_offset = e->next_offset - (origsize - *size);
1573         for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1574                 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1575                         newinfo->hook_entry[h] -= origsize - *size;
1576                 if ((unsigned char *)de - base < newinfo->underflow[h])
1577                         newinfo->underflow[h] -= origsize - *size;
1578         }
1579         return ret;
1580 }
1581
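/* Run the normal check_match()/check_target() validation on an entry
 * that has already been translated to native layout. */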
1582 static inline int compat_check_entry(struct ipt_entry *e, const char *name)
1583 {
1584         int ret;
1585
1586         ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom);
1587         if (ret)
1588                 return ret;
1589
1590         return check_target(e, name);
1591 }
1592
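/* Compat counterpart of translate_table(): under the xt compat lock,
 * size and check every compat entry, allocate a native-sized table,
 * convert all entries, verify the chain structure with
 * mark_source_chains() and finally replicate the result on every
 * possible CPU. */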
1593 static int
1594 translate_compat_table(const char *name,
1595                 unsigned int valid_hooks,
1596                 struct xt_table_info **pinfo,
1597                 void **pentry0,
1598                 unsigned int total_size,
1599                 unsigned int number,
1600                 unsigned int *hook_entries,
1601                 unsigned int *underflows)
1602 {
1603         unsigned int i, j;
1604         struct xt_table_info *newinfo, *info;
1605         void *pos, *entry0, *entry1;
1606         unsigned int size;
1607         int ret;
1608
1609         info = *pinfo;
1610         entry0 = *pentry0;
1611         size = total_size;
1612         info->number = number;
1613
1614         /* Init all hooks to impossible value. */
1615         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1616                 info->hook_entry[i] = 0xFFFFFFFF;
1617                 info->underflow[i] = 0xFFFFFFFF;
1618         }
1619
1620         duprintf("translate_compat_table: size %u\n", info->size);
1621         j = 0;
1622         xt_compat_lock(AF_INET);
1623         /* Walk through entries, checking offsets. */
1624         ret = IPT_ENTRY_ITERATE(entry0, total_size,
1625                                 check_compat_entry_size_and_hooks,
1626                                 info, &size, entry0,
1627                                 entry0 + total_size,
1628                                 hook_entries, underflows, &j, name);
1629         if (ret != 0)
1630                 goto out_unlock;
1631
1632         ret = -EINVAL;
1633         if (j != number) {
1634                 duprintf("translate_compat_table: %u not %u entries\n",
1635                          j, number);
1636                 goto out_unlock;
1637         }
1638
1639         /* Check hooks all assigned */
1640         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1641                 /* Only hooks which are valid */
1642                 if (!(valid_hooks & (1 << i)))
1643                         continue;
1644                 if (info->hook_entry[i] == 0xFFFFFFFF) {
1645                         duprintf("Invalid hook entry %u %u\n",
1646                                  i, hook_entries[i]);
1647                         goto out_unlock;
1648                 }
1649                 if (info->underflow[i] == 0xFFFFFFFF) {
1650                         duprintf("Invalid underflow %u %u\n",
1651                                  i, underflows[i]);
1652                         goto out_unlock;
1653                 }
1654         }
1655
1656         ret = -ENOMEM;
1657         newinfo = xt_alloc_table_info(size);
1658         if (!newinfo)
1659                 goto out_unlock;
1660
1661         newinfo->number = number;
1662         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1663                 newinfo->hook_entry[i] = info->hook_entry[i];
1664                 newinfo->underflow[i] = info->underflow[i];
1665         }
1666         entry1 = newinfo->entries[raw_smp_processor_id()];
1667         pos = entry1;
1668         size = total_size;
1669         ret = IPT_ENTRY_ITERATE(entry0, total_size,
1670                         compat_copy_entry_from_user, &pos, &size,
1671                         name, newinfo, entry1);
1672         compat_flush_offsets();
1673         xt_compat_unlock(AF_INET);
1674         if (ret)
1675                 goto free_newinfo;
1676
1677         ret = -ELOOP;
1678         if (!mark_source_chains(newinfo, valid_hooks, entry1))
1679                 goto free_newinfo;
1680
1681         ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
1682                                                                         name);
1683         if (ret)
1684                 goto free_newinfo;
1685
1686         /* And one copy for every other CPU */
1687         for_each_possible_cpu(i)
1688                 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1689                         memcpy(newinfo->entries[i], entry1, newinfo->size);
1690
1691         *pinfo = newinfo;
1692         *pentry0 = entry1;
1693         xt_free_table_info(info);
1694         return 0;
1695
1696 free_newinfo:
1697         xt_free_table_info(newinfo);
1698 out:
1699         IPT_ENTRY_ITERATE(entry0, total_size, cleanup_entry, &j);
1700         return ret;
1701 out_unlock:
1702         compat_flush_offsets();
1703         xt_compat_unlock(AF_INET);
1704         goto out;
1705 }
1706
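/* IPT_SO_SET_REPLACE from 32-bit userspace: copy in the compat header
 * and entry blob, translate the table to native layout and hand it to
 * __do_replace(). */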
1707 static int
1708 compat_do_replace(void __user *user, unsigned int len)
1709 {
1710         int ret;
1711         struct compat_ipt_replace tmp;
1712         struct xt_table_info *newinfo;
1713         void *loc_cpu_entry;
1714
1715         if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1716                 return -EFAULT;
1717
1718         /* Hack: Causes ipchains to give correct error msg --RR */
1719         if (len != sizeof(tmp) + tmp.size)
1720                 return -ENOPROTOOPT;
1721
1722         /* overflow check */
1723         if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1724                         SMP_CACHE_BYTES)
1725                 return -ENOMEM;
1726         if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1727                 return -ENOMEM;
1728
1729         newinfo = xt_alloc_table_info(tmp.size);
1730         if (!newinfo)
1731                 return -ENOMEM;
1732
1733         /* choose the copy that is on our node/cpu */
1734         loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1735         if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1736                            tmp.size) != 0) {
1737                 ret = -EFAULT;
1738                 goto free_newinfo;
1739         }
1740
1741         ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1742                               &newinfo, &loc_cpu_entry, tmp.size,
1743                               tmp.num_entries, tmp.hook_entry, tmp.underflow);
1744         if (ret != 0)
1745                 goto free_newinfo;
1746
1747         duprintf("compat_do_replace: Translated table\n");
1748
1749         ret = __do_replace(tmp.name, tmp.valid_hooks,
1750                               newinfo, tmp.num_counters,
1751                               compat_ptr(tmp.counters));
1752         if (ret)
1753                 goto free_newinfo_untrans;
1754         return 0;
1755
1756  free_newinfo_untrans:
1757         IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1758  free_newinfo:
1759         xt_free_table_info(newinfo);
1760         return ret;
1761 }
1762
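/* Compat setsockopt entry point: replacement goes through
 * compat_do_replace(), counter updates through do_add_counters() with
 * the compat flag set. */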
1763 static int
1764 compat_do_ipt_set_ctl(struct sock *sk,  int cmd, void __user *user,
1765                 unsigned int len)
1766 {
1767         int ret;
1768
1769         if (!capable(CAP_NET_ADMIN))
1770                 return -EPERM;
1771
1772         switch (cmd) {
1773         case IPT_SO_SET_REPLACE:
1774                 ret = compat_do_replace(user, len);
1775                 break;
1776
1777         case IPT_SO_SET_ADD_COUNTERS:
1778                 ret = do_add_counters(user, len, 1);
1779                 break;
1780
1781         default:
1782                 duprintf("compat_do_ipt_set_ctl: unknown request %i\n", cmd);
1783                 ret = -EINVAL;
1784         }
1785
1786         return ret;
1787 }
1788
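/* 32-bit userland view of struct ipt_get_entries. */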
1789 struct compat_ipt_get_entries
1790 {
1791         char name[IPT_TABLE_MAXNAMELEN];
1792         compat_uint_t size;
1793         struct compat_ipt_entry entrytable[0];
1794 };
1795
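/* Dump the table for a 32-bit reader: convert every entry to compat
 * layout, then walk the copied data again to fill in the counters and
 * the user-visible match/target names. */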
1796 static int compat_copy_entries_to_user(unsigned int total_size,
1797                      struct ipt_table *table, void __user *userptr)
1798 {
1799         unsigned int off, num;
1800         struct compat_ipt_entry e;
1801         struct xt_counters *counters;
1802         struct xt_table_info *private = table->private;
1803         void __user *pos;
1804         unsigned int size;
1805         int ret = 0;
1806         void *loc_cpu_entry;
1807
1808         counters = alloc_counters(table);
1809         if (IS_ERR(counters))
1810                 return PTR_ERR(counters);
1811
1812         /* choose the copy that is on our node/cpu, ...
1813          * This choice is lazy (because the current thread is
1814          * allowed to migrate to another cpu)
1815          */
1816         loc_cpu_entry = private->entries[raw_smp_processor_id()];
1817         pos = userptr;
1818         size = total_size;
1819         ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1820                         compat_copy_entry_to_user, &pos, &size);
1821         if (ret)
1822                 goto free_counters;
1823
1824         /* ... then go back and fix counters and names */
1825         for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
1826                 unsigned int i;
1827                 struct ipt_entry_match m;
1828                 struct ipt_entry_target t;
1829
1830                 ret = -EFAULT;
1831                 if (copy_from_user(&e, userptr + off,
1832                                         sizeof(struct compat_ipt_entry)))
1833                         goto free_counters;
1834                 if (copy_to_user(userptr + off +
1835                         offsetof(struct compat_ipt_entry, counters),
1836                          &counters[num], sizeof(counters[num])))
1837                         goto free_counters;
1838
1839                 for (i = sizeof(struct compat_ipt_entry);
1840                                 i < e.target_offset; i += m.u.match_size) {
1841                         if (copy_from_user(&m, userptr + off + i,
1842                                         sizeof(struct ipt_entry_match)))
1843                                 goto free_counters;
1844                         if (copy_to_user(userptr + off + i +
1845                                 offsetof(struct ipt_entry_match, u.user.name),
1846                                 m.u.kernel.match->name,
1847                                 strlen(m.u.kernel.match->name) + 1))
1848                                 goto free_counters;
1849                 }
1850
1851                 if (copy_from_user(&t, userptr + off + e.target_offset,
1852                                         sizeof(struct ipt_entry_target)))
1853                         goto free_counters;
1854                 if (copy_to_user(userptr + off + e.target_offset +
1855                         offsetof(struct ipt_entry_target, u.user.name),
1856                         t.u.kernel.target->name,
1857                         strlen(t.u.kernel.target->name) + 1))
1858                         goto free_counters;
1859         }
1860         ret = 0;
1861 free_counters:
1862         vfree(counters);
1863         return ret;
1864 }
1865
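/* IPT_SO_GET_ENTRIES for compat callers: validate the requested size
 * against the compat-translated table size before copying the entries
 * out. */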
1866 static int
1867 compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1868 {
1869         int ret;
1870         struct compat_ipt_get_entries get;
1871         struct ipt_table *t;
1872
1873
1874         if (*len < sizeof(get)) {
1875                 duprintf("compat_get_entries: %u < %u\n",
1876                                 *len, (unsigned int)sizeof(get));
1877                 return -EINVAL;
1878         }
1879
1880         if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1881                 return -EFAULT;
1882
1883         if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1884                 duprintf("compat_get_entries: %u != %u\n", *len,
1885                         (unsigned int)(sizeof(struct compat_ipt_get_entries) +
1886                         get.size));
1887                 return -EINVAL;
1888         }
1889
1890         xt_compat_lock(AF_INET);
1891         t = xt_find_table_lock(AF_INET, get.name);
1892         if (t && !IS_ERR(t)) {
1893                 struct xt_table_info *private = t->private;
1894                 struct xt_table_info info;
1895                 duprintf("t->private->number = %u\n",
1896                          private->number);
1897                 ret = compat_table_info(private, &info);
1898                 if (!ret && get.size == info.size) {
1899                         ret = compat_copy_entries_to_user(private->size,
1900                                                    t, uptr->entrytable);
1901                 } else if (!ret) {
1902                         duprintf("compat_get_entries: I've got %u not %u!\n",
1903                                  private->size,
1904                                  get.size);
1905                         ret = -EINVAL;
1906                 }
1907                 compat_flush_offsets();
1908                 module_put(t->me);
1909                 xt_table_unlock(t);
1910         } else
1911                 ret = t ? PTR_ERR(t) : -ENOENT;
1912
1913         xt_compat_unlock(AF_INET);
1914         return ret;
1915 }
1916
1917 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
1918
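/* Compat getsockopt entry point: GET_INFO and GET_ENTRIES need
 * translation, everything else falls through to the native handler. */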
1919 static int
1920 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1921 {
1922         int ret;
1923
1924         if (!capable(CAP_NET_ADMIN))
1925                 return -EPERM;
1926
1927         switch (cmd) {
1928         case IPT_SO_GET_INFO:
1929                 ret = get_info(user, len, 1);
1930                 break;
1931         case IPT_SO_GET_ENTRIES:
1932                 ret = compat_get_entries(user, len);
1933                 break;
1934         default:
1935                 ret = do_ipt_get_ctl(sk, cmd, user, len);
1936         }
1937         return ret;
1938 }
1939 #endif
1940
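/* Native (non-compat) setsockopt handler. */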
1941 static int
1942 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1943 {
1944         int ret;
1945
1946         if (!capable(CAP_NET_ADMIN))
1947                 return -EPERM;
1948
1949         switch (cmd) {
1950         case IPT_SO_SET_REPLACE:
1951                 ret = do_replace(user, len);
1952                 break;
1953
1954         case IPT_SO_SET_ADD_COUNTERS:
1955                 ret = do_add_counters(user, len, 0);
1956                 break;
1957
1958         default:
1959                 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1960                 ret = -EINVAL;
1961         }
1962
1963         return ret;
1964 }
1965
1966 static int
1967 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1968 {
1969         int ret;
1970
1971         if (!capable(CAP_NET_ADMIN))
1972                 return -EPERM;
1973
1974         switch (cmd) {
1975         case IPT_SO_GET_INFO:
1976                 ret = get_info(user, len, 0);
1977                 break;
1978
1979         case IPT_SO_GET_ENTRIES:
1980                 ret = get_entries(user, len);
1981                 break;
1982
1983         case IPT_SO_GET_REVISION_MATCH:
1984         case IPT_SO_GET_REVISION_TARGET: {
1985                 struct ipt_get_revision rev;
1986                 int target;
1987
1988                 if (*len != sizeof(rev)) {
1989                         ret = -EINVAL;
1990                         break;
1991                 }
1992                 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
1993                         ret = -EFAULT;
1994                         break;
1995                 }
1996
1997                 if (cmd == IPT_SO_GET_REVISION_TARGET)
1998                         target = 1;
1999                 else
2000                         target = 0;
2001
2002                 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2003                                                          rev.revision,
2004                                                          target, &ret),
2005                                         "ipt_%s", rev.name);
2006                 break;
2007         }
2008
2009         default:
2010                 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
2011                 ret = -EINVAL;
2012         }
2013
2014         return ret;
2015 }
2016
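/* Allocate the per-CPU rule copies, translate the initial ruleset
 * from repl and register the table with the xt core. */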
2017 int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
2018 {
2019         int ret;
2020         struct xt_table_info *newinfo;
2021         static struct xt_table_info bootstrap
2022                 = { 0, 0, 0, { 0 }, { 0 }, { } };
2023         void *loc_cpu_entry;
2024
2025         newinfo = xt_alloc_table_info(repl->size);
2026         if (!newinfo)
2027                 return -ENOMEM;
2028
2029         /* choose the copy on our node/cpu,
2030          * but don't care about preemption
2031          */
2032         loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2033         memcpy(loc_cpu_entry, repl->entries, repl->size);
2034
2035         ret = translate_table(table->name, table->valid_hooks,
2036                               newinfo, loc_cpu_entry, repl->size,
2037                               repl->num_entries,
2038                               repl->hook_entry,
2039                               repl->underflow);
2040         if (ret != 0) {
2041                 xt_free_table_info(newinfo);
2042                 return ret;
2043         }
2044
2045         ret = xt_register_table(table, &bootstrap, newinfo);
2046         if (ret != 0) {
2047                 xt_free_table_info(newinfo);
2048                 return ret;
2049         }
2050
2051         return 0;
2052 }
2053
2054 void ipt_unregister_table(struct ipt_table *table)
2055 {
2056         struct xt_table_info *private;
2057         void *loc_cpu_entry;
2058
2059         private = xt_unregister_table(table);
2060
2061         /* Decrease module usage counts and free resources */
2062         loc_cpu_entry = private->entries[raw_smp_processor_id()];
2063         IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2064         xt_free_table_info(private);
2065 }
2066
2067 /* Returns 1 if the type and code are matched by the range, 0 otherwise */
2068 static inline int
2069 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2070                      u_int8_t type, u_int8_t code,
2071                      int invert)
2072 {
2073         return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code))
2074                 ^ invert;
2075 }
2076
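/* Match function of the built-in "icmp" match: only looks at the
 * first fragment, hotdrops packets whose ICMP header cannot be read,
 * and otherwise compares type and code against the configured range. */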
2077 static int
2078 icmp_match(const struct sk_buff *skb,
2079            const struct net_device *in,
2080            const struct net_device *out,
2081            const struct xt_match *match,
2082            const void *matchinfo,
2083            int offset,
2084            unsigned int protoff,
2085            int *hotdrop)
2086 {
2087         struct icmphdr _icmph, *ic;
2088         const struct ipt_icmp *icmpinfo = matchinfo;
2089
2090         /* Must not be a fragment. */
2091         if (offset)
2092                 return 0;
2093
2094         ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
2095         if (ic == NULL) {
2096                 /* We've been asked to examine this packet, and we
2097                  * can't.  Hence, no choice but to drop.
2098                  */
2099                 duprintf("Dropping evil ICMP tinygram.\n");
2100                 *hotdrop = 1;
2101                 return 0;
2102         }
2103
2104         return icmp_type_code_match(icmpinfo->type,
2105                                     icmpinfo->code[0],
2106                                     icmpinfo->code[1],
2107                                     ic->type, ic->code,
2108                                     !!(icmpinfo->invflags&IPT_ICMP_INV));
2109 }
2110
2111 /* Called when user tries to insert an entry of this type. */
2112 static int
2113 icmp_checkentry(const char *tablename,
2114            const void *info,
2115            const struct xt_match *match,
2116            void *matchinfo,
2117            unsigned int hook_mask)
2118 {
2119         const struct ipt_icmp *icmpinfo = matchinfo;
2120
2121         /* Must specify no unknown invflags */
2122         return !(icmpinfo->invflags & ~IPT_ICMP_INV);
2123 }
2124
2125 /* The built-in targets: standard (NULL) and error. */
2126 static struct ipt_target ipt_standard_target = {
2127         .name           = IPT_STANDARD_TARGET,
2128         .targetsize     = sizeof(int),
2129         .family         = AF_INET,
2130 #ifdef CONFIG_COMPAT
2131         .compatsize     = sizeof(compat_int_t),
2132         .compat_from_user = compat_standard_from_user,
2133         .compat_to_user = compat_standard_to_user,
2134 #endif
2135 };
2136
2137 static struct ipt_target ipt_error_target = {
2138         .name           = IPT_ERROR_TARGET,
2139         .target         = ipt_error,
2140         .targetsize     = IPT_FUNCTION_MAXNAMELEN,
2141         .family         = AF_INET,
2142 };
2143
2144 static struct nf_sockopt_ops ipt_sockopts = {
2145         .pf             = PF_INET,
2146         .set_optmin     = IPT_BASE_CTL,
2147         .set_optmax     = IPT_SO_SET_MAX+1,
2148         .set            = do_ipt_set_ctl,
2149 #ifdef CONFIG_COMPAT
2150         .compat_set     = compat_do_ipt_set_ctl,
2151 #endif
2152         .get_optmin     = IPT_BASE_CTL,
2153         .get_optmax     = IPT_SO_GET_MAX+1,
2154         .get            = do_ipt_get_ctl,
2155 #ifdef CONFIG_COMPAT
2156         .compat_get     = compat_do_ipt_get_ctl,
2157 #endif
2158 };
2159
2160 static struct ipt_match icmp_matchstruct = {
2161         .name           = "icmp",
2162         .match          = icmp_match,
2163         .matchsize      = sizeof(struct ipt_icmp),
2164         .proto          = IPPROTO_ICMP,
2165         .family         = AF_INET,
2166         .checkentry     = icmp_checkentry,
2167 };
2168
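/* Module init: register the AF_INET xt protocol, the built-in
 * standard and error targets, the icmp match and the sockopt
 * interface, unwinding in reverse order on failure. */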
2169 static int __init ip_tables_init(void)
2170 {
2171         int ret;
2172
2173         ret = xt_proto_init(AF_INET);
2174         if (ret < 0)
2175                 goto err1;
2176
2177         /* No one else will be downing the sem now, so we won't sleep */
2178         ret = xt_register_target(&ipt_standard_target);
2179         if (ret < 0)
2180                 goto err2;
2181         ret = xt_register_target(&ipt_error_target);
2182         if (ret < 0)
2183                 goto err3;
2184         ret = xt_register_match(&icmp_matchstruct);
2185         if (ret < 0)
2186                 goto err4;
2187
2188         /* Register setsockopt */
2189         ret = nf_register_sockopt(&ipt_sockopts);
2190         if (ret < 0)
2191                 goto err5;
2192
2193         printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
2194         return 0;
2195
2196 err5:
2197         xt_unregister_match(&icmp_matchstruct);
2198 err4:
2199         xt_unregister_target(&ipt_error_target);
2200 err3:
2201         xt_unregister_target(&ipt_standard_target);
2202 err2:
2203         xt_proto_fini(AF_INET);
2204 err1:
2205         return ret;
2206 }
2207
2208 static void __exit ip_tables_fini(void)
2209 {
2210         nf_unregister_sockopt(&ipt_sockopts);
2211
2212         xt_unregister_match(&icmp_matchstruct);
2213         xt_unregister_target(&ipt_error_target);
2214         xt_unregister_target(&ipt_standard_target);
2215
2216         xt_proto_fini(AF_INET);
2217 }
2218
2219 EXPORT_SYMBOL(ipt_register_table);
2220 EXPORT_SYMBOL(ipt_unregister_table);
2221 EXPORT_SYMBOL(ipt_do_table);
2222 module_init(ip_tables_init);
2223 module_exit(ip_tables_fini);