#ifndef _X_TABLES_H
#define _X_TABLES_H

#include <linux/kernel.h>
#include <linux/types.h>

#define XT_FUNCTION_MAXNAMELEN 30
#define XT_TABLE_MAXNAMELEN 32
struct xt_entry_match {
	union {
		struct {
			__u16 match_size;

			/* Used by userspace */
			char name[XT_FUNCTION_MAXNAMELEN-1];

			__u8 revision;
		} user;
		struct {
			__u16 match_size;

			/* Used inside the kernel */
			struct xt_match *match;
		} kernel;

		/* Total length */
		__u16 match_size;
	} u;

	unsigned char data[0];
};
struct xt_entry_target {
	union {
		struct {
			__u16 target_size;

			/* Used by userspace */
			char name[XT_FUNCTION_MAXNAMELEN-1];

			__u8 revision;
		} user;
		struct {
			__u16 target_size;

			/* Used inside the kernel */
			struct xt_target *target;
		} kernel;

		/* Total length */
		__u16 target_size;
	} u;

	unsigned char data[0];
};
#define XT_TARGET_INIT(__name, __size)					\
{									\
	.target.u.user = {						\
		.target_size	= XT_ALIGN(__size),			\
		.name		= __name,				\
	},								\
}
struct xt_standard_target {
	struct xt_entry_target target;
	int verdict;
};
/* The argument to IPT_SO_GET_REVISION_*.  Returns highest revision
 * kernel supports, if >= revision. */
struct xt_get_revision {
	char name[XT_FUNCTION_MAXNAMELEN-1];

	__u8 revision;
};
/* CONTINUE verdict for targets */
#define XT_CONTINUE 0xFFFFFFFF

/* For standard target */
#define XT_RETURN (-NF_REPEAT - 1)
/* this is a dummy structure to find out the alignment requirement for a struct
 * containing all the fundamental data types that are used in ipt_entry,
 * ip6t_entry and arpt_entry.  This sucks, and it is a hack.  It will be my
 * personal pleasure to remove it -HW
 */
struct _xt_align {
	__u8 u8;
	__u16 u16;
	__u32 u32;
	__u64 u64;
};

#define XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _xt_align))
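
/*
 * Usage sketch (illustrative, not part of the original header): per-rule
 * match/target payloads are padded with XT_ALIGN() so that whatever rule
 * data follows them stays aligned for every fundamental type above. The
 * struct and variable names here are hypothetical.
 */
#if 0
struct example_mtinfo {		/* hypothetical per-rule private data */
	__u32	value;
	__u16	flags;
};

/* payload size rounded up to the alignment of struct _xt_align */
static const unsigned int example_size = XT_ALIGN(sizeof(struct example_mtinfo));
#endif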
/* Standard return verdict, or do jump. */
#define XT_STANDARD_TARGET ""
/* Error verdict */
#define XT_ERROR_TARGET "ERROR"
#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)

struct xt_counters {
	__u64 pcnt, bcnt;		/* Packet and byte counters */
};
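
/*
 * Usage sketch (illustrative): bumping a rule's counters the way the
 * per-family table walkers do. example_account() is hypothetical.
 */
#if 0
static void example_account(struct xt_counters *c, const struct sk_buff *skb)
{
	/* ADD_COUNTER takes bytes first, then packets */
	ADD_COUNTER(*c, skb->len, 1);
}
#endif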
/* The argument to IPT_SO_ADD_COUNTERS. */
struct xt_counters_info {
	/* Which table. */
	char name[XT_TABLE_MAXNAMELEN];

	unsigned int num_counters;

	/* The counters (actually `number' of these). */
	struct xt_counters counters[0];
};
#define XT_INV_PROTO		0x40	/* Invert the sense of PROTO. */

#ifndef __KERNEL__
/* fn returns 0 to continue iteration */
#define XT_MATCH_ITERATE(type, e, fn, args...)			\
({								\
	unsigned int __i;					\
	int __ret = 0;						\
	struct xt_entry_match *__m;				\
								\
	for (__i = sizeof(type);				\
	     __i < (e)->target_offset;				\
	     __i += __m->u.match_size) {			\
		__m = (void *)e + __i;				\
								\
		__ret = fn(__m , ## args);			\
		if (__ret != 0)					\
			break;					\
	}							\
	__ret;							\
})
/* fn returns 0 to continue iteration */
#define XT_ENTRY_ITERATE_CONTINUE(type, entries, size, n, fn, args...) \
({								\
	unsigned int __i, __n;					\
	int __ret = 0;						\
	type *__entry;						\
								\
	for (__i = 0, __n = 0; __i < (size);			\
	     __i += __entry->next_offset, __n++) {		\
		__entry = (void *)(entries) + __i;		\
		if (__n < n)					\
			continue;				\
								\
		__ret = fn(__entry , ## args);			\
		if (__ret != 0)					\
			break;					\
	}							\
	__ret;							\
})
/* fn returns 0 to continue iteration */
#define XT_ENTRY_ITERATE(type, entries, size, fn, args...) \
	XT_ENTRY_ITERATE_CONTINUE(type, entries, size, 0, fn, args)
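
/*
 * Usage sketch (illustrative, userspace side): XT_ENTRY_ITERATE walks a
 * rule blob by next_offset and stops as soon as the callback returns
 * nonzero. print_entry(), blob and blob_size are hypothetical.
 */
#if 0
static int print_entry(struct ipt_entry *e)
{
	printf("rule: target at offset %u\n", e->target_offset);
	return 0;	/* 0 => continue with the next entry */
}

static int dump_all(void *blob, unsigned int blob_size)
{
	return XT_ENTRY_ITERATE(struct ipt_entry, blob, blob_size, print_entry);
}
#endif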
#endif /* !__KERNEL__ */
/* pos is normally a struct ipt_entry/ip6t_entry/etc. */
#define xt_entry_foreach(pos, ehead, esize) \
	for ((pos) = (typeof(pos))(ehead); \
	     (pos) < (typeof(pos))((char *)(ehead) + (esize)); \
	     (pos) = (typeof(pos))((char *)(pos) + (pos)->next_offset))
/* can only be xt_entry_match, so no use of typeof here */
#define xt_ematch_foreach(pos, entry) \
	for ((pos) = (struct xt_entry_match *)entry->elems; \
	     (pos) < (struct xt_entry_match *)((char *)(entry) + \
	             (entry)->target_offset); \
	     (pos) = (struct xt_entry_match *)((char *)(pos) + \
	             (pos)->u.match_size))
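
/*
 * Usage sketch (illustrative): combining both iterators to visit every
 * match of every rule in a table blob. The function name is hypothetical;
 * the pattern mirrors what the ip(6)tables core does internally.
 */
#if 0
static void example_walk(struct ipt_entry *entries, unsigned int size)
{
	struct ipt_entry *iter;
	struct xt_entry_match *ematch;

	xt_entry_foreach(iter, entries, size)
		xt_ematch_foreach(ematch, iter)
			pr_info("match: %s\n", ematch->u.user.name);
}
#endif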
#ifdef __KERNEL__

#include <linux/netdevice.h>
/**
 * struct xt_match_param - parameters for match extensions' match functions
 *
 * @in:		input netdevice
 * @out:	output netdevice
 * @match:	struct xt_match through which this function was invoked
 * @matchinfo:	per-match data
 * @fragoff:	packet is a fragment, this is the data offset
 * @thoff:	position of transport header relative to skb->data
 * @hooknum:	hook number through which the packet arrived
 * @family:	actual NFPROTO_* through which the function is invoked
 *		(helpful when match->family == NFPROTO_UNSPEC)
 * @hotdrop:	drop packet if we had inspection problems
 * Network namespace obtainable using dev_net(in/out)
 */
struct xt_match_param {
	const struct net_device *in, *out;
	const struct xt_match *match;
	const void *matchinfo;
	int fragoff;
	unsigned int thoff;
	unsigned int hooknum;
	u_int8_t family;
	bool *hotdrop;
};
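
/*
 * Sketch of a match function consuming these parameters (hypothetical
 * "example" extension; not part of this header). It skips non-first
 * fragments and signals a malformed packet through *par->hotdrop.
 */
#if 0
static bool example_mt(const struct sk_buff *skb,
		       const struct xt_match_param *par)
{
	if (par->fragoff != 0)		/* only inspect first fragments */
		return false;
	if (par->thoff > skb->len) {	/* inspection problem: force a drop */
		*par->hotdrop = true;
		return false;
	}
	return true;
}
#endif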
/**
 * struct xt_mtchk_param - parameters for match extensions'
 * checkentry functions
 *
 * @net:	network namespace through which the check was invoked
 * @table:	table into which the rule is being inserted
 * @entryinfo:	the family-specific rule data
 *		(struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry)
 * @match:	struct xt_match through which this function was invoked
 * @matchinfo:	per-match data
 * @hook_mask:	via which hooks the new rule is reachable
 * Other fields as above.
 */
struct xt_mtchk_param {
	struct net *net;
	const char *table;
	const void *entryinfo;
	const struct xt_match *match;
	void *matchinfo;
	unsigned int hook_mask;
	u_int8_t family;
};
/**
 * struct xt_mtdtor_param - match destructor parameters
 * Fields as above.
 */
struct xt_mtdtor_param {
	struct net *net;
	const struct xt_match *match;
	void *matchinfo;
	u_int8_t family;
};
/**
 * struct xt_target_param - parameters for target extensions' target functions
 *
 * @hooknum:	hook through which this target was invoked
 * @target:	struct xt_target through which this function was invoked
 * @targinfo:	per-target data
 *
 * Other fields: see above.
 */
struct xt_target_param {
	const struct net_device *in, *out;
	const struct xt_target *target;
	const void *targinfo;
	unsigned int hooknum;
	u_int8_t family;
};
/**
 * struct xt_tgchk_param - parameters for target extensions'
 * checkentry functions
 *
 * @entryinfo:	the family-specific rule data
 *		(struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
 *
 * Other fields: see above.
 */
struct xt_tgchk_param {
	struct net *net;
	const char *table;
	const void *entryinfo;
	const struct xt_target *target;
	void *targinfo;
	unsigned int hook_mask;
	u_int8_t family;
};
/* Target destructor parameters */
struct xt_tgdtor_param {
	struct net *net;
	const struct xt_target *target;
	void *targinfo;
	u_int8_t family;
};
struct xt_match {
	struct list_head list;

	const char name[XT_FUNCTION_MAXNAMELEN-1];
	u_int8_t revision;

	/* Return true or false: return false and set *hotdrop = true to
	   force immediate packet drop. */
	/* Arguments changed since 2.6.9, as this must now handle
	   non-linear skbs, using skb_header_pointer and
	   skb_ip_make_writable. */
	bool (*match)(const struct sk_buff *skb,
		      const struct xt_match_param *);

	/* Called when user tries to insert an entry of this type. */
	int (*checkentry)(const struct xt_mtchk_param *);

	/* Called when an entry of this type is deleted. */
	void (*destroy)(const struct xt_mtdtor_param *);
#ifdef CONFIG_COMPAT
	/* Called when userspace alignment differs from the kernel's */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);
#endif
	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	const char *table;
	unsigned int matchsize;
#ifdef CONFIG_COMPAT
	unsigned int compatsize;
#endif
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
};
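
/*
 * Sketch (illustrative): filling in and registering a minimal match.
 * All "example" identifiers are hypothetical; example_mt() would be a
 * function with the match() signature above.
 */
#if 0
static struct xt_match example_mt_reg __read_mostly = {
	.name      = "example",
	.revision  = 0,
	.family    = NFPROTO_UNSPEC,	/* usable from every table family */
	.match     = example_mt,
	.matchsize = sizeof(struct example_mtinfo),
	.me        = THIS_MODULE,
};

static int __init example_mt_init(void)
{
	return xt_register_match(&example_mt_reg);
}
#endif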
/* Registration hooks for targets. */
struct xt_target {
	struct list_head list;

	const char name[XT_FUNCTION_MAXNAMELEN-1];

	/* Returns verdict. Argument order changed since 2.6.9, as this
	   must now handle non-linear skbs, using skb_copy_bits and
	   skb_ip_make_writable. */
	unsigned int (*target)(struct sk_buff *skb,
			       const struct xt_target_param *);

	/* Called when user tries to insert an entry of this type:
	   hook_mask is a bitmask of hooks from which it can be
	   called. */
	/* Should return true or false, or an error code (-Exxxx). */
	int (*checkentry)(const struct xt_tgchk_param *);

	/* Called when an entry of this type is deleted. */
	void (*destroy)(const struct xt_tgdtor_param *);
#ifdef CONFIG_COMPAT
	/* Called when userspace alignment differs from the kernel's */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);
#endif
	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	const char *table;
	unsigned int targetsize;
#ifdef CONFIG_COMPAT
	unsigned int compatsize;
#endif
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
	u_int8_t revision;
};
/* Furniture shopping... */
struct xt_table {
	struct list_head list;

	/* What hooks you will enter on */
	unsigned int valid_hooks;

	/* Man behind the curtain... */
	struct xt_table_info *private;

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	u_int8_t af;		/* address/protocol family */
	int priority;		/* hook order */

	/* A unique name... */
	const char name[XT_TABLE_MAXNAMELEN];
};
#include <linux/netfilter_ipv4.h>
/* The table itself */
struct xt_table_info {
	/* Size per table */
	unsigned int size;
	/* Number of entries: FIXME. --RR */
	unsigned int number;
	/* Initial number of entries. Needed for module usage count */
	unsigned int initial_entries;

	/* Entry points and underflows */
	unsigned int hook_entry[NF_INET_NUMHOOKS];
	unsigned int underflow[NF_INET_NUMHOOKS];

	/*
	 * Number of user chains. Since tables cannot have loops, at most
	 * @stacksize jumps (number of user chains) can possibly be made.
	 */
	unsigned int stacksize;
	unsigned int *stackptr;
	/* ipt_entry tables: one per CPU */
	/* Note: this field MUST be the last one, see XT_TABLE_INFO_SZ */
	void *entries[1];
};
#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
			  + nr_cpu_ids * sizeof(char *))
extern int xt_register_target(struct xt_target *target);
extern void xt_unregister_target(struct xt_target *target);
extern int xt_register_targets(struct xt_target *target, unsigned int n);
extern void xt_unregister_targets(struct xt_target *target, unsigned int n);

extern int xt_register_match(struct xt_match *match);
extern void xt_unregister_match(struct xt_match *match);
extern int xt_register_matches(struct xt_match *match, unsigned int n);
extern void xt_unregister_matches(struct xt_match *match, unsigned int n);
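
/*
 * Sketch (illustrative): a target extension registered at module load and
 * torn down on unload. All "example" identifiers are hypothetical.
 */
#if 0
static unsigned int
example_tg(struct sk_buff *skb, const struct xt_target_param *par)
{
	return XT_CONTINUE;	/* verdict: keep traversing the chain */
}

static struct xt_target example_tg_reg __read_mostly = {
	.name   = "EXAMPLE",
	.family = NFPROTO_IPV4,
	.target = example_tg,
	.me     = THIS_MODULE,
};

static int __init example_tg_init(void)
{
	return xt_register_target(&example_tg_reg);
}

static void __exit example_tg_exit(void)
{
	xt_unregister_target(&example_tg_reg);
}
#endif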
extern int xt_check_match(struct xt_mtchk_param *,
			  unsigned int size, u_int8_t proto, bool inv_proto);
extern int xt_check_target(struct xt_tgchk_param *,
			   unsigned int size, u_int8_t proto, bool inv_proto);
extern struct xt_table *xt_register_table(struct net *net,
					  const struct xt_table *table,
					  struct xt_table_info *bootstrap,
					  struct xt_table_info *newinfo);
extern void *xt_unregister_table(struct xt_table *table);

extern struct xt_table_info *xt_replace_table(struct xt_table *table,
					      unsigned int num_counters,
					      struct xt_table_info *newinfo,
					      int *error);
extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
extern struct xt_match *xt_request_find_match(u8 af, const char *name,
					      u8 revision);
extern struct xt_target *xt_request_find_target(u8 af, const char *name,
						u8 revision);
extern int xt_find_revision(u8 af, const char *name, u8 revision,
			    int target, int *err);
extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
					   const char *name);
extern void xt_table_unlock(struct xt_table *t);

extern int xt_proto_init(struct net *net, u_int8_t af);
extern void xt_proto_fini(struct net *net, u_int8_t af);
extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
extern void xt_free_table_info(struct xt_table_info *info);
/*
 * Per-CPU spinlock associated with per-cpu table entries, and
 * with a counter for the "reading" side that allows a recursive
 * reader to avoid taking the lock and deadlocking.
 *
 * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu.
 * It needs to ensure that the rules are not being changed while the packet
 * is being processed. In some cases, the read lock will be acquired
 * twice on the same CPU; this is okay because of the count.
 *
 * "writing" is used when reading counters.
 * During replace any readers that are using the old tables have to complete
 * before freeing the old table. This is handled by the write locking
 * necessary for reading the counters.
 */
struct xt_info_lock {
	spinlock_t lock;
	unsigned char readers;
};
DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
/*
 * Note: we need to ensure that preemption is disabled before acquiring
 * the per-cpu-variable, so we do it as a two step process rather than
 * using "spin_lock_bh()".
 *
 * We _also_ need to disable bottom half processing before updating our
 * nesting count, to make sure that the only kind of re-entrancy is this
 * code being called by itself: since the count+lock is not an atomic
 * operation, we can allow no races.
 *
 * _Only_ that special combination of being per-cpu and never getting
 * re-entered asynchronously means that the count is safe.
 */
static inline void xt_info_rdlock_bh(void)
{
	struct xt_info_lock *lock;

	local_bh_disable();
	lock = &__get_cpu_var(xt_info_locks);
	if (likely(!lock->readers++))
		spin_lock(&lock->lock);
}
static inline void xt_info_rdunlock_bh(void)
{
	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);

	if (likely(!--lock->readers))
		spin_unlock(&lock->lock);
	local_bh_enable();
}
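
/*
 * Usage sketch (illustrative): how one packet's rule traversal brackets
 * itself with the recursive read lock, mirroring ipt_do_table(). The
 * function is hypothetical.
 */
#if 0
static unsigned int example_do_table(struct sk_buff *skb)
{
	unsigned int verdict = NF_DROP;

	xt_info_rdlock_bh();	/* BH off; lock taken on first entry only */
	/* ... walk this cpu's private copy of the ruleset ... */
	xt_info_rdunlock_bh();	/* lock dropped on last exit; BH back on */

	return verdict;
}
#endif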
517 * The "writer" side needs to get exclusive access to the lock,
518 * regardless of readers. This must be called with bottom half
519 * processing (and thus also preemption) disabled.
521 static inline void xt_info_wrlock(unsigned int cpu)
523 spin_lock(&per_cpu(xt_info_locks, cpu).lock);
526 static inline void xt_info_wrunlock(unsigned int cpu)
528 spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
/*
 * This helper is performance critical and must be inlined
 */
static inline unsigned long ifname_compare_aligned(const char *_a,
						   const char *_b,
						   const char *_mask)
{
	const unsigned long *a = (const unsigned long *)_a;
	const unsigned long *b = (const unsigned long *)_b;
	const unsigned long *mask = (const unsigned long *)_mask;
	unsigned long ret;

	ret = (a[0] ^ b[0]) & mask[0];
	if (IFNAMSIZ > sizeof(unsigned long))
		ret |= (a[1] ^ b[1]) & mask[1];
	if (IFNAMSIZ > 2 * sizeof(unsigned long))
		ret |= (a[2] ^ b[2]) & mask[2];
	if (IFNAMSIZ > 3 * sizeof(unsigned long))
		ret |= (a[3] ^ b[3]) & mask[3];
	BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
	return ret;
}
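
/*
 * Usage sketch (illustrative): matching a packet's input device against a
 * rule's interface name/mask pair, as the per-family tables do. All three
 * buffers must be IFNAMSIZ bytes and unsigned-long aligned; the rule
 * fields shown are hypothetical but mirror struct ipt_ip.
 */
#if 0
static bool example_iface_match(const struct net_device *in,
				const char *rule_iface, const char *rule_mask)
{
	/* zero means every masked byte of the two names was identical */
	return ifname_compare_aligned(in->name, rule_iface, rule_mask) == 0;
}
#endif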
extern struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
extern void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);

#ifdef CONFIG_COMPAT
#include <net/compat.h>
struct compat_xt_entry_match {
	union {
		struct {
			u_int16_t match_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t revision;
		} user;
		struct {
			u_int16_t match_size;
			compat_uptr_t match;
		} kernel;
		u_int16_t match_size;
	} u;
	unsigned char data[0];
};
struct compat_xt_entry_target {
	union {
		struct {
			u_int16_t target_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t revision;
		} user;
		struct {
			u_int16_t target_size;
			compat_uptr_t target;
		} kernel;
		u_int16_t target_size;
	} u;
	unsigned char data[0];
};
/* FIXME: this works only on 32 bit tasks
 * need to change whole approach in order to calculate align as function of
 * current task alignment */

struct compat_xt_counters {
	compat_u64 pcnt, bcnt;		/* Packet and byte counters */
};
struct compat_xt_counters_info {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t num_counters;
	struct compat_xt_counters counters[0];
};
struct _compat_xt_align {
	__u8 u8;
	__u16 u16;
	__u32 u32;
	compat_u64 u64;
};

#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))
extern void xt_compat_lock(u_int8_t af);
extern void xt_compat_unlock(u_int8_t af);

extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta);
extern void xt_compat_flush_offsets(u_int8_t af);
extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
extern int xt_compat_match_offset(const struct xt_match *match);
extern int xt_compat_match_from_user(struct xt_entry_match *m,
				     void **dstptr, unsigned int *size);
extern int xt_compat_match_to_user(const struct xt_entry_match *m,
				   void __user **dstptr, unsigned int *size);

extern int xt_compat_target_offset(const struct xt_target *target);
extern void xt_compat_target_from_user(struct xt_entry_target *t,
				       void **dstptr, unsigned int *size);
extern int xt_compat_target_to_user(const struct xt_entry_target *t,
				    void __user **dstptr, unsigned int *size);
#endif /* CONFIG_COMPAT */
#endif /* __KERNEL__ */

#endif /* _X_TABLES_H */