/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *	Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *	- The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
/* No hurry in this branch */
static void *__load_pointer(struct sk_buff *skb, int k)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr < skb_tail_pointer(skb))
		return ptr;
	return NULL;
}

static inline void *load_pointer(struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	if (k >= SKF_AD_OFF)
		return NULL;
	return __load_pointer(skb, k);
}
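/*
 * Illustrative note (not part of the original file): negative 'k' offsets
 * select a base other than skb->data. A minimal sketch, assuming a filter
 * built with the BPF_STMT() macro from <linux/filter.h>:
 *
 *	load the first byte of the link-layer (MAC) header:
 *		BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 0)
 *	load the first byte of the network (IP) header:
 *		BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 0)
 *
 * Both resolve through __load_pointer() above; anything outside
 * [skb->head, skb_tail_pointer(skb)) yields NULL and the load fails.
 */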
/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock_bh();
	filter = rcu_dereference_bh(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(sk_filter);
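/*
 * Usage sketch (illustrative, not part of the original file): receive paths
 * such as sock_queue_rcv_skb() call sk_filter() before queueing a packet to
 * the socket, and the caller typically drops the skb when it fails:
 *
 *	err = sk_filter(sk, skb);
 *	if (err) {
 *		kfree_skb(skb);		filter rejected the packet (-EPERM)
 *		return err;		or security_sock_rcv_skb() failed
 *	}
 *	on success, skb may have been trimmed to the length the filter returned
 */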
/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@filter: filter to apply
 *	@flen: length of filter
 *
 * Decode and apply filter instructions to the skb->data.
 * Return the length to keep, 0 for none. skb is the data we are
 * filtering, filter is the array of filter instructions, and
 * flen is the number of filter blocks in the array.
 */
unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	unsigned long memvalid = 0;	/* one bit per initialized mem[] slot */
	u32 tmp;
	int k;
	int pc;

	BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);

	/*
	 * Process the array of filter instructions.
	 */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *fentry = &filter[pc];
		u32 f_k = fentry->k;

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
		case BPF_S_ALU_ADD_K:
		case BPF_S_ALU_SUB_X:
		case BPF_S_ALU_SUB_K:
		case BPF_S_ALU_MUL_X:
		case BPF_S_ALU_MUL_K:
		case BPF_S_ALU_DIV_X:
		case BPF_S_ALU_DIV_K:
		case BPF_S_ALU_AND_X:
		case BPF_S_ALU_AND_K:
		case BPF_S_ALU_LSH_X:
		case BPF_S_ALU_LSH_K:
		case BPF_S_ALU_RSH_X:
		case BPF_S_ALU_RSH_K:
		case BPF_S_JMP_JGT_K:
			pc += (A > f_k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			pc += (A >= f_k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			pc += (A == f_k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			pc += (A & f_k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			pc += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			pc += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			pc += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			pc += (A & X) ? fentry->jt : fentry->jf;
			continue;
			ptr = load_pointer(skb, k, 4, &tmp);
			A = get_unaligned_be32(ptr);
			ptr = load_pointer(skb, k, 2, &tmp);
			A = get_unaligned_be16(ptr);
			ptr = load_pointer(skb, k, 1, &tmp);
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LDX_B_MSH:
			ptr = load_pointer(skb, f_k, 1, &tmp);
			X = (*(u8 *)ptr & 0xf) << 2;
			continue;
		case BPF_S_LD_MEM:
			A = (memvalid & (1UL << f_k)) ? mem[f_k] : 0;
			continue;
		case BPF_S_LDX_MEM:
			X = (memvalid & (1UL << f_k)) ? mem[f_k] : 0;
			continue;
		case BPF_S_ST:
			memvalid |= 1UL << f_k;
			mem[f_k] = A;
			continue;
		case BPF_S_STX:
			memvalid |= 1UL << f_k;
			mem[f_k] = X;
			continue;
		/*
		 * Handle ancillary data, which are impossible
		 * (or very difficult) to get by parsing the packet contents.
		 */
		switch (k - SKF_AD_OFF) {
		case SKF_AD_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case SKF_AD_IFINDEX:
			A = skb->dev->ifindex;
			continue;
		case SKF_AD_QUEUE:
			A = skb->queue_mapping;
			continue;
		case SKF_AD_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;
			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			A = nla ? (void *)nla - (void *)skb->data : 0;
			continue;
		}
		case SKF_AD_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;
			nla = (struct nlattr *)&skb->data[A];
			/* the attribute must fit inside the remaining data */
			if (nla->nla_len > skb->len - A)
				return 0;
			nla = nla_find_nested(nla, X);
			A = nla ? (void *)nla - (void *)skb->data : 0;
			continue;
		}
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
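/*
 * Example (illustrative, not part of the original file): a classic BPF
 * program that accepts IPv4 packets and drops everything else, using the
 * SKF_AD_PROTOCOL ancillary load handled above. The raw instructions must
 * first be validated (and remapped to the BPF_S_* codes this interpreter
 * switches on) by sk_chk_filter():
 *
 *	struct sock_filter insns[] = {
 *		A = skb->protocol (ancillary load):
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_PROTOCOL),
 *		if A == ETH_P_IP fall through, else skip one instruction:
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1),
 *		accept up to 0xffff bytes:
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *		drop the packet:
 *		BPF_STMT(BPF_RET | BPF_K, 0),
 *	};
 *
 *	if (sk_chk_filter(insns, ARRAY_SIZE(insns)) == 0)
 *		keep = sk_run_filter(skb, insns, ARRAY_SIZE(insns));
 */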
/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K + 1,
		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X + 1,
		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K + 1,
		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X + 1,
		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K + 1,
		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X + 1,
		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X + 1,
		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K + 1,
		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X + 1,
		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K + 1,
		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X + 1,
		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K + 1,
		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X + 1,
		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K + 1,
		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X + 1,
		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG + 1,
		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS + 1,
		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS + 1,
		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS + 1,
		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN + 1,
		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND + 1,
		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND + 1,
		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND + 1,
		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM + 1,
		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN + 1,
		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH + 1,
		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM + 1,
		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX + 1,
		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA + 1,
		[BPF_RET|BPF_K]          = BPF_S_RET_K + 1,
		[BPF_RET|BPF_A]          = BPF_S_RET_A + 1,
		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K + 1,
		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM + 1,
		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM + 1,
		[BPF_ST]                 = BPF_S_ST + 1,
		[BPF_STX]                = BPF_S_STX + 1,
		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA + 1,
		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K + 1,
		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X + 1,
		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K + 1,
		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X + 1,
		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K + 1,
		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X + 1,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K + 1,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X + 1,
	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		/* Undo the '+ 1' in codes[] after validation. */
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned)(flen-pc-1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		}
	}

	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
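/*
 * Example (illustrative, not part of the original file): programs that trip
 * the special checks above are rejected, e.g. a constant division by zero:
 *
 *	struct sock_filter bad[] = {
 *		BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),	division by zero
 *		BPF_STMT(BPF_RET | BPF_K, 0),
 *	};
 *	sk_chk_filter(bad, ARRAY_SIZE(bad)) == -EINVAL
 *
 * The same applies to out-of-range jumps, unknown opcodes, and filters
 * whose last instruction is not a RET.
 */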
/**
 *	sk_filter_rcu_release - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_rcu_release(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	sk_filter_release(fp);
}

static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);
	call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
}
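/*
 * Note (illustrative, not part of the original file): the deferred free pairs
 * with the lockless readers in sk_filter(), which run under
 * rcu_read_lock_bh(). call_rcu_bh() guarantees no such reader can still see
 * the old filter when sk_filter_rcu_release() finally runs:
 *
 *	reader (softirq):			updater (process context):
 *	rcu_read_lock_bh();			rcu_assign_pointer(sk->sk_filter, new);
 *	f = rcu_dereference_bh(sk->sk_filter);	sk_filter_delayed_uncharge(sk, old);
 *	... run f ...
 *	rcu_read_unlock_bh();
 */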
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize+sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_delayed_uncharge(sk, old_fp);
	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
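/*
 * Usage sketch (illustrative, not part of the original file): this is the
 * kernel side of the SO_ATTACH_FILTER socket option; from userspace a filter
 * is typically attached with:
 *
 *	struct sock_fprog fprog = {
 *		.len	= ARRAY_SIZE(insns),	insns: array of struct sock_filter
 *		.filter	= insns,
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 */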
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		rcu_assign_pointer(sk->sk_filter, NULL);
		sk_filter_delayed_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
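/*
 * Note (illustrative, not part of the original file): this is the kernel side
 * of the SO_DETACH_FILTER socket option, e.g.:
 *
 *	int dummy = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, &dummy, sizeof(dummy));
 *
 * dummy is unused here; sock_setsockopt() merely expects a non-trivial optlen.
 */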