/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 * - The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
#include <linux/ratelimit.h>
/* No hurry in this branch */
static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;
	return NULL;
}

static inline void *load_pointer(const struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	return __load_pointer(skb, k, size);
}
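
/*
 * Illustrative example (not part of the original file): the negative
 * offsets handled by __load_pointer() let a filter address packet data
 * relative to the link-layer or network header regardless of where
 * skb->data currently points.  A minimal sketch, assuming an IPv4 packet,
 * loading the TTL byte (offset 8 of the IP header) via SKF_NET_OFF:
 *
 *	struct sock_filter insn =
 *		BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 8);
 */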
/**
 * sk_filter_trim_cap - run a packet through a socket filter
 * @sk: sock associated with &sk_buff
 * @skb: buffer to filter
 * @cap: limit on how short the filter may trim the packet
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);
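
/*
 * Illustrative sketch (not part of the original file): the trim behaviour
 * above can be observed from userspace by attaching a filter whose RET
 * value is a snap length.  Everything here uses the standard
 * SO_ATTACH_FILTER API; the 96-byte snap length and the socket fd are
 * arbitrary choices for the example.
 *
 *	struct sock_filter insns[] = {
 *		// Accept every packet but keep at most 96 bytes;
 *		// sk_filter_trim_cap() will pskb_trim() the skb to this.
 *		BPF_STMT(BPF_RET | BPF_K, 96),
 *	};
 *	struct sock_fprog prog = {
 *		.len = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */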
/**
 * sk_run_filter - run a filter on a socket
 * @skb: buffer to run the filter on
 * @fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to be before the last instruction,
 * and the last instruction is guaranteed to be a RET, we don't need to
 * check flen. (We used to pass the length of the filter to this function.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
			   const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;

	/*
	 * Process array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			/* K holds reciprocal_value(divisor); see sk_chk_filter() */
			A = reciprocal_divide(A, K);
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_H_ABS:
			k = K;
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_B_ABS:
			k = K;
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			return 0;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LDX_B_MSH:
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_ANC_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case BPF_S_ANC_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case BPF_S_ANC_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case BPF_S_ANC_QUEUE:
			A = skb->queue_mapping;
			continue;
		case BPF_S_ANC_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case BPF_S_ANC_RXHASH:
			A = skb->rxhash;
			continue;
		case BPF_S_ANC_CPU:
			A = raw_smp_processor_id();
			continue;
		case BPF_S_ANC_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_ANC_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			if (nla->nla_len > skb->len - A)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_RET_K:
			return K;
		case BPF_S_RET_A:
			return A;
		default:
			WARN_RATELIMIT(1, "Unknown code:%u jt:%u jf:%u k:%u\n",
				       fentry->code, fentry->jt,
				       fentry->jf, fentry->k);
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
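
/*
 * Illustrative example (not part of the original file): a classic BPF
 * program as interpreted by sk_run_filter() above.  It accepts IPv4 UDP
 * packets and drops everything else; offsets 12 (EtherType) and 23
 * (IP protocol byte) assume untagged Ethernet framing.
 *
 *	struct sock_filter udp_only[] = {
 *		BPF_STMT(BPF_LD  | BPF_H | BPF_ABS, 12),		// A = EtherType
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 3),	// !IPv4 -> drop
 *		BPF_STMT(BPF_LD  | BPF_B | BPF_ABS, 23),		// A = IP protocol
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 17, 0, 1),		// !UDP -> drop
 *		BPF_STMT(BPF_RET | BPF_K, 0xFFFFFFFF),			// accept
 *		BPF_STMT(BPF_RET | BPF_K, 0),				// drop
 *	};
 */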
/*
 * Security:
 * A BPF program is able to use 16 cells of memory to store intermediate
 * values (see u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 * As we don't want to clear the mem[] array for each packet going through
 * sk_run_filter(), we check that a filter loaded by a user never tries to
 * read a cell that was not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);
	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;
	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_S_ST:
		case BPF_S_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_S_JMP_JA:
			/* a jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* a jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
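
/*
 * Illustrative example (not part of the original file): a two-instruction
 * filter that check_load_and_stores() rejects with -EINVAL, because it
 * reads scratch cell M[0] without ever storing to it:
 *
 *	struct sock_filter bad[] = {
 *		BPF_STMT(BPF_LD  | BPF_MEM, 0),	// A = M[0], never written
 *		BPF_STMT(BPF_RET | BPF_A, 0),
 *	};
 */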
/**
 * sk_chk_filter - verify socket filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
		[BPF_RET|BPF_K]          = BPF_S_RET_K,
		[BPF_RET|BPF_A]          = BPF_S_RET_A,
		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
		[BPF_ST]                 = BPF_S_ST,
		[BPF_STX]                = BPF_S_STX,
		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
	};
	int pc;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		if (!code)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			ftest->k = reciprocal_value(ftest->k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:	\
				code = BPF_S_ANC_##CODE;	\
				break
			switch (ftest->k) {
			ANCILLARY(PROTOCOL);
			ANCILLARY(PKTTYPE);
			ANCILLARY(IFINDEX);
			ANCILLARY(NLATTR);
			ANCILLARY(NLATTR_NEST);
			ANCILLARY(QUEUE);
			ANCILLARY(HATYPE);
			ANCILLARY(RXHASH);
			ANCILLARY(CPU);
			}
		}
		ftest->code = code;
	}
	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return check_load_and_stores(filter, flen);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
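
/*
 * Illustrative example (not part of the original file): ancillary loads
 * are requested from userspace as ordinary absolute loads whose offset is
 * SKF_AD_OFF + SKF_AD_<field>; sk_chk_filter() rewrites them to the
 * BPF_S_ANC_* opcodes handled in sk_run_filter().  For instance, a program
 * that returns the CPU currently processing the packet:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_CPU),
 *		BPF_STMT(BPF_RET | BPF_A, 0),
 *	};
 */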
/**
 * sk_filter_release_rcu - Release a socket filter by rcu_head
 * @rcu: rcu_head that contains the sk_filter to free
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	kfree(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);
/**
 * sk_attach_filter - attach a socket filter
 * @fprog: the filter program
 * @sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter, a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize+sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;
	fp->bpf_func = sk_run_filter;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);
	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
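
/*
 * Illustrative sketch (not part of the original file): sk_attach_filter()
 * is reached via setsockopt(SO_ATTACH_FILTER).  A caller such as
 * sock_setsockopt() first copies the struct sock_fprog header in from
 * userspace, roughly like this (locking and error handling trimmed):
 *
 *	struct sock_fprog fprog;
 *
 *	if (copy_from_user(&fprog, optval, sizeof(fprog)))
 *		return -EFAULT;
 *	return sk_attach_filter(&fprog, sk);
 */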
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
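
/*
 * Illustrative example (not part of the original file): userspace removes
 * the current filter with SO_DETACH_FILTER; the option value is ignored,
 * and -ENOENT is returned if no filter was attached:
 *
 *	int dummy = 0;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, &dummy, sizeof(dummy));
 */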