net/core/filter.c
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *     Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *     - The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>

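/*
 * Internal instruction codes.  sk_chk_filter() translates the raw BPF_*
 * opcodes supplied by userspace into this dense enumeration, so opcode
 * validity is established once at attach time and sk_run_filter() can
 * dispatch on a compact switch.
 */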
enum {
        BPF_S_RET_K = 1,
        BPF_S_RET_A,
        BPF_S_ALU_ADD_K,
        BPF_S_ALU_ADD_X,
        BPF_S_ALU_SUB_K,
        BPF_S_ALU_SUB_X,
        BPF_S_ALU_MUL_K,
        BPF_S_ALU_MUL_X,
        BPF_S_ALU_DIV_X,
        BPF_S_ALU_AND_K,
        BPF_S_ALU_AND_X,
        BPF_S_ALU_OR_K,
        BPF_S_ALU_OR_X,
        BPF_S_ALU_LSH_K,
        BPF_S_ALU_LSH_X,
        BPF_S_ALU_RSH_K,
        BPF_S_ALU_RSH_X,
        BPF_S_ALU_NEG,
        BPF_S_LD_W_ABS,
        BPF_S_LD_H_ABS,
        BPF_S_LD_B_ABS,
        BPF_S_LD_W_LEN,
        BPF_S_LD_W_IND,
        BPF_S_LD_H_IND,
        BPF_S_LD_B_IND,
        BPF_S_LD_IMM,
        BPF_S_LDX_W_LEN,
        BPF_S_LDX_B_MSH,
        BPF_S_LDX_IMM,
        BPF_S_MISC_TAX,
        BPF_S_MISC_TXA,
        BPF_S_ALU_DIV_K,
        BPF_S_LD_MEM,
        BPF_S_LDX_MEM,
        BPF_S_ST,
        BPF_S_STX,
        BPF_S_JMP_JA,
        BPF_S_JMP_JEQ_K,
        BPF_S_JMP_JEQ_X,
        BPF_S_JMP_JGE_K,
        BPF_S_JMP_JGE_X,
        BPF_S_JMP_JGT_K,
        BPF_S_JMP_JGT_X,
        BPF_S_JMP_JSET_K,
        BPF_S_JMP_JSET_X,
        /* Ancillary data */
        BPF_S_ANC_PROTOCOL,
        BPF_S_ANC_PKTTYPE,
        BPF_S_ANC_IFINDEX,
        BPF_S_ANC_NLATTR,
        BPF_S_ANC_NLATTR_NEST,
        BPF_S_ANC_MARK,
        BPF_S_ANC_QUEUE,
        BPF_S_ANC_HATYPE,
        BPF_S_ANC_RXHASH,
        BPF_S_ANC_CPU,
};

/* No hurry in this branch */
static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
{
        u8 *ptr = NULL;

        if (k >= SKF_NET_OFF)
                ptr = skb_network_header(skb) + k - SKF_NET_OFF;
        else if (k >= SKF_LL_OFF)
                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

        if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
                return ptr;
        return NULL;
}

static inline void *load_pointer(const struct sk_buff *skb, int k,
                                 unsigned int size, void *buffer)
{
        if (k >= 0)
                return skb_header_pointer(skb, k, size, buffer);
        return __load_pointer(skb, k, size);
}
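
/*
 * Negative offsets address metadata relative to a header rather than
 * to skb->data.  For example, a filter that wants the IPv4 protocol
 * byte independent of the link-layer framing could (hypothetically)
 * use k = SKF_NET_OFF + 9, which __load_pointer() resolves against
 * skb_network_header() instead of the start of the packet.
 */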

/**
 *      sk_filter - run a packet through a socket filter
 *      @sk: sock associated with &sk_buff
 *      @skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
        int err;
        struct sk_filter *filter;

        err = security_sock_rcv_skb(sk, skb);
        if (err)
                return err;

        rcu_read_lock();
        filter = rcu_dereference(sk->sk_filter);
        if (filter) {
                unsigned int pkt_len = sk_run_filter(skb, filter->insns);

                err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
        }
        rcu_read_unlock();

        return err;
}
EXPORT_SYMBOL(sk_filter);

/**
 *      sk_run_filter - run a filter on a socket
 *      @skb: buffer to run the filter on
 *      @fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to be before the last instruction,
 * and the last instruction is guaranteed to be a RET, we don't need to
 * check flen. (We used to pass the length of the filter to this function.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
                           const struct sock_filter *fentry)
{
        void *ptr;
        u32 A = 0;                      /* Accumulator */
        u32 X = 0;                      /* Index Register */
        u32 mem[BPF_MEMWORDS];          /* Scratch Memory Store */
        u32 tmp;
        int k;

        /*
         * Process array of filter instructions.
         */
        for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define K (fentry->k)
#else
                const u32 K = fentry->k;
#endif

                switch (fentry->code) {
                case BPF_S_ALU_ADD_X:
                        A += X;
                        continue;
                case BPF_S_ALU_ADD_K:
                        A += K;
                        continue;
                case BPF_S_ALU_SUB_X:
                        A -= X;
                        continue;
                case BPF_S_ALU_SUB_K:
                        A -= K;
                        continue;
                case BPF_S_ALU_MUL_X:
                        A *= X;
                        continue;
                case BPF_S_ALU_MUL_K:
                        A *= K;
                        continue;
                case BPF_S_ALU_DIV_X:
                        if (X == 0)
                                return 0;
                        A /= X;
                        continue;
                case BPF_S_ALU_DIV_K:
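                        /*
                         * K was rewritten by sk_chk_filter() to the
                         * multiplicative inverse of the original divisor
                         * (see reciprocal_div.h), so this is roughly
                         * A = (u32)(((u64)A * K) >> 32) rather than a
                         * real divide in the fast path.
                         */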
                        A = reciprocal_divide(A, K);
                        continue;
                case BPF_S_ALU_AND_X:
                        A &= X;
                        continue;
                case BPF_S_ALU_AND_K:
                        A &= K;
                        continue;
                case BPF_S_ALU_OR_X:
                        A |= X;
                        continue;
                case BPF_S_ALU_OR_K:
                        A |= K;
                        continue;
                case BPF_S_ALU_LSH_X:
                        A <<= X;
                        continue;
                case BPF_S_ALU_LSH_K:
                        A <<= K;
                        continue;
                case BPF_S_ALU_RSH_X:
                        A >>= X;
                        continue;
                case BPF_S_ALU_RSH_K:
                        A >>= K;
                        continue;
                case BPF_S_ALU_NEG:
                        A = -A;
                        continue;
                case BPF_S_JMP_JA:
                        fentry += K;
                        continue;
                case BPF_S_JMP_JGT_K:
                        fentry += (A > K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JGE_K:
                        fentry += (A >= K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JEQ_K:
                        fentry += (A == K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JSET_K:
                        fentry += (A & K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JGT_X:
                        fentry += (A > X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JGE_X:
                        fentry += (A >= X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JEQ_X:
                        fentry += (A == X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JSET_X:
                        fentry += (A & X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_LD_W_ABS:
                        k = K;
load_w:
                        ptr = load_pointer(skb, k, 4, &tmp);
                        if (ptr != NULL) {
                                A = get_unaligned_be32(ptr);
                                continue;
                        }
                        return 0;
                case BPF_S_LD_H_ABS:
                        k = K;
load_h:
                        ptr = load_pointer(skb, k, 2, &tmp);
                        if (ptr != NULL) {
                                A = get_unaligned_be16(ptr);
                                continue;
                        }
                        return 0;
                case BPF_S_LD_B_ABS:
                        k = K;
load_b:
                        ptr = load_pointer(skb, k, 1, &tmp);
                        if (ptr != NULL) {
                                A = *(u8 *)ptr;
                                continue;
                        }
                        return 0;
                case BPF_S_LD_W_LEN:
                        A = skb->len;
                        continue;
                case BPF_S_LDX_W_LEN:
                        X = skb->len;
                        continue;
                case BPF_S_LD_W_IND:
                        k = X + K;
                        goto load_w;
                case BPF_S_LD_H_IND:
                        k = X + K;
                        goto load_h;
                case BPF_S_LD_B_IND:
                        k = X + K;
                        goto load_b;
                case BPF_S_LDX_B_MSH:
                        ptr = load_pointer(skb, K, 1, &tmp);
                        if (ptr != NULL) {
                                X = (*(u8 *)ptr & 0xf) << 2;
                                continue;
                        }
                        return 0;
                case BPF_S_LD_IMM:
                        A = K;
                        continue;
                case BPF_S_LDX_IMM:
                        X = K;
                        continue;
                case BPF_S_LD_MEM:
                        A = mem[K];
                        continue;
                case BPF_S_LDX_MEM:
                        X = mem[K];
                        continue;
                case BPF_S_MISC_TAX:
                        X = A;
                        continue;
                case BPF_S_MISC_TXA:
                        A = X;
                        continue;
                case BPF_S_RET_K:
                        return K;
                case BPF_S_RET_A:
                        return A;
                case BPF_S_ST:
                        mem[K] = A;
                        continue;
                case BPF_S_STX:
                        mem[K] = X;
                        continue;
                case BPF_S_ANC_PROTOCOL:
                        A = ntohs(skb->protocol);
                        continue;
                case BPF_S_ANC_PKTTYPE:
                        A = skb->pkt_type;
                        continue;
                case BPF_S_ANC_IFINDEX:
                        if (!skb->dev)
                                return 0;
                        A = skb->dev->ifindex;
                        continue;
                case BPF_S_ANC_MARK:
                        A = skb->mark;
                        continue;
                case BPF_S_ANC_QUEUE:
                        A = skb->queue_mapping;
                        continue;
                case BPF_S_ANC_HATYPE:
                        if (!skb->dev)
                                return 0;
                        A = skb->dev->type;
                        continue;
                case BPF_S_ANC_RXHASH:
                        A = skb->rxhash;
                        continue;
                case BPF_S_ANC_CPU:
                        A = raw_smp_processor_id();
                        continue;
                case BPF_S_ANC_NLATTR: {
                        struct nlattr *nla;

                        if (skb_is_nonlinear(skb))
                                return 0;
                        if (skb->len < sizeof(struct nlattr))
                                return 0;
                        if (A > skb->len - sizeof(struct nlattr))
                                return 0;

                        nla = nla_find((struct nlattr *)&skb->data[A],
                                       skb->len - A, X);
                        if (nla)
                                A = (void *)nla - (void *)skb->data;
                        else
                                A = 0;
                        continue;
                }
                case BPF_S_ANC_NLATTR_NEST: {
                        struct nlattr *nla;

                        if (skb_is_nonlinear(skb))
                                return 0;
                        if (skb->len < sizeof(struct nlattr))
                                return 0;
                        if (A > skb->len - sizeof(struct nlattr))
                                return 0;

                        nla = (struct nlattr *)&skb->data[A];
                        /* the attribute must not extend past the packet */
                        if (nla->nla_len > skb->len - A)
                                return 0;

                        nla = nla_find_nested(nla, X);
                        if (nla)
                                A = (void *)nla - (void *)skb->data;
                        else
                                A = 0;
                        continue;
                }
                default:
                        WARN_ON(1);
                        return 0;
                }
        }

        return 0;
}
EXPORT_SYMBOL(sk_run_filter);

/*
 * Security:
 * A BPF program is able to use 16 cells of memory to store intermediate
 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 * As we don't want to clear the mem[] array for each packet going through
 * sk_run_filter(), we check that a filter loaded by a user never tries to
 * read a cell it has not previously written, and we check all branches to
 * be sure a malicious user doesn't try to abuse us.
 */
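/*
 * For instance, a two-instruction program of the form
 *
 *      BPF_STMT(BPF_LD|BPF_MEM, 3)     (load mem[3], never written)
 *      BPF_STMT(BPF_RET|BPF_A, 0)
 *
 * is rejected with -EINVAL by check_load_and_stores() below, because
 * cell 3 is read before any ST/STX has initialized it.
 */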
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
        u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
        int pc, ret = 0;

        BUILD_BUG_ON(BPF_MEMWORDS > 16);
        masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
        if (!masks)
                return -ENOMEM;
        memset(masks, 0xff, flen * sizeof(*masks));

        for (pc = 0; pc < flen; pc++) {
                memvalid &= masks[pc];

                switch (filter[pc].code) {
                case BPF_S_ST:
                case BPF_S_STX:
                        memvalid |= (1 << filter[pc].k);
                        break;
                case BPF_S_LD_MEM:
                case BPF_S_LDX_MEM:
                        if (!(memvalid & (1 << filter[pc].k))) {
                                ret = -EINVAL;
                                goto error;
                        }
                        break;
                case BPF_S_JMP_JA:
                        /* a jump must set masks on target */
                        masks[pc + 1 + filter[pc].k] &= memvalid;
                        memvalid = ~0;
                        break;
                case BPF_S_JMP_JEQ_K:
                case BPF_S_JMP_JEQ_X:
                case BPF_S_JMP_JGE_K:
                case BPF_S_JMP_JGE_X:
                case BPF_S_JMP_JGT_K:
                case BPF_S_JMP_JGT_X:
                case BPF_S_JMP_JSET_X:
                case BPF_S_JMP_JSET_K:
                        /* a jump must set masks on targets */
                        masks[pc + 1 + filter[pc].jt] &= memvalid;
                        masks[pc + 1 + filter[pc].jf] &= memvalid;
                        memvalid = ~0;
                        break;
                }
        }
error:
        kfree(masks);
        return ret;
}

/**
 *      sk_chk_filter - verify socket filter code
 *      @filter: filter to verify
 *      @flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, int flen)
{
        /*
         * Valid instructions are initialized to non-0.
         * Invalid instructions are initialized to 0.
         */
        static const u8 codes[] = {
                [BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
                [BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
                [BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
                [BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
                [BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
                [BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
                [BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
                [BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
                [BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
                [BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
                [BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
                [BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
                [BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
                [BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
                [BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
                [BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
                [BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
                [BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
                [BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
                [BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
                [BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
                [BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
                [BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
                [BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
                [BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
                [BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
                [BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
                [BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
                [BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
                [BPF_RET|BPF_K]          = BPF_S_RET_K,
                [BPF_RET|BPF_A]          = BPF_S_RET_A,
                [BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
                [BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
                [BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
                [BPF_ST]                 = BPF_S_ST,
                [BPF_STX]                = BPF_S_STX,
                [BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
                [BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
                [BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
                [BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
                [BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
                [BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
                [BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
                [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
                [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
        };
        int pc;

        if (flen == 0 || flen > BPF_MAXINSNS)
                return -EINVAL;

        /* check the filter code now */
        for (pc = 0; pc < flen; pc++) {
                struct sock_filter *ftest = &filter[pc];
                u16 code = ftest->code;

                if (code >= ARRAY_SIZE(codes))
                        return -EINVAL;
                code = codes[code];
                if (!code)
                        return -EINVAL;
                /* Some instructions need special checks */
                switch (code) {
                case BPF_S_ALU_DIV_K:
                        /* check for division by zero */
                        if (ftest->k == 0)
                                return -EINVAL;
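                        /*
                         * Rewrite the constant divisor as its 32-bit
                         * reciprocal (see reciprocal_div.h) so that the
                         * run-time BPF_S_ALU_DIV_K case can multiply
                         * instead of divide.
                         */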
                        ftest->k = reciprocal_value(ftest->k);
                        break;
                case BPF_S_LD_MEM:
                case BPF_S_LDX_MEM:
                case BPF_S_ST:
                case BPF_S_STX:
                        /* check for invalid memory addresses */
                        if (ftest->k >= BPF_MEMWORDS)
                                return -EINVAL;
                        break;
                case BPF_S_JMP_JA:
                        /*
                         * Note, the large ftest->k might cause loops.
                         * Compare this with conditional jumps below,
                         * where offsets are limited. --ANK (981016)
                         */
                        if (ftest->k >= (unsigned)(flen-pc-1))
                                return -EINVAL;
                        break;
                case BPF_S_JMP_JEQ_K:
                case BPF_S_JMP_JEQ_X:
                case BPF_S_JMP_JGE_K:
                case BPF_S_JMP_JGE_X:
                case BPF_S_JMP_JGT_K:
                case BPF_S_JMP_JGT_X:
                case BPF_S_JMP_JSET_X:
                case BPF_S_JMP_JSET_K:
                        /* for conditionals both must be safe */
                        if (pc + ftest->jt + 1 >= flen ||
                            pc + ftest->jf + 1 >= flen)
                                return -EINVAL;
                        break;
                case BPF_S_LD_W_ABS:
                case BPF_S_LD_H_ABS:
                case BPF_S_LD_B_ABS:
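                        /*
                         * Absolute loads with k at or above SKF_AD_OFF
                         * never touch packet data; remap them to the
                         * BPF_S_ANC_* codes that read skb metadata
                         * (protocol, ifindex, mark, ...).
                         */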
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:        \
                                code = BPF_S_ANC_##CODE;        \
                                break
                        switch (ftest->k) {
                        ANCILLARY(PROTOCOL);
                        ANCILLARY(PKTTYPE);
                        ANCILLARY(IFINDEX);
                        ANCILLARY(NLATTR);
                        ANCILLARY(NLATTR_NEST);
                        ANCILLARY(MARK);
                        ANCILLARY(QUEUE);
                        ANCILLARY(HATYPE);
                        ANCILLARY(RXHASH);
                        ANCILLARY(CPU);
                        }
                }
                ftest->code = code;
        }

        /* last instruction must be a RET code */
        switch (filter[flen - 1].code) {
        case BPF_S_RET_K:
        case BPF_S_RET_A:
                return check_load_and_stores(filter, flen);
        }
        return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);

/**
 *      sk_filter_release_rcu - Release a socket filter by rcu_head
 *      @rcu: rcu_head that contains the sk_filter to free
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
        struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

        kfree(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);

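/*
 * A minimal, illustrative sketch of how userspace reaches
 * sk_attach_filter(): attach a classic BPF program through the
 * SO_ATTACH_FILTER socket option.  This example (assuming a packet
 * socket fd and ETH_P_IP from <linux/if_ether.h>) accepts only
 * IPv4 frames:
 *
 *      struct sock_filter code[] = {
 *              BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),               (A = EtherType)
 *              BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1),
 *              BPF_STMT(BPF_RET | BPF_K, 0xffffffff),                (accept)
 *              BPF_STMT(BPF_RET | BPF_K, 0),                         (drop)
 *      };
 *      struct sock_fprog prog = { .len = 4, .filter = code };
 *
 *      setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */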
/**
 *      sk_attach_filter - attach a socket filter
 *      @fprog: the filter program
 *      @sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter, a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
        struct sk_filter *fp, *old_fp;
        unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
        int err;

        /* Make sure the new filter is there; its size is validated
         * by sk_chk_filter() below. */
        if (fprog->filter == NULL)
                return -EINVAL;

        fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;
        if (copy_from_user(fp->insns, fprog->filter, fsize)) {
                sock_kfree_s(sk, fp, fsize+sizeof(*fp));
                return -EFAULT;
        }

        atomic_set(&fp->refcnt, 1);
        fp->len = fprog->len;

        err = sk_chk_filter(fp->insns, fp->len);
        if (err) {
                sk_filter_uncharge(sk, fp);
                return err;
        }

        old_fp = rcu_dereference_protected(sk->sk_filter,
                                           sock_owned_by_user(sk));
        rcu_assign_pointer(sk->sk_filter, fp);

        if (old_fp)
                sk_filter_uncharge(sk, old_fp);
        return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);

int sk_detach_filter(struct sock *sk)
{
        int ret = -ENOENT;
        struct sk_filter *filter;

        filter = rcu_dereference_protected(sk->sk_filter,
                                           sock_owned_by_user(sk));
        if (filter) {
                rcu_assign_pointer(sk->sk_filter, NULL);
                sk_filter_uncharge(sk, filter);
                ret = 0;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);