/*
 * xfrm algorithm interface
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/crypto.h>
#include <net/xfrm.h>
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
#include <net/ah.h>
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
#include <net/esp.h>
#endif
#include <asm/scatterlist.h>

/*
 * Algorithms supported by IPsec.  These entries contain properties which
 * are used in key negotiation and xfrm processing, and are used to verify
 * that instantiated crypto transforms have correct parameters for IPsec
 * purposes.
 */
static struct xfrm_algo_desc aalg_list[] = {
{
        .name = "hmac(digest_null)",
        .compat = "digest_null",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 0,
                        .icv_fullbits = 0,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_AALG_NULL,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 0,
                .sadb_alg_maxbits = 0
        }
},
{
        .name = "hmac(md5)",
        .compat = "md5",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_AALG_MD5HMAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 128
        }
},
{
        .name = "hmac(sha1)",
        .compat = "sha1",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 160,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_AALG_SHA1HMAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 160,
                .sadb_alg_maxbits = 160
        }
},
{
        .name = "hmac(sha256)",
        .compat = "sha256",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 256,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 256,
                .sadb_alg_maxbits = 256
        }
},
{
        .name = "hmac(ripemd160)",
        .compat = "ripemd160",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 160,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 160,
                .sadb_alg_maxbits = 160
        }
},
{
        .name = "xcbc(aes)",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_AALG_AES_XCBC_MAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 128
        }
},
};

static struct xfrm_algo_desc ealg_list[] = {
{
        .name = "ecb(cipher_null)",
        .compat = "cipher_null",

        .uinfo = {
                .encr = {
                        .blockbits = 8,
                        .defkeybits = 0,
                }
        },

        .desc = {
                .sadb_alg_id =  SADB_EALG_NULL,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 0,
                .sadb_alg_maxbits = 0
        }
},
{
        .name = "cbc(des)",
        .compat = "des",

        .uinfo = {
                .encr = {
                        .blockbits = 64,
                        .defkeybits = 64,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_EALG_DESCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 64,
                .sadb_alg_maxbits = 64
        }
},
{
        .name = "cbc(des3_ede)",
        .compat = "des3_ede",

        .uinfo = {
                .encr = {
                        .blockbits = 64,
                        .defkeybits = 192,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_EALG_3DESCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 192,
                .sadb_alg_maxbits = 192
        }
},
{
        .name = "cbc(cast128)",
        .compat = "cast128",

        .uinfo = {
                .encr = {
                        .blockbits = 64,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_CASTCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 40,
                .sadb_alg_maxbits = 128
        }
},
{
        .name = "cbc(blowfish)",
        .compat = "blowfish",

        .uinfo = {
                .encr = {
                        .blockbits = 64,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 40,
                .sadb_alg_maxbits = 448
        }
},
{
        .name = "cbc(aes)",
        .compat = "aes",

        .uinfo = {
                .encr = {
                        .blockbits = 128,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_AESCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 256
        }
},
{
        .name = "cbc(serpent)",
        .compat = "serpent",

        .uinfo = {
                .encr = {
                        .blockbits = 128,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_SERPENTCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 256,
        }
},
{
        .name = "cbc(twofish)",
        .compat = "twofish",

        .uinfo = {
                .encr = {
                        .blockbits = 128,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 256
        }
},
};

static struct xfrm_algo_desc calg_list[] = {
{
        .name = "deflate",
        .uinfo = {
                .comp = {
                        .threshold = 90,
                }
        },
        .desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
        .name = "lzs",
        .uinfo = {
                .comp = {
                        .threshold = 90,
                }
        },
        .desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
        .name = "lzjh",
        .uinfo = {
                .comp = {
                        .threshold = 50,
                }
        },
        .desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};

static inline int aalg_entries(void)
{
        return ARRAY_SIZE(aalg_list);
}

static inline int ealg_entries(void)
{
        return ARRAY_SIZE(ealg_list);
}

static inline int calg_entries(void)
{
        return ARRAY_SIZE(calg_list);
}

/* Todo: generic iterators */
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
{
        int i;

        for (i = 0; i < aalg_entries(); i++) {
                if (aalg_list[i].desc.sadb_alg_id == alg_id) {
                        if (aalg_list[i].available)
                                return &aalg_list[i];
                        else
                                break;
                }
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);

struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
{
        int i;

        for (i = 0; i < ealg_entries(); i++) {
                if (ealg_list[i].desc.sadb_alg_id == alg_id) {
                        if (ealg_list[i].available)
                                return &ealg_list[i];
                        else
                                break;
                }
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);

struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
{
        int i;

        for (i = 0; i < calg_entries(); i++) {
                if (calg_list[i].desc.sadb_alg_id == alg_id) {
                        if (calg_list[i].available)
                                return &calg_list[i];
                        else
                                break;
                }
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
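
/*
 * Illustrative sketch (not part of the original file; the helper name is
 * hypothetical): how a caller such as af_key might resolve an authentication
 * algorithm by its SADB id and check a proposed key length against the
 * bounds carried in the descriptor, as the comment above aalg_list describes.
 */
static int example_check_auth_key(int alg_id, unsigned int key_bits)
{
        struct xfrm_algo_desc *aalg = xfrm_aalg_get_byid(alg_id);

        if (!aalg)
                return -ENOENT;
        if (key_bits < aalg->desc.sadb_alg_minbits ||
            key_bits > aalg->desc.sadb_alg_maxbits)
                return -EINVAL;
        return 0;
}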

static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
                                              int entries, u32 type, u32 mask,
                                              char *name, int probe)
{
        int i, status;

        if (!name)
                return NULL;

        for (i = 0; i < entries; i++) {
                if (strcmp(name, list[i].name) &&
                    (!list[i].compat || strcmp(name, list[i].compat)))
                        continue;

                if (list[i].available)
                        return &list[i];

                if (!probe)
                        break;

                status = crypto_has_alg(list[i].name, type,
                                        mask | CRYPTO_ALG_ASYNC);
                if (!status)
                        break;

                list[i].available = status;
                return &list[i];
        }
        return NULL;
}

struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
        return xfrm_get_byname(aalg_list, aalg_entries(),
                               CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_HASH_MASK,
                               name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);

struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
        return xfrm_get_byname(ealg_list, ealg_entries(),
                               CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK,
                               name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);

struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
        return xfrm_get_byname(calg_list, calg_entries(),
                               CRYPTO_ALG_TYPE_COMPRESS, CRYPTO_ALG_TYPE_MASK,
                               name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
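
/*
 * Illustrative sketch (hypothetical helper, not in the original file):
 * resolving an encryption algorithm by name.  With probe != 0 the lookup
 * falls through to crypto_has_alg(), so a transform that exists on the
 * system but is not yet marked available can still be reported as usable.
 */
static struct xfrm_algo_desc *example_find_enc_by_name(char *name)
{
        /* Non-probing lookup first: only algorithms already known to be
         * available are returned. */
        struct xfrm_algo_desc *ealg = xfrm_ealg_get_byname(name, 0);

        if (!ealg)
                /* Retry with probe set, letting the crypto layer check
                 * whether the transform can actually be instantiated. */
                ealg = xfrm_ealg_get_byname(name, 1);

        return ealg;
}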

struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
        if (idx >= aalg_entries())
                return NULL;

        return &aalg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);

struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
{
        if (idx >= ealg_entries())
                return NULL;

        return &ealg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);

/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system.  This is typically called by
 * pfkey during userspace SA add, update or register.
 */
void xfrm_probe_algs(void)
{
#ifdef CONFIG_CRYPTO
        int i, status;

        BUG_ON(in_softirq());

        for (i = 0; i < aalg_entries(); i++) {
                status = crypto_has_hash(aalg_list[i].name, 0,
                                         CRYPTO_ALG_ASYNC);
                if (aalg_list[i].available != status)
                        aalg_list[i].available = status;
        }

        for (i = 0; i < ealg_entries(); i++) {
                status = crypto_has_blkcipher(ealg_list[i].name, 0,
                                              CRYPTO_ALG_ASYNC);
                if (ealg_list[i].available != status)
                        ealg_list[i].available = status;
        }

        for (i = 0; i < calg_entries(); i++) {
                status = crypto_has_comp(calg_list[i].name, 0,
                                         CRYPTO_ALG_ASYNC);
                if (calg_list[i].available != status)
                        calg_list[i].available = status;
        }
#endif
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);

int xfrm_count_auth_supported(void)
{
        int i, n;

        for (i = 0, n = 0; i < aalg_entries(); i++)
                if (aalg_list[i].available)
                        n++;
        return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);

int xfrm_count_enc_supported(void)
{
        int i, n;

        for (i = 0, n = 0; i < ealg_entries(); i++)
                if (ealg_list[i].available)
                        n++;
        return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
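
/*
 * Illustrative sketch (hypothetical helper, not in the original file):
 * the usual pfkey pattern is to refresh availability from process context
 * with xfrm_probe_algs() and then report how many authentication and
 * encryption algorithms the kernel can offer.
 */
static int example_count_usable_algs(void)
{
        xfrm_probe_algs();
        return xfrm_count_auth_supported() + xfrm_count_enc_supported();
}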

/* Move to common area: it is shared with AH. */

int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
                 int offset, int len, icv_update_fn_t icv_update)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        int err;
        struct scatterlist sg;

        /* Checksum header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;

                sg.page = virt_to_page(skb->data + offset);
                sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
                sg.length = copy;

                err = icv_update(desc, &sg, copy);
                if (unlikely(err))
                        return err;

                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                BUG_TRAP(start <= offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        if (copy > len)
                                copy = len;

                        sg.page = frag->page;
                        sg.offset = frag->page_offset + offset-start;
                        sg.length = copy;

                        err = icv_update(desc, &sg, copy);
                        if (unlikely(err))
                                return err;

                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                }
                start = end;
        }

        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;

                for (; list; list = list->next) {
                        int end;

                        BUG_TRAP(start <= offset + len);

                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
                                err = skb_icv_walk(list, desc, offset-start,
                                                   copy, icv_update);
                                if (unlikely(err))
                                        return err;
                                if ((len -= copy) == 0)
                                        return 0;
                                offset += copy;
                        }
                        start = end;
                }
        }
        BUG_ON(len);
        return 0;
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
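
/*
 * Illustrative sketch (hypothetical helper, not in the original file):
 * crypto_hash_update() already has the icv_update_fn_t signature, so a
 * caller such as AH can fold an entire skb into a hash with skb_icv_walk().
 * Assumes desc->tfm was allocated and keyed by the caller.
 */
static int example_hash_skb(const struct sk_buff *skb, struct hash_desc *desc,
                            u8 *icv)
{
        int err;

        err = crypto_hash_init(desc);
        if (err)
                return err;
        err = skb_icv_walk(skb, desc, 0, skb->len, crypto_hash_update);
        if (err)
                return err;
        return crypto_hash_final(desc, icv);
}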

#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)

/* Although it looks generic, it is not used anywhere else. */

int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        int elt = 0;

        if (copy > 0) {
                if (copy > len)
                        copy = len;
                sg[elt].page = virt_to_page(skb->data + offset);
                sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
                sg[elt].length = copy;
                elt++;
                if ((len -= copy) == 0)
                        return elt;
                offset += copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                BUG_TRAP(start <= offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        if (copy > len)
                                copy = len;
                        sg[elt].page = frag->page;
                        sg[elt].offset = frag->page_offset+offset-start;
                        sg[elt].length = copy;
                        elt++;
                        if (!(len -= copy))
                                return elt;
                        offset += copy;
                }
                start = end;
        }

        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;

                for (; list; list = list->next) {
                        int end;

                        BUG_TRAP(start <= offset + len);

                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
                                elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
                                if ((len -= copy) == 0)
                                        return elt;
                                offset += copy;
                        }
                        start = end;
                }
        }
        BUG_ON(len);
        return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);

/* Check that the skb data bits are writable.  If they are not, copy the
 * data to a newly created private area.  If "tailbits" is given, make sure
 * that tailbits bytes beyond the current end of the skb are writable.
 *
 * Returns the number of scatterlist elements to load for subsequent
 * transformations and a pointer to the writable trailer skb.
 */

int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
        int copyflag;
        int elt;
        struct sk_buff *skb1, **skb_p;

        /* If the skb is cloned or its head is paged, reallocate the head,
         * pulling out all the pages (pages are considered not writable
         * at the moment even if they are anonymous).
         */
        if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
            __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
                return -ENOMEM;

        /* Easy case. Most packets will go this way. */
        if (!skb_shinfo(skb)->frag_list) {
                /* A little trouble: not enough space for the trailer.
                 * This should not happen when the stack is tuned to generate
                 * good frames. On a miss we reallocate and reserve even more
                 * space; 128 bytes is fair. */

                if (skb_tailroom(skb) < tailbits &&
                    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
                        return -ENOMEM;

                /* Voila! */
                *trailer = skb;
                return 1;
        }

        /* Misery. We are in trouble, going to mince the fragments... */

        elt = 1;
        skb_p = &skb_shinfo(skb)->frag_list;
        copyflag = 0;

        while ((skb1 = *skb_p) != NULL) {
                int ntail = 0;

                /* The fragment is partially pulled by someone; this can
                 * happen on input. Copy it and everything after it. */

                if (skb_shared(skb1))
                        copyflag = 1;

                /* If the skb is the last one, worry about the trailer. */

                if (skb1->next == NULL && tailbits) {
                        if (skb_shinfo(skb1)->nr_frags ||
                            skb_shinfo(skb1)->frag_list ||
                            skb_tailroom(skb1) < tailbits)
                                ntail = tailbits + 128;
                }

                if (copyflag ||
                    skb_cloned(skb1) ||
                    ntail ||
                    skb_shinfo(skb1)->nr_frags ||
                    skb_shinfo(skb1)->frag_list) {
                        struct sk_buff *skb2;

                        /* Fuck, we are miserable poor guys... */
                        if (ntail == 0)
                                skb2 = skb_copy(skb1, GFP_ATOMIC);
                        else
                                skb2 = skb_copy_expand(skb1,
                                                       skb_headroom(skb1),
                                                       ntail,
                                                       GFP_ATOMIC);
                        if (unlikely(skb2 == NULL))
                                return -ENOMEM;

                        if (skb1->sk)
                                skb_set_owner_w(skb2, skb1->sk);

                        /* Looking around. Are we still alive?
                         * OK, link the new skb, drop the old one */

                        skb2->next = skb1->next;
                        *skb_p = skb2;
                        kfree_skb(skb1);
                        skb1 = skb2;
                }
                elt++;
                *trailer = skb1;
                skb_p = &skb1->next;
        }

        return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);

void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
        if (tail != skb) {
                skb->data_len += len;
                skb->len += len;
        }
        return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);
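
/*
 * Illustrative sketch (hypothetical and simplified, not part of the original
 * file): the typical ESP output sequence built from the helpers above.
 * skb_cow_data() makes the packet data writable and returns the number of
 * scatterlist elements needed, pskb_put() appends room for the trailer at
 * the end of the chain, and skb_to_sgvec() loads the scatterlist for the
 * crypto layer.  Real callers size and fill the trailer (padding, pad
 * length, next header) themselves.
 */
static int example_prepare_esp_output(struct sk_buff *skb, int trailer_len,
                                      struct scatterlist *sg, int max_sg)
{
        struct sk_buff *trailer;
        int nfrags;

        nfrags = skb_cow_data(skb, trailer_len, &trailer);
        if (nfrags < 0)
                return nfrags;
        if (nfrags > max_sg)
                return -EMSGSIZE;

        /* Reserve room for the ESP trailer at the end of the chain. */
        pskb_put(skb, trailer, trailer_len);

        /* Map the whole packet for the cipher. */
        return skb_to_sgvec(skb, sg, 0, skb->len);
}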
#endif