/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

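/*
 * Per-step state flags for a walk:
 *
 * PHYS - the caller wants page/offset pairs rather than mapped virtual
 *        addresses (blkcipher_walk_phys()).
 * SLOW - less than a full block is contiguous, so one block is staged
 *        through an aligned bounce buffer.
 * COPY - src/dst are misaligned for this transform; the chunk is bounced
 *        through walk->page.
 * DIFF - src and dst live on different pages and are mapped separately.
 */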
enum {
        BLKCIPHER_WALK_PHYS = 1 << 0,
        BLKCIPHER_WALK_SLOW = 1 << 1,
        BLKCIPHER_WALK_COPY = 1 << 2,
        BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
                               struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk);

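/*
 * Map/unmap the page currently under the walk's input or output cursor
 * so that it can be accessed through a virtual address.
 */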
static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
        walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
        walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->src.virt.addr, 0);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->dst.virt.addr, 1);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
        return max(start, end_page);
}

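/*
 * Slow-path completion: the block was processed inside the aligned bounce
 * buffer, so copy the result from there out to the destination
 * scatterlist.
 */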
static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
                                               struct blkcipher_walk *walk,
                                               unsigned int bsize)
{
        u8 *addr;
        unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

        addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
        addr = blkcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize, 1);
        return bsize;
}

static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
                                               unsigned int n)
{
        if (walk->flags & BLKCIPHER_WALK_COPY) {
                blkcipher_map_dst(walk);
                memcpy(walk->dst.virt.addr, walk->page, n);
                blkcipher_unmap_dst(walk);
        } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
                if (walk->flags & BLKCIPHER_WALK_DIFF)
                        blkcipher_unmap_dst(walk);
                blkcipher_unmap_src(walk);
        }

        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);

        return n;
}

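/*
 * Called by the cipher after each step of the walk.  A non-negative @err
 * is the number of bytes of this chunk left unprocessed (it must be zero
 * on the slow path, which hands out exactly one block); a negative @err
 * aborts the walk.  Either starts the next step, or, once nothing
 * remains, writes the IV back and frees any bounce buffers.
 */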
int blkcipher_walk_done(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk, int err)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        unsigned int nbytes = 0;

        if (likely(err >= 0)) {
                unsigned int n = walk->nbytes - err;

                if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
                        n = blkcipher_done_fast(walk, n);
                else if (WARN_ON(err)) {
                        err = -EINVAL;
                        goto err;
                } else
                        n = blkcipher_done_slow(tfm, walk, n);

                nbytes = walk->total - n;
                err = 0;
        }

        scatterwalk_done(&walk->in, 0, nbytes);
        scatterwalk_done(&walk->out, 1, nbytes);

err:
        walk->total = nbytes;
        walk->nbytes = nbytes;

        if (nbytes) {
                crypto_yield(desc->flags);
                return blkcipher_walk_next(desc, walk);
        }

        if (walk->iv != desc->info)
                memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);

        return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

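/*
 * Slow path: less than one block is contiguously available in the
 * scatterlists, so stage a whole block through an aligned bounce buffer.
 * The buffer holds src and dst copies of the block at aligned,
 * non-page-straddling spots; the result is copied back out in
 * blkcipher_done_slow().
 */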
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
                                      struct blkcipher_walk *walk,
                                      unsigned int bsize,
                                      unsigned int alignmask)
{
        unsigned int n;
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

        if (walk->buffer)
                goto ok;

        walk->buffer = walk->page;
        if (walk->buffer)
                goto ok;

        n = aligned_bsize * 3 - (alignmask + 1) +
            (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
        walk->buffer = kmalloc(n, GFP_ATOMIC);
        if (!walk->buffer)
                return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
        walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
                                          alignmask + 1);
        walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
        walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
                                                 aligned_bsize, bsize);

        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

        walk->nbytes = bsize;
        walk->flags |= BLKCIPHER_WALK_SLOW;

        return 0;
}

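/*
 * Copy path: the addresses are misaligned for this transform, so bounce
 * the chunk through the preallocated walk->page and let the cipher work
 * on it in place there.
 */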
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
        u8 *tmp = walk->page;

        blkcipher_map_src(walk);
        memcpy(tmp, walk->src.virt.addr, walk->nbytes);
        blkcipher_unmap_src(walk);

        walk->src.virt.addr = tmp;
        walk->dst.virt.addr = tmp;

        return 0;
}

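/*
 * Fast path: work directly on the scatterlist pages.  Physical walks only
 * record page/offset pairs; virtual walks map the source and reuse the
 * mapping for the destination unless the two differ (BLKCIPHER_WALK_DIFF).
 */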
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
                                      struct blkcipher_walk *walk)
{
        unsigned long diff;

        walk->src.phys.page = scatterwalk_page(&walk->in);
        walk->src.phys.offset = offset_in_page(walk->in.offset);
        walk->dst.phys.page = scatterwalk_page(&walk->out);
        walk->dst.phys.offset = offset_in_page(walk->out.offset);

        if (walk->flags & BLKCIPHER_WALK_PHYS)
                return 0;

        diff = walk->src.phys.offset - walk->dst.phys.offset;
        diff |= walk->src.virt.page - walk->dst.virt.page;

        blkcipher_map_src(walk);
        walk->dst.virt.addr = walk->src.virt.addr;

        if (diff) {
                walk->flags |= BLKCIPHER_WALK_DIFF;
                blkcipher_map_dst(walk);
        }

        return 0;
}

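/*
 * Set up the next step of the walk: clamp the chunk to what is contiguous
 * in both scatterlists and pick the fast, copy or slow path accordingly.
 */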
static int blkcipher_walk_next(struct blkcipher_desc *desc,
                               struct blkcipher_walk *walk)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
        unsigned int bsize;
        unsigned int n;
        int err;

        n = walk->total;
        if (unlikely(n < crypto_blkcipher_blocksize(tfm))) {
                desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return blkcipher_walk_done(desc, walk, -EINVAL);
        }

        bsize = min(walk->blocksize, n);

        walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
                         BLKCIPHER_WALK_DIFF);
        if (!scatterwalk_aligned(&walk->in, alignmask) ||
            !scatterwalk_aligned(&walk->out, alignmask)) {
                walk->flags |= BLKCIPHER_WALK_COPY;
                if (!walk->page) {
                        walk->page = (void *)__get_free_page(GFP_ATOMIC);
                        if (!walk->page)
                                n = 0;
                }
        }

        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (unlikely(n < bsize)) {
                err = blkcipher_next_slow(desc, walk, bsize, alignmask);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;
        if (walk->flags & BLKCIPHER_WALK_COPY) {
                err = blkcipher_next_copy(walk);
                goto set_phys_lowmem;
        }

        return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
        if (walk->flags & BLKCIPHER_WALK_PHYS) {
                walk->src.phys.page = virt_to_page(walk->src.virt.addr);
                walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
                walk->src.phys.offset &= PAGE_SIZE - 1;
                walk->dst.phys.offset &= PAGE_SIZE - 1;
        }
        return err;
}

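/*
 * The caller's IV is not aligned as this transform requires, so copy it
 * to an aligned, non-page-straddling spot inside a freshly allocated
 * walk buffer.
 */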
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
                                    struct crypto_blkcipher *tfm,
                                    unsigned int alignmask)
{
        unsigned bs = walk->blocksize;
        unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
        unsigned aligned_bs = ALIGN(bs, alignmask + 1);
        unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
                            (alignmask + 1);
        u8 *iv;

        size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
        iv = blkcipher_get_spot(iv, bs) + aligned_bs;
        iv = blkcipher_get_spot(iv, bs) + aligned_bs;
        iv = blkcipher_get_spot(iv, ivsize);

        walk->iv = memcpy(iv, walk->iv, ivsize);
        return 0;
}

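/* Start a walk that hands out mapped virtual addresses for each chunk. */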
int blkcipher_walk_virt(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

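/*
 * Typical use by a cipher implementation (a sketch for illustration only;
 * encrypt_chunk() stands in for the algorithm's real per-chunk routine
 * and is not defined in this file):
 *
 *	struct blkcipher_walk walk;
 *	unsigned int bsize = crypto_blkcipher_blocksize(desc->tfm);
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *
 *	while (walk.nbytes) {
 *		// process the whole-block part of this chunk
 *		encrypt_chunk(desc, walk.dst.virt.addr, walk.src.virt.addr,
 *			      walk.nbytes - (walk.nbytes % bsize));
 *		// report the leftover; walk_done sets up the next chunk
 *		err = blkcipher_walk_done(desc, &walk, walk.nbytes % bsize);
 *	}
 *	return err;
 */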
int blkcipher_walk_phys(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk)
{
        walk->flags |= BLKCIPHER_WALK_PHYS;
        walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

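/*
 * Common entry point for all walk variants: set up the IV (bouncing it
 * into an aligned buffer if need be), start the scatterlist cursors and
 * take the first step.  Must not be called from hard IRQ context.
 */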
static int blkcipher_walk_first(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->iv = desc->info;
        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & alignmask))) {
                int err = blkcipher_copy_iv(walk, tfm, alignmask);
                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);
        walk->page = NULL;

        return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
                              struct blkcipher_walk *walk,
                              unsigned int blocksize)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->blocksize = blocksize;
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

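/*
 * The key is not aligned as the algorithm requires: copy it into an
 * aligned temporary buffer, set it, then wipe and free the copy.
 */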
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
        unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
        struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
        unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                        unsigned int keylen)
{
        return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm = req->base.tfm;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        struct blkcipher_desc desc = {
                .tfm = __crypto_blkcipher_cast(tfm),
                .info = req->info,
                .flags = req->base.flags,
        };

        return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm = req->base.tfm;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        struct blkcipher_desc desc = {
                .tfm = __crypto_blkcipher_cast(tfm),
                .info = req->info,
                .flags = req->base.flags,
        };

        return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

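/*
 * For synchronous users (the type mask is fully specified, i.e. the user
 * asked for a plain blkcipher) reserve room for the IV right after the
 * aligned transform context; crypto_init_blkcipher_ops_sync() computes
 * the matching address.
 */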
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                             u32 mask)
{
        struct blkcipher_alg *cipher = &alg->cra_blkcipher;
        unsigned int len = alg->cra_ctxsize;

        if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
            cipher->ivsize) {
                len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
                len += cipher->ivsize;
        }

        return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        crt->setkey = async_setkey;
        crt->encrypt = async_encrypt;
        crt->decrypt = async_decrypt;
        if (!alg->ivsize) {
                crt->givencrypt = skcipher_null_givencrypt;
                crt->givdecrypt = skcipher_null_givdecrypt;
        }
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;
        crt->has_setkey = alg->max_keysize;

        return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
        struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
        unsigned long addr;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;

        addr = (unsigned long)crypto_tfm_ctx(tfm);
        addr = ALIGN(addr, align);
        addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
        crt->iv = (void *)addr;

        return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
                return crypto_init_blkcipher_ops_sync(tfm);
        else
                return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
                sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

        NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                sizeof(struct crypto_report_blkcipher), &rblkcipher);

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : blkcipher\n");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
        seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
        seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
        seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
                                             "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
        .ctxsize = crypto_blkcipher_ctxsize,
        .init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_blkcipher_show,
#endif
        .report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

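/*
 * Look up the cipher underlying an IV-generator template and bind it to
 * the spawn.  Putting CRYPTO_ALG_GENIV in the mask keeps the lookup from
 * resolving to an instance that already generates its own IVs.
 */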
static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
                                const char *name, u32 type, u32 mask)
{
        struct crypto_alg *alg;
        int err;

        type = crypto_skcipher_type(type);
        mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

        alg = crypto_alg_mod_lookup(name, type, mask);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
        crypto_mod_put(alg);
        return err;
}

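/*
 * Build a givcipher instance around an existing blkcipher or ablkcipher,
 * normalising the two variants into one descriptor (balg) so the rest of
 * the template only deals with the ablkcipher interface.
 */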
struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
                                             struct rtattr **tb, u32 type,
                                             u32 mask)
{
        struct {
                int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
                              unsigned int keylen);
                int (*encrypt)(struct ablkcipher_request *req);
                int (*decrypt)(struct ablkcipher_request *req);

                unsigned int min_keysize;
                unsigned int max_keysize;
                unsigned int ivsize;

                const char *geniv;
        } balg;
        const char *name;
        struct crypto_skcipher_spawn *spawn;
        struct crypto_attr_type *algt;
        struct crypto_instance *inst;
        struct crypto_alg *alg;
        int err;

        algt = crypto_get_attr_type(tb);
        err = PTR_ERR(algt);
        if (IS_ERR(algt))
                return ERR_PTR(err);

        if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
            algt->mask)
                return ERR_PTR(-EINVAL);

        name = crypto_attr_alg_name(tb[1]);
        err = PTR_ERR(name);
        if (IS_ERR(name))
                return ERR_PTR(err);

        inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
        if (!inst)
                return ERR_PTR(-ENOMEM);

        spawn = crypto_instance_ctx(inst);

        /* Ignore async algorithms if necessary. */
        mask |= crypto_requires_sync(algt->type, algt->mask);

        crypto_set_skcipher_spawn(spawn, inst);
        err = crypto_grab_nivcipher(spawn, name, type, mask);
        if (err)
                goto err_free_inst;

        alg = crypto_skcipher_spawn_alg(spawn);

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_BLKCIPHER) {
                balg.ivsize = alg->cra_blkcipher.ivsize;
                balg.min_keysize = alg->cra_blkcipher.min_keysize;
                balg.max_keysize = alg->cra_blkcipher.max_keysize;

                balg.setkey = async_setkey;
                balg.encrypt = async_encrypt;
                balg.decrypt = async_decrypt;

                balg.geniv = alg->cra_blkcipher.geniv;
        } else {
                balg.ivsize = alg->cra_ablkcipher.ivsize;
                balg.min_keysize = alg->cra_ablkcipher.min_keysize;
                balg.max_keysize = alg->cra_ablkcipher.max_keysize;

                balg.setkey = alg->cra_ablkcipher.setkey;
                balg.encrypt = alg->cra_ablkcipher.encrypt;
                balg.decrypt = alg->cra_ablkcipher.decrypt;

                balg.geniv = alg->cra_ablkcipher.geniv;
        }

        err = -EINVAL;
        if (!balg.ivsize)
                goto err_drop_alg;

        /*
         * This is only true if we're constructing an algorithm with its
         * default IV generator.  For the default generator we elide the
         * template name and double-check the IV generator.
         */
        if (algt->mask & CRYPTO_ALG_GENIV) {
                if (!balg.geniv)
                        balg.geniv = crypto_default_geniv(alg);
                err = -EAGAIN;
                if (strcmp(tmpl->name, balg.geniv))
                        goto err_drop_alg;

                memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
                memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
                       CRYPTO_MAX_ALG_NAME);
        } else {
                err = -ENAMETOOLONG;
                if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
                             "%s(%s)", tmpl->name, alg->cra_name) >=
                    CRYPTO_MAX_ALG_NAME)
                        goto err_drop_alg;
                if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                             "%s(%s)", tmpl->name, alg->cra_driver_name) >=
                    CRYPTO_MAX_ALG_NAME)
                        goto err_drop_alg;
        }

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
        inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
        inst->alg.cra_priority = alg->cra_priority;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;
        inst->alg.cra_type = &crypto_givcipher_type;

        inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
        inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
        inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
        inst->alg.cra_ablkcipher.geniv = balg.geniv;

        inst->alg.cra_ablkcipher.setkey = balg.setkey;
        inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
        inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
        return inst;

err_drop_alg:
        crypto_drop_skcipher(spawn);
err_free_inst:
        kfree(inst);
        inst = ERR_PTR(err);
        goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

void skcipher_geniv_free(struct crypto_instance *inst)
{
        crypto_drop_skcipher(crypto_instance_ctx(inst));
        kfree(inst);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_free);

int skcipher_geniv_init(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = (void *)tfm->__crt_alg;
        struct crypto_ablkcipher *cipher;

        cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        tfm->crt_ablkcipher.base = cipher;
        tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);

        return 0;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_init);

void skcipher_geniv_exit(struct crypto_tfm *tfm)
{
        crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");