pandora-kernel.git: arch/x86/crypto/aesni-intel_glue.c
1 /*
2  * Support for Intel AES-NI instructions. This file contains glue
3  * code, the real AES implementation is in aesni-intel_asm.S.
4  *
5  * Copyright (C) 2008, Intel Corp.
6  *    Author: Huang Ying <ying.huang@intel.com>
7  *
8  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9  * interface for 64-bit kernels.
10  *    Authors: Adrian Hoban <adrian.hoban@intel.com>
11  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
12  *             Tadeusz Struk (tadeusz.struk@intel.com)
13  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
14  *    Copyright (c) 2010, Intel Corporation.
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  */
21
22 #include <linux/hardirq.h>
23 #include <linux/types.h>
24 #include <linux/crypto.h>
25 #include <linux/err.h>
26 #include <crypto/algapi.h>
27 #include <crypto/aes.h>
28 #include <crypto/cryptd.h>
29 #include <crypto/ctr.h>
30 #include <asm/i387.h>
31 #include <asm/aes.h>
32 #include <crypto/scatterwalk.h>
33 #include <crypto/internal/aead.h>
34 #include <linux/workqueue.h>
35 #include <linux/spinlock.h>
36
37 #if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
38 #define HAS_CTR
39 #endif
40
41 #if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
42 #define HAS_LRW
43 #endif
44
45 #if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
46 #define HAS_PCBC
47 #endif
48
49 #if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
50 #define HAS_XTS
51 #endif
52
53 struct async_aes_ctx {
54         struct cryptd_ablkcipher *cryptd_tfm;
55 };
56
57 /* This data is stored at the end of the crypto_tfm struct.
58  * It serves as per-"session" data storage and
59  * must be 16 byte aligned.
60  */
61 struct aesni_rfc4106_gcm_ctx {
62         u8 hash_subkey[16];
63         struct crypto_aes_ctx aes_key_expanded;
64         u8 nonce[4];
65         struct cryptd_aead *cryptd_tfm;
66 };
67
68 struct aesni_gcm_set_hash_subkey_result {
69         int err;
70         struct completion completion;
71 };
72
73 struct aesni_hash_subkey_req_data {
74         u8 iv[16];
75         struct aesni_gcm_set_hash_subkey_result result;
76         struct scatterlist sg;
77 };
78
79 #define AESNI_ALIGN     (16)
80 #define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE-1))
81 #define RFC4106_HASH_SUBKEY_SIZE 16
82
83 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
84                              unsigned int key_len);
85 asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
86                           const u8 *in);
87 asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
88                           const u8 *in);
89 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
90                               const u8 *in, unsigned int len);
91 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
92                               const u8 *in, unsigned int len);
93 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
94                               const u8 *in, unsigned int len, u8 *iv);
95 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
96                               const u8 *in, unsigned int len, u8 *iv);
97 #ifdef CONFIG_X86_64
98 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
99                               const u8 *in, unsigned int len, u8 *iv);
100
101 /* asmlinkage void aesni_gcm_enc()
102  * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
103  * u8 *out, Ciphertext output. Encrypt in-place is allowed.
104  * const u8 *in, Plaintext input
105  * unsigned long plaintext_len, Length of data in bytes for encryption.
106  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
107  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
108  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
109  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
110  * const u8 *aad, Additional Authentication Data (AAD)
111  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
112  *          is going to be 8 or 12 bytes
113  * u8 *auth_tag, Authenticated Tag output.
115  * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
115  *          Valid values are 16 (most likely), 12 or 8.
116  */
117 asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
118                         const u8 *in, unsigned long plaintext_len, u8 *iv,
119                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
120                         u8 *auth_tag, unsigned long auth_tag_len);
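/*
 * Illustrative call (mirrors the use in __driver_rfc4106_encrypt() below;
 * all buffers must satisfy the alignment rules documented above):
 *
 *	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
 *		      ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
 *		      dst + req->cryptlen, auth_tag_len);
 */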
121
122 /* asmlinkage void aesni_gcm_dec()
123  * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
124  * u8 *out, Plaintext output. Decrypt in-place is allowed.
125  * const u8 *in, Ciphertext input
126  * unsigned long ciphertext_len, Length of data in bytes for decryption.
127  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
128  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
129  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
130  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
131  * const u8 *aad, Additional Authentication Data (AAD)
132  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
133  * to be 8 or 12 bytes
134  * u8 *auth_tag, Authenticated Tag output.
135  * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
136  * Valid values are 16 (most likely), 12 or 8.
137  */
138 asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
139                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
140                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
141                         u8 *auth_tag, unsigned long auth_tag_len);
142
143 static inline struct
144 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
145 {
146         return
147                 (struct aesni_rfc4106_gcm_ctx *)
148                 PTR_ALIGN((u8 *)
149                 crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
150 }
151 #endif
152
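/*
 * The crypto API only guarantees crypto_tfm_ctx_alignment() for the tfm
 * context, which may be smaller than the 16 bytes the AES-NI code needs,
 * so the raw context is over-allocated by AESNI_ALIGN - 1 bytes and
 * aligned up by hand here.
 */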
153 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
154 {
155         unsigned long addr = (unsigned long)raw_ctx;
156         unsigned long align = AESNI_ALIGN;
157
158         if (align <= crypto_tfm_ctx_alignment())
159                 align = 1;
160         return (struct crypto_aes_ctx *)ALIGN(addr, align);
161 }
162
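/*
 * Key expansion: if the FPU cannot be used in the current context
 * (e.g. when called from an interrupt that arrived in the middle of a
 * user space FPU section), fall back to the generic x86 key expansion;
 * otherwise run the AES-NI key schedule inside kernel_fpu_begin()/end().
 */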
163 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
164                               const u8 *in_key, unsigned int key_len)
165 {
166         struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
167         u32 *flags = &tfm->crt_flags;
168         int err;
169
170         if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
171             key_len != AES_KEYSIZE_256) {
172                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
173                 return -EINVAL;
174         }
175
176         if (!irq_fpu_usable())
177                 err = crypto_aes_expand_key(ctx, in_key, key_len);
178         else {
179                 kernel_fpu_begin();
180                 err = aesni_set_key(ctx, in_key, key_len);
181                 kernel_fpu_end();
182         }
183
184         return err;
185 }
186
187 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
188                        unsigned int key_len)
189 {
190         return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
191 }
192
193 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
194 {
195         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
196
197         if (!irq_fpu_usable())
198                 crypto_aes_encrypt_x86(ctx, dst, src);
199         else {
200                 kernel_fpu_begin();
201                 aesni_enc(ctx, dst, src);
202                 kernel_fpu_end();
203         }
204 }
205
206 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
207 {
208         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
209
210         if (!irq_fpu_usable())
211                 crypto_aes_decrypt_x86(ctx, dst, src);
212         else {
213                 kernel_fpu_begin();
214                 aesni_dec(ctx, dst, src);
215                 kernel_fpu_end();
216         }
217 }
218
219 static struct crypto_alg aesni_alg = {
220         .cra_name               = "aes",
221         .cra_driver_name        = "aes-aesni",
222         .cra_priority           = 300,
223         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
224         .cra_blocksize          = AES_BLOCK_SIZE,
225         .cra_ctxsize            = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
226         .cra_alignmask          = 0,
227         .cra_module             = THIS_MODULE,
228         .cra_list               = LIST_HEAD_INIT(aesni_alg.cra_list),
229         .cra_u  = {
230                 .cipher = {
231                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
232                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
233                         .cia_setkey             = aes_set_key,
234                         .cia_encrypt            = aes_encrypt,
235                         .cia_decrypt            = aes_decrypt
236                 }
237         }
238 };
239
240 static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
241 {
242         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
243
244         aesni_enc(ctx, dst, src);
245 }
246
247 static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
248 {
249         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
250
251         aesni_dec(ctx, dst, src);
252 }
253
254 static struct crypto_alg __aesni_alg = {
255         .cra_name               = "__aes-aesni",
256         .cra_driver_name        = "__driver-aes-aesni",
257         .cra_priority           = 0,
258         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
259         .cra_blocksize          = AES_BLOCK_SIZE,
260         .cra_ctxsize            = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
261         .cra_alignmask          = 0,
262         .cra_module             = THIS_MODULE,
263         .cra_list               = LIST_HEAD_INIT(__aesni_alg.cra_list),
264         .cra_u  = {
265                 .cipher = {
266                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
267                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
268                         .cia_setkey             = aes_set_key,
269                         .cia_encrypt            = __aes_encrypt,
270                         .cia_decrypt            = __aes_decrypt
271                 }
272         }
273 };
274
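/*
 * The ECB/CBC helpers below walk the scatterlists in virtual address
 * chunks and hand whole AES blocks to the assembly routines.  The
 * MAY_SLEEP flag is cleared because the walk runs with the FPU context
 * held (kernel_fpu_begin() disables preemption), so sleeping is not
 * allowed until kernel_fpu_end().
 */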
275 static int ecb_encrypt(struct blkcipher_desc *desc,
276                        struct scatterlist *dst, struct scatterlist *src,
277                        unsigned int nbytes)
278 {
279         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
280         struct blkcipher_walk walk;
281         int err;
282
283         blkcipher_walk_init(&walk, dst, src, nbytes);
284         err = blkcipher_walk_virt(desc, &walk);
285         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
286
287         kernel_fpu_begin();
288         while ((nbytes = walk.nbytes)) {
289                 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
290                               nbytes & AES_BLOCK_MASK);
291                 nbytes &= AES_BLOCK_SIZE - 1;
292                 err = blkcipher_walk_done(desc, &walk, nbytes);
293         }
294         kernel_fpu_end();
295
296         return err;
297 }
298
299 static int ecb_decrypt(struct blkcipher_desc *desc,
300                        struct scatterlist *dst, struct scatterlist *src,
301                        unsigned int nbytes)
302 {
303         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
304         struct blkcipher_walk walk;
305         int err;
306
307         blkcipher_walk_init(&walk, dst, src, nbytes);
308         err = blkcipher_walk_virt(desc, &walk);
309         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
310
311         kernel_fpu_begin();
312         while ((nbytes = walk.nbytes)) {
313                 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
314                               nbytes & AES_BLOCK_MASK);
315                 nbytes &= AES_BLOCK_SIZE - 1;
316                 err = blkcipher_walk_done(desc, &walk, nbytes);
317         }
318         kernel_fpu_end();
319
320         return err;
321 }
322
323 static struct crypto_alg blk_ecb_alg = {
324         .cra_name               = "__ecb-aes-aesni",
325         .cra_driver_name        = "__driver-ecb-aes-aesni",
326         .cra_priority           = 0,
327         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
328         .cra_blocksize          = AES_BLOCK_SIZE,
329         .cra_ctxsize            = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
330         .cra_alignmask          = 0,
331         .cra_type               = &crypto_blkcipher_type,
332         .cra_module             = THIS_MODULE,
333         .cra_list               = LIST_HEAD_INIT(blk_ecb_alg.cra_list),
334         .cra_u = {
335                 .blkcipher = {
336                         .min_keysize    = AES_MIN_KEY_SIZE,
337                         .max_keysize    = AES_MAX_KEY_SIZE,
338                         .setkey         = aes_set_key,
339                         .encrypt        = ecb_encrypt,
340                         .decrypt        = ecb_decrypt,
341                 },
342         },
343 };
344
345 static int cbc_encrypt(struct blkcipher_desc *desc,
346                        struct scatterlist *dst, struct scatterlist *src,
347                        unsigned int nbytes)
348 {
349         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
350         struct blkcipher_walk walk;
351         int err;
352
353         blkcipher_walk_init(&walk, dst, src, nbytes);
354         err = blkcipher_walk_virt(desc, &walk);
355         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
356
357         kernel_fpu_begin();
358         while ((nbytes = walk.nbytes)) {
359                 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
360                               nbytes & AES_BLOCK_MASK, walk.iv);
361                 nbytes &= AES_BLOCK_SIZE - 1;
362                 err = blkcipher_walk_done(desc, &walk, nbytes);
363         }
364         kernel_fpu_end();
365
366         return err;
367 }
368
369 static int cbc_decrypt(struct blkcipher_desc *desc,
370                        struct scatterlist *dst, struct scatterlist *src,
371                        unsigned int nbytes)
372 {
373         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
374         struct blkcipher_walk walk;
375         int err;
376
377         blkcipher_walk_init(&walk, dst, src, nbytes);
378         err = blkcipher_walk_virt(desc, &walk);
379         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
380
381         kernel_fpu_begin();
382         while ((nbytes = walk.nbytes)) {
383                 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
384                               nbytes & AES_BLOCK_MASK, walk.iv);
385                 nbytes &= AES_BLOCK_SIZE - 1;
386                 err = blkcipher_walk_done(desc, &walk, nbytes);
387         }
388         kernel_fpu_end();
389
390         return err;
391 }
392
393 static struct crypto_alg blk_cbc_alg = {
394         .cra_name               = "__cbc-aes-aesni",
395         .cra_driver_name        = "__driver-cbc-aes-aesni",
396         .cra_priority           = 0,
397         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
398         .cra_blocksize          = AES_BLOCK_SIZE,
399         .cra_ctxsize            = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
400         .cra_alignmask          = 0,
401         .cra_type               = &crypto_blkcipher_type,
402         .cra_module             = THIS_MODULE,
403         .cra_list               = LIST_HEAD_INIT(blk_cbc_alg.cra_list),
404         .cra_u = {
405                 .blkcipher = {
406                         .min_keysize    = AES_MIN_KEY_SIZE,
407                         .max_keysize    = AES_MAX_KEY_SIZE,
408                         .setkey         = aes_set_key,
409                         .encrypt        = cbc_encrypt,
410                         .decrypt        = cbc_decrypt,
411                 },
412         },
413 };
414
415 #ifdef CONFIG_X86_64
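/*
 * Handle the final partial block in CTR mode: encrypt the current
 * counter block to produce one block of keystream, XOR it into the
 * remaining input bytes, and advance the counter.
 */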
416 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
417                             struct blkcipher_walk *walk)
418 {
419         u8 *ctrblk = walk->iv;
420         u8 keystream[AES_BLOCK_SIZE];
421         u8 *src = walk->src.virt.addr;
422         u8 *dst = walk->dst.virt.addr;
423         unsigned int nbytes = walk->nbytes;
424
425         aesni_enc(ctx, keystream, ctrblk);
426         crypto_xor(keystream, src, nbytes);
427         memcpy(dst, keystream, nbytes);
428         crypto_inc(ctrblk, AES_BLOCK_SIZE);
429 }
430
431 static int ctr_crypt(struct blkcipher_desc *desc,
432                      struct scatterlist *dst, struct scatterlist *src,
433                      unsigned int nbytes)
434 {
435         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
436         struct blkcipher_walk walk;
437         int err;
438
439         blkcipher_walk_init(&walk, dst, src, nbytes);
440         err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
441         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
442
443         kernel_fpu_begin();
444         while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
445                 aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
446                               nbytes & AES_BLOCK_MASK, walk.iv);
447                 nbytes &= AES_BLOCK_SIZE - 1;
448                 err = blkcipher_walk_done(desc, &walk, nbytes);
449         }
450         if (walk.nbytes) {
451                 ctr_crypt_final(ctx, &walk);
452                 err = blkcipher_walk_done(desc, &walk, 0);
453         }
454         kernel_fpu_end();
455
456         return err;
457 }
458
459 static struct crypto_alg blk_ctr_alg = {
460         .cra_name               = "__ctr-aes-aesni",
461         .cra_driver_name        = "__driver-ctr-aes-aesni",
462         .cra_priority           = 0,
463         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
464         .cra_blocksize          = 1,
465         .cra_ctxsize            = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
466         .cra_alignmask          = 0,
467         .cra_type               = &crypto_blkcipher_type,
468         .cra_module             = THIS_MODULE,
469         .cra_list               = LIST_HEAD_INIT(blk_ctr_alg.cra_list),
470         .cra_u = {
471                 .blkcipher = {
472                         .min_keysize    = AES_MIN_KEY_SIZE,
473                         .max_keysize    = AES_MAX_KEY_SIZE,
474                         .ivsize         = AES_BLOCK_SIZE,
475                         .setkey         = aes_set_key,
476                         .encrypt        = ctr_crypt,
477                         .decrypt        = ctr_crypt,
478                 },
479         },
480 };
481 #endif
482
483 static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
484                         unsigned int key_len)
485 {
486         struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
487         struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
488         int err;
489
490         crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
491         crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
492                                     & CRYPTO_TFM_REQ_MASK);
493         err = crypto_ablkcipher_setkey(child, key, key_len);
494         crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
495                                     & CRYPTO_TFM_RES_MASK);
496         return err;
497 }
498
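/*
 * Async wrappers: when the FPU is not usable the request is bounced to
 * the cryptd workqueue, which will process it later in process context;
 * otherwise the inner ("__driver-...") blkcipher is invoked directly.
 */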
499 static int ablk_encrypt(struct ablkcipher_request *req)
500 {
501         struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
502         struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
503
504         if (!irq_fpu_usable()) {
505                 struct ablkcipher_request *cryptd_req =
506                         ablkcipher_request_ctx(req);
507                 memcpy(cryptd_req, req, sizeof(*req));
508                 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
509                 return crypto_ablkcipher_encrypt(cryptd_req);
510         } else {
511                 struct blkcipher_desc desc;
512                 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
513                 desc.info = req->info;
514                 desc.flags = 0;
515                 return crypto_blkcipher_crt(desc.tfm)->encrypt(
516                         &desc, req->dst, req->src, req->nbytes);
517         }
518 }
519
520 static int ablk_decrypt(struct ablkcipher_request *req)
521 {
522         struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
523         struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
524
525         if (!irq_fpu_usable()) {
526                 struct ablkcipher_request *cryptd_req =
527                         ablkcipher_request_ctx(req);
528                 memcpy(cryptd_req, req, sizeof(*req));
529                 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
530                 return crypto_ablkcipher_decrypt(cryptd_req);
531         } else {
532                 struct blkcipher_desc desc;
533                 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
534                 desc.info = req->info;
535                 desc.flags = 0;
536                 return crypto_blkcipher_crt(desc.tfm)->decrypt(
537                         &desc, req->dst, req->src, req->nbytes);
538         }
539 }
540
541 static void ablk_exit(struct crypto_tfm *tfm)
542 {
543         struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
544
545         cryptd_free_ablkcipher(ctx->cryptd_tfm);
546 }
547
548 static void ablk_init_common(struct crypto_tfm *tfm,
549                              struct cryptd_ablkcipher *cryptd_tfm)
550 {
551         struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
552
553         ctx->cryptd_tfm = cryptd_tfm;
554         tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
555                 crypto_ablkcipher_reqsize(&cryptd_tfm->base);
556 }
557
558 static int ablk_ecb_init(struct crypto_tfm *tfm)
559 {
560         struct cryptd_ablkcipher *cryptd_tfm;
561
562         cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
563         if (IS_ERR(cryptd_tfm))
564                 return PTR_ERR(cryptd_tfm);
565         ablk_init_common(tfm, cryptd_tfm);
566         return 0;
567 }
568
569 static struct crypto_alg ablk_ecb_alg = {
570         .cra_name               = "ecb(aes)",
571         .cra_driver_name        = "ecb-aes-aesni",
572         .cra_priority           = 400,
573         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
574         .cra_blocksize          = AES_BLOCK_SIZE,
575         .cra_ctxsize            = sizeof(struct async_aes_ctx),
576         .cra_alignmask          = 0,
577         .cra_type               = &crypto_ablkcipher_type,
578         .cra_module             = THIS_MODULE,
579         .cra_list               = LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
580         .cra_init               = ablk_ecb_init,
581         .cra_exit               = ablk_exit,
582         .cra_u = {
583                 .ablkcipher = {
584                         .min_keysize    = AES_MIN_KEY_SIZE,
585                         .max_keysize    = AES_MAX_KEY_SIZE,
586                         .setkey         = ablk_set_key,
587                         .encrypt        = ablk_encrypt,
588                         .decrypt        = ablk_decrypt,
589                 },
590         },
591 };
592
593 static int ablk_cbc_init(struct crypto_tfm *tfm)
594 {
595         struct cryptd_ablkcipher *cryptd_tfm;
596
597         cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
598         if (IS_ERR(cryptd_tfm))
599                 return PTR_ERR(cryptd_tfm);
600         ablk_init_common(tfm, cryptd_tfm);
601         return 0;
602 }
603
604 static struct crypto_alg ablk_cbc_alg = {
605         .cra_name               = "cbc(aes)",
606         .cra_driver_name        = "cbc-aes-aesni",
607         .cra_priority           = 400,
608         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
609         .cra_blocksize          = AES_BLOCK_SIZE,
610         .cra_ctxsize            = sizeof(struct async_aes_ctx),
611         .cra_alignmask          = 0,
612         .cra_type               = &crypto_ablkcipher_type,
613         .cra_module             = THIS_MODULE,
614         .cra_list               = LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
615         .cra_init               = ablk_cbc_init,
616         .cra_exit               = ablk_exit,
617         .cra_u = {
618                 .ablkcipher = {
619                         .min_keysize    = AES_MIN_KEY_SIZE,
620                         .max_keysize    = AES_MAX_KEY_SIZE,
621                         .ivsize         = AES_BLOCK_SIZE,
622                         .setkey         = ablk_set_key,
623                         .encrypt        = ablk_encrypt,
624                         .decrypt        = ablk_decrypt,
625                 },
626         },
627 };
628
629 #ifdef CONFIG_X86_64
630 static int ablk_ctr_init(struct crypto_tfm *tfm)
631 {
632         struct cryptd_ablkcipher *cryptd_tfm;
633
634         cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
635         if (IS_ERR(cryptd_tfm))
636                 return PTR_ERR(cryptd_tfm);
637         ablk_init_common(tfm, cryptd_tfm);
638         return 0;
639 }
640
641 static struct crypto_alg ablk_ctr_alg = {
642         .cra_name               = "ctr(aes)",
643         .cra_driver_name        = "ctr-aes-aesni",
644         .cra_priority           = 400,
645         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
646         .cra_blocksize          = 1,
647         .cra_ctxsize            = sizeof(struct async_aes_ctx),
648         .cra_alignmask          = 0,
649         .cra_type               = &crypto_ablkcipher_type,
650         .cra_module             = THIS_MODULE,
651         .cra_list               = LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
652         .cra_init               = ablk_ctr_init,
653         .cra_exit               = ablk_exit,
654         .cra_u = {
655                 .ablkcipher = {
656                         .min_keysize    = AES_MIN_KEY_SIZE,
657                         .max_keysize    = AES_MAX_KEY_SIZE,
658                         .ivsize         = AES_BLOCK_SIZE,
659                         .setkey         = ablk_set_key,
660                         .encrypt        = ablk_encrypt,
661                         .decrypt        = ablk_decrypt,
662                         .geniv          = "chainiv",
663                 },
664         },
665 };
666
667 #ifdef HAS_CTR
668 static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
669 {
670         struct cryptd_ablkcipher *cryptd_tfm;
671
672         cryptd_tfm = cryptd_alloc_ablkcipher(
673                 "rfc3686(__driver-ctr-aes-aesni)", 0, 0);
674         if (IS_ERR(cryptd_tfm))
675                 return PTR_ERR(cryptd_tfm);
676         ablk_init_common(tfm, cryptd_tfm);
677         return 0;
678 }
679
680 static struct crypto_alg ablk_rfc3686_ctr_alg = {
681         .cra_name               = "rfc3686(ctr(aes))",
682         .cra_driver_name        = "rfc3686-ctr-aes-aesni",
683         .cra_priority           = 400,
684         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
685         .cra_blocksize          = 1,
686         .cra_ctxsize            = sizeof(struct async_aes_ctx),
687         .cra_alignmask          = 0,
688         .cra_type               = &crypto_ablkcipher_type,
689         .cra_module             = THIS_MODULE,
690         .cra_list               = LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
691         .cra_init               = ablk_rfc3686_ctr_init,
692         .cra_exit               = ablk_exit,
693         .cra_u = {
694                 .ablkcipher = {
695                         .min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
696                         .max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
697                         .ivsize      = CTR_RFC3686_IV_SIZE,
698                         .setkey      = ablk_set_key,
699                         .encrypt     = ablk_encrypt,
700                         .decrypt     = ablk_decrypt,
701                         .geniv       = "seqiv",
702                 },
703         },
704 };
705 #endif
706 #endif
707
708 #ifdef HAS_LRW
709 static int ablk_lrw_init(struct crypto_tfm *tfm)
710 {
711         struct cryptd_ablkcipher *cryptd_tfm;
712
713         cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
714                                              0, 0);
715         if (IS_ERR(cryptd_tfm))
716                 return PTR_ERR(cryptd_tfm);
717         ablk_init_common(tfm, cryptd_tfm);
718         return 0;
719 }
720
721 static struct crypto_alg ablk_lrw_alg = {
722         .cra_name               = "lrw(aes)",
723         .cra_driver_name        = "lrw-aes-aesni",
724         .cra_priority           = 400,
725         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
726         .cra_blocksize          = AES_BLOCK_SIZE,
727         .cra_ctxsize            = sizeof(struct async_aes_ctx),
728         .cra_alignmask          = 0,
729         .cra_type               = &crypto_ablkcipher_type,
730         .cra_module             = THIS_MODULE,
731         .cra_list               = LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
732         .cra_init               = ablk_lrw_init,
733         .cra_exit               = ablk_exit,
734         .cra_u = {
735                 .ablkcipher = {
736                         .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
737                         .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
738                         .ivsize         = AES_BLOCK_SIZE,
739                         .setkey         = ablk_set_key,
740                         .encrypt        = ablk_encrypt,
741                         .decrypt        = ablk_decrypt,
742                 },
743         },
744 };
745 #endif
746
747 #ifdef HAS_PCBC
748 static int ablk_pcbc_init(struct crypto_tfm *tfm)
749 {
750         struct cryptd_ablkcipher *cryptd_tfm;
751
752         cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
753                                              0, 0);
754         if (IS_ERR(cryptd_tfm))
755                 return PTR_ERR(cryptd_tfm);
756         ablk_init_common(tfm, cryptd_tfm);
757         return 0;
758 }
759
760 static struct crypto_alg ablk_pcbc_alg = {
761         .cra_name               = "pcbc(aes)",
762         .cra_driver_name        = "pcbc-aes-aesni",
763         .cra_priority           = 400,
764         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
765         .cra_blocksize          = AES_BLOCK_SIZE,
766         .cra_ctxsize            = sizeof(struct async_aes_ctx),
767         .cra_alignmask          = 0,
768         .cra_type               = &crypto_ablkcipher_type,
769         .cra_module             = THIS_MODULE,
770         .cra_list               = LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
771         .cra_init               = ablk_pcbc_init,
772         .cra_exit               = ablk_exit,
773         .cra_u = {
774                 .ablkcipher = {
775                         .min_keysize    = AES_MIN_KEY_SIZE,
776                         .max_keysize    = AES_MAX_KEY_SIZE,
777                         .ivsize         = AES_BLOCK_SIZE,
778                         .setkey         = ablk_set_key,
779                         .encrypt        = ablk_encrypt,
780                         .decrypt        = ablk_decrypt,
781                 },
782         },
783 };
784 #endif
785
786 #ifdef HAS_XTS
787 static int ablk_xts_init(struct crypto_tfm *tfm)
788 {
789         struct cryptd_ablkcipher *cryptd_tfm;
790
791         cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
792                                              0, 0);
793         if (IS_ERR(cryptd_tfm))
794                 return PTR_ERR(cryptd_tfm);
795         ablk_init_common(tfm, cryptd_tfm);
796         return 0;
797 }
798
799 static struct crypto_alg ablk_xts_alg = {
800         .cra_name               = "xts(aes)",
801         .cra_driver_name        = "xts-aes-aesni",
802         .cra_priority           = 400,
803         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
804         .cra_blocksize          = AES_BLOCK_SIZE,
805         .cra_ctxsize            = sizeof(struct async_aes_ctx),
806         .cra_alignmask          = 0,
807         .cra_type               = &crypto_ablkcipher_type,
808         .cra_module             = THIS_MODULE,
809         .cra_list               = LIST_HEAD_INIT(ablk_xts_alg.cra_list),
810         .cra_init               = ablk_xts_init,
811         .cra_exit               = ablk_exit,
812         .cra_u = {
813                 .ablkcipher = {
814                         .min_keysize    = 2 * AES_MIN_KEY_SIZE,
815                         .max_keysize    = 2 * AES_MAX_KEY_SIZE,
816                         .ivsize         = AES_BLOCK_SIZE,
817                         .setkey         = ablk_set_key,
818                         .encrypt        = ablk_encrypt,
819                         .decrypt        = ablk_decrypt,
820                 },
821         },
822 };
823 #endif
824
825 #ifdef CONFIG_X86_64
826 static int rfc4106_init(struct crypto_tfm *tfm)
827 {
828         struct cryptd_aead *cryptd_tfm;
829         struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
830                 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
831         struct crypto_aead *cryptd_child;
832         struct aesni_rfc4106_gcm_ctx *child_ctx;
833         cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
834         if (IS_ERR(cryptd_tfm))
835                 return PTR_ERR(cryptd_tfm);
836
837         cryptd_child = cryptd_aead_child(cryptd_tfm);
838         child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
839         memcpy(child_ctx, ctx, sizeof(*ctx));
840         ctx->cryptd_tfm = cryptd_tfm;
841         tfm->crt_aead.reqsize = sizeof(struct aead_request)
842                 + crypto_aead_reqsize(&cryptd_tfm->base);
843         return 0;
844 }
845
846 static void rfc4106_exit(struct crypto_tfm *tfm)
847 {
848         struct aesni_rfc4106_gcm_ctx *ctx =
849                 (struct aesni_rfc4106_gcm_ctx *)
850                 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
851         if (!IS_ERR(ctx->cryptd_tfm))
852                 cryptd_free_aead(ctx->cryptd_tfm);
853         return;
854 }
855
856 static void
857 rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
858 {
859         struct aesni_gcm_set_hash_subkey_result *result = req->data;
860
861         if (err == -EINPROGRESS)
862                 return;
863         result->err = err;
864         complete(&result->completion);
865 }
866
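/*
 * Derive the GHASH subkey H = ENC_K(0^128) by running a zero block
 * through a ctr(aes) transform with an all-zero IV.  The result lands
 * directly in hash_subkey, which is later handed to the GCM assembly.
 */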
867 static int
868 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
869 {
870         struct crypto_ablkcipher *ctr_tfm;
871         struct ablkcipher_request *req;
872         int ret = -EINVAL;
873         struct aesni_hash_subkey_req_data *req_data;
874
875         ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
876         if (IS_ERR(ctr_tfm))
877                 return PTR_ERR(ctr_tfm);
878
879         crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
880
881         ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
882         if (ret)
883                 goto out_free_ablkcipher;
884
885         ret = -ENOMEM;
886         req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
887         if (!req)
888                 goto out_free_ablkcipher;
889
890         req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
891         if (!req_data)
892                 goto out_free_request;
893
894         memset(req_data->iv, 0, sizeof(req_data->iv));
895
896         /* Clear the data in the hash sub key container to zero.*/
897         /* We want to cipher all zeros to create the hash sub key. */
898         memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
899
900         init_completion(&req_data->result.completion);
901         sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
902         ablkcipher_request_set_tfm(req, ctr_tfm);
903         ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
904                                         CRYPTO_TFM_REQ_MAY_BACKLOG,
905                                         rfc4106_set_hash_subkey_done,
906                                         &req_data->result);
907
908         ablkcipher_request_set_crypt(req, &req_data->sg,
909                 &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
910
911         ret = crypto_ablkcipher_encrypt(req);
912         if (ret == -EINPROGRESS || ret == -EBUSY) {
913                 ret = wait_for_completion_interruptible
914                         (&req_data->result.completion);
915                 if (!ret)
916                         ret = req_data->result.err;
917         }
918         kfree(req_data);
919 out_free_request:
920         ablkcipher_request_free(req);
921 out_free_ablkcipher:
922         crypto_free_ablkcipher(ctr_tfm);
923         return ret;
924 }
925
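/*
 * RFC 4106 keys carry a 4 byte nonce (salt) appended to the AES key,
 * so the last 4 bytes are stripped off into ctx->nonce and only the
 * remaining 16 bytes are expanded as the AES-128 key.
 */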
926 static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
927                                                    unsigned int key_len)
928 {
929         int ret = 0;
930         struct crypto_tfm *tfm = crypto_aead_tfm(parent);
931         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
932         struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
933         struct aesni_rfc4106_gcm_ctx *child_ctx =
934                                  aesni_rfc4106_gcm_ctx_get(cryptd_child);
935         u8 *new_key_mem = NULL;
936
937         if (key_len < 4) {
938                 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
939                 return -EINVAL;
940         }
941         /* Account for 4 byte nonce at the end. */
942         key_len -= 4;
943         if (key_len != AES_KEYSIZE_128) {
944                 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
945                 return -EINVAL;
946         }
947
948         memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
949         /*This must be on a 16 byte boundary!*/
950         if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
951                 return -EINVAL;
952
953         if ((unsigned long)key % AESNI_ALIGN) {
954         /* key is not aligned: use an auxiliary aligned pointer */
955                 new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
956                 if (!new_key_mem)
957                         return -ENOMEM;
958
959                 new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
960                 memcpy(new_key_mem, key, key_len);
961                 key = new_key_mem;
962         }
963
964         if (!irq_fpu_usable())
965                 ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
966                 key, key_len);
967         else {
968                 kernel_fpu_begin();
969                 ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
970                 kernel_fpu_end();
971         }
972         /*This must be on a 16 byte boundary!*/
973         if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
974                 ret = -EINVAL;
975                 goto exit;
976         }
977         ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
978         memcpy(child_ctx, ctx, sizeof(*ctx));
979 exit:
980         kfree(new_key_mem);
981         return ret;
982 }
983
984 /* This is the Integrity Check Value (aka the authentication tag) length and
985  * can be 8, 12 or 16 bytes long. */
986 static int rfc4106_set_authsize(struct crypto_aead *parent,
987                                 unsigned int authsize)
988 {
989         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
990         struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
991
992         switch (authsize) {
993         case 8:
994         case 12:
995         case 16:
996                 break;
997         default:
998                 return -EINVAL;
999         }
1000         crypto_aead_crt(parent)->authsize = authsize;
1001         crypto_aead_crt(cryptd_child)->authsize = authsize;
1002         return 0;
1003 }
1004
1005 static int rfc4106_encrypt(struct aead_request *req)
1006 {
1007         int ret;
1008         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1009         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1010
1011         if (!irq_fpu_usable()) {
1012                 struct aead_request *cryptd_req =
1013                         (struct aead_request *) aead_request_ctx(req);
1014                 memcpy(cryptd_req, req, sizeof(*req));
1015                 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1016                 return crypto_aead_encrypt(cryptd_req);
1017         } else {
1018                 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1019                 kernel_fpu_begin();
1020                 ret = cryptd_child->base.crt_aead.encrypt(req);
1021                 kernel_fpu_end();
1022                 return ret;
1023         }
1024 }
1025
1026 static int rfc4106_decrypt(struct aead_request *req)
1027 {
1028         int ret;
1029         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1030         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1031
1032         if (!irq_fpu_usable()) {
1033                 struct aead_request *cryptd_req =
1034                         (struct aead_request *) aead_request_ctx(req);
1035                 memcpy(cryptd_req, req, sizeof(*req));
1036                 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1037                 return crypto_aead_decrypt(cryptd_req);
1038         } else {
1039                 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1040                 kernel_fpu_begin();
1041                 ret = cryptd_child->base.crt_aead.decrypt(req);
1042                 kernel_fpu_end();
1043                 return ret;
1044         }
1045 }
1046
1047 static struct crypto_alg rfc4106_alg = {
1048         .cra_name = "rfc4106(gcm(aes))",
1049         .cra_driver_name = "rfc4106-gcm-aesni",
1050         .cra_priority = 400,
1051         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1052         .cra_blocksize = 1,
1053         .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
1054         .cra_alignmask = 0,
1055         .cra_type = &crypto_nivaead_type,
1056         .cra_module = THIS_MODULE,
1057         .cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
1058         .cra_init = rfc4106_init,
1059         .cra_exit = rfc4106_exit,
1060         .cra_u = {
1061                 .aead = {
1062                         .setkey = rfc4106_set_key,
1063                         .setauthsize = rfc4106_set_authsize,
1064                         .encrypt = rfc4106_encrypt,
1065                         .decrypt = rfc4106_decrypt,
1066                         .geniv = "seqiv",
1067                         .ivsize = 8,
1068                         .maxauthsize = 16,
1069                 },
1070         },
1071 };
1072
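/*
 * Inner GCM encrypt, invoked either directly (with the FPU context held
 * by rfc4106_encrypt()) or via the cryptd worker.  It builds the
 * pre-counter block from salt || explicit IV || 0x00000001 and, when the
 * scatterlists are not single-entry, bounces the data through a
 * temporary linear buffer before calling the assembly routine.
 */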
1073 static int __driver_rfc4106_encrypt(struct aead_request *req)
1074 {
1075         u8 one_entry_in_sg = 0;
1076         u8 *src, *dst, *assoc;
1077         __be32 counter = cpu_to_be32(1);
1078         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1079         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1080         void *aes_ctx = &(ctx->aes_key_expanded);
1081         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1082         u8 iv_tab[16+AESNI_ALIGN];
1083         u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
1084         struct scatter_walk src_sg_walk;
1085         struct scatter_walk assoc_sg_walk;
1086         struct scatter_walk dst_sg_walk;
1087         unsigned int i;
1088
1089         /* Assuming we are supporting rfc4106 64-bit extended */
1090         /* sequence numbers, we need to have the AAD length equal */
1091         /* to 8 or 12 bytes. */
1092         if (unlikely(req->assoclen != 8 && req->assoclen != 12))
1093                 return -EINVAL;
1094         /* Build the IV: 4 byte salt, 8 byte explicit IV, then a counter of 1 */
1095         for (i = 0; i < 4; i++)
1096                 *(iv+i) = ctx->nonce[i];
1097         for (i = 0; i < 8; i++)
1098                 *(iv+4+i) = req->iv[i];
1099         *((__be32 *)(iv+12)) = counter;
1100
1101         if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1102                 one_entry_in_sg = 1;
1103                 scatterwalk_start(&src_sg_walk, req->src);
1104                 scatterwalk_start(&assoc_sg_walk, req->assoc);
1105                 src = scatterwalk_map(&src_sg_walk, 0);
1106                 assoc = scatterwalk_map(&assoc_sg_walk, 0);
1107                 dst = src;
1108                 if (unlikely(req->src != req->dst)) {
1109                         scatterwalk_start(&dst_sg_walk, req->dst);
1110                         dst = scatterwalk_map(&dst_sg_walk, 0);
1111                 }
1112
1113         } else {
1114                 /* Allocate memory for src, dst, assoc */
1115                 src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
1116                         GFP_ATOMIC);
1117                 if (unlikely(!src))
1118                         return -ENOMEM;
1119                 assoc = (src + req->cryptlen + auth_tag_len);
1120                 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1121                 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1122                                         req->assoclen, 0);
1123                 dst = src;
1124         }
1125
1126         aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
1127                 ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
1128                 + ((unsigned long)req->cryptlen), auth_tag_len);
1129
1130         /* The authTag (aka the Integrity Check Value) needs to be written
1131          * back to the packet. */
1132         if (one_entry_in_sg) {
1133                 if (unlikely(req->src != req->dst)) {
1134                         scatterwalk_unmap(dst, 0);
1135                         scatterwalk_done(&dst_sg_walk, 0, 0);
1136                 }
1137                 scatterwalk_unmap(src, 0);
1138                 scatterwalk_unmap(assoc, 0);
1139                 scatterwalk_done(&src_sg_walk, 0, 0);
1140                 scatterwalk_done(&assoc_sg_walk, 0, 0);
1141         } else {
1142                 scatterwalk_map_and_copy(dst, req->dst, 0,
1143                         req->cryptlen + auth_tag_len, 1);
1144                 kfree(src);
1145         }
1146         return 0;
1147 }
1148
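/*
 * Inner GCM decrypt: same buffer handling as the encrypt path, except
 * that only cryptlen - authsize bytes are ciphertext and the computed
 * tag is compared against the tag at the end of the input; a mismatch
 * returns -EBADMSG.
 */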
1149 static int __driver_rfc4106_decrypt(struct aead_request *req)
1150 {
1151         u8 one_entry_in_sg = 0;
1152         u8 *src, *dst, *assoc;
1153         unsigned long tempCipherLen = 0;
1154         __be32 counter = cpu_to_be32(1);
1155         int retval = 0;
1156         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1157         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1158         void *aes_ctx = &(ctx->aes_key_expanded);
1159         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1160         u8 iv_and_authTag[32+AESNI_ALIGN];
1161         u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
1162         u8 *authTag = iv + 16;
1163         struct scatter_walk src_sg_walk;
1164         struct scatter_walk assoc_sg_walk;
1165         struct scatter_walk dst_sg_walk;
1166         unsigned int i;
1167
1168         if (unlikely((req->cryptlen < auth_tag_len) ||
1169                 (req->assoclen != 8 && req->assoclen != 12)))
1170                 return -EINVAL;
1171         /* Assuming we are supporting rfc4106 64-bit extended */
1172         /* sequence numbers, we need to have the AAD length */
1173         /* equal to 8 or 12 bytes. */
1174
1175         tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
1176         /* Build the IV: 4 byte salt, 8 byte explicit IV, then a counter of 1 */
1177         for (i = 0; i < 4; i++)
1178                 *(iv+i) = ctx->nonce[i];
1179         for (i = 0; i < 8; i++)
1180                 *(iv+4+i) = req->iv[i];
1181         *((__be32 *)(iv+12)) = counter;
1182
1183         if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1184                 one_entry_in_sg = 1;
1185                 scatterwalk_start(&src_sg_walk, req->src);
1186                 scatterwalk_start(&assoc_sg_walk, req->assoc);
1187                 src = scatterwalk_map(&src_sg_walk, 0);
1188                 assoc = scatterwalk_map(&assoc_sg_walk, 0);
1189                 dst = src;
1190                 if (unlikely(req->src != req->dst)) {
1191                         scatterwalk_start(&dst_sg_walk, req->dst);
1192                         dst = scatterwalk_map(&dst_sg_walk, 0);
1193                 }
1194
1195         } else {
1196                 /* Allocate memory for src, dst, assoc */
1197                 src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
1198                 if (!src)
1199                         return -ENOMEM;
1200                 assoc = (src + req->cryptlen);
1201                 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1202                 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1203                         req->assoclen, 0);
1204                 dst = src;
1205         }
1206
1207         aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
1208                 ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
1209                 authTag, auth_tag_len);
1210
1211         /* Compare generated tag with passed in tag. */
1212         retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
1213                 -EBADMSG : 0;
1214
1215         if (one_entry_in_sg) {
1216                 if (unlikely(req->src != req->dst)) {
1217                         scatterwalk_unmap(dst, 0);
1218                         scatterwalk_done(&dst_sg_walk, 0, 0);
1219                 }
1220                 scatterwalk_unmap(src, 0);
1221                 scatterwalk_unmap(assoc, 0);
1222                 scatterwalk_done(&src_sg_walk, 0, 0);
1223                 scatterwalk_done(&assoc_sg_walk, 0, 0);
1224         } else {
1225                 scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
1226                 kfree(src);
1227         }
1228         return retval;
1229 }
1230
1231 static struct crypto_alg __rfc4106_alg = {
1232         .cra_name               = "__gcm-aes-aesni",
1233         .cra_driver_name        = "__driver-gcm-aes-aesni",
1234         .cra_priority           = 0,
1235         .cra_flags              = CRYPTO_ALG_TYPE_AEAD,
1236         .cra_blocksize          = 1,
1237         .cra_ctxsize    = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
1238         .cra_alignmask          = 0,
1239         .cra_type               = &crypto_aead_type,
1240         .cra_module             = THIS_MODULE,
1241         .cra_list               = LIST_HEAD_INIT(__rfc4106_alg.cra_list),
1242         .cra_u = {
1243                 .aead = {
1244                         .encrypt        = __driver_rfc4106_encrypt,
1245                         .decrypt        = __driver_rfc4106_decrypt,
1246                 },
1247         },
1248 };
1249 #endif
1250
1251 static int __init aesni_init(void)
1252 {
1253         int err;
1254
1255         if (!cpu_has_aes) {
1256                 printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
1257                 return -ENODEV;
1258         }
1259
1260         if ((err = crypto_register_alg(&aesni_alg)))
1261                 goto aes_err;
1262         if ((err = crypto_register_alg(&__aesni_alg)))
1263                 goto __aes_err;
1264         if ((err = crypto_register_alg(&blk_ecb_alg)))
1265                 goto blk_ecb_err;
1266         if ((err = crypto_register_alg(&blk_cbc_alg)))
1267                 goto blk_cbc_err;
1268         if ((err = crypto_register_alg(&ablk_ecb_alg)))
1269                 goto ablk_ecb_err;
1270         if ((err = crypto_register_alg(&ablk_cbc_alg)))
1271                 goto ablk_cbc_err;
1272 #ifdef CONFIG_X86_64
1273         if ((err = crypto_register_alg(&blk_ctr_alg)))
1274                 goto blk_ctr_err;
1275         if ((err = crypto_register_alg(&ablk_ctr_alg)))
1276                 goto ablk_ctr_err;
1277         if ((err = crypto_register_alg(&__rfc4106_alg)))
1278                 goto __aead_gcm_err;
1279         if ((err = crypto_register_alg(&rfc4106_alg)))
1280                 goto aead_gcm_err;
1281 #ifdef HAS_CTR
1282         if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
1283                 goto ablk_rfc3686_ctr_err;
1284 #endif
1285 #endif
1286 #ifdef HAS_LRW
1287         if ((err = crypto_register_alg(&ablk_lrw_alg)))
1288                 goto ablk_lrw_err;
1289 #endif
1290 #ifdef HAS_PCBC
1291         if ((err = crypto_register_alg(&ablk_pcbc_alg)))
1292                 goto ablk_pcbc_err;
1293 #endif
1294 #ifdef HAS_XTS
1295         if ((err = crypto_register_alg(&ablk_xts_alg)))
1296                 goto ablk_xts_err;
1297 #endif
1298         return err;
1299
1300 #ifdef HAS_XTS
1301 ablk_xts_err:
1302 #endif
1303 #ifdef HAS_PCBC
1304         crypto_unregister_alg(&ablk_pcbc_alg);
1305 ablk_pcbc_err:
1306 #endif
1307 #ifdef HAS_LRW
1308         crypto_unregister_alg(&ablk_lrw_alg);
1309 ablk_lrw_err:
1310 #endif
1311 #ifdef CONFIG_X86_64
1312 #ifdef HAS_CTR
1313         crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
1314 ablk_rfc3686_ctr_err:
1315 #endif
1316         crypto_unregister_alg(&rfc4106_alg);
1317 aead_gcm_err:
1318         crypto_unregister_alg(&__rfc4106_alg);
1319 __aead_gcm_err:
1320         crypto_unregister_alg(&ablk_ctr_alg);
1321 ablk_ctr_err:
1322         crypto_unregister_alg(&blk_ctr_alg);
1323 blk_ctr_err:
1324 #endif
1325         crypto_unregister_alg(&ablk_cbc_alg);
1326 ablk_cbc_err:
1327         crypto_unregister_alg(&ablk_ecb_alg);
1328 ablk_ecb_err:
1329         crypto_unregister_alg(&blk_cbc_alg);
1330 blk_cbc_err:
1331         crypto_unregister_alg(&blk_ecb_alg);
1332 blk_ecb_err:
1333         crypto_unregister_alg(&__aesni_alg);
1334 __aes_err:
1335         crypto_unregister_alg(&aesni_alg);
1336 aes_err:
1337         return err;
1338 }
1339
1340 static void __exit aesni_exit(void)
1341 {
1342 #ifdef HAS_XTS
1343         crypto_unregister_alg(&ablk_xts_alg);
1344 #endif
1345 #ifdef HAS_PCBC
1346         crypto_unregister_alg(&ablk_pcbc_alg);
1347 #endif
1348 #ifdef HAS_LRW
1349         crypto_unregister_alg(&ablk_lrw_alg);
1350 #endif
1351 #ifdef CONFIG_X86_64
1352 #ifdef HAS_CTR
1353         crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
1354 #endif
1355         crypto_unregister_alg(&rfc4106_alg);
1356         crypto_unregister_alg(&__rfc4106_alg);
1357         crypto_unregister_alg(&ablk_ctr_alg);
1358         crypto_unregister_alg(&blk_ctr_alg);
1359 #endif
1360         crypto_unregister_alg(&ablk_cbc_alg);
1361         crypto_unregister_alg(&ablk_ecb_alg);
1362         crypto_unregister_alg(&blk_cbc_alg);
1363         crypto_unregister_alg(&blk_ecb_alg);
1364         crypto_unregister_alg(&__aesni_alg);
1365         crypto_unregister_alg(&aesni_alg);
1366 }
1367
1368 module_init(aesni_init);
1369 module_exit(aesni_exit);
1370
1371 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1372 MODULE_LICENSE("GPL");
1373 MODULE_ALIAS("aes");