crypto: hash - Simplify the ahash_finup implementation
crypto/ahash.c
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

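/*
 * Map the page for the current walk position and return the number of
 * bytes that may be processed in this step.  An unaligned offset is
 * clamped so that the next step starts on an alignment boundary.
 */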
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = crypto_kmap(walk->pg, 0);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);
		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

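/*
 * Start walking a new scatterlist entry: record its page and offset
 * and clamp the entry length to the bytes remaining in the walk.
 */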
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

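/*
 * Finish one step of a hash walk.  If the previous step ended at an
 * unaligned offset, continue within the same page from the aligned
 * offset; otherwise unmap the page and advance to the next page or
 * scatterlist entry.  Returns the number of bytes available in the
 * next step, zero once the walk is complete, or a negative error.
 */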
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	crypto_kunmap(walk->data, 0);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = scatterwalk_sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

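/*
 * Begin a hash walk over the request's source scatterlist.  Returns
 * the number of bytes available in the first step, or zero for an
 * empty request.
 */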
int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

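/*
 * Compatibility variant of crypto_hash_walk_first() for the legacy
 * hash_desc interface.
 */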
int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
				  struct crypto_hash_walk *walk,
				  struct scatterlist *sg, unsigned int len)
{
	walk->total = len;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
	walk->sg = sg;
	walk->flags = hdesc->flags;

	return hash_walk_new_entry(walk);
}

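/*
 * Slow path for setkey: the key is not aligned for the transform, so
 * copy it into a correctly aligned scratch buffer before handing it
 * to the setkey hook, then zero and free the copy.
 */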
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

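/*
 * Worst-case buffer size needed so that a result buffer allocated at
 * crypto_tfm_ctx_alignment() can be padded up to the algorithm's
 * alignment mask.
 */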
static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

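/*
 * Stash the caller's result buffer and completion callback in a
 * private structure and redirect the request so the operation
 * completes into an aligned buffer first.  Undone by
 * ahash_restore_req().
 */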
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, so an
	 * explanation is necessary. See include/crypto/hash.h and
	 * include/linux/crypto.h to understand the layout of the
	 * structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	/*
	 * WARNING: We do not back up req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

248
249 static void ahash_restore_req(struct ahash_request *req)
250 {
251         struct ahash_request_priv *priv = req->priv;
252
253         /* Restore the original crypto request. */
254         req->result = priv->result;
255         req->base.complete = priv->complete;
256         req->base.data = priv->data;
257         req->priv = NULL;
258
259         /* Free the req->priv.priv from the ADJUSTED request. */
260         kzfree(priv);
261 }
262
263 static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
264 {
265         struct ahash_request_priv *priv = req->priv;
266
267         if (err == -EINPROGRESS)
268                 return;
269
270         if (!err)
271                 memcpy(priv->result, req->result,
272                        crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
273
274         ahash_restore_req(req);
275 }
276
277 static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
278 {
279         struct ahash_request *areq = req->data;
280
281         /*
282          * Restore the original request, see ahash_op_unaligned() for what
283          * goes where.
284          *
285          * The "struct ahash_request *req" here is in fact the "req.base"
286          * from the ADJUSTED request from ahash_op_unaligned(), thus as it
287          * is a pointer to self, it is also the ADJUSTED "req" .
288          */
289
290         /* First copy req->result into req->priv.result */
291         ahash_op_unaligned_finish(areq, err);
292
293         /* Complete the ORIGINAL request. */
294         areq->base.complete(&areq->base, err);
295 }
296
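/*
 * Run an operation with the result redirected into an aligned scratch
 * buffer, then copy the digest back to the caller's buffer and restore
 * the original request.
 */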
static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

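/*
 * Dispatch a hash operation, taking the slow path when the caller's
 * result buffer does not satisfy the algorithm's alignment mask.
 */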
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

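/*
 * Second half of the default finup: copy the digest from the aligned
 * scratch buffer back to the caller and restore the original request.
 * A -EINPROGRESS notification leaves the request untouched.
 */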
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	ahash_def_finup_finish2(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	err = ahash_def_finup_finish1(areq, err);

	areq->base.complete(&areq->base, err);
}

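/*
 * Default finup implementation for drivers that only provide update
 * and final: chain the two operations, continuing asynchronously via
 * ahash_def_finup_done1/done2 when either returns -EINPROGRESS.
 */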
static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

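/*
 * Initialize an ahash transform.  Synchronous (shash-backed)
 * algorithms are handled by the shash wrapper; otherwise the hooks
 * are taken from the algorithm, with defaults substituted for any
 * that are missing.
 */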
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->has_setkey = false;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		hash->has_setkey = true;
	}
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH,
		sizeof(struct crypto_report_hash), &rhash);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

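/*
 * Allocate and initialize an asynchronous hash transform by algorithm
 * name.  Returns an ERR_PTR() on failure.
 */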
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

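/*
 * Common sanity checks and type setup performed before registering an
 * ahash algorithm or instance.
 */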
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");