crypto: hash - Fix the pointer voodoo in unaligned ahash
crypto/ahash.c (pandora-kernel.git)
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

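/*
 * Private bookkeeping attached to a request whose result buffer is not
 * aligned for the algorithm: the ORIGINAL completion callback, callback
 * data and result pointer are stashed here while the request is pointed
 * at the aligned scratch buffer ubuf[].
 */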
struct ahash_request_priv {
        crypto_completion_t complete;
        void *data;
        u8 *result;
        void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
        return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
                            halg);
}

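/*
 * Map the walk's current page and return the number of bytes that may be
 * hashed from it.  When the offset is unaligned, the chunk is clamped so
 * that the following chunk starts on an alignmask boundary.
 */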
static int hash_walk_next(struct crypto_hash_walk *walk)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int offset = walk->offset;
        unsigned int nbytes = min(walk->entrylen,
                                  ((unsigned int)(PAGE_SIZE)) - offset);

        walk->data = crypto_kmap(walk->pg, 0);
        walk->data += offset;

        if (offset & alignmask) {
                unsigned int unaligned = alignmask + 1 - (offset & alignmask);

                if (nbytes > unaligned)
                        nbytes = unaligned;
        }

        walk->entrylen -= nbytes;
        return nbytes;
}

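/*
 * Start a new scatterlist entry: find the entry's page and in-page
 * offset and clamp its length to the bytes left in the walk.
 */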
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
        struct scatterlist *sg;

        sg = walk->sg;
        walk->offset = sg->offset;
        walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
        walk->offset = offset_in_page(walk->offset);
        walk->entrylen = sg->length;

        if (walk->entrylen > walk->total)
                walk->entrylen = walk->total;
        walk->total -= walk->entrylen;

        return hash_walk_next(walk);
}

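/*
 * Finish the current chunk.  On an unaligned offset the walk stays on
 * the same mapped page and hands back the next, now aligned, chunk;
 * otherwise the page is unmapped and the walk advances to the next page
 * or scatterlist entry.  Returns the size of the next chunk, 0 when the
 * walk is complete, or a negative errno.
 */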
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int nbytes = walk->entrylen;

        walk->data -= walk->offset;

        if (nbytes && walk->offset & alignmask && !err) {
                walk->offset = ALIGN(walk->offset, alignmask + 1);
                walk->data += walk->offset;

                nbytes = min(nbytes,
                             ((unsigned int)(PAGE_SIZE)) - walk->offset);
                walk->entrylen -= nbytes;

                return nbytes;
        }

        crypto_kunmap(walk->data, 0);
        crypto_yield(walk->flags);

        if (err)
                return err;

        if (nbytes) {
                walk->offset = 0;
                walk->pg++;
                return hash_walk_next(walk);
        }

        if (!walk->total)
                return 0;

        walk->sg = scatterwalk_sg_next(walk->sg);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

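/*
 * Start walking the data described by req->src.  A driver typically
 * consumes the walk with a loop of the following shape (a sketch;
 * process_chunk() stands in for the driver's own block function):
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, 0))
 *		process_chunk(walk.data, nbytes);
 */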
int crypto_hash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total)
                return 0;

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags;

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
                                  struct crypto_hash_walk *walk,
                                  struct scatterlist *sg, unsigned int len)
{
        walk->total = len;

        if (!walk->total)
                return 0;

        walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
        walk->sg = sg;
        walk->flags = hdesc->flags;

        return hash_walk_new_entry(walk);
}

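/*
 * setkey() via a bounce buffer: copy the key into a freshly allocated,
 * suitably aligned buffer before handing it to the algorithm, and zero
 * the copy on free.
 */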
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
                                unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = tfm->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)key & alignmask)
                return ahash_setkey_unaligned(tfm, key, keylen);

        return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int keylen)
{
        return -ENOSYS;
}

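/*
 * kmalloc() already returns memory aligned to crypto_tfm_ctx_alignment(),
 * so only the part of the alignmask beyond that needs extra room.
 */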
static inline unsigned int ahash_align_buffer_size(unsigned len,
                                                   unsigned long mask)
{
        return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
        struct ahash_request_priv *priv = req->priv;

        if (err == -EINPROGRESS)
                return;

        if (!err)
                memcpy(priv->result, req->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        /* Restore the original crypto request. */
        req->result = priv->result;
        req->base.complete = priv->complete;
        req->base.data = priv->data;
        req->priv = NULL;

        /* Free the priv stash allocated for the ADJUSTED request. */
        kzfree(priv);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        /*
         * Restore the original request; see ahash_op_unaligned() for what
         * goes where.
         *
         * The "struct crypto_async_request *req" here is in fact the
         * "req.base" of the ADJUSTED request from ahash_op_unaligned().
         * Since its ->data member was set to point back at the enclosing
         * request, areq is the ADJUSTED "req".
         */

        /* First copy areq->result into the stashed areq->priv->result. */
        ahash_op_unaligned_finish(areq, err);

        /* Complete the ORIGINAL request. */
        areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
                              int (*op)(struct ahash_request *))
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        unsigned int ds = crypto_ahash_digestsize(tfm);
        struct ahash_request_priv *priv;
        int err;

        priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
                       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC);
        if (!priv)
                return -ENOMEM;

        /*
         * WARNING: Voodoo programming below!
         *
         * The code below is obscure and hard to understand, thus an
         * explanation is necessary. See include/crypto/hash.h and
         * include/linux/crypto.h to understand the layout of structures
         * used here!
         *
         * The code here will replace portions of the ORIGINAL request with
         * pointers to new code and buffers so the hashing operation can
         * store the result in an aligned buffer. We will call the modified
         * request an ADJUSTED request.
         *
         * The newly mangled request will look like this:
         *
         * req {
         *   .result        = ADJUSTED[new aligned buffer]
         *   .base.complete = ADJUSTED[pointer to completion function]
         *   .base.data     = ADJUSTED[*req (pointer to self)]
         *   .priv          = ADJUSTED[new priv] {
         *           .result   = ORIGINAL(result)
         *           .complete = ORIGINAL(base.complete)
         *           .data     = ORIGINAL(base.data)
         *   }
         * }
         */

        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;
        /*
         * WARNING: We do not back up req->priv here! The req->priv
         *          is for internal use of the Crypto API and the
         *          user must _NOT_ _EVER_ depend on its content!
         */

        req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
        req->base.complete = ahash_op_unaligned_done;
        req->base.data = req;
        req->priv = priv;

        err = op(req);
        ahash_op_unaligned_finish(req, err);

        return err;
}

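/*
 * Run a single hash operation, detouring through ahash_op_unaligned()
 * when the caller's result buffer does not satisfy the algorithm's
 * alignmask.
 */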
static int crypto_ahash_op(struct ahash_request *req,
                           int (*op)(struct ahash_request *))
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)req->result & alignmask)
                return ahash_op_unaligned(req, op);

        return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
        struct ahash_request_priv *priv = req->priv;

        if (err == -EINPROGRESS)
                return;

        if (!err)
                memcpy(priv->result, req->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        kzfree(priv);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;
        struct ahash_request_priv *priv = areq->priv;
        crypto_completion_t complete = priv->complete;
        void *data = priv->data;

        ahash_def_finup_finish2(areq, err);

        complete(data, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
        if (err)
                goto out;

        req->base.complete = ahash_def_finup_done2;
        req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
        err = crypto_ahash_reqtfm(req)->final(req);

out:
        ahash_def_finup_finish2(req, err);
        return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;
        struct ahash_request_priv *priv = areq->priv;
        crypto_completion_t complete = priv->complete;
        void *data = priv->data;

        err = ahash_def_finup_finish1(areq, err);

        complete(data, err);
}

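/*
 * Default finup() for algorithms that only provide update() and final():
 * run update(), then final(), preserving the caller's completion callback
 * and result buffer across both steps with the same priv stash used by
 * ahash_op_unaligned().  The done1/done2 callbacks chain the two steps
 * when they complete asynchronously.
 */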
static int ahash_def_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        unsigned int ds = crypto_ahash_digestsize(tfm);
        struct ahash_request_priv *priv;

        priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
                       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC);
        if (!priv)
                return -ENOMEM;

        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;

        req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
        req->base.complete = ahash_def_finup_done1;
        req->base.data = req;
        req->priv = priv;

        return ahash_def_finup_finish1(req, tfm->update(req));
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
        return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
        return -ENOSYS;
}

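/*
 * Wire up the per-transform operations.  Algorithms registered through
 * the legacy (non-ahash) interface are routed through the shash wrapper;
 * optional hooks default to stubs that return -ENOSYS.
 */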
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(hash);

        hash->setkey = ahash_nosetkey;
        hash->has_setkey = false;
        hash->export = ahash_no_export;
        hash->import = ahash_no_import;

        if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
                return crypto_init_shash_ops_async(tfm);

        hash->init = alg->init;
        hash->update = alg->update;
        hash->final = alg->final;
        hash->finup = alg->finup ?: ahash_def_finup;
        hash->digest = alg->digest;

        if (alg->setkey) {
                hash->setkey = alg->setkey;
                hash->has_setkey = true;
        }
        if (alg->export)
                hash->export = alg->export;
        if (alg->import)
                hash->import = alg->import;

        return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type == &crypto_ahash_type)
                return alg->cra_ctxsize;

        return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_hash rhash;

        strncpy(rhash.type, "ahash", sizeof(rhash.type));

        rhash.blocksize = alg->cra_blocksize;
        rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

        NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH,
                sizeof(struct crypto_report_hash), &rhash);

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : ahash\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "digestsize   : %u\n",
                   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
        .extsize = crypto_ahash_extsize,
        .init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
        .show = crypto_ahash_show,
#endif
        .report = crypto_ahash_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
        .type = CRYPTO_ALG_TYPE_AHASH,
        .tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

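/*
 * Sanity-check an algorithm before registration: the digest and the
 * exported state must not exceed PAGE_SIZE / 8, and a statesize of zero
 * is rejected.
 */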
static int ahash_prepare_alg(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;

        if (alg->halg.digestsize > PAGE_SIZE / 8 ||
            alg->halg.statesize > PAGE_SIZE / 8 ||
            alg->halg.statesize == 0)
                return -EINVAL;

        base->cra_type = &crypto_ahash_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

        return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;
        int err;

        err = ahash_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
        return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
                            struct ahash_instance *inst)
{
        int err;

        err = ahash_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
        crypto_drop_spawn(crypto_instance_ctx(inst));
        kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
                            struct hash_alg_common *alg,
                            struct crypto_instance *inst)
{
        return crypto_init_spawn2(&spawn->base, &alg->base, inst,
                                  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
        return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");