crypto/ahash.c
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
        crypto_completion_t complete;
        void *data;
        u8 *result;
        u32 flags;
        void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
        return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
                            halg);
}

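/*
 * Map the current page of the walk and return how many bytes of it can be
 * hashed in one go: at most up to the end of the page and, if the offset is
 * unaligned, at most up to the next alignment boundary.
 */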
static int hash_walk_next(struct crypto_hash_walk *walk)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int offset = walk->offset;
        unsigned int nbytes = min(walk->entrylen,
                                  ((unsigned int)(PAGE_SIZE)) - offset);

        walk->data = crypto_kmap(walk->pg, 0);
        walk->data += offset;

        if (offset & alignmask) {
                unsigned int unaligned = alignmask + 1 - (offset & alignmask);
                if (nbytes > unaligned)
                        nbytes = unaligned;
        }

        walk->entrylen -= nbytes;
        return nbytes;
}

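/*
 * Start walking a new scatterlist entry: record its page and offset, clamp
 * the entry length to the total bytes still to be processed and map the
 * first chunk.
 */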
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
        struct scatterlist *sg;

        sg = walk->sg;
        walk->offset = sg->offset;
        walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
        walk->offset = offset_in_page(walk->offset);
        walk->entrylen = sg->length;

        if (walk->entrylen > walk->total)
                walk->entrylen = walk->total;
        walk->total -= walk->entrylen;

        return hash_walk_next(walk);
}

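/*
 * Called after the chunk returned by the previous walk step has been hashed
 * (or has failed with @err).  If an unaligned remainder of the current page
 * is still pending, return it as the next chunk; otherwise unmap the page
 * and advance to the following page or scatterlist entry.  Returns the size
 * of the next chunk to hash, 0 once the walk is complete, or a negative
 * error code.
 */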
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int nbytes = walk->entrylen;

        walk->data -= walk->offset;

        if (nbytes && walk->offset & alignmask && !err) {
                walk->offset = ALIGN(walk->offset, alignmask + 1);
                walk->data += walk->offset;

                nbytes = min(nbytes,
                             ((unsigned int)(PAGE_SIZE)) - walk->offset);
                walk->entrylen -= nbytes;

                return nbytes;
        }

        crypto_kunmap(walk->data, 0);
        crypto_yield(walk->flags);

        if (err)
                return err;

        if (nbytes) {
                walk->offset = 0;
                walk->pg++;
                return hash_walk_next(walk);
        }

        if (!walk->total)
                return 0;

        walk->sg = scatterwalk_sg_next(walk->sg);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

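/*
 * Begin a page-by-page walk over the source scatterlist of an ahash request.
 * Returns the size of the first chunk to hash, or 0 if there is no data.
 */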
int crypto_hash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total)
                return 0;

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags;

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
                                  struct crypto_hash_walk *walk,
                                  struct scatterlist *sg, unsigned int len)
{
        walk->total = len;

        if (!walk->total)
                return 0;

        walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
        walk->sg = sg;
        walk->flags = hdesc->flags;

        return hash_walk_new_entry(walk);
}

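/*
 * Slow setkey path for callers whose key buffer does not satisfy the
 * algorithm's alignment mask: copy the key into a temporary aligned buffer,
 * run setkey on the copy, then wipe and free it.
 */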
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
                                  unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = tfm->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}

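/*
 * Set the hash key, taking the unaligned bounce-buffer path when necessary,
 * and clear CRYPTO_TFM_NEED_KEY once a key has been installed successfully.
 */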
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int err;

        if ((unsigned long)key & alignmask)
                err = ahash_setkey_unaligned(tfm, key, keylen);
        else
                err = tfm->setkey(tfm, key, keylen);

        if (err)
                return err;

        crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int keylen)
{
        return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
                                                   unsigned long mask)
{
        return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

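/*
 * Divert the request's result buffer and completion callback to internal,
 * properly aligned replacements.  The caller's originals are stashed in a
 * freshly allocated ahash_request_priv so that ahash_restore_req() can put
 * them back once the operation finishes.
 */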
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        unsigned int ds = crypto_ahash_digestsize(tfm);
        struct ahash_request_priv *priv;

        priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
                       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC);
        if (!priv)
                return -ENOMEM;

        /*
         * WARNING: Voodoo programming below!
         *
         * The code below is obscure and hard to understand, thus explanation
         * is necessary. See include/crypto/hash.h and include/linux/crypto.h
         * to understand the layout of structures used here!
         *
         * The code here will replace portions of the ORIGINAL request with
         * pointers to new code and buffers so the hashing operation can store
         * the result in an aligned buffer. We will call the modified request
         * an ADJUSTED request.
         *
         * The newly mangled request will look as such:
         *
         * req {
         *   .result        = ADJUSTED[new aligned buffer]
         *   .base.complete = ADJUSTED[pointer to completion function]
         *   .base.data     = ADJUSTED[*req (pointer to self)]
         *   .priv          = ADJUSTED[new priv] {
         *           .result   = ORIGINAL(result)
         *           .complete = ORIGINAL(base.complete)
         *           .data     = ORIGINAL(base.data)
         *   }
         * }
         */

        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;
        priv->flags = req->base.flags;

        /*
         * WARNING: We do not back up req->priv here! The req->priv
         *          is for internal use of the Crypto API and the
         *          user must _NOT_ _EVER_ depend on its content!
         */

        req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
        req->base.complete = cplt;
        req->base.data = req;
        req->priv = priv;

        return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
        struct ahash_request_priv *priv = req->priv;

        if (!err)
                memcpy(priv->result, req->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        /* Restore the original crypto request. */
        req->result = priv->result;

        ahash_request_set_callback(req, priv->flags,
                                   priv->complete, priv->data);
        req->priv = NULL;

        /* Free the req->priv.priv from the ADJUSTED request. */
        kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
        struct ahash_request_priv *priv = req->priv;
        struct crypto_async_request oreq;

        oreq.data = priv->data;

        priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        /*
         * Restore the original request, see ahash_op_unaligned() for what
         * goes where.
         *
         * The "struct ahash_request *req" here is in fact the "req.base"
         * from the ADJUSTED request from ahash_op_unaligned(), and since it
         * is a pointer to self, it is also the ADJUSTED "req" itself.
         */

        /* First copy req->result into req->priv.result */
        ahash_restore_req(areq, err);

        /* Complete the ORIGINAL request. */
        areq->base.complete(&areq->base, err);
}

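/*
 * Run @op on a temporarily adjusted request whose result buffer is aligned.
 * The original request is restored here unless the operation is still in
 * flight (-EINPROGRESS, or -EBUSY with backlogging allowed), in which case
 * the completion callback takes care of it.
 */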
static int ahash_op_unaligned(struct ahash_request *req,
                              int (*op)(struct ahash_request *))
{
        int err;

        err = ahash_save_req(req, ahash_op_unaligned_done);
        if (err)
                return err;

        err = op(req);
        if (err == -EINPROGRESS ||
            (err == -EBUSY && (ahash_request_flags(req) &
                               CRYPTO_TFM_REQ_MAY_BACKLOG)))
                return err;

        ahash_restore_req(req, err);

        return err;
}

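/*
 * Dispatch a hash operation, bouncing through an internal aligned result
 * buffer when the caller's result pointer violates the algorithm's
 * alignment mask.
 */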
static int crypto_ahash_op(struct ahash_request *req,
                           int (*op)(struct ahash_request *))
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)req->result & alignmask)
                return ahash_op_unaligned(req, op);

        return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                return -ENOKEY;

        return crypto_ahash_op(req, tfm->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS)
                return;

        ahash_restore_req(areq, err);

        areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
        if (err)
                goto out;

        req->base.complete = ahash_def_finup_done2;

        err = crypto_ahash_reqtfm(req)->final(req);
        if (err == -EINPROGRESS ||
            (err == -EBUSY && (ahash_request_flags(req) &
                               CRYPTO_TFM_REQ_MAY_BACKLOG)))
                return err;

out:
        ahash_restore_req(req, err);
        return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        err = ahash_def_finup_finish1(areq, err);
        if (areq->priv)
                return;

        areq->base.complete(&areq->base, err);
}

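/*
 * Default ->finup() for algorithms that only supply update() and final():
 * run the two steps back to back, using the save/restore machinery above so
 * the caller's request survives both asynchronous completions.
 */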
static int ahash_def_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int err;

        err = ahash_save_req(req, ahash_def_finup_done1);
        if (err)
                return err;

        err = tfm->update(req);
        if (err == -EINPROGRESS ||
            (err == -EBUSY && (ahash_request_flags(req) &
                               CRYPTO_TFM_REQ_MAY_BACKLOG)))
                return err;

        return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
        return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
        return -ENOSYS;
}

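/*
 * Initialise a new ahash transform: install default (failing) setkey,
 * export and import handlers, fall back to the shash wrapper for algorithms
 * that are not native ahash implementations, and mark keyed hashes as
 * unusable until a key is provided.
 */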
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(hash);

        hash->setkey = ahash_nosetkey;
        hash->export = ahash_no_export;
        hash->import = ahash_no_import;

        if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
                return crypto_init_shash_ops_async(tfm);

        hash->init = alg->init;
        hash->update = alg->update;
        hash->final = alg->final;
        hash->finup = alg->finup ?: ahash_def_finup;
        hash->digest = alg->digest;

        if (alg->setkey) {
                hash->setkey = alg->setkey;
                if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
                        crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
        }
        if (alg->export)
                hash->export = alg->export;
        if (alg->import)
                hash->import = alg->import;

        return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type == &crypto_ahash_type)
                return alg->cra_ctxsize;

        return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_hash rhash;

        strncpy(rhash.type, "ahash", sizeof(rhash.type));

        rhash.blocksize = alg->cra_blocksize;
        rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

        NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH,
                sizeof(struct crypto_report_hash), &rhash);

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : ahash\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "digestsize   : %u\n",
                   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
        .extsize = crypto_ahash_extsize,
        .init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
        .show = crypto_ahash_show,
#endif
        .report = crypto_ahash_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
        .type = CRYPTO_ALG_TYPE_AHASH,
        .tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

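/*
 * Common sanity checks shared by algorithm and instance registration:
 * reject out-of-range digest and state sizes and stamp the algorithm as an
 * ahash type before it is registered.
 */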
static int ahash_prepare_alg(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;

        if (alg->halg.digestsize > PAGE_SIZE / 8 ||
            alg->halg.statesize > PAGE_SIZE / 8 ||
            alg->halg.statesize == 0)
                return -EINVAL;

        base->cra_type = &crypto_ahash_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

        return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;
        int err;

        err = ahash_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
        return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
                            struct ahash_instance *inst)
{
        int err;

        err = ahash_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
        crypto_drop_spawn(crypto_instance_ctx(inst));
        kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
                            struct hash_alg_common *alg,
                            struct crypto_instance *inst)
{
        return crypto_init_spawn2(&spawn->base, &alg->base, inst,
                                  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
        return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
        struct crypto_alg *alg = &halg->base;

        if (alg->cra_type != &crypto_ahash_type)
                return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

        return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");