2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
16 #include <linux/bio.h>
17 #include <linux/crypto.h>
18 #include <linux/dst.h>
19 #include <linux/kernel.h>
20 #include <linux/scatterlist.h>
21 #include <linux/slab.h>
24 * Tricky bastard, but IV can be more complex with time...
/*
 * Produce a 64-bit IV for one transaction.
 * NOTE(review): body elided in this excerpt — presumably derived from the
 * transaction's generation counter (see t->gen used elsewhere); confirm
 * against the full source.
 */
26 static inline u64 dst_gen_iv(struct dst_trans *t)
32 * Crypto machinery: hash/cipher support for the given crypto controls.
/*
 * Allocate a hash transform named by ctl->hash_algo and, when a key size is
 * configured, key it with @key. On success the digest size is recorded in
 * ctl->crypto_attached_size. Returns the transform; error paths free it
 * (crypto_free_hash below) — presumably returning ERR_PTR(err), as callers
 * test the result with IS_ERR(). Interior lines elided in this excerpt.
 */
34 static struct crypto_hash *dst_init_hash(struct dst_crypto_ctl *ctl, u8 *key)
37 struct crypto_hash *hash;
/* ASYNC algorithms are masked out: this path uses the synchronous hash API. */
39 hash = crypto_alloc_hash(ctl->hash_algo, 0, CRYPTO_ALG_ASYNC);
42 dprintk("%s: failed to allocate hash '%s', err: %d.\n",
43 __func__, ctl->hash_algo, err);
/* Remember how many digest bytes travel with each command on the wire. */
47 ctl->crypto_attached_size = crypto_hash_digestsize(hash);
/* Unkeyed hash configured — nothing more to do (branch body elided). */
49 if (!ctl->hash_keysize)
52 err = crypto_hash_setkey(hash, key, ctl->hash_keysize);
54 dprintk("%s: failed to set key for hash '%s', err: %d.\n",
55 __func__, ctl->hash_algo, err);
/* Error unwind: release the transform allocated above. */
62 crypto_free_hash(hash);
/*
 * Allocate and key the async block cipher named by ctl->cipher_algo.
 * A zero cipher_keysize is treated as "no cipher" (early-out branch, body
 * elided). Callers check the result with IS_ERR(), so failures presumably
 * return ERR_PTR(err). Interior lines elided in this excerpt.
 */
67 static struct crypto_ablkcipher *dst_init_cipher(struct dst_crypto_ctl *ctl,
71 struct crypto_ablkcipher *cipher;
73 if (!ctl->cipher_keysize)
76 cipher = crypto_alloc_ablkcipher(ctl->cipher_algo, 0, 0);
78 err = PTR_ERR(cipher);
79 dprintk("%s: failed to allocate cipher '%s', err: %d.\n",
80 __func__, ctl->cipher_algo, err);
/* Reset all transform flags before keying. */
84 crypto_ablkcipher_clear_flags(cipher, ~0);
86 err = crypto_ablkcipher_setkey(cipher, key, ctl->cipher_keysize);
88 dprintk("%s: failed to set key for cipher '%s', err: %d.\n",
89 __func__, ctl->cipher_algo, err);
/* Error unwind: release the cipher allocated above. */
96 crypto_free_ablkcipher(cipher);
102 * Crypto engine has a pool of pages to encrypt data into before sending
103 * it over the network. This pool is freed/allocated here.
/*
 * Release the engine's temporary page pool: every page up to e->page_num.
 * Counterpart of dst_crypto_pages_alloc(). The kfree of the e->pages array
 * itself is presumably in the elided tail — confirm against the full source.
 */
105 static void dst_crypto_pages_free(struct dst_crypto_engine *e)
109 for (i = 0; i < e->page_num; ++i)
110 __free_page(e->pages[i]);
/*
 * Allocate a pool of @num pages used as temporary storage during
 * encryption before data is sent over the network.
 * NOTE(review): sizeof(struct page **) should read sizeof(struct page *) —
 * both are pointer-sized so behavior is unaffected, but the type is
 * misleading; also num * sizeof(...) is unchecked for overflow (num comes
 * from node config, presumably small).
 * The partial-failure unwind frees the pages allocated so far (__free_page
 * below); intervening lines elided in this excerpt.
 */
114 static int dst_crypto_pages_alloc(struct dst_crypto_engine *e, int num)
118 e->pages = kmalloc(num * sizeof(struct page **), GFP_KERNEL);
122 for (i = 0; i < num; ++i) {
123 e->pages[i] = alloc_page(GFP_KERNEL);
125 goto err_out_free_pages;
/* Unwind path: free the pages successfully allocated before the failure. */
133 __free_page(e->pages[i]);
140 * Initialize crypto engine for given node.
141 * Setup cipher/hash, keys, pool of threads and private data.
/*
 * Set up one crypto engine for node @n: page pool, scratch buffer (e->data),
 * then optional hash and cipher transforms — each enabled only when its
 * algorithm name in the node's control block is non-empty. Unwinds in
 * reverse order on failure (free hash, free pages). Interior lines elided
 * in this excerpt.
 */
143 static int dst_crypto_engine_init(struct dst_crypto_engine *e,
147 struct dst_crypto_ctl *ctl = &n->crypto;
149 err = dst_crypto_pages_alloc(e, n->max_pages);
/* Scratch area; e->size is presumably set in the elided lines above. */
154 e->data = kmalloc(e->size, GFP_KERNEL);
157 goto err_out_free_pages;
/* Hash is optional: empty algo string means no integrity checking. */
160 if (ctl->hash_algo[0]) {
161 e->hash = dst_init_hash(ctl, n->hash_key);
162 if (IS_ERR(e->hash)) {
163 err = PTR_ERR(e->hash);
/* Cipher likewise optional. */
169 if (ctl->cipher_algo[0]) {
170 e->cipher = dst_init_cipher(ctl, n->cipher_key);
171 if (IS_ERR(e->cipher)) {
172 err = PTR_ERR(e->cipher);
174 goto err_out_free_hash;
/* Error unwind labels (elided) in reverse acquisition order. */
181 crypto_free_hash(e->hash);
185 dst_crypto_pages_free(e);
/*
 * Tear down an engine: free hash and cipher transforms and the page pool.
 * Presumably guarded by the same hash/cipher-present checks as init
 * (guard lines elided) — confirm against the full source.
 */
190 static void dst_crypto_engine_exit(struct dst_crypto_engine *e)
193 crypto_free_hash(e->hash);
195 crypto_free_ablkcipher(e->cipher);
196 dst_crypto_pages_free(e);
201 * Waiting for cipher processing to be completed.
/*
 * Completion cookie passed to the async cipher callback: the waiter blocks
 * on .complete; an error field (elided here, see c.error use below) carries
 * the result back from dst_crypto_complete().
 */
203 struct dst_crypto_completion {
204 struct completion complete;
/*
 * Async cipher completion callback. -EINPROGRESS means the request was
 * moved from the backlog and is still running — do not complete yet
 * (early-return body elided). Otherwise record the result (elided) and
 * wake the waiter in dst_crypto_process().
 */
208 static void dst_crypto_complete(struct crypto_async_request *req, int err)
210 struct dst_crypto_completion *c = req->data;
212 if (err == -EINPROGRESS)
215 dprintk("%s: req: %p, err: %d.\n", __func__, req, err);
217 complete(&c->complete);
/*
 * Submit one encrypt/decrypt request (@enc selects direction) over a single
 * src/dst scatterlist pair and wait for async completion, bounded by
 * @timeout. CRYPTO_TFM_REQ_MAY_BACKLOG lets the request queue instead of
 * failing with -EBUSY; -EINPROGRESS/-EBUSY handling and the conversion of
 * the interruptible-timeout wait result to an error code are in the elided
 * lines — the wait returns 0 on timeout and negative on signal, both of
 * which must be mapped to errors.
 */
220 static int dst_crypto_process(struct ablkcipher_request *req,
221 struct scatterlist *sg_dst, struct scatterlist *sg_src,
222 void *iv, int enc, unsigned long timeout)
224 struct dst_crypto_completion c;
227 init_completion(&c.complete);
228 c.error = -EINPROGRESS;
230 ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
231 dst_crypto_complete, &c);
/* Length taken from the source scatterlist entry. */
233 ablkcipher_request_set_crypt(req, sg_src, sg_dst, sg_src->length, iv);
236 err = crypto_ablkcipher_encrypt(req);
238 err = crypto_ablkcipher_decrypt(req);
243 err = wait_for_completion_interruptible_timeout(&c.complete,
258 * DST uses generic iteration approach for data crypto processing.
259 * Single block IO request is switched into array of scatterlists,
260 * which are submitted to the crypto processing iterator.
262 * Input and output iterator initialization are different, since
263 * in output case we can not encrypt data in-place and need a
264 * temporary storage, which is then being sent to the remote peer.
/*
 * Output-direction iterator: walk the bio's segments, mapping each source
 * segment to its own page and each destination segment to the engine's
 * temporary page pool — encryption cannot happen in place when the result
 * is to be sent to the remote peer. @iterator is invoked per segment pair;
 * its error handling is in the elided lines.
 */
266 static int dst_trans_iter_out(struct bio *bio, struct dst_crypto_engine *e,
267 int (*iterator) (struct dst_crypto_engine *e,
268 struct scatterlist *dst,
269 struct scatterlist *src))
274 sg_init_table(e->src, bio->bi_vcnt);
275 sg_init_table(e->dst, bio->bi_vcnt);
277 bio_for_each_segment(bv, bio, i) {
278 sg_set_page(&e->src[i], bv->bv_page, bv->bv_len, bv->bv_offset);
/* Destination is the engine's pool page, same length/offset as the source. */
279 sg_set_page(&e->dst[i], e->pages[i], bv->bv_len, bv->bv_offset);
281 err = iterator(e, &e->dst[i], &e->src[i]);
/*
 * Input-direction iterator: identical to dst_trans_iter_out() except that
 * src and dst map the SAME bio page per segment — received data is
 * processed (hashed/decrypted) in place. Per-segment error handling is in
 * the elided lines.
 */
289 static int dst_trans_iter_in(struct bio *bio, struct dst_crypto_engine *e,
290 int (*iterator) (struct dst_crypto_engine *e,
291 struct scatterlist *dst,
292 struct scatterlist *src))
297 sg_init_table(e->src, bio->bi_vcnt);
298 sg_init_table(e->dst, bio->bi_vcnt);
300 bio_for_each_segment(bv, bio, i) {
301 sg_set_page(&e->src[i], bv->bv_page, bv->bv_len, bv->bv_offset);
/* In-place: destination aliases the source page. */
302 sg_set_page(&e->dst[i], bv->bv_page, bv->bv_len, bv->bv_offset);
304 err = iterator(e, &e->dst[i], &e->src[i]);
/*
 * Per-segment cipher step: build an IV buffer (a local array, declared in
 * an elided line) zero-filled then seeded with the engine's 64-bit
 * transaction IV, and hand the segment pair to dst_crypto_process() with
 * the engine's direction and timeout.
 */
312 static int dst_crypt_iterator(struct dst_crypto_engine *e,
313 struct scatterlist *sg_dst, struct scatterlist *sg_src)
315 struct ablkcipher_request *req = e->data;
318 memset(iv, 0, sizeof(iv));
/* Low bytes of the IV come from the per-transaction value; rest stay zero. */
320 memcpy(iv, &e->iv, sizeof(e->iv));
322 return dst_crypto_process(req, sg_dst, sg_src, iv, e->enc, e->timeout);
325 static int dst_crypt(struct dst_crypto_engine *e, struct bio *bio)
327 struct ablkcipher_request *req = e->data;
329 memset(req, 0, sizeof(struct ablkcipher_request));
330 ablkcipher_request_set_tfm(req, e->cipher);
333 return dst_trans_iter_out(bio, e, dst_crypt_iterator);
335 return dst_trans_iter_in(bio, e, dst_crypt_iterator);
/*
 * Per-segment hash step: fold the source segment into the running digest.
 * e->data holds the hash_desc here (see dst_hash()); sg_dst is unused
 * since hashing never writes data back.
 */
338 static int dst_hash_iterator(struct dst_crypto_engine *e,
339 struct scatterlist *sg_dst, struct scatterlist *sg_src)
341 return crypto_hash_update(e->data, sg_src, sg_src->length);
/*
 * Compute the digest of a whole bio into @dst: standard init → per-segment
 * update (via dst_trans_iter_in, which walks segments in place) → final.
 * desc->tfm/flags setup and the error checks between steps are elided in
 * this excerpt.
 */
344 static int dst_hash(struct dst_crypto_engine *e, struct bio *bio, void *dst)
346 struct hash_desc *desc = e->data;
352 err = crypto_hash_init(desc);
356 err = dst_trans_iter_in(bio, e, dst_hash_iterator);
360 err = crypto_hash_final(desc, dst);
368 * Initialize/cleanup a crypto thread. The only thing it should
369 * do is to allocate a pool of pages as temporary storage.
370 * And to setup cipher and/or hash.
/*
 * Thread-pool worker constructor: allocate a private crypto engine for the
 * worker. A single kcalloc of 2 * max_pages scatterlist entries backs both
 * e->src and e->dst (dst points at the second half), then the engine is
 * fully initialized via dst_crypto_engine_init(). Returns the engine
 * (presumably ERR_PTR on failure — unwind labels elided).
 */
372 static void *dst_crypto_thread_init(void *data)
374 struct dst_node *n = data;
375 struct dst_crypto_engine *e;
378 e = kzalloc(sizeof(struct dst_crypto_engine), GFP_KERNEL);
381 e->src = kcalloc(2 * n->max_pages, sizeof(struct scatterlist),
/* dst table is the second half of the same allocation. */
386 e->dst = e->src + n->max_pages;
388 err = dst_crypto_engine_init(e, n);
390 goto err_out_free_all;
/*
 * Thread-pool worker destructor: tear down the engine created by
 * dst_crypto_thread_init(). Freeing of e->src and e itself is presumably
 * in the elided tail — confirm against the full source.
 */
402 static void dst_crypto_thread_cleanup(void *private)
404 struct dst_crypto_engine *e = private;
406 dst_crypto_engine_exit(e);
412 * Initialize crypto engine for given node: store keys, create pool
413 * of threads, initialize each one.
415 * Each thread has unique ID, but 0 and 1 are reserved for receiving and
416 * accepting threads (if export node), so IDs could start from 2, but starting
417 * them from 10 allows easily understand what this thread is for.
/*
 * Node-level crypto setup: copy hash/cipher keys (which travel immediately
 * after the control structure — hence key = ctl + 1, with the cipher key
 * presumably following the hash key), stash the control block in the node,
 * and spawn ctl->thread_num crypto workers with IDs starting at 10 (0/1
 * are reserved for the receiving/accepting threads). On worker-creation
 * failure, already-created workers are deleted by ID and both keys freed;
 * the keysize fields are zeroed so later teardown does not double-free.
 * Interior lines elided in this excerpt.
 */
419 int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl)
421 void *key = (ctl + 1);
422 int err = -ENOMEM, i;
425 if (ctl->hash_keysize) {
426 n->hash_key = kmalloc(ctl->hash_keysize, GFP_KERNEL);
429 memcpy(n->hash_key, key, ctl->hash_keysize);
432 if (ctl->cipher_keysize) {
433 n->cipher_key = kmalloc(ctl->cipher_keysize, GFP_KERNEL);
435 goto err_out_free_hash;
436 memcpy(n->cipher_key, key, ctl->cipher_keysize);
/* Keep a private copy of the crypto controls in the node. */
438 memcpy(&n->crypto, ctl, sizeof(struct dst_crypto_ctl));
440 for (i = 0; i < ctl->thread_num; ++i) {
441 snprintf(name, sizeof(name), "%s-crypto-%d", n->name, i);
443 err = thread_pool_add_worker(n->pool, name, i + 10,
444 dst_crypto_thread_init, dst_crypto_thread_cleanup, n);
446 goto err_out_free_threads;
/* Unwind: remove the workers added before the failure (same ID scheme). */
451 err_out_free_threads:
453 thread_pool_del_worker_id(n->pool, i+10);
455 if (ctl->cipher_keysize)
456 kfree(n->cipher_key);
457 ctl->cipher_keysize = 0;
459 if (ctl->hash_keysize)
461 ctl->hash_keysize = 0;
/*
 * Node-level crypto teardown: if any crypto was configured (non-empty
 * cipher or hash algorithm name), free the stored keys. The kfree of
 * n->hash_key is presumably in an elided line alongside the cipher key.
 */
466 void dst_node_crypto_exit(struct dst_node *n)
468 struct dst_crypto_ctl *ctl = &n->crypto;
470 if (ctl->cipher_algo[0] || ctl->hash_algo[0]) {
472 kfree(n->cipher_key);
477 * Thread pool setup callback. Just stores a transaction in private data.
/*
 * Per-job setup hook for thread_pool_schedule(): stash the transaction in
 * the engine's private data (assignment elided in this excerpt) so
 * dst_trans_crypto_action() can retrieve it.
 */
479 static int dst_trans_crypto_setup(void *crypto_engine, void *trans)
481 struct dst_crypto_engine *e = crypto_engine;
/*
 * Debug helper: hex-dump every byte of every segment of a bio.
 * NOTE(review): the inner byte loop reuses the segment index `i` from
 * bio_for_each_segment, so after the first segment the outer iteration is
 * corrupted — harmless only because all call sites are commented out.
 * kunmap of bv->bv_page is presumably in the elided tail of the loop.
 */
488 static void dst_dump_bio(struct bio *bio)
494 bio_for_each_segment(bv, bio, i) {
495 dprintk("%s: %llu/%u: size: %u, offset: %u, data: ",
496 __func__, bio->bi_sector, bio->bi_size,
497 bv->bv_len, bv->bv_offset);
499 p = kmap(bv->bv_page) + bv->bv_offset;
500 for (i = 0; i < bv->bv_len; ++i)
501 printk(KERN_DEBUG "%02x ", p[i]);
509 * Encrypt/hash data and send it to the network.
/*
 * Outbound path: encrypt the bio (if a cipher is configured — guards
 * elided), then hash it into @hash for the remote peer to verify. The
 * CONFIG_DST_DEBUG section prints sector/size/direction and the digest
 * bytes. Error checks between steps are elided in this excerpt.
 */
511 static int dst_crypto_process_sending(struct dst_crypto_engine *e,
512 struct bio *bio, u8 *hash)
517 err = dst_crypt(e, bio);
523 err = dst_hash(e, bio, hash);
527 #ifdef CONFIG_DST_DEBUG
531 /* dst_dump_bio(bio); */
533 printk(KERN_DEBUG "%s: bio: %llu/%u, rw: %lu, hash: ",
534 __func__, (u64)bio->bi_sector,
535 bio->bi_size, bio_data_dir(bio));
536 for (i = 0; i < crypto_hash_digestsize(e->hash); ++i)
537 printk("%02x ", hash[i]);
550 * Check if received data is valid. Decipher if it is.
/*
 * Inbound path: recompute the digest of the received bio into @hash and
 * compare against @recv_hash from the wire; a mismatch is presumably
 * turned into an error in elided lines. Only then is the payload
 * deciphered (dst_crypt at the bottom). The debug section prints both
 * digests byte-by-byte when they differ.
 * NOTE(review): memcmp is not constant-time; acceptable here only if the
 * transport threat model tolerates timing side channels — flagging, not
 * changing.
 */
552 static int dst_crypto_process_receiving(struct dst_crypto_engine *e,
553 struct bio *bio, u8 *hash, u8 *recv_hash)
560 err = dst_hash(e, bio, hash);
564 mismatch = !!memcmp(recv_hash, hash,
565 crypto_hash_digestsize(e->hash));
566 #ifdef CONFIG_DST_DEBUG
567 /* dst_dump_bio(bio); */
569 printk(KERN_DEBUG "%s: bio: %llu/%u, rw: %lu, hash mismatch: %d",
570 __func__, (u64)bio->bi_sector, bio->bi_size,
571 bio_data_dir(bio), mismatch);
575 printk(", recv/calc: ");
576 for (i = 0; i < crypto_hash_digestsize(e->hash); ++i)
577 printk("%02x/%02x ", recv_hash[i], hash[i]);
/* Integrity verified — decrypt in place. */
588 err = dst_crypt(e, bio);
600 * Thread pool callback to encrypt data and send it to the network.
/*
 * Thread-pool action for the local (client) side of a transaction.
 * WRITE: encrypt+hash the bio, attach the digest to the command header
 * (csize added to the wire size) and send the transaction.
 * READ (else branch): verify+decrypt the received data; the scratch area's
 * second half holds the computed digest. Setup of e->enc/e->timeout and
 * error propagation are elided in this excerpt.
 */
602 static int dst_trans_crypto_action(void *crypto_engine, void *schedule_data)
604 struct dst_crypto_engine *e = crypto_engine;
605 struct dst_trans *t = schedule_data;
606 struct bio *bio = t->bio;
609 dprintk("%s: t: %p, gen: %llu, cipher: %p, hash: %p.\n",
610 __func__, t, t->gen, e->cipher, e->hash);
/* Fresh per-transaction IV. */
613 e->iv = dst_gen_iv(t);
615 if (bio_data_dir(bio) == WRITE) {
616 err = dst_crypto_process_sending(e, bio, t->cmd.hash);
/* Account for the digest bytes that travel with the command. */
621 t->cmd.csize = crypto_hash_digestsize(e->hash);
622 t->cmd.size += t->cmd.csize;
625 return dst_trans_send(t);
/* READ completion: scratch digest goes in the upper half of e->data. */
627 u8 *hash = e->data + e->size/2;
629 err = dst_crypto_process_receiving(e, bio, hash, t->cmd.hash);
646 * Schedule crypto processing for given transaction.
/*
 * Queue crypto processing of transaction @t on the node's thread pool;
 * setup/action callbacks above do the actual work. Error handling of the
 * schedule call is elided in this excerpt.
 */
648 int dst_trans_crypto(struct dst_trans *t)
650 struct dst_node *n = t->n;
653 err = thread_pool_schedule(n->pool,
654 dst_trans_crypto_setup, dst_trans_crypto_action,
655 t, MAX_SCHEDULE_TIMEOUT);
667 * Crypto machinery for the export node.
/*
 * Export-node per-job setup: stash the bio in the engine's private data
 * (assignment elided) for dst_export_crypto_action().
 */
669 static int dst_export_crypto_setup(void *crypto_engine, void *bio)
671 struct dst_crypto_engine *e = crypto_engine;
/*
 * Thread-pool action for the export (server) side. Direction is mirrored
 * relative to the client: on WRITE the node RECEIVED encrypted data, so it
 * verifies+decrypts and then submits the bio to the local block device;
 * on READ it encrypts+hashes local data and sends it back, adding the
 * digest size to the reply header. e->enc is set so the cipher direction
 * matches (encrypt when serving a READ). IV setup and error paths are in
 * the elided lines.
 */
677 static int dst_export_crypto_action(void *crypto_engine, void *schedule_data)
679 struct dst_crypto_engine *e = crypto_engine;
680 struct bio *bio = schedule_data;
681 struct dst_export_priv *p = bio->bi_private;
684 dprintk("%s: e: %p, data: %p, bio: %llu/%u, dir: %lu.\n",
685 __func__, e, e->data, (u64)bio->bi_sector,
686 bio->bi_size, bio_data_dir(bio));
/* Server encrypts when the client is reading. */
688 e->enc = (bio_data_dir(bio) == READ);
691 if (bio_data_dir(bio) == WRITE) {
692 u8 *hash = e->data + e->size/2;
694 err = dst_crypto_process_receiving(e, bio, hash, p->cmd.hash);
/* Verified and decrypted — hand the bio to the local block layer. */
698 generic_make_request(bio);
700 err = dst_crypto_process_sending(e, bio, p->cmd.hash);
705 p->cmd.csize = crypto_hash_digestsize(e->hash);
706 p->cmd.size += p->cmd.csize;
709 err = dst_export_send_bio(bio);
/*
 * Queue crypto processing of a bio on the export node's thread pool;
 * mirrors dst_trans_crypto() but schedules the export-side callbacks.
 * Error handling of the schedule call is elided in this excerpt.
 */
718 int dst_export_crypto(struct dst_node *n, struct bio *bio)
722 err = thread_pool_schedule(n->pool,
723 dst_export_crypto_setup, dst_export_crypto_action,
724 bio, MAX_SCHEDULE_TIMEOUT);