/*
 * Cryptographic API.
 *
 * Support for Samsung S5PV210 HW acceleration.
 *
 * Copyright (C) 2011 NetUP Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>

#define _SBF(s, v)                      ((v) << (s))
#define _BIT(b)                         _SBF(b, 1)

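/*
 * _SBF(s, v) shifts the field value v to bit position s; _BIT(b) builds
 * a single-bit mask. All register layouts below are expressed with them.
 */
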
/* Feed control registers */
#define SSS_REG_FCINTSTAT               0x0000
#define SSS_FCINTSTAT_BRDMAINT          _BIT(3)
#define SSS_FCINTSTAT_BTDMAINT          _BIT(2)
#define SSS_FCINTSTAT_HRDMAINT          _BIT(1)
#define SSS_FCINTSTAT_PKDMAINT          _BIT(0)

#define SSS_REG_FCINTENSET              0x0004
#define SSS_FCINTENSET_BRDMAINTENSET    _BIT(3)
#define SSS_FCINTENSET_BTDMAINTENSET    _BIT(2)
#define SSS_FCINTENSET_HRDMAINTENSET    _BIT(1)
#define SSS_FCINTENSET_PKDMAINTENSET    _BIT(0)

#define SSS_REG_FCINTENCLR              0x0008
#define SSS_FCINTENCLR_BRDMAINTENCLR    _BIT(3)
#define SSS_FCINTENCLR_BTDMAINTENCLR    _BIT(2)
#define SSS_FCINTENCLR_HRDMAINTENCLR    _BIT(1)
#define SSS_FCINTENCLR_PKDMAINTENCLR    _BIT(0)

#define SSS_REG_FCINTPEND               0x000C
#define SSS_FCINTPEND_BRDMAINTP         _BIT(3)
#define SSS_FCINTPEND_BTDMAINTP         _BIT(2)
#define SSS_FCINTPEND_HRDMAINTP         _BIT(1)
#define SSS_FCINTPEND_PKDMAINTP         _BIT(0)

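/*
 * Channel naming, as suggested by the bit names: BRDMA = block cipher
 * receive (engine input), BTDMA = block cipher transmit (engine output),
 * HRDMA = hash receive, PKDMA = presumably the public-key accelerator
 * channel. This driver only ever enables the BRDMA and BTDMA interrupts.
 */
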
#define SSS_REG_FCFIFOSTAT              0x0010
#define SSS_FCFIFOSTAT_BRFIFOFUL        _BIT(7)
#define SSS_FCFIFOSTAT_BRFIFOEMP        _BIT(6)
#define SSS_FCFIFOSTAT_BTFIFOFUL        _BIT(5)
#define SSS_FCFIFOSTAT_BTFIFOEMP        _BIT(4)
#define SSS_FCFIFOSTAT_HRFIFOFUL        _BIT(3)
#define SSS_FCFIFOSTAT_HRFIFOEMP        _BIT(2)
#define SSS_FCFIFOSTAT_PKFIFOFUL        _BIT(1)
#define SSS_FCFIFOSTAT_PKFIFOEMP        _BIT(0)

#define SSS_REG_FCFIFOCTRL              0x0014
#define SSS_FCFIFOCTRL_DESSEL           _BIT(2)
#define SSS_HASHIN_INDEPENDENT          _SBF(0, 0x00)
#define SSS_HASHIN_CIPHER_INPUT         _SBF(0, 0x01)
#define SSS_HASHIN_CIPHER_OUTPUT        _SBF(0, 0x02)

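/*
 * The SSS_HASHIN_* values appear to route the hash engine input: either
 * independent of the cipher, or snooping the cipher input/output path.
 * This driver writes FCFIFOCTRL as 0 (see s5p_aes_crypt_start()), i.e.
 * the independent setting.
 */
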
#define SSS_REG_FCBRDMAS                0x0020
#define SSS_REG_FCBRDMAL                0x0024
#define SSS_REG_FCBRDMAC                0x0028
#define SSS_FCBRDMAC_BYTESWAP           _BIT(1)
#define SSS_FCBRDMAC_FLUSH              _BIT(0)

#define SSS_REG_FCBTDMAS                0x0030
#define SSS_REG_FCBTDMAL                0x0034
#define SSS_REG_FCBTDMAC                0x0038
#define SSS_FCBTDMAC_BYTESWAP           _BIT(1)
#define SSS_FCBTDMAC_FLUSH              _BIT(0)

#define SSS_REG_FCHRDMAS                0x0040
#define SSS_REG_FCHRDMAL                0x0044
#define SSS_REG_FCHRDMAC                0x0048
#define SSS_FCHRDMAC_BYTESWAP           _BIT(1)
#define SSS_FCHRDMAC_FLUSH              _BIT(0)

#define SSS_REG_FCPKDMAS                0x0050
#define SSS_REG_FCPKDMAL                0x0054
#define SSS_REG_FCPKDMAC                0x0058
#define SSS_FCPKDMAC_BYTESWAP           _BIT(3)
#define SSS_FCPKDMAC_DESCEND            _BIT(2)
#define SSS_FCPKDMAC_TRANSMIT           _BIT(1)
#define SSS_FCPKDMAC_FLUSH              _BIT(0)

#define SSS_REG_FCPKDMAO                0x005C

/* AES registers */
#define SSS_REG_AES_CONTROL             0x4000
#define SSS_AES_BYTESWAP_DI             _BIT(11)
#define SSS_AES_BYTESWAP_DO             _BIT(10)
#define SSS_AES_BYTESWAP_IV             _BIT(9)
#define SSS_AES_BYTESWAP_CNT            _BIT(8)
#define SSS_AES_BYTESWAP_KEY            _BIT(7)
#define SSS_AES_KEY_CHANGE_MODE         _BIT(6)
#define SSS_AES_KEY_SIZE_128            _SBF(4, 0x00)
#define SSS_AES_KEY_SIZE_192            _SBF(4, 0x01)
#define SSS_AES_KEY_SIZE_256            _SBF(4, 0x02)
#define SSS_AES_FIFO_MODE               _BIT(3)
#define SSS_AES_CHAIN_MODE_ECB          _SBF(1, 0x00)
#define SSS_AES_CHAIN_MODE_CBC          _SBF(1, 0x01)
#define SSS_AES_CHAIN_MODE_CTR          _SBF(1, 0x02)
#define SSS_AES_MODE_DECRYPT            _BIT(0)

#define SSS_REG_AES_STATUS              0x4004
#define SSS_AES_BUSY                    _BIT(2)
#define SSS_AES_INPUT_READY             _BIT(1)
#define SSS_AES_OUTPUT_READY            _BIT(0)

#define SSS_REG_AES_IN_DATA(s)          (0x4010 + (s << 2))
#define SSS_REG_AES_OUT_DATA(s)         (0x4020 + (s << 2))
#define SSS_REG_AES_IV_DATA(s)          (0x4030 + (s << 2))
#define SSS_REG_AES_CNT_DATA(s)         (0x4040 + (s << 2))
#define SSS_REG_AES_KEY_DATA(s)         (0x4080 + (s << 2))

#define SSS_REG(dev, reg)               ((dev)->ioaddr + (SSS_REG_##reg))
#define SSS_READ(dev, reg)              __raw_readl(SSS_REG(dev, reg))
#define SSS_WRITE(dev, reg, val)        __raw_writel((val), SSS_REG(dev, reg))

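/*
 * For example, SSS_WRITE(dev, FCINTPEND, status) expands to
 * __raw_writel(status, dev->ioaddr + SSS_REG_FCINTPEND).
 */
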
/* HW engine modes */
#define FLAGS_AES_DECRYPT               _BIT(0)
#define FLAGS_AES_MODE_MASK             _SBF(1, 0x03)
#define FLAGS_AES_CBC                   _SBF(1, 0x01)
#define FLAGS_AES_CTR                   _SBF(1, 0x02)

#define AES_KEY_LEN                     16
#define CRYPTO_QUEUE_LEN                1

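/*
 * A queue length of 1, combined with the dev->busy flag, keeps at most
 * one request in flight; a second submitter sees -EAGAIN from
 * s5p_aes_handle_req().
 */
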
struct s5p_aes_reqctx {
        unsigned long               mode;
};

struct s5p_aes_ctx {
        struct s5p_aes_dev         *dev;

        uint8_t                     aes_key[AES_MAX_KEY_SIZE];
        uint8_t                     nonce[CTR_RFC3686_NONCE_SIZE];
        int                         keylen;
};

struct s5p_aes_dev {
        struct device              *dev;
        struct clk                 *clk;
        void __iomem               *ioaddr;
        int                         irq_hash;
        int                         irq_fc;

        struct ablkcipher_request  *req;
        struct s5p_aes_ctx         *ctx;
        struct scatterlist         *sg_src;
        struct scatterlist         *sg_dst;

        struct tasklet_struct       tasklet;
        struct crypto_queue         queue;
        bool                        busy;
        spinlock_t                  lock;
};

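/*
 * There is a single SSS block on S5PV210; s5p_aes_cra_init() hands this
 * device pointer to every transform it initializes.
 */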
static struct s5p_aes_dev *s5p_dev;

static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
        SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
        SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
}

static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
        SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
        SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
}

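/*
 * Both helpers above program the start address before the length:
 * writing the length register is what starts the transfer (see the
 * comment in s5p_aes_interrupt()).
 */
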
static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
{
        /* holding a lock outside */
        dev->req->base.complete(&dev->req->base, err);
        dev->busy = false;
}

static void s5p_unset_outdata(struct s5p_aes_dev *dev)
{
        dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
}

static void s5p_unset_indata(struct s5p_aes_dev *dev)
{
        dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
}

static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
        int err;

        if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
                err = -EINVAL;
                goto exit;
        }
        if (!sg_dma_len(sg)) {
                err = -EINVAL;
                goto exit;
        }

        err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
        if (!err) {
                err = -ENOMEM;
                goto exit;
        }

        dev->sg_dst = sg;
        err = 0;

exit:
        return err;
}

static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
        int err;

        if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
                err = -EINVAL;
                goto exit;
        }
        if (!sg_dma_len(sg)) {
                err = -EINVAL;
                goto exit;
        }

        err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
        if (!err) {
                err = -ENOMEM;
                goto exit;
        }

        dev->sg_src = sg;
        err = 0;

exit:
        return err;
}

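/*
 * Note that each scatterlist entry handed to the engine must be
 * non-empty and a whole number of AES blocks; the checks above reject
 * anything else before the DMA mapping is made.
 */
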
/*
 * Returns true if new transmitting (output) data is ready and its
 * address+length have to be written to device (by calling
 * s5p_set_dma_outdata()). False otherwise.
 */
static bool s5p_aes_tx(struct s5p_aes_dev *dev)
{
        int err = 0;
        bool ret = false;

        s5p_unset_outdata(dev);

        if (!sg_is_last(dev->sg_dst)) {
                err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
                if (err)
                        s5p_aes_complete(dev, err);
                else
                        ret = true;
        } else {
                s5p_aes_complete(dev, err);
        }

        return ret;
}

/*
 * Returns true if new receiving (input) data is ready and its
 * address+length have to be written to device (by calling
 * s5p_set_dma_indata()). False otherwise.
 */
static bool s5p_aes_rx(struct s5p_aes_dev *dev)
{
        int err;
        bool ret = false;

        s5p_unset_indata(dev);

        if (!sg_is_last(dev->sg_src)) {
                err = s5p_set_indata(dev, sg_next(dev->sg_src));
                if (err)
                        s5p_aes_complete(dev, err);
                else
                        ret = true;
        }

        return ret;
}

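/*
 * Both the "feed control" and "hash" interrupts are requested with this
 * handler (see s5p_aes_probe()); only feed-control events are acted
 * upon below.
 */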
static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
        struct platform_device *pdev = dev_id;
        struct s5p_aes_dev     *dev  = platform_get_drvdata(pdev);
        uint32_t                status;
        unsigned long           flags;
        bool                    set_dma_tx = false;
        bool                    set_dma_rx = false;

        spin_lock_irqsave(&dev->lock, flags);

        if (irq == dev->irq_fc) {
                status = SSS_READ(dev, FCINTSTAT);
                if (status & SSS_FCINTSTAT_BRDMAINT)
                        set_dma_rx = s5p_aes_rx(dev);
                if (status & SSS_FCINTSTAT_BTDMAINT)
                        set_dma_tx = s5p_aes_tx(dev);

                SSS_WRITE(dev, FCINTPEND, status);
        }

        /*
         * Writing length of DMA block (either receiving or transmitting)
         * will start the operation immediately, so this should be done
         * at the end (even after clearing pending interrupts to not miss the
         * interrupt).
         */
        if (set_dma_tx)
                s5p_set_dma_outdata(dev, dev->sg_dst);
        if (set_dma_rx)
                s5p_set_dma_indata(dev, dev->sg_src);

        spin_unlock_irqrestore(&dev->lock, flags);

        return IRQ_HANDLED;
}

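/*
 * Keys are laid out right-aligned in the key register file: a 256-bit
 * key fills words 0-7, a 192-bit key words 2-7 and a 128-bit key words
 * 4-7, so every key ends at the same final word.
 */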
static void s5p_set_aes(struct s5p_aes_dev *dev,
                        uint8_t *key, uint8_t *iv, unsigned int keylen)
{
        void __iomem *keystart;

        memcpy(dev->ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);

        if (keylen == AES_KEYSIZE_256)
                keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(0);
        else if (keylen == AES_KEYSIZE_192)
                keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(2);
        else
                keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(4);

        memcpy(keystart, key, keylen);
}

static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
        struct ablkcipher_request *req = dev->req;

        uint32_t      aes_control;
        int           err;
        unsigned long flags;

        aes_control = SSS_AES_KEY_CHANGE_MODE;
        if (mode & FLAGS_AES_DECRYPT)
                aes_control |= SSS_AES_MODE_DECRYPT;

        if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
                aes_control |= SSS_AES_CHAIN_MODE_CBC;
        else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
                aes_control |= SSS_AES_CHAIN_MODE_CTR;

        if (dev->ctx->keylen == AES_KEYSIZE_192)
                aes_control |= SSS_AES_KEY_SIZE_192;
        else if (dev->ctx->keylen == AES_KEYSIZE_256)
                aes_control |= SSS_AES_KEY_SIZE_256;

        aes_control |= SSS_AES_FIFO_MODE;

        /* as a variant it is possible to use byte swapping on DMA side */
        aes_control |= SSS_AES_BYTESWAP_DI
                    |  SSS_AES_BYTESWAP_DO
                    |  SSS_AES_BYTESWAP_IV
                    |  SSS_AES_BYTESWAP_KEY
                    |  SSS_AES_BYTESWAP_CNT;

        spin_lock_irqsave(&dev->lock, flags);

        SSS_WRITE(dev, FCINTENCLR,
                  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
        SSS_WRITE(dev, FCFIFOCTRL, 0x00);

        err = s5p_set_indata(dev, req->src);
        if (err)
                goto indata_error;

        err = s5p_set_outdata(dev, req->dst);
        if (err)
                goto outdata_error;

        SSS_WRITE(dev, AES_CONTROL, aes_control);
        s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);

        s5p_set_dma_indata(dev, req->src);
        s5p_set_dma_outdata(dev, req->dst);

        SSS_WRITE(dev, FCINTENSET,
                  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);

        spin_unlock_irqrestore(&dev->lock, flags);

        return;

outdata_error:
        s5p_unset_indata(dev);

indata_error:
        s5p_aes_complete(dev, err);
        spin_unlock_irqrestore(&dev->lock, flags);
}

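/*
 * Locking note: dev->lock is held across the whole hardware setup above
 * and throughout s5p_aes_interrupt(), so DMA programming never races
 * with the interrupt path.
 */
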
static void s5p_tasklet_cb(unsigned long data)
{
        struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
        struct crypto_async_request *async_req, *backlog;
        struct s5p_aes_reqctx *reqctx;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);
        backlog   = crypto_get_backlog(&dev->queue);
        async_req = crypto_dequeue_request(&dev->queue);
        spin_unlock_irqrestore(&dev->lock, flags);

        if (!async_req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        dev->req = ablkcipher_request_cast(async_req);
        dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
        reqctx   = ablkcipher_request_ctx(dev->req);

        s5p_aes_crypt_start(dev, reqctx->mode);
}

static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
                              struct ablkcipher_request *req)
{
        unsigned long flags;
        int err;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->busy) {
                err = -EAGAIN;
                spin_unlock_irqrestore(&dev->lock, flags);
                goto exit;
        }
        dev->busy = true;

        err = ablkcipher_enqueue_request(&dev->queue, req);
        spin_unlock_irqrestore(&dev->lock, flags);

        tasklet_schedule(&dev->tasklet);

exit:
        return err;
}

static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
        struct crypto_ablkcipher *tfm    = crypto_ablkcipher_reqtfm(req);
        struct s5p_aes_ctx       *ctx    = crypto_ablkcipher_ctx(tfm);
        struct s5p_aes_reqctx    *reqctx = ablkcipher_request_ctx(req);
        struct s5p_aes_dev       *dev    = ctx->dev;

        if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
                pr_err("request size is not exact amount of AES blocks\n");
                return -EINVAL;
        }

        reqctx->mode = mode;

        return s5p_aes_handle_req(dev, req);
}

static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
                          const uint8_t *key, unsigned int keylen)
{
        struct crypto_tfm  *tfm = crypto_ablkcipher_tfm(cipher);
        struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

        if (keylen != AES_KEYSIZE_128 &&
            keylen != AES_KEYSIZE_192 &&
            keylen != AES_KEYSIZE_256)
                return -EINVAL;

        memcpy(ctx->aes_key, key, keylen);
        ctx->keylen = keylen;

        return 0;
}

static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
{
        return s5p_aes_crypt(req, 0);
}

static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
{
        return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}

static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        return s5p_aes_crypt(req, FLAGS_AES_CBC);
}

static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}

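/*
 * Usage sketch (ordinary crypto API consumer code, for illustration
 * only):
 *
 *      struct crypto_ablkcipher *tfm =
 *              crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *      crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *      ...fill an ablkcipher_request with src/dst scatterlists and a
 *      16-byte IV, then crypto_ablkcipher_encrypt(req) lands in
 *      s5p_aes_cbc_encrypt() above.
 *
 * Request sizes must be a multiple of AES_BLOCK_SIZE, or s5p_aes_crypt()
 * returns -EINVAL.
 */
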
static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
        struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->dev = s5p_dev;
        tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);

        return 0;
}

static struct crypto_alg algs[] = {
        {
                .cra_name               = "ecb(aes)",
                .cra_driver_name        = "ecb-aes-s5p",
                .cra_priority           = 100,
                .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                          CRYPTO_ALG_ASYNC,
                .cra_blocksize          = AES_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct s5p_aes_ctx),
                .cra_alignmask          = 0x0f,
                .cra_type               = &crypto_ablkcipher_type,
                .cra_module             = THIS_MODULE,
                .cra_init               = s5p_aes_cra_init,
                .cra_u.ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = s5p_aes_setkey,
                        .encrypt        = s5p_aes_ecb_encrypt,
                        .decrypt        = s5p_aes_ecb_decrypt,
                }
        },
        {
                .cra_name               = "cbc(aes)",
                .cra_driver_name        = "cbc-aes-s5p",
                .cra_priority           = 100,
                .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                          CRYPTO_ALG_ASYNC,
                .cra_blocksize          = AES_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct s5p_aes_ctx),
                .cra_alignmask          = 0x0f,
                .cra_type               = &crypto_ablkcipher_type,
                .cra_module             = THIS_MODULE,
                .cra_init               = s5p_aes_cra_init,
                .cra_u.ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = s5p_aes_setkey,
                        .encrypt        = s5p_aes_cbc_encrypt,
                        .decrypt        = s5p_aes_cbc_decrypt,
                }
        },
};

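/*
 * Note: .cra_alignmask = 0x0f asks the crypto layer for 16-byte aligned
 * data, matching the engine's AES block granularity, while
 * CRYPTO_ALG_ASYNC advertises the completion-callback model driven by
 * s5p_aes_complete().
 */
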
static int s5p_aes_probe(struct platform_device *pdev)
{
        int                 i, j, err = -ENODEV;
        struct s5p_aes_dev *pdata;
        struct device      *dev = &pdev->dev;
        struct resource    *res;

        if (s5p_dev)
                return -EEXIST;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                return -ENOMEM;

        if (!devm_request_mem_region(dev, res->start,
                                     resource_size(res), pdev->name))
                return -EBUSY;

        pdata->clk = clk_get(dev, "secss");
        if (IS_ERR(pdata->clk)) {
                dev_err(dev, "failed to find secss clock source\n");
                return -ENOENT;
        }

        clk_enable(pdata->clk);

        spin_lock_init(&pdata->lock);
        pdata->ioaddr = devm_ioremap(dev, res->start,
                                     resource_size(res));

        pdata->irq_hash = platform_get_irq_byname(pdev, "hash");
        if (pdata->irq_hash < 0) {
                err = pdata->irq_hash;
                dev_warn(dev, "hash interrupt is not available.\n");
                goto err_irq;
        }
        err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
                               IRQF_SHARED, pdev->name, pdev);
        if (err < 0) {
                dev_warn(dev, "hash interrupt is not available.\n");
                goto err_irq;
        }

        pdata->irq_fc = platform_get_irq_byname(pdev, "feed control");
        if (pdata->irq_fc < 0) {
                err = pdata->irq_fc;
                dev_warn(dev, "feed control interrupt is not available.\n");
                goto err_irq;
        }
        err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
                               IRQF_SHARED, pdev->name, pdev);
        if (err < 0) {
                dev_warn(dev, "feed control interrupt is not available.\n");
                goto err_irq;
        }

        pdata->dev = dev;
        platform_set_drvdata(pdev, pdata);
        s5p_dev = pdata;

        tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
        crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);

        for (i = 0; i < ARRAY_SIZE(algs); i++) {
                INIT_LIST_HEAD(&algs[i].cra_list);
                err = crypto_register_alg(&algs[i]);
                if (err)
                        goto err_algs;
        }

        pr_info("s5p-sss driver registered\n");

        return 0;

err_algs:
        dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);

        for (j = 0; j < i; j++)
                crypto_unregister_alg(&algs[j]);

        tasklet_kill(&pdata->tasklet);

err_irq:
        clk_disable(pdata->clk);
        clk_put(pdata->clk);

        s5p_dev = NULL;
        platform_set_drvdata(pdev, NULL);

        return err;
}

static int s5p_aes_remove(struct platform_device *pdev)
{
        struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
        int i;

        if (!pdata)
                return -ENODEV;

        for (i = 0; i < ARRAY_SIZE(algs); i++)
                crypto_unregister_alg(&algs[i]);

        tasklet_kill(&pdata->tasklet);

        clk_disable(pdata->clk);
        clk_put(pdata->clk);

        s5p_dev = NULL;
        platform_set_drvdata(pdev, NULL);

        return 0;
}

static struct platform_driver s5p_aes_crypto = {
        .probe  = s5p_aes_probe,
        .remove = s5p_aes_remove,
        .driver = {
                .owner  = THIS_MODULE,
                .name   = "s5p-secss",
        },
};

static int __init s5p_aes_mod_init(void)
{
        return platform_driver_register(&s5p_aes_crypto);
}

static void __exit s5p_aes_mod_exit(void)
{
        platform_driver_unregister(&s5p_aes_crypto);
}

module_init(s5p_aes_mod_init);
module_exit(s5p_aes_mod_exit);

MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");