/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "desc_constr.h"
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16
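/*
 * Worked example (illustrative): for authenc(hmac(sha512),cbc(aes)) the
 * split key occupies 2 * SHA512_DIGEST_SIZE = 128 bytes and the cipher
 * key at most AES_MAX_KEY_SIZE = 32 bytes, so CAAM_MAX_KEY_SIZE = 160.
 */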
/* length of descriptors text */
#define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3)

#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
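/*
 * Illustrative sketch (mirrors the fit test in aead_set_sh_desc() below):
 * keys may be inlined into a shared descriptor only if the job and shared
 * descriptors together fit the 64-word descriptor buffer:
 *
 *	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
 *	    ctx->split_key_pad_len + ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
 *		keys_fit_inline = true;
 */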
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define xstr(s) str(s)
#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
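/*
 * Resulting command pattern (illustrative sketch):
 *
 *	JUMP if SHRD ---------> dk
 *	OPERATION(decrypt)		; key freshly loaded by this descriptor
 *	JUMP always ----------> done
 * dk:	OPERATION(decrypt | AAI_DK)	; reuse the key already sitting in
 *					; the class 1 key register
 * done:
 */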
/*
 * Wait for completion of class 1 key loading before allowing
 * error propagation
 */
static inline void append_dec_shr_done(u32 *desc)
{
	u32 *jump_cmd;

	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
}
/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}
/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)
/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
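/*
 * Layout of ctx->key (illustrative aid, derived from append_key_aead()
 * and aead_setkey() below):
 *
 *	| MDHA split key, padded to split_key_pad_len | enc key, enckeylen |
 *	^ ctx->key_dma                                 ^ + split_key_pad_len
 */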
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    bool keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}
static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  bool keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_WAIT);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
}
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
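	/*
	 * Dataflow of the MATH commands above (illustrative summary):
	 *
	 *	REG3 = seqoutlen - authsize	; cryptlen
	 *	REG2 = seqinlen - ivsize	; assoclen + cryptlen
	 *	VARSEQINLEN = REG2 - REG3	; assoclen, for the assoc read
	 *	VARSEQIN/OUTLEN = 0 + REG3	; cryptlen, payload read/write
	 */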
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_WAIT);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy IV from class 1 context to the output FIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
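/*
 * IV generation flow in the givencrypt descriptor (illustrative summary):
 * a PAD-type NFIFO entry with PTYPE_RND makes the hardware generate ivsize
 * random bytes, which are MOVEd into the class 1 context, then out through
 * the output FIFO to the destination buffer, and via a second NFIFO entry
 * into the class 2 FIFO so the generated IV is also covered by the MAC.
 */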
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
struct split_key_result {
	struct completion completion;
	int err;
};

static void split_key_done(struct device *dev, u32 *desc, u32 err,
			   void *context)
{
	struct split_key_result *res = context;

#ifdef DEBUG
	dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	res->err = err;

	complete(&res->completion);
}
/*
get a split ipad/opad key

Split key generation-----------------------------------------------

[00] 0xb0810008    jobdesc: stidx=1 share=never len=8
[01] 0x04000014        key: class2->keyreg len=20
[03] 0x84410014  operation: cls2-op sha1 hmac init dec
[04] 0x24940000     fifold: class2 msgdata-last2 len=0 imm
[05] 0xa4000001       jump: class2 local all ->1 [06]
[06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
*/
static int gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t dma_addr_in, dma_addr_out;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key descriptor memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, dma_addr_in)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
		   KEY_DEST_CLASS_REG);

	/* Sets MDHA up into an HMAC-INIT */
	append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
			 OP_ALG_AS_INIT);

	/*
	 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
	 * into both pads inside MDHA
	 */
	append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
				FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);

	/*
	 * FIFO_STORE with the explicit split-key content store
	 * (0x26 output type)
	 */
	dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dma_addr_out)) {
		dev_err(jrdev, "unable to map key output memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	append_fifo_store(desc, dma_addr_out, ctx->split_key_len,
			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			       ctx->split_key_pad_len, 1);
#endif
	}

	dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len,
			 DMA_FROM_DEVICE);
	dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE);

	kfree(desc);

	return ret;
}
static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen;
	unsigned int enckeylen;
	int ret = 0;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	if (keylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keylen, enckeylen, authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_key(ctx, key, authkeylen);
	if (ret)
		goto badkey;

	/* append encryption key to auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + enckeylen, 1);
#endif

	ctx->enckeylen = enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
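/*
 * Worked example (illustrative): for hmac(sha1) the algorithm submask
 * selects mdpadlen[1] = 20, so split_key_len = 40 (ipad + opad digest
 * halves) and split_key_pad_len = ALIGN(40, 16) = 48.
 */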
struct link_tbl_entry {
	u64 ptr;
	u32 len;
	u8 reserved;
	u8 buf_pool_id;
	u16 offset;
};
/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @link_tbl_bytes: length of dma mapped link_tbl space
 * @link_tbl_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int link_tbl_bytes;
	dma_addr_t link_tbl_dma;
	struct link_tbl_entry *link_tbl;
	u32 hw_desc[0];
};
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents, int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma,
		       int link_tbl_bytes)
{
	if (unlikely(dst != src)) {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (link_tbl_bytes)
		dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes,
				 DMA_TO_DEVICE);
}
static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg(dev, req->assoc, edesc->assoc_nents ? : 1, DMA_TO_DEVICE);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
		   edesc->link_tbl_bytes);
}
static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}
static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen, 1);
#endif

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->link_tbl_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);

		print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}
static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
			       dma_addr_t dma, u32 len, u32 offset)
{
	link_tbl_ptr->ptr = dma;
	link_tbl_ptr->len = len;
	link_tbl_ptr->reserved = 0;
	link_tbl_ptr->buf_pool_id = 0;
	link_tbl_ptr->offset = offset;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr,
		       sizeof(struct link_tbl_entry), 1);
#endif
}
/*
 * convert scatterlist to h/w link table format
 * but does not have final bit; instead, returns last entry
 */
static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg,
					     int sg_count, struct link_tbl_entry
					     *link_tbl_ptr, u32 offset)
{
	while (sg_count) {
		sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg),
				   sg_dma_len(sg), offset);
		link_tbl_ptr++;
		sg = sg_next(sg);
		sg_count--;
	}
	return link_tbl_ptr - 1;
}
/*
 * convert scatterlist to h/w link table format
 * scatterlist must have been previously dma mapped
 */
static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count,
				struct link_tbl_entry *link_tbl_ptr, u32 offset)
{
	link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset);
	link_tbl_ptr->len |= 0x40000000;
}
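/*
 * Bit 30 (0x40000000) of an entry's len word is the link table Final
 * bit: CAAM stops walking the table at the entry that carries it.
 */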
/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, link_tbl_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->link_tbl_dma;
		link_tbl_index += (edesc->assoc_nents ? : 1) + 1 +
				  (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}
	if (encrypt)
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen - authsize, in_options);
	else
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen, in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct link_tbl_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->link_tbl_dma +
				  link_tbl_index *
				  sizeof(struct link_tbl_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}
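/*
 * Sequence lengths chosen above (illustrative summary): aead_encrypt()
 * has already grown req->cryptlen by authsize, so encrypt jobs read
 * assoclen + ivsize + (cryptlen - authsize) bytes and write cryptlen
 * bytes (payload plus ICV); decrypt jobs read the full input and write
 * cryptlen - authsize bytes of plaintext.
 */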
/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      u32 contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, link_tbl_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->link_tbl_dma;
		link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
			  req->cryptlen - authsize, in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct link_tbl_entry) *
				  edesc->assoc_nents;
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->link_tbl_dma +
				  link_tbl_index *
				  sizeof(struct link_tbl_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
}
/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	while (nbytes > 0) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			BUG(); /* chained scatterlists are not supported */
		sg = scatterwalk_sg_next(sg);
	}

	if (likely(sg_nents == 1))
		return 0;

	return sg_nents;
}
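/*
 * Note (illustrative): sg_count() returns 0 for a single contiguous
 * segment, which is why callers map with "nents ? : 1" and treat a zero
 * count as "no link table entry needed".
 */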
/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	int ivsize = crypto_aead_ivsize(aead);
	int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen);
	src_nents = sg_count(req->src, req->cryptlen);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen);

	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
			 DMA_BIDIRECTIONAL);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize !=
	    sg_dma_address(req->src)) {
		all_contig = false;
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		link_tbl_len = assoc_nents + 1 + src_nents;
	}
	if (dst_nents)
		link_tbl_len += dst_nents;

	link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			link_tbl_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->link_tbl_bytes = link_tbl_bytes;
	edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
			  desc_bytes;
	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
					     link_tbl_bytes, DMA_TO_DEVICE);
	*all_contig_ptr = all_contig;

	link_tbl_index = 0;
	if (!all_contig) {
		sg_to_link_tbl(req->assoc,
			       (assoc_nents ? : 1),
			       edesc->link_tbl +
			       link_tbl_index, 0);
		link_tbl_index += assoc_nents ? : 1;
		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
				   iv_dma, ivsize, 0);
		link_tbl_index += 1;
		sg_to_link_tbl_last(req->src,
				    (src_nents ? : 1),
				    edesc->link_tbl +
				    link_tbl_index, 0);
		link_tbl_index += src_nents ? : 1;
	}
	if (dst_nents)
		sg_to_link_tbl_last(req->dst, dst_nents,
				    edesc->link_tbl + link_tbl_index, 0);

	return edesc;
}
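/*
 * Resulting link table layout (illustrative), indices as used above:
 *
 *	[ assoc entries | iv entry | src entries (Final bit on last) ]
 *	[ dst entries (only when req->dst != req->src, Final on last) ]
 */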
static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	req->cryptlen += ctx->authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen);
	src_nents = sg_count(req->src, req->cryptlen);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen);

	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
			 DMA_BIDIRECTIONAL);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
		contig &= ~GIV_SRC_CONTIG;
	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;
	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		link_tbl_len += 1;
	}
	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		link_tbl_len += assoc_nents + 1 + src_nents;
		if (likely(req->src == req->dst))
			contig &= ~GIV_DST_CONTIG;
	}
	link_tbl_len += dst_nents;

	link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			link_tbl_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->link_tbl_bytes = link_tbl_bytes;
	edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
			  desc_bytes;
	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
					     link_tbl_bytes, DMA_TO_DEVICE);
	*contig_ptr = contig;

	link_tbl_index = 0;
	if (!(contig & GIV_SRC_CONTIG)) {
		sg_to_link_tbl(req->assoc, assoc_nents,
			       edesc->link_tbl +
			       link_tbl_index, 0);
		link_tbl_index += assoc_nents;
		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
				   iv_dma, ivsize, 0);
		link_tbl_index += 1;
		sg_to_link_tbl_last(req->src, src_nents,
				    edesc->link_tbl +
				    link_tbl_index, 0);
		link_tbl_index += src_nents;
	}
	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
				   iv_dma, ivsize, 0);
		link_tbl_index += 1;
		sg_to_link_tbl_last(req->dst, dst_nents,
				    edesc->link_tbl + link_tbl_index, 0);
	}

	return edesc;
}
static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
	struct aead_request *req = &areq->areq;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 contig;
	u32 *desc;
	int ret = 0;

	req->cryptlen += ctx->authsize;

	/* allocate extended descriptor */
	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
				     CAAM_CMD_SZ, &contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_giv_job(ctx->sh_desc_givenc,
			  ctx->sh_desc_givenc_dma, edesc, req, contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
#define template_aead		template_u.aead
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};

static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des))",
		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
};
struct caam_crypto_alg {
	struct list_head entry;
	struct device *ctrldev;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
	struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
	int tgt_jr = atomic_inc_return(&priv->tfm_count);

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi];

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}
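/*
 * Example (illustrative): with num_jrs_for_algapi == 2, tfm_count values
 * 0,1 map to ring 0 and 2,3 to ring 1; the "/ 2" pairs consecutive tfms
 * on one ring so each tfm's requests are processed in order.
 */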
static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
}
static void __exit caam_algapi_exit(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	struct caam_crypto_alg *t_alg, *n;
	int i, err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node)
		return;

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return;

	ctrldev = &pdev->dev;
	of_node_put(dev_node);
	priv = dev_get_drvdata(ctrldev);

	if (!priv->alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	for (i = 0; i < priv->total_jobrs; i++) {
		err = caam_jr_deregister(priv->algapi_jr[i]);
		if (err < 0)
			break;
	}
	kfree(priv->algapi_jr);
}
static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
					      struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		dev_err(ctrldev, "failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;
	t_alg->ctrldev = ctrldev;

	return t_alg;
}
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev, **jrdev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node)
		return -ENODEV;

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	INIT_LIST_HEAD(&priv->alg_list);

	jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
	if (!jrdev)
		return -ENOMEM;

	for (i = 0; i < priv->total_jobrs; i++) {
		err = caam_jr_register(ctrldev, &jrdev[i]);
		if (err < 0)
			break;
	}
	if (err < 0 && i == 0) {
		dev_err(ctrldev, "algapi error in job ring registration: %d\n",
			err);
		kfree(jrdev);
		return err;
	}

	priv->num_jrs_for_algapi = i;
	priv->algapi_jr = jrdev;
	atomic_set(&priv->tfm_count, -1);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &priv->alg_list);
			dev_info(ctrldev, "%s\n",
				 t_alg->crypto_alg.cra_driver_name);
		}
	}

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");