/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | LOAD (to DECO)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
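/*
 * Worked example (from the constants above): AES_MAX_KEY_SIZE is 32
 * bytes and a SHA-512 split key is SHA512_DIGEST_SIZE * 2 = 128 bytes,
 * so CAAM_MAX_KEY_SIZE comes to 160 bytes of per-session key material.
 */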
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3)

#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define xstr(s) str(s)
#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
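
/*
 * In the sequence above, the first jump is taken when the SHRD
 * condition is set (the descriptor was entered shared), selecting the
 * OP_ALG_AAI_DK variant of the decrypt operation; otherwise the plain
 * decrypt operation runs and the unconditional jump skips the DK
 * variant.
 */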

/*
 * Wait for completion of class 1 key loading before allowing
 * error propagation
 */
static inline void append_dec_shr_done(u32 *desc)
{
	u32 *jump_cmd;

	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    bool keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}
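
/*
 * Both keys (the MDHA split key and the class 1 cipher key) live in
 * ctx->key: when they fit in the 64-word descriptor buffer they are
 * embedded as immediate data, otherwise the descriptor references them
 * by DMA address at offsets 0 and split_key_pad_len.
 */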

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  bool keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_WAIT);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_WAIT);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV from class 1 context to the output FIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
			     FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	aead_set_sh_desc(authenc);

	return 0;
}

struct split_key_result {
	struct completion completion;
	int err;
};

static void split_key_done(struct device *dev, u32 *desc, u32 err,
			   void *context)
{
	struct split_key_result *res = context;

#ifdef DEBUG
	dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	res->err = err;

	complete(&res->completion);
}

/*
 * get a split ipad/opad key
 *
 * Split key generation-----------------------------------------------
 *
 * [00] 0xb0810008    jobdesc: stidx=1 share=never len=8
 * [01] 0x04000014        key: class2->keyreg len=20
 * [03] 0x84410014  operation: cls2-op sha1 hmac init dec
 * [04] 0x24940000     fifold: class2 msgdata-last2 len=0 imm
 * [05] 0xa4000001       jump: class2 local all ->1 [06]
 * [06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
 */
static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t dma_addr_in, dma_addr_out;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc)
		return -ENOMEM;

	init_job_desc(desc, 0);

	dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, dma_addr_in)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
		   KEY_DEST_CLASS_REG);

	/* Sets MDHA up into an HMAC-INIT */
	append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
			 OP_ALG_AS_INIT);

	/*
	 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
	 * into both pads inside MDHA
	 */
	append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
				FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);

	/*
	 * FIFO_STORE with the explicit split-key content store
	 * (0x26 output type)
	 */
	dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dma_addr_out)) {
		dev_err(jrdev, "unable to map key output memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	append_fifo_store(desc, dma_addr_out, ctx->split_key_len,
			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			       ctx->split_key_pad_len, 1);
#endif
	}

	dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len,
			 DMA_FROM_DEVICE);
	dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE);

	kfree(desc);

	return ret;
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen;
	unsigned int enckeylen;
	int ret = 0;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	if (keylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
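	/*
	 * Example (values from mdpadlen[] above): for hmac(sha1) the pad
	 * length is 20 bytes, so split_key_len = 2 * 20 = 40 (ipad plus
	 * opad halves) and split_key_pad_len rounds up to 48.
	 */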
#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keylen, enckeylen, authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_key(ctx, key, authkeylen);
	if (ret)
		goto badkey;

	/* append encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
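	/*
	 * ctx->key now holds [ MDHA split key, padded to 16 bytes |
	 * encryption key ]; the shared descriptors reference the two
	 * halves at offsets 0 and split_key_pad_len respectively.
	 */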
	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + enckeylen, 1);
#endif

	ctx->enckeylen = enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd, *jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_WAIT);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_WAIT);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* As for aead, only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	/* Wait for key to load before allowing propagating error */
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

struct link_tbl_entry {
	u64 ptr;
	u32 len;
	u8 reserved;
	u8 buf_pool_id;
	u16 offset;
};

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @link_tbl_bytes: length of dma mapped link_tbl space
 * @link_tbl_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int link_tbl_bytes;
	dma_addr_t link_tbl_dma;
	struct link_tbl_entry *link_tbl;
	u32 hw_desc[0];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @link_tbl_bytes: length of dma mapped link_tbl space
 * @link_tbl_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int link_tbl_bytes;
	dma_addr_t link_tbl_dma;
	struct link_tbl_entry *link_tbl;
	u32 hw_desc[0];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents, int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma,
		       int link_tbl_bytes)
{
	if (unlikely(dst != src)) {
		dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (link_tbl_bytes)
		dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
		   edesc->link_tbl_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
		   edesc->link_tbl_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen, 1);
#endif

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->link_tbl_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
		print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
			       dma_addr_t dma, u32 len, u32 offset)
{
	link_tbl_ptr->ptr = dma;
	link_tbl_ptr->len = len;
	link_tbl_ptr->reserved = 0;
	link_tbl_ptr->buf_pool_id = 0;
	link_tbl_ptr->offset = offset;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr,
		       sizeof(struct link_tbl_entry), 1);
#endif
}

/*
 * convert scatterlist to h/w link table format
 * but does not have final bit; instead, returns last entry
 */
static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg,
					     int sg_count, struct link_tbl_entry
					     *link_tbl_ptr, u32 offset)
{
	while (sg_count) {
		sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg),
				   sg_dma_len(sg), offset);
		link_tbl_ptr++;
		sg = sg_next(sg);
		sg_count--;
	}
	return link_tbl_ptr - 1;
}

/*
 * convert scatterlist to h/w link table format
 * scatterlist must have been previously dma mapped
 */
static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count,
				struct link_tbl_entry *link_tbl_ptr, u32 offset)
{
	link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset);
	link_tbl_ptr->len |= 0x40000000;
}
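
/*
 * The 0x40000000 OR above sets the final-entry marker in the length
 * field of the last link table entry, which sg_to_link_tbl()
 * deliberately leaves clear.
 */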

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, link_tbl_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->link_tbl_dma;
		link_tbl_index += (edesc->assoc_nents ? : 1) + 1 +
				  (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}
	if (encrypt)
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen - authsize, in_options);
	else
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen, in_options);
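	/*
	 * Note: by this point aead_encrypt() has already added authsize
	 * to req->cryptlen (to size the output buffer), so the encrypt
	 * input length above subtracts it back out.
	 */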

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct link_tbl_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->link_tbl_dma +
				  link_tbl_index *
				  sizeof(struct link_tbl_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}

/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, link_tbl_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->link_tbl_dma;
		link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
			  req->cryptlen - authsize, in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct link_tbl_entry) *
				  edesc->assoc_nents;
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->link_tbl_dma +
				  link_tbl_index *
				  sizeof(struct link_tbl_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, link_tbl_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->link_tbl_dma;
		link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->link_tbl_dma +
				  sizeof(struct link_tbl_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->link_tbl_dma +
				  link_tbl_index * sizeof(struct link_tbl_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	while (nbytes > 0) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			BUG(); /* chained scatterlists are not supported */
		sg = scatterwalk_sg_next(sg);
	}

	if (likely(sg_nents == 1))
		return 0;

	return sg_nents;
}
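
/*
 * Note the convention above: a return value of 0 means "contiguous,
 * single segment". Callers therefore map "count ? : 1" entries and use
 * a zero count to skip building a link table entirely.
 */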

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	int ivsize = crypto_aead_ivsize(aead);
	int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen);
	src_nents = sg_count(req->src, req->cryptlen);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen);

	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
			 DMA_TO_DEVICE);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize !=
	    sg_dma_address(req->src)) {
		all_contig = false;
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		link_tbl_len = assoc_nents + 1 + src_nents;
	}
	link_tbl_len += dst_nents;

	link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
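	/*
	 * Layout of the link table when one is needed (see the fills
	 * further below): [assoc entries][one iv entry][src entries],
	 * with [dst entries] appended when the destination is distinct.
	 */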
	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			link_tbl_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->link_tbl_bytes = link_tbl_bytes;
	edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
			  desc_bytes;
	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
					     link_tbl_bytes, DMA_TO_DEVICE);
	*all_contig_ptr = all_contig;

	link_tbl_index = 0;
	if (!all_contig) {
		sg_to_link_tbl(req->assoc,
			       (assoc_nents ? : 1),
			       edesc->link_tbl +
			       link_tbl_index, 0);
		link_tbl_index += assoc_nents ? : 1;
		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
				   iv_dma, ivsize, 0);
		link_tbl_index += 1;
		sg_to_link_tbl_last(req->src,
				    (src_nents ? : 1),
				    edesc->link_tbl +
				    link_tbl_index, 0);
		link_tbl_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_link_tbl_last(req->dst, dst_nents,
				    edesc->link_tbl + link_tbl_index, 0);
	}

	return edesc;
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	req->cryptlen += ctx->authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor*/
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen);
	src_nents = sg_count(req->src, req->cryptlen);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen);

	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
			 DMA_TO_DEVICE);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
		contig &= ~GIV_SRC_CONTIG;
	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;
	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		link_tbl_len += 1;
	}
	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		link_tbl_len += assoc_nents + 1 + src_nents;
		if (likely(req->src == req->dst))
			contig &= ~GIV_DST_CONTIG;
	}
	link_tbl_len += dst_nents;

	link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			link_tbl_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->link_tbl_bytes = link_tbl_bytes;
	edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
			  desc_bytes;
	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
					     link_tbl_bytes, DMA_TO_DEVICE);
	*contig_ptr = contig;

	link_tbl_index = 0;
	if (!(contig & GIV_SRC_CONTIG)) {
		sg_to_link_tbl(req->assoc, assoc_nents,
			       edesc->link_tbl +
			       link_tbl_index, 0);
		link_tbl_index += assoc_nents;
		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
				   iv_dma, ivsize, 0);
		link_tbl_index += 1;
		sg_to_link_tbl_last(req->src, src_nents,
				    edesc->link_tbl +
				    link_tbl_index, 0);
		link_tbl_index += src_nents;
	}
	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
				   iv_dma, ivsize, 0);
		link_tbl_index += 1;
		sg_to_link_tbl_last(req->dst, dst_nents,
				    edesc->link_tbl + link_tbl_index, 0);
	}

	return edesc;
}

static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
	struct aead_request *req = &areq->areq;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 contig;
	u32 *desc;
	int ret = 0;

	req->cryptlen += ctx->authsize;

	/* allocate extended descriptor */
	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
				     CAAM_CMD_SZ, &contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor*/
	init_aead_giv_job(ctx->sh_desc_givenc,
			  ctx->sh_desc_givenc_dma, edesc, req, contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, link_tbl_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int link_tbl_index;

	src_nents = sg_count(req->src, req->nbytes);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->nbytes);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			 sizeof(struct link_tbl_entry);
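	/*
	 * When a link table is needed its layout mirrors the aead case:
	 * [iv entry (unless contiguous with src)][src entries], plus
	 * [dst entries] when the destination is distinct.
	 */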
	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
			link_tbl_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->link_tbl_bytes = link_tbl_bytes;
	edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			  desc_bytes;

	link_tbl_index = 0;
	if (!iv_contig) {
		sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0);
		sg_to_link_tbl_last(req->src, src_nents,
				    edesc->link_tbl + 1, 0);
		link_tbl_index += 1 + src_nents;
	}

	if (unlikely(dst_nents)) {
		sg_to_link_tbl_last(req->dst, dst_nents,
				    edesc->link_tbl + link_tbl_index, 0);
	}

	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
					     link_tbl_bytes, DMA_TO_DEVICE);
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
		       link_tbl_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};

static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des))",
		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
};

struct caam_crypto_alg {
	struct list_head entry;
	struct device *ctrldev;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
	struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
	int tgt_jr = atomic_inc_return(&priv->tfm_count);

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi];

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}
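
/*
 * The "tgt_jr / 2" above assigns each pair of consecutively created
 * tfms to the same job ring before round-robin advances to the next
 * ring, halving the rate at which the assignment cycles.
 */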

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
}

static void __exit caam_algapi_exit(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	struct caam_crypto_alg *t_alg, *n;
	int i, err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node)
		return;

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return;

	ctrldev = &pdev->dev;
	of_node_put(dev_node);
	priv = dev_get_drvdata(ctrldev);

	if (!priv->alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	for (i = 0; i < priv->total_jobrs; i++) {
		err = caam_jr_deregister(priv->algapi_jr[i]);
		if (err < 0)
			break;
	}
	kfree(priv->algapi_jr);
}

static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
					      struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		dev_err(ctrldev, "failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;
	t_alg->ctrldev = ctrldev;

	return t_alg;
}

static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev, **jrdev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node)
		return -ENODEV;

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	INIT_LIST_HEAD(&priv->alg_list);

	jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
	if (!jrdev)
		return -ENOMEM;

	for (i = 0; i < priv->total_jobrs; i++) {
		err = caam_jr_register(ctrldev, &jrdev[i]);
		if (err < 0)
			break;
	}
	if (err < 0 && i == 0) {
		dev_err(ctrldev, "algapi error in job ring registration: %d\n",
			err);
		kfree(jrdev);
		return err;
	}

	priv->num_jrs_for_algapi = i;
	priv->algapi_jr = jrdev;
	atomic_set(&priv->tfm_count, -1);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &priv->alg_list);
			dev_info(ctrldev, "%s\n",
				 t_alg->crypto_alg.cra_driver_name);
		}
	}

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");