crypto: mv_cesa - fill inner/outer IV fields only in HMAC case
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index c99305a..c1925c2 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -187,9 +187,9 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
 {
        int ret;
        void *sbuf;
-       int copied = 0;
+       int copy_len;
 
-       while (1) {
+       while (len) {
                if (!p->sg_src_left) {
                        ret = sg_miter_next(&p->src_sg_it);
                        BUG_ON(!ret);
@@ -199,19 +199,14 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
 
                sbuf = p->src_sg_it.addr + p->src_start;
 
-               if (p->sg_src_left <= len - copied) {
-                       memcpy(dbuf + copied, sbuf, p->sg_src_left);
-                       copied += p->sg_src_left;
-                       p->sg_src_left = 0;
-                       if (copied >= len)
-                               break;
-               } else {
-                       int copy_len = len - copied;
-                       memcpy(dbuf + copied, sbuf, copy_len);
-                       p->src_start += copy_len;
-                       p->sg_src_left -= copy_len;
-                       break;
-               }
+               copy_len = min(p->sg_src_left, len);
+               memcpy(dbuf, sbuf, copy_len);
+
+               p->src_start += copy_len;
+               p->sg_src_left -= copy_len;
+
+               len -= copy_len;
+               dbuf += copy_len;
        }
 }
 
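The rewritten loop replaces the two-branch copy with a single memcpy per scatterlist segment, clamped with min(). A minimal userspace sketch of the same pattern, with hypothetical names and plain pointers standing in for the sg_miter API:

#include <stdio.h>
#include <string.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* One scatter segment: a data pointer plus the bytes left in it. */
struct segment {
	const char *addr;
	int left;
};

/* Drain `len` bytes from consecutive segments into the flat buffer
 * `dbuf`, clamping each memcpy to whatever the current segment holds. */
static void copy_segs_to_buf(struct segment *seg, char *dbuf, int len)
{
	while (len) {
		int copy_len = MIN(seg->left, len);

		memcpy(dbuf, seg->addr, copy_len);
		seg->addr += copy_len;
		seg->left -= copy_len;

		len -= copy_len;
		dbuf += copy_len;

		if (!seg->left)
			seg++;		/* stands in for sg_miter_next() */
	}
}

int main(void)
{
	struct segment segs[] = {
		{ "Hello, ", 7 }, { "scatter", 7 }, { "list!", 6 },
	};
	char out[20];

	copy_segs_to_buf(segs, out, sizeof(out));
	printf("%s\n", out);	/* prints "Hello, scatterlist!" */
	return 0;
}
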
@@ -275,7 +270,6 @@ static void mv_process_current_q(int first_block)
        memcpy(cpg->sram + SRAM_CONFIG, &op,
                        sizeof(struct sec_accel_config));
 
-       writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
        /* GO */
        writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
 
@@ -302,6 +296,7 @@ static void mv_crypto_algo_completion(void)
 static void mv_process_hash_current(int first_block)
 {
        struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+       const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
        struct req_progress *p = &cpg->p;
        struct sec_accel_config op = { 0 };
@@ -314,6 +309,8 @@ static void mv_process_hash_current(int first_block)
                break;
        case COP_HMAC_SHA1:
                op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
+               memcpy(cpg->sram + SRAM_HMAC_IV_IN,
+                               tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
                break;
        }
 
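tfm_ctx->ivs holds the SHA-1 states that setkey precomputed over the key XORed with the standard HMAC inner/outer pads, so they are only meaningful, and now only loaded into SRAM, in the HMAC case. A userspace sketch of that key schedule, assuming keylen <= the block size (RFC 2104 hashes longer keys down first):

#include <stdio.h>
#include <string.h>

#define SHA1_BLOCK_SIZE	64

/* Build the two one-block messages whose SHA-1 states become the
 * inner/outer HMAC IVs; 0x36/0x5c are the RFC 2104 ipad/opad bytes. */
static void hmac_pad_blocks(const unsigned char *key, size_t keylen,
			    unsigned char ipad[SHA1_BLOCK_SIZE],
			    unsigned char opad[SHA1_BLOCK_SIZE])
{
	size_t i;

	memset(ipad, 0, SHA1_BLOCK_SIZE);
	memcpy(ipad, key, keylen);	/* zero-padded key block */
	memcpy(opad, ipad, SHA1_BLOCK_SIZE);

	for (i = 0; i < SHA1_BLOCK_SIZE; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
	/* Hashing one block of ipad (resp. opad) and exporting the
	 * SHA-1 state yields the inner (resp. outer) IV. */
}

int main(void)
{
	unsigned char ipad[SHA1_BLOCK_SIZE], opad[SHA1_BLOCK_SIZE];

	hmac_pad_blocks((const unsigned char *)"key", 3, ipad, opad);
	printf("%02x %02x\n", ipad[0], opad[0]);	/* 5d 37 */
	return 0;
}
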
@@ -349,7 +346,6 @@ static void mv_process_hash_current(int first_block)
 
        memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
 
-       writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
        /* GO */
        writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
 
@@ -409,12 +405,6 @@ static void mv_hash_algo_completion(void)
                copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
        sg_miter_stop(&cpg->p.src_sg_it);
 
-       ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
-       ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
-       ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
-       ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
-       ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
-
        if (likely(ctx->last_chunk)) {
                if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
                        memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
@@ -422,6 +412,12 @@ static void mv_hash_algo_completion(void)
                                                       (req)));
                } else
                        mv_hash_final_fallback(req);
+       } else {
+               ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
+               ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
+               ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
+               ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
+               ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
        }
 }
 
@@ -517,7 +513,6 @@ static void mv_start_new_hash_req(struct ahash_request *req)
 {
        struct req_progress *p = &cpg->p;
        struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-       const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
        int num_sgs, hw_bytes, old_extra_bytes, rc;
        cpg->cur_req = &req->base;
        memset(p, 0, sizeof(struct req_progress));
@@ -530,8 +525,6 @@ static void mv_start_new_hash_req(struct ahash_request *req)
                p->crypt_len = ctx->extra_bytes;
        }
 
-       memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
-
        if (unlikely(!ctx->first_hash)) {
                writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
                writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
@@ -603,9 +596,7 @@ static int queue_manag(void *data)
                        if (async_req->tfm->__crt_alg->cra_type !=
                            &crypto_ahash_type) {
                                struct ablkcipher_request *req =
-                                   container_of(async_req,
-                                                struct ablkcipher_request,
-                                                base);
+                                   ablkcipher_request_cast(async_req);
                                mv_start_new_crypt_req(req);
                        } else {
                                struct ahash_request *req =
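ablkcipher_request_cast() is the crypto API's inline helper wrapping exactly this container_of(). A self-contained illustration of the pattern, with hypothetical struct names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct async_base { int flags; };

struct cipher_request {
	int nbytes;
	struct async_base base;	/* embedded, like crypto_async_request */
};

/* Recover the outer request from a pointer to its embedded base,
 * as ablkcipher_request_cast() does for struct ablkcipher_request. */
static struct cipher_request *cipher_request_cast(struct async_base *base)
{
	return container_of(base, struct cipher_request, base);
}

int main(void)
{
	struct cipher_request req = { .nbytes = 16 };

	printf("%d\n", cipher_request_cast(&req.base)->nbytes);	/* 16 */
	return 0;
}
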
@@ -732,9 +723,6 @@ static int mv_hash_final(struct ahash_request *req)
 
 static int mv_hash_finup(struct ahash_request *req)
 {
-       if (!req->nbytes)
-               return mv_hash_final(req);
-
        mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
        return mv_handle_req(&req->base);
 }
@@ -1065,14 +1053,21 @@ static int mv_probe(struct platform_device *pdev)
 
        writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
        writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
+       writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
 
        ret = crypto_register_alg(&mv_aes_alg_ecb);
-       if (ret)
+       if (ret) {
+               printk(KERN_WARNING MV_CESA
+                      "Could not register aes-ecb driver\n");
                goto err_irq;
+       }
 
        ret = crypto_register_alg(&mv_aes_alg_cbc);
-       if (ret)
+       if (ret) {
+               printk(KERN_WARNING MV_CESA
+                      "Could not register aes-cbc driver\n");
                goto err_unreg_ecb;
+       }
 
        ret = crypto_register_ahash(&mv_sha1_alg);
        if (ret == 0)