Merge branch 'devel-stable' into for-linus

diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index c99305a..3cf303e 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -133,7 +133,6 @@ struct mv_req_hash_ctx {
        int extra_bytes;        /* unprocessed bytes in buffer */
        enum hash_op op;
        int count_add;
-       struct scatterlist dummysg;
 };
 
 static void compute_aes_dec_key(struct mv_ctx *ctx)
@@ -187,9 +186,9 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
 {
        int ret;
        void *sbuf;
-       int copied = 0;
+       int copy_len;
 
-       while (1) {
+       while (len) {
                if (!p->sg_src_left) {
                        ret = sg_miter_next(&p->src_sg_it);
                        BUG_ON(!ret);
@@ -199,19 +198,14 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
 
                sbuf = p->src_sg_it.addr + p->src_start;
 
-               if (p->sg_src_left <= len - copied) {
-                       memcpy(dbuf + copied, sbuf, p->sg_src_left);
-                       copied += p->sg_src_left;
-                       p->sg_src_left = 0;
-                       if (copied >= len)
-                               break;
-               } else {
-                       int copy_len = len - copied;
-                       memcpy(dbuf + copied, sbuf, copy_len);
-                       p->src_start += copy_len;
-                       p->sg_src_left -= copy_len;
-                       break;
-               }
+               copy_len = min(p->sg_src_left, len);
+               memcpy(dbuf, sbuf, copy_len);
+
+               p->src_start += copy_len;
+               p->sg_src_left -= copy_len;
+
+               len -= copy_len;
+               dbuf += copy_len;
        }
 }
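
With the running `copied` offset gone, `dbuf` and `len` are advanced in place and the two memcpy branches collapse into a single min()-bounded copy per scatterlist segment. A minimal userspace sketch of the same loop shape, with a plain segment array standing in for the kernel's sg_miter API (all names below are illustrative, not from the driver):

```c
#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Stand-in for one scatterlist segment. */
struct seg {
	const char *buf;
	int len;
};

/* Drain up to `len` bytes from the segments into `dbuf`, advancing a
 * (segment index, offset) cursor -- the shape of the refactored
 * copy_src_to_buf() loop. */
static void copy_segs_to_buf(const struct seg *segs, int *idx, int *off,
			     char *dbuf, int len)
{
	while (len) {
		int left = segs[*idx].len - *off;

		if (!left) {			/* segment exhausted */
			(*idx)++;
			*off = 0;
			continue;
		}

		int copy_len = MIN(left, len);
		memcpy(dbuf, segs[*idx].buf + *off, copy_len);

		*off += copy_len;		/* advance source cursor */
		len -= copy_len;		/* advance destination */
		dbuf += copy_len;
	}
}

int main(void)
{
	const struct seg segs[] = { { "hello ", 6 }, { "world", 5 } };
	char out[16] = { 0 };
	int idx = 0, off = 0;

	copy_segs_to_buf(segs, &idx, &off, out, 8);	/* crosses a segment boundary */
	printf("%s\n", out);				/* prints "hello wo" */
	return 0;
}
```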
 
@@ -275,7 +269,6 @@ static void mv_process_current_q(int first_block)
        memcpy(cpg->sram + SRAM_CONFIG, &op,
                        sizeof(struct sec_accel_config));
 
-       writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
        /* GO */
        writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
 
@@ -302,6 +295,7 @@ static void mv_crypto_algo_completion(void)
 static void mv_process_hash_current(int first_block)
 {
        struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+       const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
        struct req_progress *p = &cpg->p;
        struct sec_accel_config op = { 0 };
@@ -314,6 +308,8 @@ static void mv_process_hash_current(int first_block)
                break;
        case COP_HMAC_SHA1:
                op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
+               memcpy(cpg->sram + SRAM_HMAC_IV_IN,
+                               tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
                break;
        }
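
Loading the HMAC IVs from `tfm_ctx->ivs` now happens here, each time the request is actually processed, rather than once when it was enqueued. Those IVs are the chaining values produced at setkey time by hashing one block of the key's inner and outer pads. A hedged sketch of the standard ipad/opad derivation behind them (toy code, no real SHA-1; `hmac_pads` is an illustrative name, not a driver function):

```c
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64		/* SHA-1 block size */

/* Derive the HMAC inner and outer pads from a short key; the driver's
 * setkey then hashes one block of each and stores the resulting
 * chaining values as tfm_ctx->ivs. */
static void hmac_pads(const unsigned char *key, size_t keylen,
		      unsigned char ipad[BLOCK_SIZE],
		      unsigned char opad[BLOCK_SIZE])
{
	size_t i;

	memset(ipad, 0, BLOCK_SIZE);	/* zero-pad the key to one block */
	memcpy(ipad, key, keylen);
	memcpy(opad, ipad, BLOCK_SIZE);

	for (i = 0; i < BLOCK_SIZE; i++) {
		ipad[i] ^= 0x36;	/* inner pad constant */
		opad[i] ^= 0x5c;	/* outer pad constant */
	}
}

int main(void)
{
	unsigned char ipad[BLOCK_SIZE], opad[BLOCK_SIZE];

	hmac_pads((const unsigned char *)"secret", 6, ipad, opad);
	printf("%02x %02x\n", ipad[0], opad[0]);	/* 's' ^ 0x36, 's' ^ 0x5c */
	return 0;
}
```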
 
@@ -345,11 +341,16 @@ static void mv_process_hash_current(int first_block)
                        op.config |= CFG_LAST_FRAG;
                else
                        op.config |= CFG_MID_FRAG;
+
+               writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+               writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+               writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+               writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+               writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
        }
 
        memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
 
-       writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
        /* GO */
        writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
 
@@ -409,19 +410,19 @@ static void mv_hash_algo_completion(void)
                copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
        sg_miter_stop(&cpg->p.src_sg_it);
 
-       ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
-       ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
-       ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
-       ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
-       ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
-
        if (likely(ctx->last_chunk)) {
                if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
                        memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
                               crypto_ahash_digestsize(crypto_ahash_reqtfm
                                                       (req)));
                } else
                        mv_hash_final_fallback(req);
+       } else {
+               ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
+               ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
+               ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
+               ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
+               ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
        }
 }
 
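The completion path now reads back the DIGEST_INITIAL_VAL registers only when more data will follow; for the last chunk the digest is taken straight from SRAM (or the fallback), so saving the chaining state would be wasted work. A toy sketch of this save-only-when-mid-stream pattern, with a running checksum standing in for the hardware registers (every name here is illustrative):

```c
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the DIGEST_INITIAL_VAL registers (the real
 * hardware holds five SHA-1 chaining words). */
static uint32_t hw_state;

static void hw_process(const char *p, unsigned int n)
{
	while (n--)
		hw_state = hw_state * 31 + (unsigned char)*p++;
}

struct req_ctx {
	uint32_t state;		/* chaining value saved between chunks */
	int first_hash;
	int last_chunk;
};

/* Restore state before mid-stream chunks; save it afterwards only when
 * another chunk will follow -- the shape of the patched
 * mv_process_hash_current()/mv_hash_algo_completion() pair. */
static void process_chunk(struct req_ctx *c, const char *p, unsigned int n)
{
	if (!c->first_hash)
		hw_state = c->state;	/* cf. writel(ctx->state[i], ...) */
	hw_process(p, n);
	if (!c->last_chunk)
		c->state = hw_state;	/* cf. readl(...) only if not final */
	c->first_hash = 0;
}

int main(void)
{
	struct req_ctx c = { .first_hash = 1 };

	process_chunk(&c, "abc", 3);	/* mid-stream: state saved */
	c.last_chunk = 1;
	process_chunk(&c, "def", 3);	/* final: no save needed */
	printf("%08x\n", (unsigned)hw_state);
	return 0;
}
```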
@@ -480,7 +481,7 @@ static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
        int i = 0;
        size_t cur_len;
 
-       while (1) {
+       while (sl) {
                cur_len = sl[i].length;
                ++i;
                if (total_bytes > cur_len)
@@ -517,29 +518,12 @@ static void mv_start_new_hash_req(struct ahash_request *req)
 {
        struct req_progress *p = &cpg->p;
        struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-       const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
        int num_sgs, hw_bytes, old_extra_bytes, rc;
        cpg->cur_req = &req->base;
        memset(p, 0, sizeof(struct req_progress));
        hw_bytes = req->nbytes + ctx->extra_bytes;
        old_extra_bytes = ctx->extra_bytes;
 
-       if (unlikely(ctx->extra_bytes)) {
-               memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
-                      ctx->extra_bytes);
-               p->crypt_len = ctx->extra_bytes;
-       }
-
-       memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
-
-       if (unlikely(!ctx->first_hash)) {
-               writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
-               writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
-               writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
-               writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
-               writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
-       }
-
        ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
        if (ctx->extra_bytes != 0
            && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
@@ -555,6 +539,12 @@ static void mv_start_new_hash_req(struct ahash_request *req)
                p->complete = mv_hash_algo_completion;
                p->process = mv_process_hash_current;
 
+               if (unlikely(old_extra_bytes)) {
+                       memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
+                              old_extra_bytes);
+                       p->crypt_len = old_extra_bytes;
+               }
+
                mv_process_hash_current(1);
        } else {
                copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
@@ -603,9 +593,7 @@ static int queue_manag(void *data)
                        if (async_req->tfm->__crt_alg->cra_type !=
                            &crypto_ahash_type) {
                                struct ablkcipher_request *req =
-                                   container_of(async_req,
-                                                struct ablkcipher_request,
-                                                base);
+                                   ablkcipher_request_cast(async_req);
                                mv_start_new_crypt_req(req);
                        } else {
                                struct ahash_request *req =
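
`ablkcipher_request_cast()` is just a readability helper wrapping the same `container_of()` that the open-coded version spelled out. A self-contained sketch of the idiom with toy struct names (not the real crypto API):

```c
#include <stddef.h>
#include <stdio.h>

/* container_of(): recover the outer struct from a pointer to one of
 * its members, as used throughout the kernel. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_req { int flags; };

struct cipher_req {			/* toy outer type */
	struct base_req base;
	int keylen;
};

/* The cast helper hides the container_of() boilerplate at call sites. */
static inline struct cipher_req *cipher_request_cast(struct base_req *req)
{
	return container_of(req, struct cipher_req, base);
}

int main(void)
{
	struct cipher_req r = { .keylen = 16 };
	struct base_req *b = &r.base;

	printf("%d\n", cipher_request_cast(b)->keylen);	/* prints 16 */
	return 0;
}
```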
@@ -722,19 +710,13 @@ static int mv_hash_update(struct ahash_request *req)
 static int mv_hash_final(struct ahash_request *req)
 {
        struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-       /* dummy buffer of 4 bytes */
-       sg_init_one(&ctx->dummysg, ctx->buffer, 4);
-       /* I think I'm allowed to do that... */
-       ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
+
        mv_update_hash_req_ctx(ctx, 1, 0);
        return mv_handle_req(&req->base);
 }
 
 static int mv_hash_finup(struct ahash_request *req)
 {
-       if (!req->nbytes)
-               return mv_hash_final(req);
-
        mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
        return mv_handle_req(&req->base);
 }
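
With zero-length requests handled safely, `mv_hash_final()` no longer needs the dummy scatterlist, and `mv_hash_finup()` no longer has to forward `nbytes == 0` to it: final is simply finup with no trailing data. A toy sketch, assuming a trivial running-checksum "hash", of why the special case is redundant (all names illustrative):

```c
#include <stdio.h>

struct hash_ctx {
	unsigned long sum;	/* toy running state */
	int last_chunk;
	unsigned int count;
};

static void update_ctx(struct hash_ctx *c, int last, unsigned int n)
{
	c->last_chunk = last;
	c->count += n;
}

static void absorb(struct hash_ctx *c, const char *p, unsigned int n)
{
	while (n--)		/* a zero-length pass is simply a no-op */
		c->sum = c->sum * 131 + (unsigned char)*p++;
}

/* finup(): mark the request final and absorb any trailing bytes. */
static void finup(struct hash_ctx *c, const char *p, unsigned int n)
{
	update_ctx(c, 1, n);
	absorb(c, p, n);
}

/* final() is just finup() with no trailing data: once zero-length
 * input is handled uniformly, no special case is needed. */
static void final(struct hash_ctx *c)
{
	finup(c, NULL, 0);
}

int main(void)
{
	struct hash_ctx a = { 0 }, b = { 0 };

	finup(&a, "xyz", 3);		/* update + final in one call */
	absorb(&b, "xyz", 3);
	final(&b);			/* zero-length final chunk */
	printf("%lu %lu\n", a.sum, b.sum);	/* identical results */
	return 0;
}
```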
@@ -1065,14 +1047,21 @@ static int mv_probe(struct platform_device *pdev)
 
        writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
        writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
+       writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
 
        ret = crypto_register_alg(&mv_aes_alg_ecb);
-       if (ret)
+       if (ret) {
+               printk(KERN_WARNING MV_CESA
+                      "Could not register aes-ecb driver\n");
                goto err_irq;
+       }
 
        ret = crypto_register_alg(&mv_aes_alg_cbc);
-       if (ret)
+       if (ret) {
+               printk(KERN_WARNING MV_CESA
+                      "Could not register aes-cbc driver\n");
                goto err_unreg_ecb;
+       }
 
        ret = crypto_register_ahash(&mv_sha1_alg);
        if (ret == 0)
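
The new warnings hang off the existing goto-based unwind, where each failure label undoes exactly the registrations that already succeeded, in reverse order. A minimal standalone sketch of that kernel idiom (toy register/unregister functions, not the crypto API):

```c
#include <stdio.h>

static int register_a(void)   { return 0; }
static void unregister_a(void) { }
static int register_b(void)   { return -1; }	/* simulate a failure */
static void unregister_b(void) { }
static int register_c(void)   { return 0; }

/* Kernel-style unwind: each failure point jumps to a label that undoes
 * only the steps that already succeeded, in reverse order. */
static int probe(void)
{
	int ret;

	ret = register_a();
	if (ret) {
		fprintf(stderr, "could not register a\n");
		goto err;
	}

	ret = register_b();
	if (ret) {
		fprintf(stderr, "could not register b\n");
		goto err_unreg_a;
	}

	ret = register_c();
	if (ret) {
		fprintf(stderr, "could not register c\n");
		goto err_unreg_b;
	}

	return 0;

err_unreg_b:
	unregister_b();
err_unreg_a:
	unregister_a();
err:
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;	/* exits 1: b's registration failed */
}
```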