Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 21 May 2011 07:13:03 +0000 (00:13 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 21 May 2011 07:13:03 +0000 (00:13 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  sctp: Fix build failure.
  garp: use kfree_rcu()
  ipv6: copy prefsrc setting when copying route entry

45 files changed:
Documentation/devicetree/bindings/crypto/fsl-sec4.txt [new file with mode: 0644]
arch/powerpc/boot/dts/p4080ds.dts
arch/powerpc/kernel/misc_32.S
arch/s390/crypto/Makefile
arch/s390/crypto/aes_s390.c
arch/s390/crypto/crypt_s390.h
arch/s390/crypto/des_check_key.c [deleted file]
arch/s390/crypto/des_s390.c
arch/s390/crypto/ghash_s390.c [new file with mode: 0644]
arch/s390/crypto/prng.c
arch/s390/crypto/sha1_s390.c
arch/s390/crypto/sha256_s390.c
arch/s390/crypto/sha512_s390.c
arch/x86/crypto/Makefile
arch/x86/crypto/aesni-intel_glue.c
arch/x86/crypto/fpu.c
crypto/Kconfig
crypto/tcrypt.c
crypto/testmgr.c
crypto/testmgr.h
drivers/char/hw_random/Kconfig
drivers/char/hw_random/amd-rng.c
drivers/crypto/Kconfig
drivers/crypto/Makefile
drivers/crypto/caam/Kconfig [new file with mode: 0644]
drivers/crypto/caam/Makefile [new file with mode: 0644]
drivers/crypto/caam/caamalg.c [new file with mode: 0644]
drivers/crypto/caam/compat.h [new file with mode: 0644]
drivers/crypto/caam/ctrl.c [new file with mode: 0644]
drivers/crypto/caam/desc.h [new file with mode: 0644]
drivers/crypto/caam/desc_constr.h [new file with mode: 0644]
drivers/crypto/caam/error.c [new file with mode: 0644]
drivers/crypto/caam/error.h [new file with mode: 0644]
drivers/crypto/caam/intern.h [new file with mode: 0644]
drivers/crypto/caam/jr.c [new file with mode: 0644]
drivers/crypto/caam/jr.h [new file with mode: 0644]
drivers/crypto/caam/regs.h [new file with mode: 0644]
drivers/crypto/mv_cesa.c
drivers/crypto/omap-sham.c
drivers/crypto/padlock-sha.c
drivers/crypto/picoxcell_crypto.c
drivers/crypto/s5p-sss.c [new file with mode: 0644]
fs/namei.c
fs/partitions/ldm.c
mm/shmem.c

diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
new file mode 100644
index 0000000..bf57ecd
--- /dev/null
@@ -0,0 +1,397 @@
+=====================================================================
+SEC 4 Device Tree Binding
+Copyright (C) 2008-2011 Freescale Semiconductor Inc.
+
+ CONTENTS
+   -Overview
+   -SEC 4 Node
+   -Job Ring Node
+   -Run Time Integrity Check (RTIC) Node
+   -Run Time Integrity Check (RTIC) Memory Node
+   -Secure Non-Volatile Storage (SNVS) Node
+   -Full Example
+
+NOTE: the SEC 4 is also known as Freescale's Cryptographic Accelerator
+and Assurance Module (CAAM).
+
+=====================================================================
+Overview
+
+DESCRIPTION
+
+SEC 4 h/w can process requests from two types of sources:
+1. DPAA Queue Interface (HW interface between Queue Manager & SEC 4).
+2. Job Rings (HW interface between cores & SEC 4 registers).
+
+High Speed Data Path Configuration:
+
+The QI is the HW interface between the QM & SEC 4 (and also the BM &
+SEC 4) on DPAA-enabled parts such as the P4080.  The number of
+simultaneous dequeues the QI can make is
+equal to the number of Descriptor Controller (DECO) engines in a particular
+SEC version.  E.g., the SEC 4.0 in the P4080 has 5 DECOs and can thus
+dequeue from 5 subportals simultaneously.
+
+Job Ring Data Path Configuration:
+
+Each JR is located on a separate 4k page; it may (or may not) be made visible
+in the memory partition devoted to a particular core.  The P4080 has 4 JRs, so
+up to 4 JRs can be configured, and all 4 JRs process requests in parallel.
+
+=====================================================================
+SEC 4 Node
+
+Description
+
+    Node defines the base address of the SEC 4 block.
+    This block specifies the address range of all global
+    configuration registers for the SEC 4 block.  It
+    also receives interrupts from the Run Time Integrity Check
+    (RTIC) function within the SEC 4 block.
+
+PROPERTIES
+
+   - compatible
+      Usage: required
+      Value type: <string>
+      Definition: Must include "fsl,sec-v4.0"
+
+   - #address-cells
+       Usage: required
+       Value type: <u32>
+       Definition: A standard property.  Defines the number of cells
+           for representing physical addresses in child nodes.
+
+   - #size-cells
+       Usage: required
+       Value type: <u32>
+       Definition: A standard property.  Defines the number of cells
+           for representing the size of physical addresses in
+           child nodes.
+
+   - reg
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition: A standard property.  Specifies the physical
+          address and length of the SEC4 configuration registers.
+
+   - ranges
+       Usage: required
+       Value type: <prop-encoded-array>
+       Definition: A standard property.  Specifies the physical address
+           range of the SEC 4.0 register space (SNVS not included).  A
+           triplet that includes the child address, parent address, &
+           length.
+
+   - interrupts
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition:  Specifies the interrupts generated by this
+           device.  The value of the interrupts property
+           consists of one interrupt specifier. The format
+           of the specifier is defined by the binding document
+           describing the node's interrupt parent.
+
+   - interrupt-parent
+      Usage: (required if interrupt property is defined)
+      Value type: <phandle>
+      Definition: A single <phandle> value that points
+          to the interrupt parent to which the child domain
+          is being mapped.
+
+   Note: All other standard properties (see the ePAPR) are allowed
+   but are optional.
+
+
+EXAMPLE
+       crypto@300000 {
+               compatible = "fsl,sec-v4.0";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               reg = <0x300000 0x10000>;
+               ranges = <0 0x300000 0x10000>;
+               interrupt-parent = <&mpic>;
+               interrupts = <92 2>;
+       };
+
+=====================================================================
+Job Ring (JR) Node
+
+    A child of the crypto node that defines the data processing
+    interface to SEC 4 across the peripheral bus, for purposes of
+    processing cryptographic descriptors. The specified address
+    range can be made visible to one (or more) cores.
+    The interrupt defined for this node is controlled within
+    the address range of this node.
+
+  - compatible
+      Usage: required
+      Value type: <string>
+      Definition: Must include "fsl,sec-v4.0-job-ring"
+
+  - reg
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition: Specifies two JR parameters:  an offset from
+          the parent physical address and the length of the JR registers.
+
+   - fsl,liodn
+       Usage: optional-but-recommended
+       Value type: <prop-encoded-array>
+       Definition:
+           Specifies the LIODN to be used in conjunction with
+           the ppid-to-liodn table that specifies the PPID to LIODN mapping.
+           Needed if the PAMU is used.  The value is a 12-bit LIODN ID
+           for this JR.  This property is normally set by boot firmware.
+
+   - interrupts
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition:  Specifies the interrupts generated by this
+           device.  The value of the interrupts property
+           consists of one interrupt specifier. The format
+           of the specifier is defined by the binding document
+           describing the node's interrupt parent.
+
+   - interrupt-parent
+      Usage: (required if interrupt property is defined)
+      Value type: <phandle>
+      Definition: A single <phandle> value that points
+          to the interrupt parent to which the child domain
+          is being mapped.
+
+EXAMPLE
+       jr@1000 {
+               compatible = "fsl,sec-v4.0-job-ring";
+               reg = <0x1000 0x1000>;
+               fsl,liodn = <0x081>;
+               interrupt-parent = <&mpic>;
+               interrupts = <88 2>;
+       };
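+
+   For illustration only (not part of the binding itself): a consumer
+   driver could enumerate the job ring nodes by their compatible
+   string.  A minimal sketch, assuming the standard Linux OF API
+   (<linux/of.h>):
+
+       struct device_node *np = NULL;
+
+       /* walk every node whose compatible matches the JR binding */
+       while ((np = of_find_compatible_node(np, NULL,
+                                            "fsl,sec-v4.0-job-ring")))
+               pr_info("found SEC4 job ring: %s\n", np->full_name);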
+
+
+=====================================================================
+Run Time Integrity Check (RTIC) Node
+
+  Child node of the crypto node.  Defines a register space that
+  contains up to 5 sets of addresses and their lengths (sizes) that
+  will be checked at run time.  After an initial hash result is
+  calculated, these addresses are checked by HW to monitor any
+  change.  If any memory is modified, a Security Violation is
+  triggered (see SNVS definition).
+
+
+  - compatible
+      Usage: required
+      Value type: <string>
+      Definition: Must include "fsl,sec-v4.0-rtic".
+
+   - #address-cells
+       Usage: required
+       Value type: <u32>
+       Definition: A standard property.  Defines the number of cells
+           for representing physical addresses in child nodes.  Must
+           have a value of 1.
+
+   - #size-cells
+       Usage: required
+       Value type: <u32>
+       Definition: A standard property.  Defines the number of cells
+           for representing the size of physical addresses in
+           child nodes.  Must have a value of 1.
+
+  - reg
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition: A standard property.  Specifies two parameters:
+          an offset from the parent physical address and the length
+          of the SEC4 registers.
+
+   - ranges
+       Usage: required
+       Value type: <prop-encoded-array>
+       Definition: A standard property.  Specifies the physical address
+           range of the SEC 4 register space (SNVS not included).  A
+           triplet that includes the child address, parent address, &
+           length.
+
+EXAMPLE
+       rtic@6000 {
+               compatible = "fsl,sec-v4.0-rtic";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               reg = <0x6000 0x100>;
+               ranges = <0x0 0x6100 0xe00>;
+       };
+
+=====================================================================
+Run Time Integrity Check (RTIC) Memory Node
+  A child node that defines individual RTIC memory regions that are used to
+  perform run-time integrity checks of memory areas that should not be modified.
+  The node defines a register that contains the memory address &
+  length (combined) and a second register that contains the hash result
+  in big endian format.
+
+  - compatible
+      Usage: required
+      Value type: <string>
+      Definition: Must include "fsl,sec-v4.0-rtic-memory".
+
+  - reg
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition: A standard property.  Specifies two parameters:
+          an offset from the parent physical address and the length:
+
+          1. The location of the RTIC memory address & length registers.
+          2. The location of the RTIC hash result.
+
+  - fsl,rtic-region
+       Usage: optional-but-recommended
+       Value type: <prop-encoded-array>
+       Definition:
+           Specifies the HW address (36 bit address) for this region
+           followed by the length of the HW partition to be checked;
+           the address is represented as a 64 bit quantity followed
+           by a 32 bit length.
+
+   - fsl,liodn
+       Usage: optional-but-recommended
+       Value type: <prop-encoded-array>
+       Definition:
+           Specifies the LIODN to be used in conjunction with
+           the ppid-to-liodn table that specifies the PPID to LIODN
+           mapping.  Needed if the PAMU is used.  The value is a 12-bit
+           LIODN ID for this RTIC memory region.  This property is
+           normally set by boot firmware.
+
+EXAMPLE
+       rtic-a@0 {
+               compatible = "fsl,sec-v4.0-rtic-memory";
+               reg = <0x00 0x20 0x100 0x80>;
+               fsl,liodn   = <0x03c>;
+               fsl,rtic-region  = <0x12345678 0x12345678 0x12345678>;
+       };
+
+=====================================================================
+Secure Non-Volatile Storage (SNVS) Node
+
+    Node defines the address range and the associated
+    interrupt for the SNVS function.  This function
+    monitors security state information & reports
+    security violations.
+
+  - compatible
+      Usage: required
+      Value type: <string>
+      Definition: Must include "fsl,sec-v4.0-mon".
+
+  - reg
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition: A standard property.  Specifies the physical
+          address and length of the SNVS configuration
+          registers.
+
+   - interrupts
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition:  Specifies the interrupts generated by this
+           device.  The value of the interrupts property
+           consists of one interrupt specifier. The format
+           of the specifier is defined by the binding document
+           describing the node's interrupt parent.
+
+   - interrupt-parent
+      Usage: (required if interrupt property is defined)
+      Value type: <phandle>
+      Definition: A single <phandle> value that points
+          to the interrupt parent to which the child domain
+          is being mapped.
+
+EXAMPLE
+       sec_mon@314000 {
+               compatible = "fsl,sec-v4.0-mon";
+               reg = <0x314000 0x1000>;
+               interrupt-parent = <&mpic>;
+               interrupts = <93 2>;
+       };
+
+=====================================================================
+FULL EXAMPLE
+
+       crypto: crypto@300000 {
+               compatible = "fsl,sec-v4.0";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               reg = <0x300000 0x10000>;
+               ranges = <0 0x300000 0x10000>;
+               interrupt-parent = <&mpic>;
+               interrupts = <92 2>;
+
+               sec_jr0: jr@1000 {
+                       compatible = "fsl,sec-v4.0-job-ring";
+                       reg = <0x1000 0x1000>;
+                       interrupt-parent = <&mpic>;
+                       interrupts = <88 2>;
+               };
+
+               sec_jr1: jr@2000 {
+                       compatible = "fsl,sec-v4.0-job-ring";
+                       reg = <0x2000 0x1000>;
+                       interrupt-parent = <&mpic>;
+                       interrupts = <89 2>;
+               };
+
+               sec_jr2: jr@3000 {
+                       compatible = "fsl,sec-v4.0-job-ring";
+                       reg = <0x3000 0x1000>;
+                       interrupt-parent = <&mpic>;
+                       interrupts = <90 2>;
+               };
+
+               sec_jr3: jr@4000 {
+                       compatible = "fsl,sec-v4.0-job-ring";
+                       reg = <0x4000 0x1000>;
+                       interrupt-parent = <&mpic>;
+                       interrupts = <91 2>;
+               };
+
+               rtic@6000 {
+                       compatible = "fsl,sec-v4.0-rtic";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       reg = <0x6000 0x100>;
+                       ranges = <0x0 0x6100 0xe00>;
+
+                       rtic_a: rtic-a@0 {
+                               compatible = "fsl,sec-v4.0-rtic-memory";
+                               reg = <0x00 0x20 0x100 0x80>;
+                       };
+
+                       rtic_b: rtic-b@20 {
+                               compatible = "fsl,sec-v4.0-rtic-memory";
+                               reg = <0x20 0x20 0x200 0x80>;
+                       };
+
+                       rtic_c: rtic-c@40 {
+                               compatible = "fsl,sec-v4.0-rtic-memory";
+                               reg = <0x40 0x20 0x300 0x80>;
+                       };
+
+                       rtic_d: rtic-d@60 {
+                               compatible = "fsl,sec-v4.0-rtic-memory";
+                               reg = <0x60 0x20 0x500 0x80>;
+                       };
+               };
+       };
+
+       sec_mon: sec_mon@314000 {
+               compatible = "fsl,sec-v4.0-mon";
+               reg = <0x314000 0x1000>;
+               interrupt-parent = <&mpic>;
+               interrupts = <93 2>;
+       };
+
+=====================================================================
diff --git a/arch/powerpc/boot/dts/p4080ds.dts b/arch/powerpc/boot/dts/p4080ds.dts
index 5b7fc29..927f94d 100644
@@ -1,7 +1,7 @@
 /*
  * P4080DS Device Tree Source
  *
- * Copyright 2009 Freescale Semiconductor Inc.
+ * Copyright 2009-2011 Freescale Semiconductor Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under  the terms of the GNU General  Public License as published by the
                dma1 = &dma1;
                sdhc = &sdhc;
 
+               crypto = &crypto;
+               sec_jr0 = &sec_jr0;
+               sec_jr1 = &sec_jr1;
+               sec_jr2 = &sec_jr2;
+               sec_jr3 = &sec_jr3;
+               rtic_a = &rtic_a;
+               rtic_b = &rtic_b;
+               rtic_c = &rtic_c;
+               rtic_d = &rtic_d;
+               sec_mon = &sec_mon;
+
                rio0 = &rapidio0;
        };
 
                        dr_mode = "host";
                        phy_type = "ulpi";
                };
+
+               crypto: crypto@300000 {
+                       compatible = "fsl,sec-v4.0";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       reg = <0x300000 0x10000>;
+                       ranges = <0 0x300000 0x10000>;
+                       interrupt-parent = <&mpic>;
+                       interrupts = <92 2>;
+
+                       sec_jr0: jr@1000 {
+                               compatible = "fsl,sec-v4.0-job-ring";
+                               reg = <0x1000 0x1000>;
+                               interrupt-parent = <&mpic>;
+                               interrupts = <88 2>;
+                       };
+
+                       sec_jr1: jr@2000 {
+                               compatible = "fsl,sec-v4.0-job-ring";
+                               reg = <0x2000 0x1000>;
+                               interrupt-parent = <&mpic>;
+                               interrupts = <89 2>;
+                       };
+
+                       sec_jr2: jr@3000 {
+                               compatible = "fsl,sec-v4.0-job-ring";
+                               reg = <0x3000 0x1000>;
+                               interrupt-parent = <&mpic>;
+                               interrupts = <90 2>;
+                       };
+
+                       sec_jr3: jr@4000 {
+                               compatible = "fsl,sec-v4.0-job-ring";
+                               reg = <0x4000 0x1000>;
+                               interrupt-parent = <&mpic>;
+                               interrupts = <91 2>;
+                       };
+
+                       rtic@6000 {
+                               compatible = "fsl,sec-v4.0-rtic";
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               reg = <0x6000 0x100>;
+                               ranges = <0x0 0x6100 0xe00>;
+
+                               rtic_a: rtic-a@0 {
+                                       compatible = "fsl,sec-v4.0-rtic-memory";
+                                       reg = <0x00 0x20 0x100 0x80>;
+                               };
+
+                               rtic_b: rtic-b@20 {
+                                       compatible = "fsl,sec-v4.0-rtic-memory";
+                                       reg = <0x20 0x20 0x200 0x80>;
+                               };
+
+                               rtic_c: rtic-c@40 {
+                                       compatible = "fsl,sec-v4.0-rtic-memory";
+                                       reg = <0x40 0x20 0x300 0x80>;
+                               };
+
+                               rtic_d: rtic-d@60 {
+                                       compatible = "fsl,sec-v4.0-rtic-memory";
+                                       reg = <0x60 0x20 0x500 0x80>;
+                               };
+                       };
+               };
+
+               sec_mon: sec_mon@314000 {
+                       compatible = "fsl,sec-v4.0-mon";
+                       reg = <0x314000 0x1000>;
+                       interrupt-parent = <&mpic>;
+                       interrupts = <93 2>;
+               };
        };
 
        rapidio0: rapidio@ffe0c0000 {
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 402560e..998a100 100644
@@ -700,7 +700,7 @@ _GLOBAL(start_secondary_resume)
        rlwinm  r1,r1,0,0,(31-THREAD_SHIFT)     /* current_thread_info() */
        addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
        li      r3,0
-       std     r3,0(r1)                /* Zero the stack frame pointer */
+       stw     r3,0(r1)                /* Zero the stack frame pointer */
        bl      start_secondary
        b       .
 #endif /* CONFIG_SMP */
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index 1cf81d7..7f0b7cd 100644
@@ -8,3 +8,4 @@ obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
 obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
 obj-$(CONFIG_S390_PRNG) += prng.o
+obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 58f4673..a9ce135 100644
@@ -31,7 +31,8 @@
 #define AES_KEYLEN_192         2
 #define AES_KEYLEN_256         4
 
-static char keylen_flag = 0;
+static u8 *ctrblk;
+static char keylen_flag;
 
 struct s390_aes_ctx {
        u8 iv[AES_BLOCK_SIZE];
@@ -45,6 +46,24 @@ struct s390_aes_ctx {
        } fallback;
 };
 
+/* parameter block for the PCC instruction (XTS parameter computation) */
+struct pcc_param {
+       u8 key[32];
+       u8 tweak[16];
+       u8 block[16];
+       u8 bit[16];
+       u8 xts[16];
+};
+
+struct s390_xts_ctx {
+       u8 key[32];
+       u8 xts_param[16];
+       struct pcc_param pcc;
+       long enc;
+       long dec;
+       int key_len;
+       struct crypto_blkcipher *fallback;
+};
+
 /*
  * Check if the key_len is supported by the HW.
  * Returns 0 if it is, a positive number if it is not and software fallback is
@@ -504,15 +523,337 @@ static struct crypto_alg cbc_aes_alg = {
        }
 };
 
+static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
+                                  unsigned int len)
+{
+       struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+       unsigned int ret;
+
+       xts_ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+       xts_ctx->fallback->base.crt_flags |= (tfm->crt_flags &
+                       CRYPTO_TFM_REQ_MASK);
+
+       ret = crypto_blkcipher_setkey(xts_ctx->fallback, key, len);
+       if (ret) {
+               tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+               tfm->crt_flags |= (xts_ctx->fallback->base.crt_flags &
+                               CRYPTO_TFM_RES_MASK);
+       }
+       return ret;
+}
+
+static int xts_fallback_decrypt(struct blkcipher_desc *desc,
+               struct scatterlist *dst, struct scatterlist *src,
+               unsigned int nbytes)
+{
+       struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct crypto_blkcipher *tfm;
+       unsigned int ret;
+
+       tfm = desc->tfm;
+       desc->tfm = xts_ctx->fallback;
+
+       ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+
+       desc->tfm = tfm;
+       return ret;
+}
+
+static int xts_fallback_encrypt(struct blkcipher_desc *desc,
+               struct scatterlist *dst, struct scatterlist *src,
+               unsigned int nbytes)
+{
+       struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct crypto_blkcipher *tfm;
+       unsigned int ret;
+
+       tfm = desc->tfm;
+       desc->tfm = xts_ctx->fallback;
+
+       ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
+
+       desc->tfm = tfm;
+       return ret;
+}
+
+static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+                          unsigned int key_len)
+{
+       struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+       u32 *flags = &tfm->crt_flags;
+
+       switch (key_len) {
+       case 32:
+               xts_ctx->enc = KM_XTS_128_ENCRYPT;
+               xts_ctx->dec = KM_XTS_128_DECRYPT;
+               memcpy(xts_ctx->key + 16, in_key, 16);
+               memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
+               break;
+       case 48:
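+               /* CPACF has no KM function for XTS with 192-bit keys
+                * (see crypt_s390_km_func in crypt_s390.h), so these
+                * requests go to the software fallback
+                */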
+               xts_ctx->enc = 0;
+               xts_ctx->dec = 0;
+               xts_fallback_setkey(tfm, in_key, key_len);
+               break;
+       case 64:
+               xts_ctx->enc = KM_XTS_256_ENCRYPT;
+               xts_ctx->dec = KM_XTS_256_DECRYPT;
+               memcpy(xts_ctx->key, in_key, 32);
+               memcpy(xts_ctx->pcc.key, in_key + 32, 32);
+               break;
+       default:
+               *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+               return -EINVAL;
+       }
+       xts_ctx->key_len = key_len;
+       return 0;
+}
+
+static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
+                        struct s390_xts_ctx *xts_ctx,
+                        struct blkcipher_walk *walk)
+{
+       unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
+       int ret = blkcipher_walk_virt(desc, walk);
+       unsigned int nbytes = walk->nbytes;
+       unsigned int n;
+       u8 *in, *out;
+       void *param;
+
+       if (!nbytes)
+               goto out;
+
+       memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
+       memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
+       memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
+       memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
+       param = xts_ctx->pcc.key + offset;
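+       /* PCC computes the initial XTS tweak value into pcc.xts */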
+       ret = crypt_s390_pcc(func, param);
+       BUG_ON(ret < 0);
+
+       memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
+       param = xts_ctx->key + offset;
+       do {
+               /* only use complete blocks */
+               n = nbytes & ~(AES_BLOCK_SIZE - 1);
+               out = walk->dst.virt.addr;
+               in = walk->src.virt.addr;
+
+               ret = crypt_s390_km(func, param, out, in, n);
+               BUG_ON(ret < 0 || ret != n);
+
+               nbytes &= AES_BLOCK_SIZE - 1;
+               ret = blkcipher_walk_done(desc, walk, nbytes);
+       } while ((nbytes = walk->nbytes));
+out:
+       return ret;
+}
+
+static int xts_aes_encrypt(struct blkcipher_desc *desc,
+                          struct scatterlist *dst, struct scatterlist *src,
+                          unsigned int nbytes)
+{
+       struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+
+       if (unlikely(xts_ctx->key_len == 48))
+               return xts_fallback_encrypt(desc, dst, src, nbytes);
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk);
+}
+
+static int xts_aes_decrypt(struct blkcipher_desc *desc,
+                          struct scatterlist *dst, struct scatterlist *src,
+                          unsigned int nbytes)
+{
+       struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+
+       if (unlikely(xts_ctx->key_len == 48))
+               return xts_fallback_decrypt(desc, dst, src, nbytes);
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk);
+}
+
+static int xts_fallback_init(struct crypto_tfm *tfm)
+{
+       const char *name = tfm->__crt_alg->cra_name;
+       struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+
+       xts_ctx->fallback = crypto_alloc_blkcipher(name, 0,
+                       CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+       if (IS_ERR(xts_ctx->fallback)) {
+               pr_err("Allocating XTS fallback algorithm %s failed\n",
+                      name);
+               return PTR_ERR(xts_ctx->fallback);
+       }
+       return 0;
+}
+
+static void xts_fallback_exit(struct crypto_tfm *tfm)
+{
+       struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+
+       crypto_free_blkcipher(xts_ctx->fallback);
+       xts_ctx->fallback = NULL;
+}
+
+static struct crypto_alg xts_aes_alg = {
+       .cra_name               =       "xts(aes)",
+       .cra_driver_name        =       "xts-aes-s390",
+       .cra_priority           =       CRYPT_S390_COMPOSITE_PRIORITY,
+       .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER |
+                                       CRYPTO_ALG_NEED_FALLBACK,
+       .cra_blocksize          =       AES_BLOCK_SIZE,
+       .cra_ctxsize            =       sizeof(struct s390_xts_ctx),
+       .cra_type               =       &crypto_blkcipher_type,
+       .cra_module             =       THIS_MODULE,
+       .cra_list               =       LIST_HEAD_INIT(xts_aes_alg.cra_list),
+       .cra_init               =       xts_fallback_init,
+       .cra_exit               =       xts_fallback_exit,
+       .cra_u                  =       {
+               .blkcipher = {
+                       .min_keysize            =       2 * AES_MIN_KEY_SIZE,
+                       .max_keysize            =       2 * AES_MAX_KEY_SIZE,
+                       .ivsize                 =       AES_BLOCK_SIZE,
+                       .setkey                 =       xts_aes_set_key,
+                       .encrypt                =       xts_aes_encrypt,
+                       .decrypt                =       xts_aes_decrypt,
+               }
+       }
+};
+
+static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+                          unsigned int key_len)
+{
+       struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+       switch (key_len) {
+       case 16:
+               sctx->enc = KMCTR_AES_128_ENCRYPT;
+               sctx->dec = KMCTR_AES_128_DECRYPT;
+               break;
+       case 24:
+               sctx->enc = KMCTR_AES_192_ENCRYPT;
+               sctx->dec = KMCTR_AES_192_DECRYPT;
+               break;
+       case 32:
+               sctx->enc = KMCTR_AES_256_ENCRYPT;
+               sctx->dec = KMCTR_AES_256_DECRYPT;
+               break;
+       }
+
+       return aes_set_key(tfm, in_key, key_len);
+}
+
+static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
+                        struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
+{
+       int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
+       unsigned int i, n, nbytes;
+       u8 buf[AES_BLOCK_SIZE];
+       u8 *out, *in;
+
+       if (!walk->nbytes)
+               return ret;
+
+       memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
+       while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+               out = walk->dst.virt.addr;
+               in = walk->src.virt.addr;
+               while (nbytes >= AES_BLOCK_SIZE) {
+                       /* only use complete blocks, max. PAGE_SIZE */
+                       n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
+                                                nbytes & ~(AES_BLOCK_SIZE - 1);
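+                       /* replicate the counter across ctrblk, incrementing
+                        * once per block, so KMCTR can process n bytes at once
+                        */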
+                       for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+                               memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
+                                      AES_BLOCK_SIZE);
+                               crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
+                       }
+                       ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
+                       BUG_ON(ret < 0 || ret != n);
+                       if (n > AES_BLOCK_SIZE)
+                               memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
+                                      AES_BLOCK_SIZE);
+                       crypto_inc(ctrblk, AES_BLOCK_SIZE);
+                       out += n;
+                       in += n;
+                       nbytes -= n;
+               }
+               ret = blkcipher_walk_done(desc, walk, nbytes);
+       }
+       /*
+        * final block may be < AES_BLOCK_SIZE, copy only nbytes
+        */
+       if (nbytes) {
+               out = walk->dst.virt.addr;
+               in = walk->src.virt.addr;
+               ret = crypt_s390_kmctr(func, sctx->key, buf, in,
+                                      AES_BLOCK_SIZE, ctrblk);
+               BUG_ON(ret < 0 || ret != AES_BLOCK_SIZE);
+               memcpy(out, buf, nbytes);
+               crypto_inc(ctrblk, AES_BLOCK_SIZE);
+               ret = blkcipher_walk_done(desc, walk, 0);
+       }
+       memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
+       return ret;
+}
+
+static int ctr_aes_encrypt(struct blkcipher_desc *desc,
+                          struct scatterlist *dst, struct scatterlist *src,
+                          unsigned int nbytes)
+{
+       struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       return ctr_aes_crypt(desc, sctx->enc, sctx, &walk);
+}
+
+static int ctr_aes_decrypt(struct blkcipher_desc *desc,
+                          struct scatterlist *dst, struct scatterlist *src,
+                          unsigned int nbytes)
+{
+       struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       return ctr_aes_crypt(desc, sctx->dec, sctx, &walk);
+}
+
+static struct crypto_alg ctr_aes_alg = {
+       .cra_name               =       "ctr(aes)",
+       .cra_driver_name        =       "ctr-aes-s390",
+       .cra_priority           =       CRYPT_S390_COMPOSITE_PRIORITY,
+       .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          =       1,
+       .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
+       .cra_type               =       &crypto_blkcipher_type,
+       .cra_module             =       THIS_MODULE,
+       .cra_list               =       LIST_HEAD_INIT(ctr_aes_alg.cra_list),
+       .cra_u                  =       {
+               .blkcipher = {
+                       .min_keysize            =       AES_MIN_KEY_SIZE,
+                       .max_keysize            =       AES_MAX_KEY_SIZE,
+                       .ivsize                 =       AES_BLOCK_SIZE,
+                       .setkey                 =       ctr_aes_set_key,
+                       .encrypt                =       ctr_aes_encrypt,
+                       .decrypt                =       ctr_aes_decrypt,
+               }
+       }
+};
+
 static int __init aes_s390_init(void)
 {
        int ret;
 
-       if (crypt_s390_func_available(KM_AES_128_ENCRYPT))
+       if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
                keylen_flag |= AES_KEYLEN_128;
-       if (crypt_s390_func_available(KM_AES_192_ENCRYPT))
+       if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
                keylen_flag |= AES_KEYLEN_192;
-       if (crypt_s390_func_available(KM_AES_256_ENCRYPT))
+       if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
                keylen_flag |= AES_KEYLEN_256;
 
        if (!keylen_flag)
@@ -535,9 +876,40 @@ static int __init aes_s390_init(void)
        if (ret)
                goto cbc_aes_err;
 
+       if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
+                       CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+           crypt_s390_func_available(KM_XTS_256_ENCRYPT,
+                       CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+               ret = crypto_register_alg(&xts_aes_alg);
+               if (ret)
+                       goto xts_aes_err;
+       }
+
+       if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
+                               CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+           crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
+                               CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+           crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
+                               CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+               ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
+               if (!ctrblk) {
+                       ret = -ENOMEM;
+                       goto ctr_aes_err;
+               }
+               ret = crypto_register_alg(&ctr_aes_alg);
+               if (ret) {
+                       free_page((unsigned long) ctrblk);
+                       goto ctr_aes_err;
+               }
+       }
+
 out:
        return ret;
 
+ctr_aes_err:
+       crypto_unregister_alg(&xts_aes_alg);
+xts_aes_err:
+       crypto_unregister_alg(&cbc_aes_alg);
 cbc_aes_err:
        crypto_unregister_alg(&ecb_aes_alg);
 ecb_aes_err:
@@ -548,6 +920,9 @@ aes_err:
 
 static void __exit aes_s390_fini(void)
 {
+       crypto_unregister_alg(&ctr_aes_alg);
+       free_page((unsigned long) ctrblk);
+       crypto_unregister_alg(&xts_aes_alg);
        crypto_unregister_alg(&cbc_aes_alg);
        crypto_unregister_alg(&ecb_aes_alg);
        crypto_unregister_alg(&aes_alg);
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 7ee9a1b..4967677 100644
 #define CRYPT_S390_PRIORITY 300
 #define CRYPT_S390_COMPOSITE_PRIORITY 400
 
+#define CRYPT_S390_MSA 0x1
+#define CRYPT_S390_MSA3        0x2
+#define CRYPT_S390_MSA4        0x4
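+/* facility bits a function requires; tested in crypt_s390_func_available() */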
+
 /* s390 cryptographic operations */
 enum crypt_s390_operations {
        CRYPT_S390_KM   = 0x0100,
        CRYPT_S390_KMC  = 0x0200,
        CRYPT_S390_KIMD = 0x0300,
        CRYPT_S390_KLMD = 0x0400,
-       CRYPT_S390_KMAC = 0x0500
+       CRYPT_S390_KMAC = 0x0500,
+       CRYPT_S390_KMCTR = 0x0600
 };
 
 /*
@@ -51,6 +56,10 @@ enum crypt_s390_km_func {
        KM_AES_192_DECRYPT  = CRYPT_S390_KM | 0x13 | 0x80,
        KM_AES_256_ENCRYPT  = CRYPT_S390_KM | 0x14,
        KM_AES_256_DECRYPT  = CRYPT_S390_KM | 0x14 | 0x80,
+       KM_XTS_128_ENCRYPT  = CRYPT_S390_KM | 0x32,
+       KM_XTS_128_DECRYPT  = CRYPT_S390_KM | 0x32 | 0x80,
+       KM_XTS_256_ENCRYPT  = CRYPT_S390_KM | 0x34,
+       KM_XTS_256_DECRYPT  = CRYPT_S390_KM | 0x34 | 0x80,
 };
 
 /*
@@ -74,6 +83,26 @@ enum crypt_s390_kmc_func {
        KMC_PRNG             = CRYPT_S390_KMC | 0x43,
 };
 
+/*
+ * function codes for KMCTR (CIPHER MESSAGE WITH COUNTER)
+ * instruction
+ */
+enum crypt_s390_kmctr_func {
+       KMCTR_QUERY            = CRYPT_S390_KMCTR | 0x0,
+       KMCTR_DEA_ENCRYPT      = CRYPT_S390_KMCTR | 0x1,
+       KMCTR_DEA_DECRYPT      = CRYPT_S390_KMCTR | 0x1 | 0x80,
+       KMCTR_TDEA_128_ENCRYPT = CRYPT_S390_KMCTR | 0x2,
+       KMCTR_TDEA_128_DECRYPT = CRYPT_S390_KMCTR | 0x2 | 0x80,
+       KMCTR_TDEA_192_ENCRYPT = CRYPT_S390_KMCTR | 0x3,
+       KMCTR_TDEA_192_DECRYPT = CRYPT_S390_KMCTR | 0x3 | 0x80,
+       KMCTR_AES_128_ENCRYPT  = CRYPT_S390_KMCTR | 0x12,
+       KMCTR_AES_128_DECRYPT  = CRYPT_S390_KMCTR | 0x12 | 0x80,
+       KMCTR_AES_192_ENCRYPT  = CRYPT_S390_KMCTR | 0x13,
+       KMCTR_AES_192_DECRYPT  = CRYPT_S390_KMCTR | 0x13 | 0x80,
+       KMCTR_AES_256_ENCRYPT  = CRYPT_S390_KMCTR | 0x14,
+       KMCTR_AES_256_DECRYPT  = CRYPT_S390_KMCTR | 0x14 | 0x80,
+};
+
 /*
  * function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
  * instruction
@@ -83,6 +112,7 @@ enum crypt_s390_kimd_func {
        KIMD_SHA_1   = CRYPT_S390_KIMD | 1,
        KIMD_SHA_256 = CRYPT_S390_KIMD | 2,
        KIMD_SHA_512 = CRYPT_S390_KIMD | 3,
+       KIMD_GHASH   = CRYPT_S390_KIMD | 65,
 };
 
 /*
@@ -283,6 +313,45 @@ static inline int crypt_s390_kmac(long func, void *param,
        return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
 }
 
+/**
+ * crypt_s390_kmctr:
+ * @func: the function code passed to KMCTR; see crypt_s390_kmctr_func
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ * @counter: address of counter value
+ *
+ * Executes the KMCTR (CIPHER MESSAGE WITH COUNTER) operation of the CPU.
+ *
+ * Returns -1 for failure, 0 for the query func, number of processed
+ * bytes for encryption/decryption funcs
+ */
+static inline int crypt_s390_kmctr(long func, void *param, u8 *dest,
+                                const u8 *src, long src_len, u8 *counter)
+{
+       register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
+       register void *__param asm("1") = param;
+       register const u8 *__src asm("2") = src;
+       register long __src_len asm("3") = src_len;
+       register u8 *__dest asm("4") = dest;
+       register u8 *__ctr asm("6") = counter;
+       int ret = -1;
+
+       asm volatile(
+               "0:     .insn   rrf,0xb92d0000,%3,%1,%4,0 \n" /* KMCTR opcode */
+               "1:     brc     1,0b \n" /* handle partial completion */
+               "       la      %0,0\n"
+               "2:\n"
+               EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+               : "+d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest),
+                 "+a" (__ctr)
+               : "d" (__func), "a" (__param) : "cc", "memory");
+       if (ret < 0)
+               return ret;
+       return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
+}
+
 /**
  * crypt_s390_func_available:
  * @func: the function code of the specific function; 0 if op in general
@@ -291,13 +360,17 @@ static inline int crypt_s390_kmac(long func, void *param,
  *
  * Returns 1 if func available; 0 if func or op in general not available
  */
-static inline int crypt_s390_func_available(int func)
+static inline int crypt_s390_func_available(int func,
+                                           unsigned int facility_mask)
 {
        unsigned char status[16];
        int ret;
 
-       /* check if CPACF facility (bit 17) is available */
-       if (!test_facility(17))
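+       /* check that the required MSA facility bits are installed */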
+       if (facility_mask & CRYPT_S390_MSA && !test_facility(17))
+               return 0;
+       if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76))
+               return 0;
+       if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
                return 0;
 
        switch (func & CRYPT_S390_OP_MASK) {
@@ -316,6 +389,10 @@ static inline int crypt_s390_func_available(int func)
        case CRYPT_S390_KMAC:
                ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
                break;
+       case CRYPT_S390_KMCTR:
+               ret = crypt_s390_kmctr(KMCTR_QUERY, &status, NULL, NULL, 0,
+                                      NULL);
+               break;
        default:
                return 0;
        }
@@ -326,4 +403,31 @@ static inline int crypt_s390_func_available(int func)
        return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
 }
 
+/**
+ * crypt_s390_pcc:
+ * @func: the function code passed to KM; see crypt_s390_km_func
+ * @param: address of parameter block; see POP for details on each func
+ *
+ * Executes the PCC (PERFORM CRYPTOGRAPHIC COMPUTATION) operation of the CPU.
+ *
+ * Returns -1 for failure, 0 for success.
+ */
+static inline int crypt_s390_pcc(long func, void *param)
+{
+       register long __func asm("0") = func & 0x7f; /* encrypt or decrypt */
+       register void *__param asm("1") = param;
+       int ret = -1;
+
+       asm volatile(
+               "0:     .insn   rre,0xb92c0000,0,0 \n" /* PCC opcode */
+               "1:     brc     1,0b \n" /* handle partial completion */
+               "       la      %0,0\n"
+               "2:\n"
+               EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+               : "+d" (ret)
+               : "d" (__func), "a" (__param) : "cc", "memory");
+       return ret;
+}
+
+
 #endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */
diff --git a/arch/s390/crypto/des_check_key.c b/arch/s390/crypto/des_check_key.c
deleted file mode 100644
index 5706af2..0000000
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Function for checking keys for the DES and Tripple DES Encryption
- * algorithms.
- *
- * Originally released as descore by Dana L. How <how@isl.stanford.edu>.
- * Modified by Raimar Falke <rf13@inf.tu-dresden.de> for the Linux-Kernel.
- * Derived from Cryptoapi and Nettle implementations, adapted for in-place
- * scatterlist interface.  Changed LGPL to GPL per section 3 of the LGPL.
- *
- * s390 Version:
- *   Copyright IBM Corp. 2003
- *   Author(s): Thomas Spatzier
- *             Jan Glauber (jan.glauber@de.ibm.com)
- *
- * Derived from "crypto/des.c"
- *   Copyright (c) 1992 Dana L. How.
- *   Copyright (c) Raimar Falke <rf13@inf.tu-dresden.de>
- *   Copyright (c) Gisle Sflensminde <gisle@ii.uib.no>
- *   Copyright (C) 2001 Niels Mvller.
- *   Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/crypto.h>
-#include "crypto_des.h"
-
-#define ROR(d,c,o)     ((d) = (d) >> (c) | (d) << (o))
-
-static const u8 parity[] = {
-       8,1,0,8,0,8,8,0,0,8,8,0,8,0,2,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,3,
-       0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,
-       0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,
-       8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,
-       0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,
-       8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,
-       8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,
-       4,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,5,0,8,0,8,8,0,0,8,8,0,8,0,6,8,
-};
-
-/*
- * RFC2451: Weak key checks SHOULD be performed.
- */
-int
-crypto_des_check_key(const u8 *key, unsigned int keylen, u32 *flags)
-{
-       u32 n, w;
-
-       n  = parity[key[0]]; n <<= 4;
-       n |= parity[key[1]]; n <<= 4;
-       n |= parity[key[2]]; n <<= 4;
-       n |= parity[key[3]]; n <<= 4;
-       n |= parity[key[4]]; n <<= 4;
-       n |= parity[key[5]]; n <<= 4;
-       n |= parity[key[6]]; n <<= 4;
-       n |= parity[key[7]];
-       w = 0x88888888L;
-
-       if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY)
-           && !((n - (w >> 3)) & w)) {  /* 1 in 10^10 keys passes this test */
-               if (n < 0x41415151) {
-                       if (n < 0x31312121) {
-                               if (n < 0x14141515) {
-                                       /* 01 01 01 01 01 01 01 01 */
-                                       if (n == 0x11111111) goto weak;
-                                       /* 01 1F 01 1F 01 0E 01 0E */
-                                       if (n == 0x13131212) goto weak;
-                               } else {
-                                       /* 01 E0 01 E0 01 F1 01 F1 */
-                                       if (n == 0x14141515) goto weak;
-                                       /* 01 FE 01 FE 01 FE 01 FE */
-                                       if (n == 0x16161616) goto weak;
-                               }
-                       } else {
-                               if (n < 0x34342525) {
-                                       /* 1F 01 1F 01 0E 01 0E 01 */
-                                       if (n == 0x31312121) goto weak;
-                                       /* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
-                                       if (n == 0x33332222) goto weak;
-                               } else {
-                                       /* 1F E0 1F E0 0E F1 0E F1 */
-                                       if (n == 0x34342525) goto weak;
-                                       /* 1F FE 1F FE 0E FE 0E FE */
-                                       if (n == 0x36362626) goto weak;
-                               }
-                       }
-               } else {
-                       if (n < 0x61616161) {
-                               if (n < 0x44445555) {
-                                       /* E0 01 E0 01 F1 01 F1 01 */
-                                       if (n == 0x41415151) goto weak;
-                                       /* E0 1F E0 1F F1 0E F1 0E */
-                                       if (n == 0x43435252) goto weak;
-                               } else {
-                                       /* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
-                                       if (n == 0x44445555) goto weak;
-                                       /* E0 FE E0 FE F1 FE F1 FE */
-                                       if (n == 0x46465656) goto weak;
-                               }
-                       } else {
-                               if (n < 0x64646565) {
-                                       /* FE 01 FE 01 FE 01 FE 01 */
-                                       if (n == 0x61616161) goto weak;
-                                       /* FE 1F FE 1F FE 0E FE 0E */
-                                       if (n == 0x63636262) goto weak;
-                               } else {
-                                       /* FE E0 FE E0 FE F1 FE F1 */
-                                       if (n == 0x64646565) goto weak;
-                                       /* FE FE FE FE FE FE FE FE */
-                                       if (n == 0x66666666) goto weak;
-                               }
-                       }
-               }
-       }
-       return 0;
-weak:
-       *flags |= CRYPTO_TFM_RES_WEAK_KEY;
-       return -EINVAL;
-}
-
-EXPORT_SYMBOL(crypto_des_check_key);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Key Check function for DES &  DES3 Cipher Algorithms");
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index cc54201..a52bfd1 100644
@@ -3,7 +3,7 @@
  *
  * s390 implementation of the DES Cipher Algorithm.
  *
- * Copyright IBM Corp. 2003,2007
+ * Copyright IBM Corp. 2003,2011
  * Author(s): Thomas Spatzier
  *           Jan Glauber (jan.glauber@de.ibm.com)
  *
 
 #include "crypt_s390.h"
 
-#define DES3_192_KEY_SIZE      (3 * DES_KEY_SIZE)
+#define DES3_KEY_SIZE  (3 * DES_KEY_SIZE)
 
-struct crypt_s390_des_ctx {
-       u8 iv[DES_BLOCK_SIZE];
-       u8 key[DES_KEY_SIZE];
-};
+static u8 *ctrblk;
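+/* counter block buffer for CTR mode; cf. the analogous ctrblk in aes_s390.c */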
 
-struct crypt_s390_des3_192_ctx {
+struct s390_des_ctx {
        u8 iv[DES_BLOCK_SIZE];
-       u8 key[DES3_192_KEY_SIZE];
+       u8 key[DES3_KEY_SIZE];
 };
 
 static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
-                     unsigned int keylen)
+                     unsigned int key_len)
 {
-       struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
+       struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
        u32 *flags = &tfm->crt_flags;
        u32 tmp[DES_EXPKEY_WORDS];
 
@@ -47,22 +44,22 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
                return -EINVAL;
        }
 
-       memcpy(dctx->key, key, keylen);
+       memcpy(ctx->key, key, key_len);
        return 0;
 }
 
 static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-       struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
+       struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       crypt_s390_km(KM_DEA_ENCRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
+       crypt_s390_km(KM_DEA_ENCRYPT, ctx->key, out, in, DES_BLOCK_SIZE);
 }
 
 static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-       struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
+       struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       crypt_s390_km(KM_DEA_DECRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
+       crypt_s390_km(KM_DEA_DECRYPT, ctx->key, out, in, DES_BLOCK_SIZE);
 }
 
 static struct crypto_alg des_alg = {
@@ -71,7 +68,7 @@ static struct crypto_alg des_alg = {
        .cra_priority           =       CRYPT_S390_PRIORITY,
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          =       DES_BLOCK_SIZE,
-       .cra_ctxsize            =       sizeof(struct crypt_s390_des_ctx),
+       .cra_ctxsize            =       sizeof(struct s390_des_ctx),
        .cra_module             =       THIS_MODULE,
        .cra_list               =       LIST_HEAD_INIT(des_alg.cra_list),
        .cra_u                  =       {
@@ -86,7 +83,7 @@ static struct crypto_alg des_alg = {
 };
 
 static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
-                           void *param, struct blkcipher_walk *walk)
+                           u8 *key, struct blkcipher_walk *walk)
 {
        int ret = blkcipher_walk_virt(desc, walk);
        unsigned int nbytes;
@@ -97,7 +94,7 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
                u8 *out = walk->dst.virt.addr;
                u8 *in = walk->src.virt.addr;
 
-               ret = crypt_s390_km(func, param, out, in, n);
+               ret = crypt_s390_km(func, key, out, in, n);
                BUG_ON((ret < 0) || (ret != n));
 
                nbytes &= DES_BLOCK_SIZE - 1;
@@ -108,7 +105,7 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
 }
 
 static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
-                           void *param, struct blkcipher_walk *walk)
+                           u8 *iv, struct blkcipher_walk *walk)
 {
        int ret = blkcipher_walk_virt(desc, walk);
        unsigned int nbytes = walk->nbytes;
@@ -116,20 +113,20 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
        if (!nbytes)
                goto out;
 
-       memcpy(param, walk->iv, DES_BLOCK_SIZE);
+       memcpy(iv, walk->iv, DES_BLOCK_SIZE);
        do {
                /* only use complete blocks */
                unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
                u8 *out = walk->dst.virt.addr;
                u8 *in = walk->src.virt.addr;
 
-               ret = crypt_s390_kmc(func, param, out, in, n);
+               ret = crypt_s390_kmc(func, iv, out, in, n);
                BUG_ON((ret < 0) || (ret != n));
 
                nbytes &= DES_BLOCK_SIZE - 1;
                ret = blkcipher_walk_done(desc, walk, nbytes);
        } while ((nbytes = walk->nbytes));
-       memcpy(walk->iv, param, DES_BLOCK_SIZE);
+       memcpy(walk->iv, iv, DES_BLOCK_SIZE);
 
 out:
        return ret;
@@ -139,22 +136,22 @@ static int ecb_des_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
 {
-       struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
-       return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, sctx->key, &walk);
+       return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, ctx->key, &walk);
 }
 
 static int ecb_des_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
 {
-       struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
-       return ecb_desall_crypt(desc, KM_DEA_DECRYPT, sctx->key, &walk);
+       return ecb_desall_crypt(desc, KM_DEA_DECRYPT, ctx->key, &walk);
 }
 
 static struct crypto_alg ecb_des_alg = {
@@ -163,7 +160,7 @@ static struct crypto_alg ecb_des_alg = {
        .cra_priority           =       CRYPT_S390_COMPOSITE_PRIORITY,
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          =       DES_BLOCK_SIZE,
-       .cra_ctxsize            =       sizeof(struct crypt_s390_des_ctx),
+       .cra_ctxsize            =       sizeof(struct s390_des_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_list               =       LIST_HEAD_INIT(ecb_des_alg.cra_list),
@@ -182,22 +179,22 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
 {
-       struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
-       return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, sctx->iv, &walk);
+       return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
 }
 
 static int cbc_des_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
 {
-       struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
-       return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, sctx->iv, &walk);
+       return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
 }
 
 static struct crypto_alg cbc_des_alg = {
@@ -206,7 +203,7 @@ static struct crypto_alg cbc_des_alg = {
        .cra_priority           =       CRYPT_S390_COMPOSITE_PRIORITY,
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          =       DES_BLOCK_SIZE,
-       .cra_ctxsize            =       sizeof(struct crypt_s390_des_ctx),
+       .cra_ctxsize            =       sizeof(struct s390_des_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_list               =       LIST_HEAD_INIT(cbc_des_alg.cra_list),
@@ -235,10 +232,10 @@ static struct crypto_alg cbc_des_alg = {
  *   property.
  *
  */
-static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
-                          unsigned int keylen)
+static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
+                      unsigned int key_len)
 {
-       struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
+       struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
        u32 *flags = &tfm->crt_flags;
 
        if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
@@ -248,141 +245,276 @@ static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
                *flags |= CRYPTO_TFM_RES_WEAK_KEY;
                return -EINVAL;
        }
-       memcpy(dctx->key, key, keylen);
+       memcpy(ctx->key, key, key_len);
        return 0;
 }
 
-static void des3_192_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
-       struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
+       struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       crypt_s390_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src,
-                     DES_BLOCK_SIZE);
+       crypt_s390_km(KM_TDEA_192_ENCRYPT, ctx->key, dst, src, DES_BLOCK_SIZE);
 }
 
-static void des3_192_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+static void des3_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
-       struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
+       struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       crypt_s390_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src,
-                     DES_BLOCK_SIZE);
+       crypt_s390_km(KM_TDEA_192_DECRYPT, ctx->key, dst, src, DES_BLOCK_SIZE);
 }
 
-static struct crypto_alg des3_192_alg = {
+static struct crypto_alg des3_alg = {
        .cra_name               =       "des3_ede",
        .cra_driver_name        =       "des3_ede-s390",
        .cra_priority           =       CRYPT_S390_PRIORITY,
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          =       DES_BLOCK_SIZE,
-       .cra_ctxsize            =       sizeof(struct crypt_s390_des3_192_ctx),
+       .cra_ctxsize            =       sizeof(struct s390_des_ctx),
        .cra_module             =       THIS_MODULE,
-       .cra_list               =       LIST_HEAD_INIT(des3_192_alg.cra_list),
+       .cra_list               =       LIST_HEAD_INIT(des3_alg.cra_list),
        .cra_u                  =       {
                .cipher = {
-                       .cia_min_keysize        =       DES3_192_KEY_SIZE,
-                       .cia_max_keysize        =       DES3_192_KEY_SIZE,
-                       .cia_setkey             =       des3_192_setkey,
-                       .cia_encrypt            =       des3_192_encrypt,
-                       .cia_decrypt            =       des3_192_decrypt,
+                       .cia_min_keysize        =       DES3_KEY_SIZE,
+                       .cia_max_keysize        =       DES3_KEY_SIZE,
+                       .cia_setkey             =       des3_setkey,
+                       .cia_encrypt            =       des3_encrypt,
+                       .cia_decrypt            =       des3_decrypt,
                }
        }
 };
 
-static int ecb_des3_192_encrypt(struct blkcipher_desc *desc,
-                               struct scatterlist *dst,
-                               struct scatterlist *src, unsigned int nbytes)
+static int ecb_des3_encrypt(struct blkcipher_desc *desc,
+                           struct scatterlist *dst, struct scatterlist *src,
+                           unsigned int nbytes)
 {
-       struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
-       return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, sctx->key, &walk);
+       return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, ctx->key, &walk);
 }
 
-static int ecb_des3_192_decrypt(struct blkcipher_desc *desc,
-                               struct scatterlist *dst,
-                               struct scatterlist *src, unsigned int nbytes)
+static int ecb_des3_decrypt(struct blkcipher_desc *desc,
+                           struct scatterlist *dst, struct scatterlist *src,
+                           unsigned int nbytes)
 {
-       struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
-       return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, sctx->key, &walk);
+       return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, ctx->key, &walk);
 }
 
-static struct crypto_alg ecb_des3_192_alg = {
+static struct crypto_alg ecb_des3_alg = {
        .cra_name               =       "ecb(des3_ede)",
        .cra_driver_name        =       "ecb-des3_ede-s390",
        .cra_priority           =       CRYPT_S390_COMPOSITE_PRIORITY,
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          =       DES_BLOCK_SIZE,
-       .cra_ctxsize            =       sizeof(struct crypt_s390_des3_192_ctx),
+       .cra_ctxsize            =       sizeof(struct s390_des_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_list               =       LIST_HEAD_INIT(
-                                               ecb_des3_192_alg.cra_list),
+                                               ecb_des3_alg.cra_list),
        .cra_u                  =       {
                .blkcipher = {
-                       .min_keysize            =       DES3_192_KEY_SIZE,
-                       .max_keysize            =       DES3_192_KEY_SIZE,
-                       .setkey                 =       des3_192_setkey,
-                       .encrypt                =       ecb_des3_192_encrypt,
-                       .decrypt                =       ecb_des3_192_decrypt,
+                       .min_keysize            =       DES3_KEY_SIZE,
+                       .max_keysize            =       DES3_KEY_SIZE,
+                       .setkey                 =       des3_setkey,
+                       .encrypt                =       ecb_des3_encrypt,
+                       .decrypt                =       ecb_des3_decrypt,
                }
        }
 };
 
-static int cbc_des3_192_encrypt(struct blkcipher_desc *desc,
-                               struct scatterlist *dst,
-                               struct scatterlist *src, unsigned int nbytes)
+static int cbc_des3_encrypt(struct blkcipher_desc *desc,
+                           struct scatterlist *dst, struct scatterlist *src,
+                           unsigned int nbytes)
 {
-       struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
-       return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, sctx->iv, &walk);
+       return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
 }
 
-static int cbc_des3_192_decrypt(struct blkcipher_desc *desc,
-                               struct scatterlist *dst,
-                               struct scatterlist *src, unsigned int nbytes)
+static int cbc_des3_decrypt(struct blkcipher_desc *desc,
+                           struct scatterlist *dst, struct scatterlist *src,
+                           unsigned int nbytes)
 {
-       struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
-       return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, sctx->iv, &walk);
+       return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
 }
 
-static struct crypto_alg cbc_des3_192_alg = {
+static struct crypto_alg cbc_des3_alg = {
        .cra_name               =       "cbc(des3_ede)",
        .cra_driver_name        =       "cbc-des3_ede-s390",
        .cra_priority           =       CRYPT_S390_COMPOSITE_PRIORITY,
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          =       DES_BLOCK_SIZE,
-       .cra_ctxsize            =       sizeof(struct crypt_s390_des3_192_ctx),
+       .cra_ctxsize            =       sizeof(struct s390_des_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_list               =       LIST_HEAD_INIT(
-                                               cbc_des3_192_alg.cra_list),
+                                               cbc_des3_alg.cra_list),
        .cra_u                  =       {
                .blkcipher = {
-                       .min_keysize            =       DES3_192_KEY_SIZE,
-                       .max_keysize            =       DES3_192_KEY_SIZE,
+                       .min_keysize            =       DES3_KEY_SIZE,
+                       .max_keysize            =       DES3_KEY_SIZE,
                        .ivsize                 =       DES_BLOCK_SIZE,
-                       .setkey                 =       des3_192_setkey,
-                       .encrypt                =       cbc_des3_192_encrypt,
-                       .decrypt                =       cbc_des3_192_decrypt,
+                       .setkey                 =       des3_setkey,
+                       .encrypt                =       cbc_des3_encrypt,
+                       .decrypt                =       cbc_des3_decrypt,
                }
        }
 };
 
-static int des_s390_init(void)
+static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
+                           struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
+{
+       int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
+       unsigned int i, n, nbytes;
+       u8 buf[DES_BLOCK_SIZE];
+       u8 *out, *in;
+
+       memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
+       while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
+               out = walk->dst.virt.addr;
+               in = walk->src.virt.addr;
+               while (nbytes >= DES_BLOCK_SIZE) {
+                       /* align to block size, max. PAGE_SIZE */
+                       n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
+                               nbytes & ~(DES_BLOCK_SIZE - 1);
+                       for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+                               memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
+                                      DES_BLOCK_SIZE);
+                               crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
+                       }
+                       ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
+                       BUG_ON((ret < 0) || (ret != n));
+                       if (n > DES_BLOCK_SIZE)
+                               memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
+                                      DES_BLOCK_SIZE);
+                       crypto_inc(ctrblk, DES_BLOCK_SIZE);
+                       out += n;
+                       in += n;
+                       nbytes -= n;
+               }
+               ret = blkcipher_walk_done(desc, walk, nbytes);
+       }
+
+       /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
+       if (nbytes) {
+               out = walk->dst.virt.addr;
+               in = walk->src.virt.addr;
+               ret = crypt_s390_kmctr(func, ctx->key, buf, in,
+                                      DES_BLOCK_SIZE, ctrblk);
+               BUG_ON(ret < 0 || ret != DES_BLOCK_SIZE);
+               memcpy(out, buf, nbytes);
+               crypto_inc(ctrblk, DES_BLOCK_SIZE);
+               ret = blkcipher_walk_done(desc, walk, 0);
+       }
+       memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
+       return ret;
+}
+
+static int ctr_des_encrypt(struct blkcipher_desc *desc,
+                          struct scatterlist *dst, struct scatterlist *src,
+                          unsigned int nbytes)
+{
+       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       return ctr_desall_crypt(desc, KMCTR_DEA_ENCRYPT, ctx, &walk);
+}
+
+static int ctr_des_decrypt(struct blkcipher_desc *desc,
+                          struct scatterlist *dst, struct scatterlist *src,
+                          unsigned int nbytes)
+{
+       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       return ctr_desall_crypt(desc, KMCTR_DEA_DECRYPT, ctx, &walk);
+}
+
+static struct crypto_alg ctr_des_alg = {
+       .cra_name               =       "ctr(des)",
+       .cra_driver_name        =       "ctr-des-s390",
+       .cra_priority           =       CRYPT_S390_COMPOSITE_PRIORITY,
+       .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          =       1,
+       .cra_ctxsize            =       sizeof(struct s390_des_ctx),
+       .cra_type               =       &crypto_blkcipher_type,
+       .cra_module             =       THIS_MODULE,
+       .cra_list               =       LIST_HEAD_INIT(ctr_des_alg.cra_list),
+       .cra_u                  =       {
+               .blkcipher = {
+                       .min_keysize            =       DES_KEY_SIZE,
+                       .max_keysize            =       DES_KEY_SIZE,
+                       .ivsize                 =       DES_BLOCK_SIZE,
+                       .setkey                 =       des_setkey,
+                       .encrypt                =       ctr_des_encrypt,
+                       .decrypt                =       ctr_des_decrypt,
+               }
+       }
+};
+
+static int ctr_des3_encrypt(struct blkcipher_desc *desc,
+                           struct scatterlist *dst, struct scatterlist *src,
+                           unsigned int nbytes)
+{
+       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       return ctr_desall_crypt(desc, KMCTR_TDEA_192_ENCRYPT, ctx, &walk);
+}
+
+static int ctr_des3_decrypt(struct blkcipher_desc *desc,
+                           struct scatterlist *dst, struct scatterlist *src,
+                           unsigned int nbytes)
+{
+       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       return ctr_desall_crypt(desc, KMCTR_TDEA_192_DECRYPT, ctx, &walk);
+}
+
+static struct crypto_alg ctr_des3_alg = {
+       .cra_name               =       "ctr(des3_ede)",
+       .cra_driver_name        =       "ctr-des3_ede-s390",
+       .cra_priority           =       CRYPT_S390_COMPOSITE_PRIORITY,
+       .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          =       1,
+       .cra_ctxsize            =       sizeof(struct s390_des_ctx),
+       .cra_type               =       &crypto_blkcipher_type,
+       .cra_module             =       THIS_MODULE,
+       .cra_list               =       LIST_HEAD_INIT(ctr_des3_alg.cra_list),
+       .cra_u                  =       {
+               .blkcipher = {
+                       .min_keysize            =       DES3_KEY_SIZE,
+                       .max_keysize            =       DES3_KEY_SIZE,
+                       .ivsize                 =       DES_BLOCK_SIZE,
+                       .setkey                 =       des3_setkey,
+                       .encrypt                =       ctr_des3_encrypt,
+                       .decrypt                =       ctr_des3_decrypt,
+               }
+       }
+};
+
+static int __init des_s390_init(void)
 {
        int ret;
 
-       if (!crypt_s390_func_available(KM_DEA_ENCRYPT) ||
-           !crypt_s390_func_available(KM_TDEA_192_ENCRYPT))
+       if (!crypt_s390_func_available(KM_DEA_ENCRYPT, CRYPT_S390_MSA) ||
+           !crypt_s390_func_available(KM_TDEA_192_ENCRYPT, CRYPT_S390_MSA))
                return -EOPNOTSUPP;
 
        ret = crypto_register_alg(&des_alg);
@@ -394,23 +526,46 @@ static int des_s390_init(void)
        ret = crypto_register_alg(&cbc_des_alg);
        if (ret)
                goto cbc_des_err;
-       ret = crypto_register_alg(&des3_192_alg);
+       ret = crypto_register_alg(&des3_alg);
        if (ret)
-               goto des3_192_err;
-       ret = crypto_register_alg(&ecb_des3_192_alg);
+               goto des3_err;
+       ret = crypto_register_alg(&ecb_des3_alg);
        if (ret)
-               goto ecb_des3_192_err;
-       ret = crypto_register_alg(&cbc_des3_192_alg);
+               goto ecb_des3_err;
+       ret = crypto_register_alg(&cbc_des3_alg);
        if (ret)
-               goto cbc_des3_192_err;
+               goto cbc_des3_err;
+
+       if (crypt_s390_func_available(KMCTR_DEA_ENCRYPT,
+                       CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+           crypt_s390_func_available(KMCTR_TDEA_192_ENCRYPT,
+                       CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+               ret = crypto_register_alg(&ctr_des_alg);
+               if (ret)
+                       goto ctr_des_err;
+               ret = crypto_register_alg(&ctr_des3_alg);
+               if (ret)
+                       goto ctr_des3_err;
+               ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
+               if (!ctrblk) {
+                       ret = -ENOMEM;
+                       goto ctr_mem_err;
+               }
+       }
 out:
        return ret;
 
-cbc_des3_192_err:
-       crypto_unregister_alg(&ecb_des3_192_alg);
-ecb_des3_192_err:
-       crypto_unregister_alg(&des3_192_alg);
-des3_192_err:
+ctr_mem_err:
+       crypto_unregister_alg(&ctr_des3_alg);
+ctr_des3_err:
+       crypto_unregister_alg(&ctr_des_alg);
+ctr_des_err:
+       crypto_unregister_alg(&cbc_des3_alg);
+cbc_des3_err:
+       crypto_unregister_alg(&ecb_des3_alg);
+ecb_des3_err:
+       crypto_unregister_alg(&des3_alg);
+des3_err:
        crypto_unregister_alg(&cbc_des_alg);
 cbc_des_err:
        crypto_unregister_alg(&ecb_des_alg);
@@ -422,9 +577,14 @@ des_err:
 
 static void __exit des_s390_exit(void)
 {
-       crypto_unregister_alg(&cbc_des3_192_alg);
-       crypto_unregister_alg(&ecb_des3_192_alg);
-       crypto_unregister_alg(&des3_192_alg);
+       if (ctrblk) {
+               crypto_unregister_alg(&ctr_des_alg);
+               crypto_unregister_alg(&ctr_des3_alg);
+               free_page((unsigned long) ctrblk);
+       }
+       crypto_unregister_alg(&cbc_des3_alg);
+       crypto_unregister_alg(&ecb_des3_alg);
+       crypto_unregister_alg(&des3_alg);
        crypto_unregister_alg(&cbc_des_alg);
        crypto_unregister_alg(&ecb_des_alg);
        crypto_unregister_alg(&des_alg);
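
The new ctr_desall_crypt() amortises the CPACF KMCTR call by pre-filling the shared
one-page ctrblk buffer with successive counter values, so up to a page of data is
handled per instruction; crypto_inc() performs the big-endian increment. The
pre-fill step in isolation (a sketch assuming a caller-provided page, mirroring the
loop above):

        #include <crypto/algapi.h>      /* crypto_inc() */
        #include <crypto/des.h>         /* DES_BLOCK_SIZE */
        #include <linux/string.h>

        static void fill_ctr_page(u8 *page, const u8 *iv, unsigned int n)
        {
                unsigned int i;

                /* the first counter block is the current IV */
                memcpy(page, iv, DES_BLOCK_SIZE);
                /* each following block is the previous block plus one */
                for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
                        memcpy(page + i, page + i - DES_BLOCK_SIZE,
                               DES_BLOCK_SIZE);
                        crypto_inc(page + i, DES_BLOCK_SIZE);
                }
        }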
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
new file mode 100644 (file)
index 0000000..b1bd170
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+ * Cryptographic API.
+ *
+ * s390 implementation of the GHASH algorithm for GCM (Galois/Counter Mode).
+ *
+ * Copyright IBM Corp. 2011
+ * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/module.h>
+
+#include "crypt_s390.h"
+
+#define GHASH_BLOCK_SIZE       16
+#define GHASH_DIGEST_SIZE      16
+
+struct ghash_ctx {
+       u8 icv[16];
+       u8 key[16];
+};
+
+struct ghash_desc_ctx {
+       u8 buffer[GHASH_BLOCK_SIZE];
+       u32 bytes;
+};
+
+static int ghash_init(struct shash_desc *desc)
+{
+       struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+       memset(dctx, 0, sizeof(*dctx));
+
+       return 0;
+}
+
+static int ghash_setkey(struct crypto_shash *tfm,
+                       const u8 *key, unsigned int keylen)
+{
+       struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+
+       if (keylen != GHASH_BLOCK_SIZE) {
+               crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+               return -EINVAL;
+       }
+
+       memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
+       memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
+
+       return 0;
+}
+
+static int ghash_update(struct shash_desc *desc,
+                        const u8 *src, unsigned int srclen)
+{
+       struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+       struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+       unsigned int n;
+       u8 *buf = dctx->buffer;
+       int ret;
+
+       if (dctx->bytes) {
+               u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
+
+               n = min(srclen, dctx->bytes);
+               dctx->bytes -= n;
+               srclen -= n;
+
+               memcpy(pos, src, n);
+               src += n;
+
+               if (!dctx->bytes) {
+                       ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
+                                             GHASH_BLOCK_SIZE);
+                       BUG_ON(ret != GHASH_BLOCK_SIZE);
+               }
+       }
+
+       n = srclen & ~(GHASH_BLOCK_SIZE - 1);
+       if (n) {
+               ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
+               BUG_ON(ret != n);
+               src += n;
+               srclen -= n;
+       }
+
+       if (srclen) {
+               dctx->bytes = GHASH_BLOCK_SIZE - srclen;
+               memcpy(buf, src, srclen);
+       }
+
+       return 0;
+}
+
+static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+{
+       u8 *buf = dctx->buffer;
+       int ret;
+
+       if (dctx->bytes) {
+               u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
+
+               memset(pos, 0, dctx->bytes);
+
+               ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
+               BUG_ON(ret != GHASH_BLOCK_SIZE);
+       }
+
+       dctx->bytes = 0;
+}
+
+static int ghash_final(struct shash_desc *desc, u8 *dst)
+{
+       struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+       struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+
+       ghash_flush(ctx, dctx);
+       memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
+
+       return 0;
+}
+
+static struct shash_alg ghash_alg = {
+       .digestsize     = GHASH_DIGEST_SIZE,
+       .init           = ghash_init,
+       .update         = ghash_update,
+       .final          = ghash_final,
+       .setkey         = ghash_setkey,
+       .descsize       = sizeof(struct ghash_desc_ctx),
+       .base           = {
+               .cra_name               = "ghash",
+               .cra_driver_name        = "ghash-s390",
+               .cra_priority           = CRYPT_S390_PRIORITY,
+               .cra_flags              = CRYPTO_ALG_TYPE_SHASH,
+               .cra_blocksize          = GHASH_BLOCK_SIZE,
+               .cra_ctxsize            = sizeof(struct ghash_ctx),
+               .cra_module             = THIS_MODULE,
+               .cra_list               = LIST_HEAD_INIT(ghash_alg.base.cra_list),
+       },
+};
+
+static int __init ghash_mod_init(void)
+{
+       if (!crypt_s390_func_available(KIMD_GHASH,
+                                      CRYPT_S390_MSA | CRYPT_S390_MSA4))
+               return -EOPNOTSUPP;
+
+       return crypto_register_shash(&ghash_alg);
+}
+
+static void __exit ghash_mod_exit(void)
+{
+       crypto_unregister_shash(&ghash_alg);
+}
+
+module_init(ghash_mod_init);
+module_exit(ghash_mod_exit);
+
+MODULE_ALIAS("ghash");
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation");
index 8b16c47..0808fbf 100644 (file)
@@ -166,7 +166,7 @@ static int __init prng_init(void)
        int ret;
 
        /* check if the CPU has a PRNG */
-       if (!crypt_s390_func_available(KMC_PRNG))
+       if (!crypt_s390_func_available(KMC_PRNG, CRYPT_S390_MSA))
                return -EOPNOTSUPP;
 
        if (prng_chunk_size < 8)
index f6de782..e9868c6 100644 (file)
@@ -90,7 +90,7 @@ static struct shash_alg alg = {
 
 static int __init sha1_s390_init(void)
 {
-       if (!crypt_s390_func_available(KIMD_SHA_1))
+       if (!crypt_s390_func_available(KIMD_SHA_1, CRYPT_S390_MSA))
                return -EOPNOTSUPP;
        return crypto_register_shash(&alg);
 }
index 61a7db3..5ed8d64 100644 (file)
@@ -86,7 +86,7 @@ static struct shash_alg alg = {
 
 static int sha256_s390_init(void)
 {
-       if (!crypt_s390_func_available(KIMD_SHA_256))
+       if (!crypt_s390_func_available(KIMD_SHA_256, CRYPT_S390_MSA))
                return -EOPNOTSUPP;
 
        return crypto_register_shash(&alg);
index 4bf73d0..32a8138 100644 (file)
@@ -132,7 +132,7 @@ static int __init init(void)
 {
        int ret;
 
-       if (!crypt_s390_func_available(KIMD_SHA_512))
+       if (!crypt_s390_func_available(KIMD_SHA_512, CRYPT_S390_MSA))
                return -EOPNOTSUPP;
        if ((ret = crypto_register_shash(&sha512_alg)) < 0)
                goto out;
index 1a58ad8..c04f1b7 100644 (file)
@@ -2,8 +2,6 @@
 # Arch-specific CryptoAPI modules.
 #
 
-obj-$(CONFIG_CRYPTO_FPU) += fpu.o
-
 obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
 obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
 obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
@@ -24,6 +22,6 @@ aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
 twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
 salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
 
-aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
+aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
 
 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
index 2577613..feee8ff 100644 (file)
@@ -94,6 +94,10 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
+
+int crypto_fpu_init(void);
+void crypto_fpu_exit(void);
+
 #ifdef CONFIG_X86_64
 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
@@ -1257,6 +1261,8 @@ static int __init aesni_init(void)
                return -ENODEV;
        }
 
+       if ((err = crypto_fpu_init()))
+               goto fpu_err;
        if ((err = crypto_register_alg(&aesni_alg)))
                goto aes_err;
        if ((err = crypto_register_alg(&__aesni_alg)))
@@ -1334,6 +1340,7 @@ blk_ecb_err:
 __aes_err:
        crypto_unregister_alg(&aesni_alg);
 aes_err:
+fpu_err:
        return err;
 }
 
@@ -1363,6 +1370,8 @@ static void __exit aesni_exit(void)
        crypto_unregister_alg(&blk_ecb_alg);
        crypto_unregister_alg(&__aesni_alg);
        crypto_unregister_alg(&aesni_alg);
+
+       crypto_fpu_exit();
 }
 
 module_init(aesni_init);
index 1a8f864..98d7a18 100644 (file)
@@ -150,18 +150,12 @@ static struct crypto_template crypto_fpu_tmpl = {
        .module = THIS_MODULE,
 };
 
-static int __init crypto_fpu_module_init(void)
+int __init crypto_fpu_init(void)
 {
        return crypto_register_template(&crypto_fpu_tmpl);
 }
 
-static void __exit crypto_fpu_module_exit(void)
+void __exit crypto_fpu_exit(void)
 {
        crypto_unregister_template(&crypto_fpu_tmpl);
 }
-
-module_init(crypto_fpu_module_init);
-module_exit(crypto_fpu_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("FPU block cipher wrapper");
index 4b7cb0e..87b22ca 100644 (file)
@@ -264,11 +264,6 @@ config CRYPTO_XTS
          key size 256, 384 or 512 bits. This implementation currently
          can't handle a sectorsize which is not a multiple of 16 bytes.
 
-config CRYPTO_FPU
-       tristate
-       select CRYPTO_BLKCIPHER
-       select CRYPTO_MANAGER
-
 comment "Hash modes"
 
 config CRYPTO_HMAC
@@ -543,7 +538,6 @@ config CRYPTO_AES_NI_INTEL
        select CRYPTO_AES_586 if !64BIT
        select CRYPTO_CRYPTD
        select CRYPTO_ALGAPI
-       select CRYPTO_FPU
        help
          Use Intel AES-NI instructions for AES algorithm.
 
index e912ea5..2222617 100644 (file)
@@ -1009,6 +1009,10 @@ static int do_test(int m)
                                speed_template_32_48_64);
                test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
                                speed_template_32_48_64);
+               test_cipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
+                               speed_template_16_24_32);
+               test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
+                               speed_template_16_24_32);
                break;
 
        case 201:
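
These additions extend the existing AES speed test, so loading the module with
e.g. "modprobe tcrypt mode=200 sec=1" now reports ctr(aes) throughput alongside
the other AES mode figures measured by the same case.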
index 2854865..b6b93d4 100644 (file)
@@ -2218,6 +2218,22 @@ static const struct alg_test_desc alg_test_descs[] = {
                                .count = MICHAEL_MIC_TEST_VECTORS
                        }
                }
+       }, {
+               .alg = "ofb(aes)",
+               .test = alg_test_skcipher,
+               .fips_allowed = 1,
+               .suite = {
+                       .cipher = {
+                               .enc = {
+                                       .vecs = aes_ofb_enc_tv_template,
+                                       .count = AES_OFB_ENC_TEST_VECTORS
+                               },
+                               .dec = {
+                                       .vecs = aes_ofb_dec_tv_template,
+                                       .count = AES_OFB_DEC_TEST_VECTORS
+                               }
+                       }
+               }
        }, {
                .alg = "pcbc(fcrypt)",
                .test = alg_test_skcipher,
index aa6dac0..27e6061 100644 (file)
@@ -2980,6 +2980,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = {
 #define AES_XTS_DEC_TEST_VECTORS 4
 #define AES_CTR_ENC_TEST_VECTORS 3
 #define AES_CTR_DEC_TEST_VECTORS 3
+#define AES_OFB_ENC_TEST_VECTORS 1
+#define AES_OFB_DEC_TEST_VECTORS 1
 #define AES_CTR_3686_ENC_TEST_VECTORS 7
 #define AES_CTR_3686_DEC_TEST_VECTORS 6
 #define AES_GCM_ENC_TEST_VECTORS 9
@@ -5506,6 +5508,64 @@ static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
        },
 };
 
+static struct cipher_testvec aes_ofb_enc_tv_template[] = {
+       /* From NIST Special Publication 800-38A, Appendix F.5 */
+       {
+               .key    = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+                         "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+               .klen   = 16,
+               .iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
+                         "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+               .input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+                         "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+                         "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+                         "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+                         "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+                         "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+                         "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+                         "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+               .ilen   = 64,
+               .result = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
+                         "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
+                         "\x77\x89\x50\x8d\x16\x91\x8f\x03"
+                         "\xf5\x3c\x52\xda\xc5\x4e\xd8\x25"
+                         "\x97\x40\x05\x1e\x9c\x5f\xec\xf6"
+                         "\x43\x44\xf7\xa8\x22\x60\xed\xcc"
+                         "\x30\x4c\x65\x28\xf6\x59\xc7\x78"
+                         "\x66\xa5\x10\xd9\xc1\xd6\xae\x5e",
+               .rlen   = 64,
+       }
+};
+
+static struct cipher_testvec aes_ofb_dec_tv_template[] = {
+       /* From NIST Special Publication 800-38A, Appendix F.5 */
+       {
+               .key    = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+                         "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+               .klen   = 16,
+               .iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
+                         "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+               .input  = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
+                         "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
+                         "\x77\x89\x50\x8d\x16\x91\x8f\x03"
+                         "\xf5\x3c\x52\xda\xc5\x4e\xd8\x25"
+                         "\x97\x40\x05\x1e\x9c\x5f\xec\xf6"
+                         "\x43\x44\xf7\xa8\x22\x60\xed\xcc"
+                         "\x30\x4c\x65\x28\xf6\x59\xc7\x78"
+                         "\x66\xa5\x10\xd9\xc1\xd6\xae\x5e",
+               .ilen   = 64,
+               .result = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+                         "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+                         "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+                         "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+                         "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+                         "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+                         "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+                         "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+               .rlen   = 64,
+       }
+};
+
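
Because OFB turns AES into a keystream generator (each keystream block is the
encryption of the previous one, seeded by the IV), encryption and decryption are
the same operation, which is why the decryption vectors above are just the
encryption vectors with .input and .result swapped. The mode in a few lines (a
sketch; aes_encrypt_one here stands in for a hypothetical single-block ECB
primitive and is not a real kernel API):

        #include <linux/string.h>
        #include <linux/types.h>

        static void ofb_crypt(u8 iv[16], const u8 *in, u8 *out,
                              unsigned int len,
                              void (*aes_encrypt_one)(const u8 *in, u8 *out))
        {
                u8 ks[16];
                unsigned int i, n;

                while (len) {
                        aes_encrypt_one(iv, ks);  /* O_i = E_K(O_(i-1)) */
                        memcpy(iv, ks, 16);
                        n = len < 16 ? len : 16;
                        for (i = 0; i < n; i++)
                                out[i] = in[i] ^ ks[i];  /* C_i = P_i ^ O_i */
                        in += n;
                        out += n;
                        len -= n;
                }
        }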
 static struct aead_testvec aes_gcm_enc_tv_template[] = {
        { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
                .key    = zeroed_string,
index beecd1c..a60043b 100644 (file)
@@ -49,7 +49,7 @@ config HW_RANDOM_INTEL
 
 config HW_RANDOM_AMD
        tristate "AMD HW Random Number Generator support"
-       depends on HW_RANDOM && X86 && PCI
+       depends on HW_RANDOM && (X86 || PPC_MAPLE) && PCI
        default HW_RANDOM
        ---help---
          This driver provides kernel-side support for the Random Number
index 0d8c578..c6af038 100644 (file)
@@ -133,6 +133,12 @@ found:
        pmbase &= 0x0000FF00;
        if (pmbase == 0)
                goto out;
+       if (!request_region(pmbase + 0xF0, 8, "AMD HWRNG")) {
+               dev_err(&pdev->dev, "AMD HWRNG region 0x%x already in use!\n",
+                       pmbase + 0xF0);
+               err = -EBUSY;
+               goto out;
+       }
        amd_rng.priv = (unsigned long)pmbase;
        amd_pdev = pdev;
 
@@ -141,6 +147,7 @@ found:
        if (err) {
                printk(KERN_ERR PFX "RNG registering failed (%d)\n",
                       err);
+               release_region(pmbase + 0xF0, 8);
                goto out;
        }
 out:
@@ -149,6 +156,8 @@ out:
 
 static void __exit mod_exit(void)
 {
+       u32 pmbase = (unsigned long)amd_rng.priv;
+
        hwrng_unregister(&amd_rng);
+       release_region(pmbase + 0xF0, 8);
 }
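
The eight ports reserved at pmbase + 0xF0 cover the RNG data register (0xF0) and
status register (0xF4) that the driver's existing read callbacks poll, so a
conflicting driver now fails loudly at probe time instead of silently sharing the
I/O range.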
 
index e541852..c64c380 100644 (file)
@@ -91,6 +91,8 @@ config CRYPTO_SHA1_S390
          This is the s390 hardware accelerated implementation of the
          SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
 
+         It is available as of z990.
+
 config CRYPTO_SHA256_S390
        tristate "SHA256 digest algorithm"
        depends on S390
@@ -99,8 +101,7 @@ config CRYPTO_SHA256_S390
          This is the s390 hardware accelerated implementation of the
          SHA256 secure hash standard (DFIPS 180-2).
 
-         This version of SHA implements a 256 bit hash with 128 bits of
-         security against collision attacks.
+         It is available as of z9.
 
 config CRYPTO_SHA512_S390
        tristate "SHA384 and SHA512 digest algorithm"
@@ -110,10 +111,7 @@ config CRYPTO_SHA512_S390
          This is the s390 hardware accelerated implementation of the
          SHA512 secure hash standard.
 
-         This version of SHA implements a 512 bit hash with 256 bits of
-         security against collision attacks. The code also includes SHA-384,
-         a 384 bit hash with 192 bits of security against collision attacks.
-
+         It is available as of z10.
 
 config CRYPTO_DES_S390
        tristate "DES and Triple DES cipher algorithms"
@@ -121,9 +119,12 @@ config CRYPTO_DES_S390
        select CRYPTO_ALGAPI
        select CRYPTO_BLKCIPHER
        help
-         This us the s390 hardware accelerated implementation of the
+         This is the s390 hardware accelerated implementation of the
          DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
 
+         As of z990 the ECB and CBC modes are hardware accelerated.
+         As of z196 the CTR mode is hardware accelerated.
+
 config CRYPTO_AES_S390
        tristate "AES cipher algorithms"
        depends on S390
@@ -131,20 +132,15 @@ config CRYPTO_AES_S390
        select CRYPTO_BLKCIPHER
        help
          This is the s390 hardware accelerated implementation of the
-         AES cipher algorithms (FIPS-197). AES uses the Rijndael
-         algorithm.
-
-         Rijndael appears to be consistently a very good performer in
-         both hardware and software across a wide range of computing
-         environments regardless of its use in feedback or non-feedback
-         modes. Its key setup time is excellent, and its key agility is
-         good. Rijndael's very low memory requirements make it very well
-         suited for restricted-space environments, in which it also
-         demonstrates excellent performance. Rijndael's operations are
-         among the easiest to defend against power and timing attacks.
+         AES cipher algorithms (FIPS-197).
 
-         On s390 the System z9-109 currently only supports the key size
-         of 128 bit.
+         As of z9 the ECB and CBC modes are hardware accelerated
+         for 128 bit keys.
+         As of z10 the ECB and CBC modes are hardware accelerated
+         for all AES key sizes.
+         As of z196 the CTR mode is hardware accelerated for all AES
+         key sizes and XTS mode is hardware accelerated for 256 and
+         512 bit keys.
 
 config S390_PRNG
        tristate "Pseudo random number generator device driver"
@@ -154,8 +150,20 @@ config S390_PRNG
          Select this option if you want to use the s390 pseudo random number
          generator. The PRNG is part of the cryptographic processor functions
          and uses triple-DES to generate secure random numbers like the
-         ANSI X9.17 standard. The PRNG is usable via the char device
-         /dev/prandom.
+         ANSI X9.17 standard. User-space programs access the
+         pseudo-random-number device through the char device /dev/prandom.
+
+         It is available as of z9.
+
+config CRYPTO_GHASH_S390
+       tristate "GHASH digest algorithm"
+       depends on S390
+       select CRYPTO_HASH
+       help
+         This is the s390 hardware accelerated implementation of the
+         GHASH message digest algorithm for GCM (Galois/Counter Mode).
+
+         It is available as of z196.
 
 config CRYPTO_DEV_MV_CESA
        tristate "Marvell's Cryptographic Engine"
@@ -200,6 +208,8 @@ config CRYPTO_DEV_HIFN_795X_RNG
          Select this option if you want to enable the random number generator
          on the HIFN 795x crypto adapters.
 
+source drivers/crypto/caam/Kconfig
+
 config CRYPTO_DEV_TALITOS
        tristate "Talitos Freescale Security Engine (SEC)"
        select CRYPTO_ALGAPI
@@ -269,4 +279,15 @@ config CRYPTO_DEV_PICOXCELL
 
          Saying m here will build a module named picoxcell_crypto.
 
+config CRYPTO_DEV_S5P
+       tristate "Support for Samsung S5PV210 crypto accelerator"
+       depends on ARCH_S5PV210
+       select CRYPTO_AES
+       select CRYPTO_ALGAPI
+       select CRYPTO_BLKCIPHER
+       help
+         This option enables support for the S5P crypto accelerator.
+         Select this to offload AES algorithm execution to the crypto
+         accelerator on Samsung S5PV210 and S5PC110 SoCs.
+
 endif # CRYPTO_HW
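
Both new back ends follow the usual Kconfig pattern: CRYPTO_DEV_FSL_CAAM (with
CRYPTO_DEV_FSL_CAAM_CRYPTO_API for the crypto API glue) on FSL_SOC parts, and
CRYPTO_DEV_S5P on ARCH_S5PV210; the corresponding objects are wired up in the
Makefile changes below.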
index 5203e34..53ea501 100644 (file)
@@ -6,8 +6,10 @@ n2_crypto-y := n2_core.o n2_asm.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
+obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
new file mode 100644 (file)
index 0000000..2d876bb
--- /dev/null
@@ -0,0 +1,72 @@
+config CRYPTO_DEV_FSL_CAAM
+       tristate "Freescale CAAM-Multicore driver backend"
+       depends on FSL_SOC
+       help
+         Enables the driver module for Freescale's Cryptographic Accelerator
+         and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
+         This module adds a job ring operation interface, and configures h/w
+         to operate as a DPAA component automatically, depending
+         on h/w feature availability.
+
+         To compile this driver as a module, choose M here: the module
+         will be called caam.
+
+config CRYPTO_DEV_FSL_CAAM_RINGSIZE
+       int "Job Ring size"
+       depends on CRYPTO_DEV_FSL_CAAM
+       range 2 9
+       default "9"
+       help
+         Select size of Job Rings as a power of 2, within the
+         range 2-9 (ring size 4-512).
+         Examples:
+               2 => 4
+               3 => 8
+               4 => 16
+               5 => 32
+               6 => 64
+               7 => 128
+               8 => 256
+               9 => 512
+
+config CRYPTO_DEV_FSL_CAAM_INTC
+       bool "Job Ring interrupt coalescing"
+       depends on CRYPTO_DEV_FSL_CAAM
+       default y
+       help
+         Enable the Job Ring's interrupt coalescing feature.
+
+config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
+       int "Job Ring interrupt coalescing count threshold"
+       depends on CRYPTO_DEV_FSL_CAAM_INTC
+       range 1 255
+       default 255
+       help
+         Select number of descriptor completions to queue before
+         raising an interrupt, in the range 1-255. Note that a selection
+         of 1 functionally defeats the coalescing feature, and a selection
+         equal to or greater than the job ring size will force timeouts.
+
+config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
+       int "Job Ring interrupt coalescing timer threshold"
+       depends on CRYPTO_DEV_FSL_CAAM_INTC
+       range 1 65535
+       default 2048
+       help
+         Select the timeout threshold, in units of 64 bus clocks: an
+         interrupt is raised if one or more descriptor completions have
+         been queued this long without reaching the count threshold.
+         Range is 1-65535.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+       tristate "Register algorithm implementations with the Crypto API"
+       depends on CRYPTO_DEV_FSL_CAAM
+       default y
+       select CRYPTO_ALGAPI
+       select CRYPTO_AUTHENC
+       help
+         Selecting this will offload crypto for users of the
+         scatterlist crypto API (such as the Linux native IPsec
+         stack) to the SEC4 via job ring.
+
+         To compile this as a module, choose M here: the module
+         will be called caamalg.
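
As a worked example of how the two coalescing thresholds interact: the default
time threshold of 2048 corresponds to 2048 * 64 = 131072 bus clocks, and with
CRYPTO_DEV_FSL_CAAM_RINGSIZE=4 (a 16-entry ring) the default count threshold of
255 can never be reached, so every interrupt would come from the timer; keeping
the count threshold below the ring size keeps both mechanisms effective.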
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
new file mode 100644 (file)
index 0000000..ef39011
--- /dev/null
@@ -0,0 +1,8 @@
+#
+# Makefile for the CAAM backend and dependent components
+#
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+
+caam-objs := ctrl.o jr.o error.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
new file mode 100644 (file)
index 0000000..d0e65d6
--- /dev/null
@@ -0,0 +1,1268 @@
+/*
+ * caam - Freescale FSL CAAM support for crypto API
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Based on talitos crypto API driver.
+ *
+ * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
+ *
+ * ---------------                     ---------------
+ * | JobDesc #1  |-------------------->|  ShareDesc  |
+ * | *(packet 1) |                     |   (PDB)     |
+ * ---------------      |------------->|  (hashKey)  |
+ *       .              |              | (cipherKey) |
+ *       .              |    |-------->| (operation) |
+ * ---------------      |    |         ---------------
+ * | JobDesc #2  |------|    |
+ * | *(packet 2) |           |
+ * ---------------           |
+ *       .                   |
+ *       .                   |
+ * ---------------           |
+ * | JobDesc #3  |------------
+ * | *(packet 3) |
+ * ---------------
+ *
+ * The SharedDesc never changes for a connection unless rekeyed, but
+ * each packet will likely be in a different place. So all we need
+ * to know to process the packet is where the input is, where the
+ * output goes, and what context we want to process with. Context is
+ * in the SharedDesc, packet references in the JobDesc.
+ *
+ * So, a job desc looks like:
+ *
+ * ---------------------
+ * | Header            |
+ * | ShareDesc Pointer |
+ * | SEQ_OUT_PTR       |
+ * | (output buffer)   |
+ * | SEQ_IN_PTR        |
+ * | (input buffer)    |
+ * | LOAD (to DECO)    |
+ * ---------------------
+ */
+
+#include "compat.h"
+
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "jr.h"
+#include "error.h"
+
+/*
+ * crypto alg
+ */
+#define CAAM_CRA_PRIORITY              3000
+/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
+#define CAAM_MAX_KEY_SIZE              (AES_MAX_KEY_SIZE + \
+                                        SHA512_DIGEST_SIZE * 2)
+/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+#define CAAM_MAX_IV_LENGTH             16
+
+/* length of descriptors text */
+#define DESC_AEAD_SHARED_TEXT_LEN      4
+#define DESC_AEAD_ENCRYPT_TEXT_LEN     21
+#define DESC_AEAD_DECRYPT_TEXT_LEN     24
+#define DESC_AEAD_GIVENCRYPT_TEXT_LEN  27
+
+#ifdef DEBUG
+/* for print_hex_dumps with line references */
+#define xstr(s) str(s)
+#define str(s) #s
+#define debug(format, arg...) printk(format, arg)
+#else
+#define debug(format, arg...)
+#endif
+
+/*
+ * per-session context
+ */
+struct caam_ctx {
+       struct device *jrdev;
+       u32 *sh_desc;
+       dma_addr_t shared_desc_phys;
+       u32 class1_alg_type;
+       u32 class2_alg_type;
+       u32 alg_op;
+       u8 *key;
+       dma_addr_t key_phys;
+       unsigned int enckeylen;
+       unsigned int split_key_len;
+       unsigned int split_key_pad_len;
+       unsigned int authsize;
+};
+
+static int aead_authenc_setauthsize(struct crypto_aead *authenc,
+                                   unsigned int authsize)
+{
+       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+       ctx->authsize = authsize;
+
+       return 0;
+}
+
+struct split_key_result {
+       struct completion completion;
+       int err;
+};
+
+static void split_key_done(struct device *dev, u32 *desc, u32 err,
+                          void *context)
+{
+       struct split_key_result *res = context;
+
+#ifdef DEBUG
+       dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+       if (err) {
+               char tmp[CAAM_ERROR_STR_MAX];
+
+               dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+       }
+
+       res->err = err;
+
+       complete(&res->completion);
+}
+
+/*
+ * get a split ipad/opad key
+ *
+ * Split key generation -----------------------------------------------
+ *
+ * [00] 0xb0810008    jobdesc: stidx=1 share=never len=8
+ * [01] 0x04000014        key: class2->keyreg len=20
+ *                            @0xffe01000
+ * [03] 0x84410014  operation: cls2-op sha1 hmac init dec
+ * [04] 0x24940000     fifold: class2 msgdata-last2 len=0 imm
+ * [05] 0xa4000001       jump: class2 local all ->1 [06]
+ * [06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
+ *                            @0xffe04000
+ */
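
In other words, the canned descriptor keys MDHA with the raw authentication key,
runs an HMAC init over a zero-length message so the engine expands the key into
its inner and outer pad states, and stores that derived state as an encrypted
split key (the FIFOST_TYPE_SPLIT_KEK store below); gen_split_key() wraps this
descriptor in a synchronous job ring enqueue.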
+static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
+{
+       struct device *jrdev = ctx->jrdev;
+       u32 *desc;
+       struct split_key_result result;
+       dma_addr_t dma_addr_in, dma_addr_out;
+       int ret = 0;
+
+       desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+       if (!desc)
+               return -ENOMEM;
+
+       init_job_desc(desc, 0);
+
+       dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
+                                    DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, dma_addr_in)) {
+               dev_err(jrdev, "unable to map key input memory\n");
+               kfree(desc);
+               return -ENOMEM;
+       }
+       append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
+                      KEY_DEST_CLASS_REG);
+
+       /* Sets MDHA up into an HMAC-INIT */
+       append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
+                            OP_ALG_AS_INIT);
+
+       /*
+        * do a FIFO_LOAD of zero, this will trigger the internal key
+        * expansion into both pads inside MDHA
+        */
+       append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
+                               FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
+
+       /*
+        * FIFO_STORE with the explicit split-key content store
+        * (0x26 output type)
+        */
+       dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
+                                     DMA_FROM_DEVICE);
+       if (dma_mapping_error(jrdev, dma_addr_out)) {
+               dev_err(jrdev, "unable to map key output memory\n");
+               kfree(desc);
+               return -ENOMEM;
+       }
+       append_fifo_store(desc, dma_addr_out, ctx->split_key_len,
+                         LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
+
+#ifdef DEBUG
+       print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1);
+       print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+       result.err = 0;
+       init_completion(&result.completion);
+
+       ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+       if (!ret) {
+               /* in progress */
+               wait_for_completion_interruptible(&result.completion);
+               ret = result.err;
+#ifdef DEBUG
+               print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+                              DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+                              ctx->split_key_pad_len, 1);
+#endif
+       }
+
+       dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len,
+                        DMA_FROM_DEVICE);
+       dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE);
+
+       kfree(desc);
+
+       return ret;
+}
+
+static int build_sh_desc_ipsec(struct caam_ctx *ctx)
+{
+       struct device *jrdev = ctx->jrdev;
+       u32 *sh_desc;
+       u32 *jump_cmd;
+       bool keys_fit_inline = false;
+
+       /*
+        * largest Job Descriptor and its Shared Descriptor
+        * must both fit into the 64-word Descriptor h/w Buffer
+        */
+       if ((DESC_AEAD_GIVENCRYPT_TEXT_LEN +
+            DESC_AEAD_SHARED_TEXT_LEN) * CAAM_CMD_SZ +
+           ctx->split_key_pad_len + ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+               keys_fit_inline = 1;
+
+       /* build shared descriptor for this session */
+       sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN +
+                         (keys_fit_inline ?
+                          ctx->split_key_pad_len + ctx->enckeylen :
+                          CAAM_PTR_SZ * 2), GFP_DMA | GFP_KERNEL);
+       if (!sh_desc) {
+               dev_err(jrdev, "could not allocate shared descriptor\n");
+               return -ENOMEM;
+       }
+
+       init_sh_desc(sh_desc, HDR_SAVECTX | HDR_SHARE_SERIAL);
+
+       jump_cmd = append_jump(sh_desc, CLASS_BOTH | JUMP_TEST_ALL |
+                              JUMP_COND_SHRD | JUMP_COND_SELF);
+
+       /*
+        * process keys, starting with class 2/authentication.
+        */
+       if (keys_fit_inline) {
+               append_key_as_imm(sh_desc, ctx->key, ctx->split_key_pad_len,
+                                 ctx->split_key_len,
+                                 CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+
+               append_key_as_imm(sh_desc, (void *)ctx->key +
+                                 ctx->split_key_pad_len, ctx->enckeylen,
+                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+       } else {
+               append_key(sh_desc, ctx->key_phys, ctx->split_key_len, CLASS_2 |
+                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
+               append_key(sh_desc, ctx->key_phys + ctx->split_key_pad_len,
+                          ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+       }
+
+       /* update jump cmd now that we are at the jump target */
+       set_jump_tgt_here(sh_desc, jump_cmd);
+
+       ctx->shared_desc_phys = dma_map_single(jrdev, sh_desc,
+                                              desc_bytes(sh_desc),
+                                              DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, ctx->shared_desc_phys)) {
+               dev_err(jrdev, "unable to map shared descriptor\n");
+               kfree(sh_desc);
+               return -ENOMEM;
+       }
+
+       ctx->sh_desc = sh_desc;
+
+       return 0;
+}
+
+static int aead_authenc_setkey(struct crypto_aead *aead,
+                              const u8 *key, unsigned int keylen)
+{
+       /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+       static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+       struct caam_ctx *ctx = crypto_aead_ctx(aead);
+       struct device *jrdev = ctx->jrdev;
+       struct rtattr *rta = (void *)key;
+       struct crypto_authenc_key_param *param;
+       unsigned int authkeylen;
+       unsigned int enckeylen;
+       int ret = 0;
+
+       param = RTA_DATA(rta);
+       enckeylen = be32_to_cpu(param->enckeylen);
+
+       key += RTA_ALIGN(rta->rta_len);
+       keylen -= RTA_ALIGN(rta->rta_len);
+
+       if (keylen < enckeylen)
+               goto badkey;
+
+       authkeylen = keylen - enckeylen;
+
+       if (keylen > CAAM_MAX_KEY_SIZE)
+               goto badkey;
+
+       /* Pick class 2 key length from algorithm submask */
+       ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+                                     OP_ALG_ALGSEL_SHIFT] * 2;
+       ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
+
+#ifdef DEBUG
+       printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
+              keylen, enckeylen, authkeylen);
+       printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
+              ctx->split_key_len, ctx->split_key_pad_len);
+       print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+       ctx->key = kmalloc(ctx->split_key_pad_len + enckeylen,
+                          GFP_KERNEL | GFP_DMA);
+       if (!ctx->key) {
+               dev_err(jrdev, "could not allocate key output memory\n");
+               return -ENOMEM;
+       }
+
+       ret = gen_split_key(ctx, key, authkeylen);
+       if (ret) {
+               kfree(ctx->key);
+               goto badkey;
+       }
+
+       /* append encryption key after the auth split key */
+       memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
+
+       ctx->key_phys = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+                                      enckeylen, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, ctx->key_phys)) {
+               dev_err(jrdev, "unable to map key i/o memory\n");
+               kfree(ctx->key);
+               return -ENOMEM;
+       }
+#ifdef DEBUG
+       print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+                      ctx->split_key_pad_len + enckeylen, 1);
+#endif
+
+       ctx->enckeylen = enckeylen;
+
+       ret = build_sh_desc_ipsec(ctx);
+       if (ret) {
+               dma_unmap_single(jrdev, ctx->key_phys, ctx->split_key_pad_len +
+                                enckeylen, DMA_TO_DEVICE);
+               kfree(ctx->key);
+       }
+
+       return ret;
+badkey:
+       crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+       return -EINVAL;
+}
+
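+/*
+ * h/w scatter/gather (link table) entry; sg_to_link_tbl() below fills
+ * these in and marks the last entry with the Final bit
+ */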
+struct link_tbl_entry {
+       u64 ptr;
+       u32 len;
+       u8 reserved;
+       u8 buf_pool_id;
+       u16 offset;
+};
+
+/*
+ * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
+ * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @link_tbl_bytes: length of dma mapped link_tbl space
+ * @link_tbl_dma: bus physical mapped address of h/w link table
+ * @link_tbl: kernel virtual address of the h/w link table
+ * @hw_desc: the h/w job descriptor (variable length; must not exceed
+ *           MAX_CAAM_DESCSIZE), followed by any referenced link tables
+ */
+struct ipsec_esp_edesc {
+       int assoc_nents;
+       int src_nents;
+       int dst_nents;
+       int link_tbl_bytes;
+       dma_addr_t link_tbl_dma;
+       struct link_tbl_entry *link_tbl;
+       u32 hw_desc[0];
+};
+
+static void ipsec_esp_unmap(struct device *dev,
+                           struct ipsec_esp_edesc *edesc,
+                           struct aead_request *areq)
+{
+       /* unmap with the same nents values that were passed to dma_map_sg */
+       dma_unmap_sg(dev, areq->assoc, edesc->assoc_nents ? : 1,
+                    DMA_TO_DEVICE);
+
+       if (unlikely(areq->dst != areq->src)) {
+               dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
+                            DMA_TO_DEVICE);
+               dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1,
+                            DMA_FROM_DEVICE);
+       } else {
+               dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
+                            DMA_BIDIRECTIONAL);
+       }
+
+       if (edesc->link_tbl_bytes)
+               dma_unmap_single(dev, edesc->link_tbl_dma,
+                                edesc->link_tbl_bytes,
+                                DMA_TO_DEVICE);
+}
+
+/*
+ * ipsec_esp descriptor callbacks
+ */
+static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+                                  void *context)
+{
+       struct aead_request *areq = context;
+       struct ipsec_esp_edesc *edesc;
+#ifdef DEBUG
+       struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+       int ivsize = crypto_aead_ivsize(aead);
+       struct caam_ctx *ctx = crypto_aead_ctx(aead);
+
+       dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+       edesc = (struct ipsec_esp_edesc *)((char *)desc -
+                offsetof(struct ipsec_esp_edesc, hw_desc));
+
+       if (err) {
+               char tmp[CAAM_ERROR_STR_MAX];
+
+               dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+       }
+
+       ipsec_esp_unmap(jrdev, edesc, areq);
+
+#ifdef DEBUG
+       print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
+                      areq->assoclen, 1);
+       print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
+                      edesc->src_nents ? 100 : ivsize, 1);
+       print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
+                      edesc->src_nents ? 100 : areq->cryptlen +
+                      ctx->authsize + 4, 1);
+#endif
+
+       kfree(edesc);
+
+       aead_request_complete(areq, err);
+}
+
+static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+                                  void *context)
+{
+       struct aead_request *areq = context;
+       struct ipsec_esp_edesc *edesc;
+#ifdef DEBUG
+       struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+       struct caam_ctx *ctx = crypto_aead_ctx(aead);
+
+       dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+       edesc = (struct ipsec_esp_edesc *)((char *)desc -
+                offsetof(struct ipsec_esp_edesc, hw_desc));
+
+       if (err) {
+               char tmp[CAAM_ERROR_STR_MAX];
+
+               dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+       }
+
+       ipsec_esp_unmap(jrdev, edesc, areq);
+
+       /*
+        * if the h/w ICV (integrity check value) verification failed,
+        * report -EBADMSG
+        */
+       if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
+               err = -EBADMSG;
+
+#ifdef DEBUG
+       print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4,
+                      ((char *)sg_virt(areq->assoc) - sizeof(struct iphdr)),
+                      sizeof(struct iphdr) + areq->assoclen +
+                      ((areq->cryptlen > 1500) ? 1500 : areq->cryptlen) +
+                      ctx->authsize + 36, 1);
+       if (!err && edesc->link_tbl_bytes) {
+               struct scatterlist *sg = sg_last(areq->src, edesc->src_nents);
+               print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
+                              DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
+                       sg->length + ctx->authsize + 16, 1);
+       }
+#endif
+       kfree(edesc);
+
+       aead_request_complete(areq, err);
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * scatterlist must have been previously dma mapped
+ */
+static void sg_to_link_tbl(struct scatterlist *sg, int sg_count,
+                          struct link_tbl_entry *link_tbl_ptr, u32 offset)
+{
+       while (sg_count) {
+               link_tbl_ptr->ptr = sg_dma_address(sg);
+               link_tbl_ptr->len = sg_dma_len(sg);
+               link_tbl_ptr->reserved = 0;
+               link_tbl_ptr->buf_pool_id = 0;
+               link_tbl_ptr->offset = offset;
+               link_tbl_ptr++;
+               sg = sg_next(sg);
+               sg_count--;
+       }
+
+       /* set Final bit (marks end of link table) */
+       link_tbl_ptr--;
+       link_tbl_ptr->len |= 0x40000000;
+}
+
+/*
+ * fill in and submit ipsec_esp job descriptor
+ */
+static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
+                    u32 encrypt,
+                    void (*callback) (struct device *dev, u32 *desc,
+                                      u32 err, void *context))
+{
+       struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+       struct caam_ctx *ctx = crypto_aead_ctx(aead);
+       struct device *jrdev = ctx->jrdev;
+       u32 *desc = edesc->hw_desc, options;
+       int ret, sg_count, assoc_sg_count;
+       int ivsize = crypto_aead_ivsize(aead);
+       int authsize = ctx->authsize;
+       dma_addr_t ptr, dst_dma, src_dma;
+#ifdef DEBUG
+       u32 *sh_desc = ctx->sh_desc;
+
+       debug("assoclen %d cryptlen %d authsize %d\n",
+             areq->assoclen, areq->cryptlen, authsize);
+       print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
+                      areq->assoclen, 1);
+       print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
+                      edesc->src_nents ? 100 : ivsize, 1);
+       print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
+                       edesc->src_nents ? 100 : areq->cryptlen + authsize, 1);
+       print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
+                      desc_bytes(sh_desc), 1);
+#endif
+       assoc_sg_count = dma_map_sg(jrdev, areq->assoc, edesc->assoc_nents ?: 1,
+                                   DMA_TO_DEVICE);
+       if (areq->src == areq->dst)
+               sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
+                                     DMA_BIDIRECTIONAL);
+       else
+               sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
+                                     DMA_TO_DEVICE);
+
+       /* start auth operation; ICV checking is enabled when decrypting */
+       append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL |
+                        (encrypt ? : OP_ALG_ICV_ON));
+
+       /* Load FIFO with data for Class 2 CHA */
+       options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG;
+       if (!edesc->assoc_nents) {
+               ptr = sg_dma_address(areq->assoc);
+       } else {
+               sg_to_link_tbl(areq->assoc, edesc->assoc_nents,
+                              edesc->link_tbl, 0);
+               ptr = edesc->link_tbl_dma;
+               options |= LDST_SGF;
+       }
+       append_fifo_load(desc, ptr, areq->assoclen, options);
+
+       /* copy iv from cipher/class1 input context to class2 infifo */
+       append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
+
+       if (!encrypt) {
+               u32 *jump_cmd, *uncond_jump_cmd;
+
+               /* JUMP if shared */
+               jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+
+               /* start class 1 (cipher) operation, non-shared version */
+               append_operation(desc, ctx->class1_alg_type |
+                                OP_ALG_AS_INITFINAL);
+
+               uncond_jump_cmd = append_jump(desc, 0);
+
+               set_jump_tgt_here(desc, jump_cmd);
+
+               /* start class 1 (cipher) operation, shared version */
+               append_operation(desc, ctx->class1_alg_type |
+                                OP_ALG_AS_INITFINAL | OP_ALG_AAI_DK);
+               set_jump_tgt_here(desc, uncond_jump_cmd);
+       } else
+               append_operation(desc, ctx->class1_alg_type |
+                                OP_ALG_AS_INITFINAL | encrypt);
+
+       /* load payload & instruct class2 to snoop class1 if encrypting */
+       options = 0;
+       if (!edesc->src_nents) {
+               src_dma = sg_dma_address(areq->src);
+       } else {
+               sg_to_link_tbl(areq->src, edesc->src_nents, edesc->link_tbl +
+                              edesc->assoc_nents, 0);
+               src_dma = edesc->link_tbl_dma + edesc->assoc_nents *
+                         sizeof(struct link_tbl_entry);
+               options |= LDST_SGF;
+       }
+       append_seq_in_ptr(desc, src_dma, areq->cryptlen + authsize, options);
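+       /*
+        * the input sequence spans payload plus ICV; when decrypting, the
+        * trailing ICV is consumed by the SEQ FIFO LOAD at the end below
+        */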
+       append_seq_fifo_load(desc, areq->cryptlen, FIFOLD_CLASS_BOTH |
+                            FIFOLD_TYPE_LASTBOTH |
+                            (encrypt ? FIFOLD_TYPE_MSG1OUT2
+                                     : FIFOLD_TYPE_MSG));
+
+       /* specify destination */
+       if (areq->src == areq->dst) {
+               dst_dma = src_dma;
+       } else {
+               sg_count = dma_map_sg(jrdev, areq->dst, edesc->dst_nents ? : 1,
+                                     DMA_FROM_DEVICE);
+               if (!edesc->dst_nents) {
+                       dst_dma = sg_dma_address(areq->dst);
+                       options = 0;
+               } else {
+                       sg_to_link_tbl(areq->dst, edesc->dst_nents,
+                                      edesc->link_tbl + edesc->assoc_nents +
+                                      edesc->src_nents, 0);
+                       dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents +
+                                 edesc->src_nents) *
+                                 sizeof(struct link_tbl_entry);
+                       options = LDST_SGF;
+               }
+       }
+       append_seq_out_ptr(desc, dst_dma, areq->cryptlen + authsize, options);
+       append_seq_fifo_store(desc, areq->cryptlen, FIFOST_TYPE_MESSAGE_DATA);
+
+       /* ICV */
+       if (encrypt)
+               append_seq_store(desc, authsize, LDST_CLASS_2_CCB |
+                                LDST_SRCDST_BYTE_CONTEXT);
+       else
+               append_seq_fifo_load(desc, authsize, FIFOLD_CLASS_CLASS2 |
+                                    FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+       debug("job_desc_len %d\n", desc_len(desc));
+       print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+       print_hex_dump(KERN_ERR, "jdlinkt@"xstr(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
+                       edesc->link_tbl_bytes, 1);
+#endif
+
+       ret = caam_jr_enqueue(jrdev, desc, callback, areq);
+       if (!ret)
+               ret = -EINPROGRESS;
+       else {
+               ipsec_esp_unmap(jrdev, edesc, areq);
+               kfree(edesc);
+       }
+
+       return ret;
+}
+
+/*
+ * derive number of elements in scatterlist
+ */
+static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
+{
+       struct scatterlist *sg = sg_list;
+       int sg_nents = 0;
+
+       *chained = 0;
+       while (nbytes > 0) {
+               sg_nents++;
+               nbytes -= sg->length;
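+               /*
+                * a zero-length entry that isn't the last marks a
+                * chained scatterlist
+                */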
+               if (!sg_is_last(sg) && (sg + 1)->length == 0)
+                       *chained = 1;
+               sg = scatterwalk_sg_next(sg);
+       }
+
+       return sg_nents;
+}
+
+/*
+ * allocate and map the ipsec_esp extended descriptor
+ */
+static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
+                                                    int desc_bytes)
+{
+       struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+       struct caam_ctx *ctx = crypto_aead_ctx(aead);
+       struct device *jrdev = ctx->jrdev;
+       gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+                     GFP_ATOMIC;
+       int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes;
+       struct ipsec_esp_edesc *edesc;
+
+       assoc_nents = sg_count(areq->assoc, areq->assoclen, &chained);
+       BUG_ON(chained);
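+       /*
+        * a single-segment list is referenced by a direct pointer rather
+        * than a link table; signal that by normalizing nents to 0
+        */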
+       if (likely(assoc_nents == 1))
+               assoc_nents = 0;
+
+       src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize,
+                            &chained);
+       BUG_ON(chained);
+       if (src_nents == 1)
+               src_nents = 0;
+
+       if (unlikely(areq->dst != areq->src)) {
+               dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize,
+                                    &chained);
+               BUG_ON(chained);
+               if (dst_nents == 1)
+                       dst_nents = 0;
+       }
+
+       link_tbl_bytes = (assoc_nents + src_nents + dst_nents) *
+                        sizeof(struct link_tbl_entry);
+       debug("link_tbl_bytes %d\n", link_tbl_bytes);
+
+       /* allocate space for base edesc and hw desc commands, link tables */
+       edesc = kmalloc(sizeof(struct ipsec_esp_edesc) + desc_bytes +
+                       link_tbl_bytes, GFP_DMA | flags);
+       if (!edesc) {
+               dev_err(jrdev, "could not allocate extended descriptor\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       edesc->assoc_nents = assoc_nents;
+       edesc->src_nents = src_nents;
+       edesc->dst_nents = dst_nents;
+       edesc->link_tbl = (void *)edesc + sizeof(struct ipsec_esp_edesc) +
+                         desc_bytes;
+       edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
+                                            link_tbl_bytes, DMA_TO_DEVICE);
+       edesc->link_tbl_bytes = link_tbl_bytes;
+
+       return edesc;
+}
+
+static int aead_authenc_encrypt(struct aead_request *areq)
+{
+       struct ipsec_esp_edesc *edesc;
+       struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+       struct caam_ctx *ctx = crypto_aead_ctx(aead);
+       struct device *jrdev = ctx->jrdev;
+       int ivsize = crypto_aead_ivsize(aead);
+       u32 *desc;
+       dma_addr_t iv_dma;
+
+       /* allocate extended descriptor */
+       edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_ENCRYPT_TEXT_LEN *
+                                     CAAM_CMD_SZ);
+       if (IS_ERR(edesc))
+               return PTR_ERR(edesc);
+
+       desc = edesc->hw_desc;
+
+       /* insert shared descriptor pointer */
+       init_job_desc_shared(desc, ctx->shared_desc_phys,
+                            desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
+
+       iv_dma = dma_map_single(jrdev, areq->iv, ivsize, DMA_TO_DEVICE);
+       /* TODO: check iv_dma for a dma mapping error */
+
+       append_load(desc, iv_dma, ivsize,
+                   LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
+
+       return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
+}
+
+static int aead_authenc_decrypt(struct aead_request *req)
+{
+       struct crypto_aead *aead = crypto_aead_reqtfm(req);
+       int ivsize = crypto_aead_ivsize(aead);
+       struct caam_ctx *ctx = crypto_aead_ctx(aead);
+       struct device *jrdev = ctx->jrdev;
+       struct ipsec_esp_edesc *edesc;
+       u32 *desc;
+       dma_addr_t iv_dma;
+
+       req->cryptlen -= ctx->authsize;
+
+       /* allocate extended descriptor */
+       edesc = ipsec_esp_edesc_alloc(req, DESC_AEAD_DECRYPT_TEXT_LEN *
+                                     CAAM_CMD_SZ);
+       if (IS_ERR(edesc))
+               return PTR_ERR(edesc);
+
+       desc = edesc->hw_desc;
+
+       /* insert shared descriptor pointer */
+       init_job_desc_shared(desc, ctx->shared_desc_phys,
+                            desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
+
+       iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
+       /* TODO: check iv_dma for a dma mapping error */
+
+       append_load(desc, iv_dma, ivsize,
+                   LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
+
+       return ipsec_esp(edesc, req, !OP_ALG_ENCRYPT, ipsec_esp_decrypt_done);
+}
+
+static int aead_authenc_givencrypt(struct aead_givcrypt_request *req)
+{
+       struct aead_request *areq = &req->areq;
+       struct ipsec_esp_edesc *edesc;
+       struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+       struct caam_ctx *ctx = crypto_aead_ctx(aead);
+       struct device *jrdev = ctx->jrdev;
+       int ivsize = crypto_aead_ivsize(aead);
+       dma_addr_t iv_dma;
+       u32 *desc;
+
+       iv_dma = dma_map_single(jrdev, req->giv, ivsize, DMA_FROM_DEVICE);
+
+       debug("%s: giv %p\n", __func__, req->giv);
+
+       /* allocate extended descriptor */
+       edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_GIVENCRYPT_TEXT_LEN *
+                                     CAAM_CMD_SZ);
+       if (IS_ERR(edesc))
+               return PTR_ERR(edesc);
+
+       desc = edesc->hw_desc;
+
+       /* insert shared descriptor pointer */
+       init_job_desc_shared(desc, ctx->shared_desc_phys,
+                            desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
+
+       /*
+        * LOAD IMM Info FIFO
+        * to DECO, Last, Padding, Random, Message, ivsize bytes
+        */
+       append_load_imm_u32(desc, NFIFOENTRY_DEST_DECO | NFIFOENTRY_LC1 |
+                           NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG |
+                           NFIFOENTRY_PTYPE_RND | ivsize,
+                           LDST_SRCDST_WORD_INFO_FIFO);
+
+       /*
+        * disable info FIFO entries, since the above LOAD serves as the
+        * entry; this way, the MOVE command won't generate one.
+        * Note that this isn't required in more recent versions of
+        * SEC, where a MOVE that doesn't create info FIFO entries is
+        * available.
+        */
+       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+
+       /* MOVE DECO Alignment -> C1 Context (ivsize bytes) */
+       append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | ivsize);
+
+       /* re-enable info fifo entries */
+       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+       /* MOVE C1 Context -> OFIFO (ivsize bytes) */
+       append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | ivsize);
+
+       append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA);
+
+       return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
+}
+
+struct caam_alg_template {
+       char name[CRYPTO_MAX_ALG_NAME];
+       char driver_name[CRYPTO_MAX_ALG_NAME];
+       unsigned int blocksize;
+       struct aead_alg aead;
+       u32 class1_alg_type;
+       u32 class2_alg_type;
+       u32 alg_op;
+};
+
+static struct caam_alg_template driver_algs[] = {
+       /* single-pass ipsec_esp descriptor */
+       {
+               .name = "authenc(hmac(sha1),cbc(aes))",
+               .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
+               .blocksize = AES_BLOCK_SIZE,
+               .aead = {
+                       .setkey = aead_authenc_setkey,
+                       .setauthsize = aead_authenc_setauthsize,
+                       .encrypt = aead_authenc_encrypt,
+                       .decrypt = aead_authenc_decrypt,
+                       .givencrypt = aead_authenc_givencrypt,
+                       .geniv = "<built-in>",
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA1_DIGEST_SIZE,
+                       },
+               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+               .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+               .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+       },
+       {
+               .name = "authenc(hmac(sha256),cbc(aes))",
+               .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
+               .blocksize = AES_BLOCK_SIZE,
+               .aead = {
+                       .setkey = aead_authenc_setkey,
+                       .setauthsize = aead_authenc_setauthsize,
+                       .encrypt = aead_authenc_encrypt,
+                       .decrypt = aead_authenc_decrypt,
+                       .givencrypt = aead_authenc_givencrypt,
+                       .geniv = "<built-in>",
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA256_DIGEST_SIZE,
+                       },
+               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+               .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+                                  OP_ALG_AAI_HMAC_PRECOMP,
+               .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+       },
+       {
+               .name = "authenc(hmac(sha512),cbc(aes))",
+               .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
+               .blocksize = AES_BLOCK_SIZE,
+               .aead = {
+                       .setkey = aead_authenc_setkey,
+                       .setauthsize = aead_authenc_setauthsize,
+                       .encrypt = aead_authenc_encrypt,
+                       .decrypt = aead_authenc_decrypt,
+                       .givencrypt = aead_authenc_givencrypt,
+                       .geniv = "<built-in>",
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA512_DIGEST_SIZE,
+                       },
+               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+               .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+                                  OP_ALG_AAI_HMAC_PRECOMP,
+               .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+       },
+       {
+               .name = "authenc(hmac(sha1),cbc(des3_ede))",
+               .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
+               .blocksize = DES3_EDE_BLOCK_SIZE,
+               .aead = {
+                       .setkey = aead_authenc_setkey,
+                       .setauthsize = aead_authenc_setauthsize,
+                       .encrypt = aead_authenc_encrypt,
+                       .decrypt = aead_authenc_decrypt,
+                       .givencrypt = aead_authenc_givencrypt,
+                       .geniv = "<built-in>",
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = SHA1_DIGEST_SIZE,
+                       },
+               .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+               .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+               .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+       },
+       {
+               .name = "authenc(hmac(sha256),cbc(des3_ede))",
+               .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
+               .blocksize = DES3_EDE_BLOCK_SIZE,
+               .aead = {
+                       .setkey = aead_authenc_setkey,
+                       .setauthsize = aead_authenc_setauthsize,
+                       .encrypt = aead_authenc_encrypt,
+                       .decrypt = aead_authenc_decrypt,
+                       .givencrypt = aead_authenc_givencrypt,
+                       .geniv = "<built-in>",
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = SHA256_DIGEST_SIZE,
+                       },
+               .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+               .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+                                  OP_ALG_AAI_HMAC_PRECOMP,
+               .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+       },
+       {
+               .name = "authenc(hmac(sha512),cbc(des3_ede))",
+               .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
+               .blocksize = DES3_EDE_BLOCK_SIZE,
+               .aead = {
+                       .setkey = aead_authenc_setkey,
+                       .setauthsize = aead_authenc_setauthsize,
+                       .encrypt = aead_authenc_encrypt,
+                       .decrypt = aead_authenc_decrypt,
+                       .givencrypt = aead_authenc_givencrypt,
+                       .geniv = "<built-in>",
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = SHA512_DIGEST_SIZE,
+                       },
+               .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+               .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+                                  OP_ALG_AAI_HMAC_PRECOMP,
+               .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+       },
+       {
+               .name = "authenc(hmac(sha1),cbc(des))",
+               .driver_name = "authenc-hmac-sha1-cbc-des-caam",
+               .blocksize = DES_BLOCK_SIZE,
+               .aead = {
+                       .setkey = aead_authenc_setkey,
+                       .setauthsize = aead_authenc_setauthsize,
+                       .encrypt = aead_authenc_encrypt,
+                       .decrypt = aead_authenc_decrypt,
+                       .givencrypt = aead_authenc_givencrypt,
+                       .geniv = "<built-in>",
+                       .ivsize = DES_BLOCK_SIZE,
+                       .maxauthsize = SHA1_DIGEST_SIZE,
+                       },
+               .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+               .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+               .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+       },
+       {
+               .name = "authenc(hmac(sha256),cbc(des))",
+               .driver_name = "authenc-hmac-sha256-cbc-des-caam",
+               .blocksize = DES_BLOCK_SIZE,
+               .aead = {
+                       .setkey = aead_authenc_setkey,
+                       .setauthsize = aead_authenc_setauthsize,
+                       .encrypt = aead_authenc_encrypt,
+                       .decrypt = aead_authenc_decrypt,
+                       .givencrypt = aead_authenc_givencrypt,
+                       .geniv = "<built-in>",
+                       .ivsize = DES_BLOCK_SIZE,
+                       .maxauthsize = SHA256_DIGEST_SIZE,
+                       },
+               .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+               .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+                                  OP_ALG_AAI_HMAC_PRECOMP,
+               .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+       },
+       {
+               .name = "authenc(hmac(sha512),cbc(des))",
+               .driver_name = "authenc-hmac-sha512-cbc-des-caam",
+               .blocksize = DES_BLOCK_SIZE,
+               .aead = {
+                       .setkey = aead_authenc_setkey,
+                       .setauthsize = aead_authenc_setauthsize,
+                       .encrypt = aead_authenc_encrypt,
+                       .decrypt = aead_authenc_decrypt,
+                       .givencrypt = aead_authenc_givencrypt,
+                       .geniv = "<built-in>",
+                       .ivsize = DES_BLOCK_SIZE,
+                       .maxauthsize = SHA512_DIGEST_SIZE,
+                       },
+               .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+               .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+                                  OP_ALG_AAI_HMAC_PRECOMP,
+               .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+       },
+};
+
+struct caam_crypto_alg {
+       struct list_head entry;
+       struct device *ctrldev;
+       int class1_alg_type;
+       int class2_alg_type;
+       int alg_op;
+       struct crypto_alg crypto_alg;
+};
+
+static int caam_cra_init(struct crypto_tfm *tfm)
+{
+       struct crypto_alg *alg = tfm->__crt_alg;
+       struct caam_crypto_alg *caam_alg =
+                container_of(alg, struct caam_crypto_alg, crypto_alg);
+       struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
+       int tgt_jr = atomic_inc_return(&priv->tfm_count);
+
+       /*
+        * distribute tfms across job rings to ensure in-order
+        * crypto request processing per tfm
+        */
+       ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi];
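+       /* tgt_jr / 2: pairs of consecutive tfms share a job ring */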
+
+       /* copy descriptor header template value */
+       ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
+       ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
+       ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
+
+       return 0;
+}
+
+static void caam_cra_exit(struct crypto_tfm *tfm)
+{
+       struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       if (!dma_mapping_error(ctx->jrdev, ctx->shared_desc_phys))
+               dma_unmap_single(ctx->jrdev, ctx->shared_desc_phys,
+                                desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
+       kfree(ctx->sh_desc);
+
+       if (!dma_mapping_error(ctx->jrdev, ctx->key_phys))
+               dma_unmap_single(ctx->jrdev, ctx->key_phys,
+                                ctx->split_key_pad_len + ctx->enckeylen,
+                                DMA_TO_DEVICE);
+       kfree(ctx->key);
+}
+
+static void __exit caam_algapi_exit(void)
+{
+       struct device_node *dev_node;
+       struct platform_device *pdev;
+       struct device *ctrldev;
+       struct caam_drv_private *priv;
+       struct caam_crypto_alg *t_alg, *n;
+       int i, err;
+
+       dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+       if (!dev_node)
+               return;
+
+       pdev = of_find_device_by_node(dev_node);
+       if (!pdev)
+               return;
+
+       ctrldev = &pdev->dev;
+       of_node_put(dev_node);
+       priv = dev_get_drvdata(ctrldev);
+
+       if (!priv->alg_list.next)
+               return;
+
+       list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
+               crypto_unregister_alg(&t_alg->crypto_alg);
+               list_del(&t_alg->entry);
+               kfree(t_alg);
+       }
+
+       for (i = 0; i < priv->total_jobrs; i++) {
+               err = caam_jr_deregister(priv->algapi_jr[i]);
+               if (err < 0)
+                       break;
+       }
+       kfree(priv->algapi_jr);
+}
+
+static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
+                                             struct caam_alg_template
+                                             *template)
+{
+       struct caam_crypto_alg *t_alg;
+       struct crypto_alg *alg;
+
+       t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
+       if (!t_alg) {
+               dev_err(ctrldev, "failed to allocate t_alg\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       alg = &t_alg->crypto_alg;
+
+       snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+       snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+                template->driver_name);
+       alg->cra_module = THIS_MODULE;
+       alg->cra_init = caam_cra_init;
+       alg->cra_exit = caam_cra_exit;
+       alg->cra_priority = CAAM_CRA_PRIORITY;
+       alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+       alg->cra_blocksize = template->blocksize;
+       alg->cra_alignmask = 0;
+       alg->cra_type = &crypto_aead_type;
+       alg->cra_ctxsize = sizeof(struct caam_ctx);
+       alg->cra_u.aead = template->aead;
+
+       t_alg->class1_alg_type = template->class1_alg_type;
+       t_alg->class2_alg_type = template->class2_alg_type;
+       t_alg->alg_op = template->alg_op;
+       t_alg->ctrldev = ctrldev;
+
+       return t_alg;
+}
+
+static int __init caam_algapi_init(void)
+{
+       struct device_node *dev_node;
+       struct platform_device *pdev;
+       struct device *ctrldev, **jrdev;
+       struct caam_drv_private *priv;
+       int i = 0, err = 0;
+
+       dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+       if (!dev_node)
+               return -ENODEV;
+
+       pdev = of_find_device_by_node(dev_node);
+       if (!pdev)
+               return -ENODEV;
+
+       ctrldev = &pdev->dev;
+       priv = dev_get_drvdata(ctrldev);
+       of_node_put(dev_node);
+
+       INIT_LIST_HEAD(&priv->alg_list);
+
+       jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
+       if (!jrdev)
+               return -ENOMEM;
+
+       for (i = 0; i < priv->total_jobrs; i++) {
+               err = caam_jr_register(ctrldev, &jrdev[i]);
+               if (err < 0)
+                       break;
+       }
+       if (err < 0 && i == 0) {
+               dev_err(ctrldev, "algapi error in job ring registration: %d\n",
+                       err);
+               kfree(jrdev);
+               return err;
+       }
+
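+       /* partial registration is fine; continue with the rings that succeeded */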
+       priv->num_jrs_for_algapi = i;
+       priv->algapi_jr = jrdev;
+       atomic_set(&priv->tfm_count, -1);
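+       /* start at -1 so the first atomic_inc_return() in caam_cra_init() yields 0 */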
+
+       /* register crypto algorithms the device supports */
+       for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+               /* TODO: check if h/w supports alg */
+               struct caam_crypto_alg *t_alg;
+
+               t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
+               if (IS_ERR(t_alg)) {
+                       err = PTR_ERR(t_alg);
+                       dev_warn(ctrldev, "%s alg allocation failed\n",
+                                driver_algs[i].driver_name);
+                       continue;
+               }
+
+               err = crypto_register_alg(&t_alg->crypto_alg);
+               if (err) {
+                       dev_warn(ctrldev, "%s alg registration failed\n",
+                               t_alg->crypto_alg.cra_driver_name);
+                       kfree(t_alg);
+               } else {
+                       list_add_tail(&t_alg->entry, &priv->alg_list);
+                       dev_info(ctrldev, "%s\n",
+                                t_alg->crypto_alg.cra_driver_name);
+               }
+       }
+
+       return err;
+}
+
+module_init(caam_algapi_init);
+module_exit(caam_algapi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM support for crypto API");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
new file mode 100644 (file)
index 0000000..9504503
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef CAAM_COMPAT_H
+#define CAAM_COMPAT_H
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/crypto.h>
+#include <linux/hw_random.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/rtnetlink.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/circ_buf.h>
+#include <net/xfrm.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+
+#endif /* !defined(CAAM_COMPAT_H) */
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
new file mode 100644 (file)
index 0000000..9009713
--- /dev/null
@@ -0,0 +1,269 @@
+/*
+ * CAAM control-plane driver backend
+ * Controller-level driver, kernel property detection, initialization
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#include "compat.h"
+#include "regs.h"
+#include "intern.h"
+#include "jr.h"
+
+static int caam_remove(struct platform_device *pdev)
+{
+       struct device *ctrldev;
+       struct caam_drv_private *ctrlpriv;
+       struct caam_drv_private_jr *jrpriv;
+       struct caam_full __iomem *topregs;
+       int ring, ret = 0;
+
+       ctrldev = &pdev->dev;
+       ctrlpriv = dev_get_drvdata(ctrldev);
+       topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+
+       /* shut down JobRs */
+       for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
+               ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
+               jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
+               irq_dispose_mapping(jrpriv->irq);
+       }
+
+       /* Shut down debug views */
+#ifdef CONFIG_DEBUG_FS
+       debugfs_remove_recursive(ctrlpriv->dfs_root);
+#endif
+
+       /* Unmap controller region */
+       iounmap(&topregs->ctrl);
+
+       kfree(ctrlpriv->jrdev);
+       kfree(ctrlpriv);
+
+       return ret;
+}
+
+/* Probe routine for CAAM top (controller) level */
+static int caam_probe(struct platform_device *pdev)
+{
+       int d, ring, rspec;
+       struct device *dev;
+       struct device_node *nprop, *np;
+       struct caam_ctrl __iomem *ctrl;
+       struct caam_full __iomem *topregs;
+       struct caam_drv_private *ctrlpriv;
+       struct caam_perfmon *perfmon;
+       struct caam_deco **deco;
+       u32 deconum;
+
+       ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
+       if (!ctrlpriv)
+               return -ENOMEM;
+
+       dev = &pdev->dev;
+       dev_set_drvdata(dev, ctrlpriv);
+       ctrlpriv->pdev = pdev;
+       nprop = pdev->dev.of_node;
+
+       /* Get configuration properties from device tree */
+       /* First, get register page */
+       ctrl = of_iomap(nprop, 0);
+       if (ctrl == NULL) {
+               dev_err(dev, "caam: of_iomap() failed\n");
+               return -ENOMEM;
+       }
+       ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
+
+       /* topregs used to derive pointers to CAAM sub-blocks only */
+       topregs = (struct caam_full __iomem *)ctrl;
+
+       /* Get the IRQ of the controller (for security violations only) */
+       ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);
+
+       /*
+        * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
+        * 36-bit pointers in master configuration register
+        */
+       setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
+                 (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+
+       if (sizeof(dma_addr_t) == sizeof(u64))
+               dma_set_mask(dev, DMA_BIT_MASK(36));
+
+       /* Find out how many DECOs are present */
+       deconum = (rd_reg64(&topregs->ctrl.perfmon.cha_num) &
+                  CHA_NUM_DECONUM_MASK) >> CHA_NUM_DECONUM_SHIFT;
+
+       ctrlpriv->deco = kmalloc(deconum * sizeof(struct caam_deco *),
+                                GFP_KERNEL);
+
+       deco = (struct caam_deco __force **)&topregs->deco;
+       for (d = 0; d < deconum; d++)
+               ctrlpriv->deco[d] = deco[d];
+
+       /*
+        * Detect and enable JobRs
+        * First, find out how many ring spec'ed, allocate references
+        * for all, then go probe each one.
+        */
+       rspec = 0;
+       for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
+               rspec++;
+       ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
+       if (ctrlpriv->jrdev == NULL) {
+               iounmap(&topregs->ctrl);
+               return -ENOMEM;
+       }
+
+       ring = 0;
+       ctrlpriv->total_jobrs = 0;
+       for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
+               caam_jr_probe(pdev, np, ring);
+               ctrlpriv->total_jobrs++;
+               ring++;
+       }
+
+       /* Check to see if QI present. If so, enable */
+       ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
+                                 CTPR_QI_MASK);
+       if (ctrlpriv->qi_present) {
+               ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
+               /* This is all that's required to physically enable QI */
+               wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
+       }
+
+       /* If no QI and no rings specified, quit and go home */
+       if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
+               dev_err(dev, "no queues configured, terminating\n");
+               caam_remove(pdev);
+               return -ENOMEM;
+       }
+
+       /* NOTE: RTIC detection ought to go here, around Si time */
+
+       /* Initialize queue allocator lock */
+       spin_lock_init(&ctrlpriv->jr_alloc_lock);
+
+       /* Report "alive" for developer to see */
+       dev_info(dev, "device ID = 0x%016llx\n",
+                rd_reg64(&topregs->ctrl.perfmon.caam_id));
+       dev_info(dev, "job rings = %d, qi = %d\n",
+                ctrlpriv->total_jobrs, ctrlpriv->qi_present);
+
+#ifdef CONFIG_DEBUG_FS
+       /*
+        * FIXME: needs better naming distinction, e.g. some amalgamation
+        * of "caam" and nprop->full_name. The OF name isn't distinctive,
+        * but it does separate instances.
+        */
+       perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
+
+       ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
+       ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
+
+       /* Controller-level - performance monitor counters */
+       ctrlpriv->ctl_rq_dequeued =
+               debugfs_create_u64("rq_dequeued",
+                                  S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+                                  ctrlpriv->ctl, &perfmon->req_dequeued);
+       ctrlpriv->ctl_ob_enc_req =
+               debugfs_create_u64("ob_rq_encrypted",
+                                  S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+                                  ctrlpriv->ctl, &perfmon->ob_enc_req);
+       ctrlpriv->ctl_ib_dec_req =
+               debugfs_create_u64("ib_rq_decrypted",
+                                  S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+                                  ctrlpriv->ctl, &perfmon->ib_dec_req);
+       ctrlpriv->ctl_ob_enc_bytes =
+               debugfs_create_u64("ob_bytes_encrypted",
+                                  S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+                                  ctrlpriv->ctl, &perfmon->ob_enc_bytes);
+       ctrlpriv->ctl_ob_prot_bytes =
+               debugfs_create_u64("ob_bytes_protected",
+                                  S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+                                  ctrlpriv->ctl, &perfmon->ob_prot_bytes);
+       ctrlpriv->ctl_ib_dec_bytes =
+               debugfs_create_u64("ib_bytes_decrypted",
+                                  S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+                                  ctrlpriv->ctl, &perfmon->ib_dec_bytes);
+       ctrlpriv->ctl_ib_valid_bytes =
+               debugfs_create_u64("ib_bytes_validated",
+                                  S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+                                  ctrlpriv->ctl, &perfmon->ib_valid_bytes);
+
+       /* Controller level - global status values */
+       ctrlpriv->ctl_faultaddr =
+               debugfs_create_u64("fault_addr",
+                                  S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+                                  ctrlpriv->ctl, &perfmon->faultaddr);
+       ctrlpriv->ctl_faultdetail =
+               debugfs_create_u32("fault_detail",
+                                  S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+                                  ctrlpriv->ctl, &perfmon->faultdetail);
+       ctrlpriv->ctl_faultstatus =
+               debugfs_create_u32("fault_status",
+                                  S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+                                  ctrlpriv->ctl, &perfmon->status);
+
+       /* Internal covering keys (useful in non-secure mode only) */
+       ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
+       ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+       ctrlpriv->ctl_kek = debugfs_create_blob("kek",
+                                               S_IFCHR | S_IRUSR |
+                                               S_IRGRP | S_IROTH,
+                                               ctrlpriv->ctl,
+                                               &ctrlpriv->ctl_kek_wrap);
+
+       ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
+       ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+       ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
+                                                S_IFCHR | S_IRUSR |
+                                                S_IRGRP | S_IROTH,
+                                                ctrlpriv->ctl,
+                                                &ctrlpriv->ctl_tkek_wrap);
+
+       ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
+       ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+       ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
+                                                S_IFCHR | S_IRUSR |
+                                                S_IRGRP | S_IROTH,
+                                                ctrlpriv->ctl,
+                                                &ctrlpriv->ctl_tdsk_wrap);
+#endif
+       return 0;
+}
+
+static struct of_device_id caam_match[] = {
+       {
+               .compatible = "fsl,sec-v4.0",
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, caam_match);
+
+static struct platform_driver caam_driver = {
+       .driver = {
+               .name = "caam",
+               .owner = THIS_MODULE,
+               .of_match_table = caam_match,
+       },
+       .probe       = caam_probe,
+       .remove      = __devexit_p(caam_remove),
+};
+
+static int __init caam_base_init(void)
+{
+       return platform_driver_register(&caam_driver);
+}
+
+static void __exit caam_base_exit(void)
+{
+       return platform_driver_unregister(&caam_driver);
+}
+
+module_init(caam_base_init);
+module_exit(caam_base_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM request backend");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
new file mode 100644 (file)
index 0000000..974a758
--- /dev/null
@@ -0,0 +1,1605 @@
+/*
+ * CAAM descriptor composition header
+ * Definitions to support CAAM descriptor instruction generation
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef DESC_H
+#define DESC_H
+
+/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
+#define MAX_CAAM_DESCSIZE       64
+
+/* Block size of any entity covered/uncovered with a KEK/TKEK */
+#define KEK_BLOCKSIZE          16
+
+/*
+ * Supported descriptor command types as they show up
+ * inside a descriptor command word.
+ */
+#define CMD_SHIFT               27
+#define CMD_MASK                0xf8000000
+
+#define CMD_KEY                 (0x00 << CMD_SHIFT)
+#define CMD_SEQ_KEY             (0x01 << CMD_SHIFT)
+#define CMD_LOAD                (0x02 << CMD_SHIFT)
+#define CMD_SEQ_LOAD            (0x03 << CMD_SHIFT)
+#define CMD_FIFO_LOAD           (0x04 << CMD_SHIFT)
+#define CMD_SEQ_FIFO_LOAD       (0x05 << CMD_SHIFT)
+#define CMD_STORE               (0x0a << CMD_SHIFT)
+#define CMD_SEQ_STORE           (0x0b << CMD_SHIFT)
+#define CMD_FIFO_STORE          (0x0c << CMD_SHIFT)
+#define CMD_SEQ_FIFO_STORE      (0x0d << CMD_SHIFT)
+#define CMD_MOVE_LEN            (0x0e << CMD_SHIFT)
+#define CMD_MOVE                (0x0f << CMD_SHIFT)
+#define CMD_OPERATION           (0x10 << CMD_SHIFT)
+#define CMD_SIGNATURE           (0x12 << CMD_SHIFT)
+#define CMD_JUMP                (0x14 << CMD_SHIFT)
+#define CMD_MATH                (0x15 << CMD_SHIFT)
+#define CMD_DESC_HDR            (0x16 << CMD_SHIFT)
+#define CMD_SHARED_DESC_HDR     (0x17 << CMD_SHIFT)
+#define CMD_SEQ_IN_PTR          (0x1e << CMD_SHIFT)
+#define CMD_SEQ_OUT_PTR         (0x1f << CMD_SHIFT)
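+
+/*
+ * e.g. the append_key() calls in caamalg.c assemble a complete KEY
+ * command word as CMD_KEY | CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC |
+ * (key length in bytes)
+ */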
+
+/* General-purpose class selector for all commands */
+#define CLASS_SHIFT             25
+#define CLASS_MASK              (0x03 << CLASS_SHIFT)
+
+#define CLASS_NONE              (0x00 << CLASS_SHIFT)
+#define CLASS_1                 (0x01 << CLASS_SHIFT)
+#define CLASS_2                 (0x02 << CLASS_SHIFT)
+#define CLASS_BOTH              (0x03 << CLASS_SHIFT)
+
+/*
+ * Descriptor header command constructs
+ * Covers shared, job, and trusted descriptor headers
+ */
+
+/*
+ * Do Not Run - marks a descriptor inexecutable if there was
+ * a preceding error somewhere
+ */
+#define HDR_DNR                 0x01000000
+
+/*
+ * ONE - should always be set. Combination of ONE (always
+ * set) and ZRO (always clear) forms an endianness sanity check
+ */
+#define HDR_ONE                 0x00800000
+#define HDR_ZRO                 0x00008000
+
+/* Start Index or SharedDesc Length */
+#define HDR_START_IDX_MASK      0x3f
+#define HDR_START_IDX_SHIFT     16
+
+/* If shared descriptor header, 6-bit length */
+#define HDR_DESCLEN_SHR_MASK  0x3f
+
+/* If non-shared header, 7-bit length */
+#define HDR_DESCLEN_MASK      0x7f
+
+/* This is a TrustedDesc (if not SharedDesc) */
+#define HDR_TRUSTED             0x00004000
+
+/* Make into TrustedDesc (if not SharedDesc) */
+#define HDR_MAKE_TRUSTED        0x00002000
+
+/* Save context if self-shared (if SharedDesc) */
+#define HDR_SAVECTX             0x00001000
+
+/* Next item points to SharedDesc */
+#define HDR_SHARED              0x00001000
+
+/*
+ * Reverse Execution Order - execute JobDesc first, then
+ * execute SharedDesc (normally SharedDesc goes first).
+ */
+#define HDR_REVERSE             0x00000800
+
+/* Propagate DNR property to SharedDesc */
+#define HDR_PROP_DNR            0x00000800
+
+/* JobDesc/SharedDesc share property */
+#define HDR_SD_SHARE_MASK       0x03
+#define HDR_SD_SHARE_SHIFT      8
+#define HDR_JD_SHARE_MASK       0x07
+#define HDR_JD_SHARE_SHIFT      8
+
+#define HDR_SHARE_NEVER         (0x00 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_WAIT          (0x01 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_SERIAL        (0x02 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_ALWAYS        (0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_DEFER         (0x04 << HDR_SD_SHARE_SHIFT)
+
+/* JobDesc/SharedDesc descriptor length */
+#define HDR_JD_LENGTH_MASK      0x7f
+#define HDR_SD_LENGTH_MASK      0x3f
+
+/*
+ * KEY/SEQ_KEY Command Constructs
+ */
+
+/* Key Destination Class: 01 = Class 1, 02 = Class 2 */
+#define KEY_DEST_CLASS_SHIFT    25  /* use CLASS_1 or CLASS_2 */
+#define KEY_DEST_CLASS_MASK     (0x03 << KEY_DEST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define KEY_SGF                 0x01000000
+#define KEY_VLF                 0x01000000
+
+/* Immediate - Key follows command in the descriptor */
+#define KEY_IMM                 0x00800000
+
+/*
+ * Encrypted - Key is encrypted either with the KEK, or
+ * with the TDKEK if TK is set
+ */
+#define KEY_ENC                 0x00400000
+
+/*
+ * No Write Back - Do not allow key to be FIFO STOREd
+ */
+#define KEY_NWB                        0x00200000
+
+/*
+ * Enhanced Encryption of Key
+ */
+#define KEY_EKT                        0x00100000
+
+/*
+ * Encrypted with Trusted Key
+ */
+#define KEY_TK                 0x00008000
+
+/*
+ * KDEST - Key Destination: 0 - class key register,
+ * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split-key
+ */
+#define KEY_DEST_SHIFT          16
+#define KEY_DEST_MASK           (0x03 << KEY_DEST_SHIFT)
+
+#define KEY_DEST_CLASS_REG      (0x00 << KEY_DEST_SHIFT)
+#define KEY_DEST_PKHA_E         (0x01 << KEY_DEST_SHIFT)
+#define KEY_DEST_AFHA_SBOX      (0x02 << KEY_DEST_SHIFT)
+#define KEY_DEST_MDHA_SPLIT     (0x03 << KEY_DEST_SHIFT)
+
+/* Length in bytes */
+#define KEY_LENGTH_MASK         0x000003ff
+
+/*
+ * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
+ */
+
+/*
+ * Load/Store Destination: 0 = class independent CCB,
+ * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
+ */
+#define LDST_CLASS_SHIFT        25
+#define LDST_CLASS_MASK         (0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_IND_CCB      (0x00 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_1_CCB        (0x01 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_2_CCB        (0x02 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_DECO         (0x03 << LDST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define LDST_SGF                0x01000000
+#define LDST_VLF               LDST_SGF
+
+/* Immediate - Key follows this command in descriptor    */
+#define LDST_IMM_MASK           1
+#define LDST_IMM_SHIFT          23
+#define LDST_IMM                (LDST_IMM_MASK << LDST_IMM_SHIFT)
+
+/* SRC/DST - Destination for LOAD, Source for STORE   */
+#define LDST_SRCDST_SHIFT       16
+#define LDST_SRCDST_MASK        (0x7f << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_BYTE_CONTEXT       (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_KEY           (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_INFIFO                (0x7c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_OUTFIFO       (0x7e << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_WORD_MODE_REG      (0x00 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_KEYSZ_REG     (0x01 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DATASZ_REG    (0x02 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ICVSZ_REG     (0x03 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CHACTRL       (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECOCTRL       (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_IRQCTRL       (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_PCLOVRD   (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLRW          (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH0     (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_STAT          (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH1     (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH2     (0x0a << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_AAD_SZ    (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH3     (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_ICV_SZ  (0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ALTDS_CLASS1   (0x0f << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_A_SZ      (0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_B_SZ      (0x11 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_N_SZ      (0x12 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_E_SZ      (0x13 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF        (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO      (0x7a << LDST_SRCDST_SHIFT)
+
+/* Offset in source/destination                        */
+#define LDST_OFFSET_SHIFT       8
+#define LDST_OFFSET_MASK        (0xff << LDST_OFFSET_SHIFT)
+
+/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
+/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
+#define LDOFF_CHG_SHARE_SHIFT        0
+#define LDOFF_CHG_SHARE_MASK         (0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_NEVER        (0x1 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_NO_PROP   (0x2 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_PROP      (0x3 << LDOFF_CHG_SHARE_SHIFT)
+
+#define LDOFF_ENABLE_AUTO_NFIFO         (1 << 2)
+#define LDOFF_DISABLE_AUTO_NFIFO        (1 << 3)
+
+#define LDOFF_CHG_NONSEQLIODN_SHIFT     4
+#define LDOFF_CHG_NONSEQLIODN_MASK      (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_SEQ       (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_NON_SEQ   (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_TRUSTED   (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+
+#define LDOFF_CHG_SEQLIODN_SHIFT     6
+#define LDOFF_CHG_SEQLIODN_MASK      (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_SEQ       (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_NON_SEQ   (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_TRUSTED   (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+
+/* Data length in bytes                                 */
+#define LDST_LEN_SHIFT          0
+#define LDST_LEN_MASK           (0xff << LDST_LEN_SHIFT)
+
+/* Special Length definitions when dst=deco-ctrl */
+#define LDLEN_ENABLE_OSL_COUNT      (1 << 7)
+#define LDLEN_RST_CHA_OFIFO_PTR     (1 << 6)
+#define LDLEN_RST_OFIFO             (1 << 5)
+#define LDLEN_SET_OFIFO_OFF_VALID   (1 << 4)
+#define LDLEN_SET_OFIFO_OFF_RSVD    (1 << 3)
+#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
+#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
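+
+/*
+ * Illustrative example (a sketch, not from the SEC hardware
+ * reference): a LOAD of 32 bytes into the class 1 context register at
+ * offset 0 would OR these constructs into the command word, alongside
+ * the LOAD command-type bits defined earlier in this file:
+ *
+ *   LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT |
+ *       (0 << LDST_OFFSET_SHIFT) | (32 << LDST_LEN_SHIFT)
+ */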
+
+/*
+ * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
+ * Command Constructs
+ */
+
+/*
+ * Load Destination: 0 = skip (SEQ_FIFO_LOAD only),
+ * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
+ * Store Source: 0 = normal, 1 = Class1key, 2 = Class2key
+ */
+#define FIFOLD_CLASS_SHIFT      25
+#define FIFOLD_CLASS_MASK       (0x03 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_SKIP       (0x00 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS1     (0x01 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS2     (0x02 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_BOTH       (0x03 << FIFOLD_CLASS_SHIFT)
+
+#define FIFOST_CLASS_SHIFT      25
+#define FIFOST_CLASS_MASK       (0x03 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_NORMAL     (0x00 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS1KEY  (0x01 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS2KEY  (0x02 << FIFOST_CLASS_SHIFT)
+
+/*
+ * Scatter-Gather Table/Variable Length Field
+ * If set for FIFO_LOAD, the pointer refers to a scatter-gather table.
+ * Within SEQ_FIFO_LOAD, it selects the variable-length input sequence.
+ */
+#define FIFOLDST_SGF_SHIFT      24
+#define FIFOLDST_SGF_MASK       (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF_MASK       (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF            (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF            (1 << FIFOLDST_SGF_SHIFT)
+
+/* Immediate - Data follows command in descriptor */
+#define FIFOLD_IMM_SHIFT      23
+#define FIFOLD_IMM_MASK       (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM            (1 << FIFOLD_IMM_SHIFT)
+
+/* Continue - Not the last FIFO store to come */
+#define FIFOST_CONT_SHIFT     23
+#define FIFOST_CONT_MASK      (1 << FIFOST_CONT_SHIFT)
+
+/*
+ * Extended Length - use 32-bit extended length that
+ * follows the pointer field. Illegal with IMM set
+ */
+#define FIFOLDST_EXT_SHIFT      22
+#define FIFOLDST_EXT_MASK       (1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT            (1 << FIFOLDST_EXT_SHIFT)
+
+/* Input data type */
+#define FIFOLD_TYPE_SHIFT       16
+#define FIFOLD_CONT_TYPE_SHIFT  19 /* shift past last-flush bits */
+#define FIFOLD_TYPE_MASK        (0x3f << FIFOLD_TYPE_SHIFT)
+
+/* PK types */
+#define FIFOLD_TYPE_PK          (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_MASK     (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A0       (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A1       (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A2       (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A3       (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B0       (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B1       (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B2       (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B3       (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_N        (0x08 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A        (0x0c << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B        (0x0d << FIFOLD_TYPE_SHIFT)
+
+/* Other types. Need to OR in last/flush bits as desired */
+#define FIFOLD_TYPE_MSG_MASK    (0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG         (0x10 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG1OUT2    (0x18 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_IV          (0x20 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_BITDATA     (0x28 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_AAD         (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ICV         (0x38 << FIFOLD_TYPE_SHIFT)
+
+/* Last/Flush bits for use with "other" types above */
+#define FIFOLD_TYPE_ACT_MASK    (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOACTION    (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_FLUSH1      (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST1       (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH  (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2       (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTH    (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTHFL  (0x07 << FIFOLD_TYPE_SHIFT)
+
+#define FIFOLDST_LEN_MASK       0xffff
+#define FIFOLDST_EXT_LEN_MASK   0xffffffff
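+
+/*
+ * Illustrative example (a sketch, not from the SEC hardware
+ * reference): a FIFO LOAD of a message segment into class 1, marking
+ * it as the last data for class 1, would OR together (len is a
+ * hypothetical byte count):
+ *
+ *   FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1 |
+ *       (len & FIFOLDST_LEN_MASK)
+ */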
+
+/* Output data types */
+#define FIFOST_TYPE_SHIFT       16
+#define FIFOST_TYPE_MASK        (0x3f << FIFOST_TYPE_SHIFT)
+
+#define FIFOST_TYPE_PKHA_A0      (0x00 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A1      (0x01 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A2      (0x02 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A3      (0x03 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B0      (0x04 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B1      (0x05 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B2      (0x06 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B3      (0x07 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_N       (0x08 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A       (0x0c << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B       (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_JKEK  (0x22 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_TKEK  (0x23 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_KEK      (0x24 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_TKEK     (0x25 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_KEK    (0x26 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_TKEK   (0x27 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_KEK  (0x28 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGSTORE     (0x34 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGFIFO      (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SKIP         (0x3f << FIFOST_TYPE_SHIFT)
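+
+/*
+ * Illustrative example (a sketch, not from the SEC hardware
+ * reference): a FIFO STORE of output_len bytes of processed message
+ * data (output_len being a hypothetical count) would use
+ *
+ *   FIFOST_CLASS_NORMAL | FIFOST_TYPE_MESSAGE_DATA |
+ *       (output_len & FIFOLDST_LEN_MASK)
+ *
+ * adding FIFOLDST_EXT when the length needs the 32-bit extended word
+ * rather than the 16-bit field.
+ */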
+
+/*
+ * OPERATION Command Constructs
+ */
+
+/* Operation type selectors - OP TYPE */
+#define OP_TYPE_SHIFT           24
+#define OP_TYPE_MASK            (0x07 << OP_TYPE_SHIFT)
+
+#define OP_TYPE_UNI_PROTOCOL    (0x00 << OP_TYPE_SHIFT)
+#define OP_TYPE_PK              (0x01 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS1_ALG      (0x02 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS2_ALG      (0x04 << OP_TYPE_SHIFT)
+#define OP_TYPE_DECAP_PROTOCOL  (0x06 << OP_TYPE_SHIFT)
+#define OP_TYPE_ENCAP_PROTOCOL  (0x07 << OP_TYPE_SHIFT)
+
+/* ProtocolID selectors - PROTID */
+#define OP_PCLID_SHIFT          16
+#define OP_PCLID_MASK           (0xff << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
+#define OP_PCLID_IKEV1_PRF      (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV2_PRF      (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30_PRF      (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10_PRF      (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11_PRF      (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS10_PRF     (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_PRF            (0x06 << OP_PCLID_SHIFT)
+#define OP_PCLID_BLOB           (0x0d << OP_PCLID_SHIFT)
+#define OP_PCLID_SECRETKEY      (0x11 << OP_PCLID_SHIFT)
+#define OP_PCLID_PUBLICKEYPAIR  (0x14 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSASIGN        (0x15 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSAVERIFY      (0x16 << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
+#define OP_PCLID_IPSEC          (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_SRTP           (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_MACSEC         (0x03 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIFI           (0x04 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIMAX          (0x05 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30          (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10          (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11          (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12          (0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS           (0x0c << OP_PCLID_SHIFT)
+
+/*
+ * ProtocolInfo selectors
+ */
+#define OP_PCLINFO_MASK                          0xffff
+
+/* for OP_PCLID_IPSEC */
+#define OP_PCL_IPSEC_CIPHER_MASK                 0xff00
+#define OP_PCL_IPSEC_AUTH_MASK                   0x00ff
+
+#define OP_PCL_IPSEC_DES_IV64                    0x0100
+#define OP_PCL_IPSEC_DES                         0x0200
+#define OP_PCL_IPSEC_3DES                        0x0300
+#define OP_PCL_IPSEC_AES_CBC                     0x0c00
+#define OP_PCL_IPSEC_AES_CTR                     0x0d00
+#define OP_PCL_IPSEC_AES_XTS                     0x1600
+#define OP_PCL_IPSEC_AES_CCM8                    0x0e00
+#define OP_PCL_IPSEC_AES_CCM12                   0x0f00
+#define OP_PCL_IPSEC_AES_CCM16                   0x1000
+#define OP_PCL_IPSEC_AES_GCM8                    0x1200
+#define OP_PCL_IPSEC_AES_GCM12                   0x1300
+#define OP_PCL_IPSEC_AES_GCM16                   0x1400
+
+#define OP_PCL_IPSEC_HMAC_NULL                   0x0000
+#define OP_PCL_IPSEC_HMAC_MD5_96                 0x0001
+#define OP_PCL_IPSEC_HMAC_SHA1_96                0x0002
+#define OP_PCL_IPSEC_AES_XCBC_MAC_96             0x0005
+#define OP_PCL_IPSEC_HMAC_MD5_128                0x0006
+#define OP_PCL_IPSEC_HMAC_SHA1_160               0x0007
+#define OP_PCL_IPSEC_HMAC_SHA2_256_128           0x000c
+#define OP_PCL_IPSEC_HMAC_SHA2_384_192           0x000d
+#define OP_PCL_IPSEC_HMAC_SHA2_512_256           0x000e
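+
+/*
+ * Illustrative example (a sketch, not from the SEC hardware
+ * reference): an IPsec ESP encapsulation OPERATION selecting AES-CBC
+ * with HMAC-SHA1-96 combines one cipher selector (0xff00 field) with
+ * one authentication selector (0x00ff field):
+ *
+ *   OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_IPSEC |
+ *       OP_PCL_IPSEC_AES_CBC | OP_PCL_IPSEC_HMAC_SHA1_96
+ */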
+
+/* For SRTP - OP_PCLID_SRTP */
+#define OP_PCL_SRTP_CIPHER_MASK                  0xff00
+#define OP_PCL_SRTP_AUTH_MASK                    0x00ff
+
+#define OP_PCL_SRTP_AES_CTR                      0x0d00
+
+#define OP_PCL_SRTP_HMAC_SHA1_160                0x0007
+
+/* For SSL 3.0 - OP_PCLID_SSL30 */
+#define OP_PCL_SSL30_AES_128_CBC_SHA             0x002f
+#define OP_PCL_SSL30_AES_128_CBC_SHA_2           0x0030
+#define OP_PCL_SSL30_AES_128_CBC_SHA_3           0x0031
+#define OP_PCL_SSL30_AES_128_CBC_SHA_4           0x0032
+#define OP_PCL_SSL30_AES_128_CBC_SHA_5           0x0033
+#define OP_PCL_SSL30_AES_128_CBC_SHA_6           0x0034
+#define OP_PCL_SSL30_AES_128_CBC_SHA_7           0x008c
+#define OP_PCL_SSL30_AES_128_CBC_SHA_8           0x0090
+#define OP_PCL_SSL30_AES_128_CBC_SHA_9           0x0094
+#define OP_PCL_SSL30_AES_128_CBC_SHA_10          0xc004
+#define OP_PCL_SSL30_AES_128_CBC_SHA_11          0xc009
+#define OP_PCL_SSL30_AES_128_CBC_SHA_12          0xc00e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_13          0xc013
+#define OP_PCL_SSL30_AES_128_CBC_SHA_14          0xc018
+#define OP_PCL_SSL30_AES_128_CBC_SHA_15          0xc01d
+#define OP_PCL_SSL30_AES_128_CBC_SHA_16          0xc01e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_17          0xc01f
+
+#define OP_PCL_SSL30_AES_256_CBC_SHA             0x0035
+#define OP_PCL_SSL30_AES_256_CBC_SHA_2           0x0036
+#define OP_PCL_SSL30_AES_256_CBC_SHA_3           0x0037
+#define OP_PCL_SSL30_AES_256_CBC_SHA_4           0x0038
+#define OP_PCL_SSL30_AES_256_CBC_SHA_5           0x0039
+#define OP_PCL_SSL30_AES_256_CBC_SHA_6           0x003a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_7           0x008d
+#define OP_PCL_SSL30_AES_256_CBC_SHA_8           0x0091
+#define OP_PCL_SSL30_AES_256_CBC_SHA_9           0x0095
+#define OP_PCL_SSL30_AES_256_CBC_SHA_10          0xc005
+#define OP_PCL_SSL30_AES_256_CBC_SHA_11          0xc00a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_12          0xc00f
+#define OP_PCL_SSL30_AES_256_CBC_SHA_13          0xc014
+#define OP_PCL_SSL30_AES_256_CBC_SHA_14          0xc019
+#define OP_PCL_SSL30_AES_256_CBC_SHA_15          0xc020
+#define OP_PCL_SSL30_AES_256_CBC_SHA_16          0xc021
+#define OP_PCL_SSL30_AES_256_CBC_SHA_17          0xc022
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_MD5            0x0023
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA            0x001f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2          0x008b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3          0x008f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4          0x0093
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5          0x000a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6          0x000d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7          0x0010
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8          0x0013
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9          0x0016
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10         0x001b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11         0xc003
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12         0xc008
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13         0xc00d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14         0xc012
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15         0xc017
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16         0xc01a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17         0xc01b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18         0xc01c
+
+#define OP_PCL_SSL30_DES40_CBC_MD5               0x0029
+
+#define OP_PCL_SSL30_DES_CBC_MD5                 0x0022
+
+#define OP_PCL_SSL30_DES40_CBC_SHA               0x0008
+#define OP_PCL_SSL30_DES40_CBC_SHA_2             0x000b
+#define OP_PCL_SSL30_DES40_CBC_SHA_3             0x000e
+#define OP_PCL_SSL30_DES40_CBC_SHA_4             0x0011
+#define OP_PCL_SSL30_DES40_CBC_SHA_5             0x0014
+#define OP_PCL_SSL30_DES40_CBC_SHA_6             0x0019
+#define OP_PCL_SSL30_DES40_CBC_SHA_7             0x0026
+
+#define OP_PCL_SSL30_DES_CBC_SHA                 0x001e
+#define OP_PCL_SSL30_DES_CBC_SHA_2               0x0009
+#define OP_PCL_SSL30_DES_CBC_SHA_3               0x000c
+#define OP_PCL_SSL30_DES_CBC_SHA_4               0x000f
+#define OP_PCL_SSL30_DES_CBC_SHA_5               0x0012
+#define OP_PCL_SSL30_DES_CBC_SHA_6               0x0015
+#define OP_PCL_SSL30_DES_CBC_SHA_7               0x001a
+
+#define OP_PCL_SSL30_RC4_128_MD5                 0x0024
+#define OP_PCL_SSL30_RC4_128_MD5_2               0x0004
+#define OP_PCL_SSL30_RC4_128_MD5_3               0x0018
+
+#define OP_PCL_SSL30_RC4_40_MD5                  0x002b
+#define OP_PCL_SSL30_RC4_40_MD5_2                0x0003
+#define OP_PCL_SSL30_RC4_40_MD5_3                0x0017
+
+#define OP_PCL_SSL30_RC4_128_SHA                 0x0020
+#define OP_PCL_SSL30_RC4_128_SHA_2               0x008a
+#define OP_PCL_SSL30_RC4_128_SHA_3               0x008e
+#define OP_PCL_SSL30_RC4_128_SHA_4               0x0092
+#define OP_PCL_SSL30_RC4_128_SHA_5               0x0005
+#define OP_PCL_SSL30_RC4_128_SHA_6               0xc002
+#define OP_PCL_SSL30_RC4_128_SHA_7               0xc007
+#define OP_PCL_SSL30_RC4_128_SHA_8               0xc00c
+#define OP_PCL_SSL30_RC4_128_SHA_9               0xc011
+#define OP_PCL_SSL30_RC4_128_SHA_10              0xc016
+
+#define OP_PCL_SSL30_RC4_40_SHA                  0x0028
+
+/* For TLS 1.0 - OP_PCLID_TLS10 */
+#define OP_PCL_TLS10_AES_128_CBC_SHA             0x002f
+#define OP_PCL_TLS10_AES_128_CBC_SHA_2           0x0030
+#define OP_PCL_TLS10_AES_128_CBC_SHA_3           0x0031
+#define OP_PCL_TLS10_AES_128_CBC_SHA_4           0x0032
+#define OP_PCL_TLS10_AES_128_CBC_SHA_5           0x0033
+#define OP_PCL_TLS10_AES_128_CBC_SHA_6           0x0034
+#define OP_PCL_TLS10_AES_128_CBC_SHA_7           0x008c
+#define OP_PCL_TLS10_AES_128_CBC_SHA_8           0x0090
+#define OP_PCL_TLS10_AES_128_CBC_SHA_9           0x0094
+#define OP_PCL_TLS10_AES_128_CBC_SHA_10          0xc004
+#define OP_PCL_TLS10_AES_128_CBC_SHA_11          0xc009
+#define OP_PCL_TLS10_AES_128_CBC_SHA_12          0xc00e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_13          0xc013
+#define OP_PCL_TLS10_AES_128_CBC_SHA_14          0xc018
+#define OP_PCL_TLS10_AES_128_CBC_SHA_15          0xc01d
+#define OP_PCL_TLS10_AES_128_CBC_SHA_16          0xc01e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_17          0xc01f
+
+#define OP_PCL_TLS10_AES_256_CBC_SHA             0x0035
+#define OP_PCL_TLS10_AES_256_CBC_SHA_2           0x0036
+#define OP_PCL_TLS10_AES_256_CBC_SHA_3           0x0037
+#define OP_PCL_TLS10_AES_256_CBC_SHA_4           0x0038
+#define OP_PCL_TLS10_AES_256_CBC_SHA_5           0x0039
+#define OP_PCL_TLS10_AES_256_CBC_SHA_6           0x003a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_7           0x008d
+#define OP_PCL_TLS10_AES_256_CBC_SHA_8           0x0091
+#define OP_PCL_TLS10_AES_256_CBC_SHA_9           0x0095
+#define OP_PCL_TLS10_AES_256_CBC_SHA_10          0xc005
+#define OP_PCL_TLS10_AES_256_CBC_SHA_11          0xc00a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_12          0xc00f
+#define OP_PCL_TLS10_AES_256_CBC_SHA_13          0xc014
+#define OP_PCL_TLS10_AES_256_CBC_SHA_14          0xc019
+#define OP_PCL_TLS10_AES_256_CBC_SHA_15          0xc020
+#define OP_PCL_TLS10_AES_256_CBC_SHA_16          0xc021
+#define OP_PCL_TLS10_AES_256_CBC_SHA_17          0xc022
+
+/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5            0x0023 */
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA            0x001f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2          0x008b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3          0x008f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4          0x0093
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5          0x000a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6          0x000d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7          0x0010
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8          0x0013
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9          0x0016
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10         0x001b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11         0xc003
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12         0xc008
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13         0xc00d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14         0xc012
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15         0xc017
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16         0xc01a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17         0xc01b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18         0xc01c
+
+#define OP_PCL_TLS10_DES40_CBC_MD5               0x0029
+
+#define OP_PCL_TLS10_DES_CBC_MD5                 0x0022
+
+#define OP_PCL_TLS10_DES40_CBC_SHA               0x0008
+#define OP_PCL_TLS10_DES40_CBC_SHA_2             0x000b
+#define OP_PCL_TLS10_DES40_CBC_SHA_3             0x000e
+#define OP_PCL_TLS10_DES40_CBC_SHA_4             0x0011
+#define OP_PCL_TLS10_DES40_CBC_SHA_5             0x0014
+#define OP_PCL_TLS10_DES40_CBC_SHA_6             0x0019
+#define OP_PCL_TLS10_DES40_CBC_SHA_7             0x0026
+
+#define OP_PCL_TLS10_DES_CBC_SHA                 0x001e
+#define OP_PCL_TLS10_DES_CBC_SHA_2               0x0009
+#define OP_PCL_TLS10_DES_CBC_SHA_3               0x000c
+#define OP_PCL_TLS10_DES_CBC_SHA_4               0x000f
+#define OP_PCL_TLS10_DES_CBC_SHA_5               0x0012
+#define OP_PCL_TLS10_DES_CBC_SHA_6               0x0015
+#define OP_PCL_TLS10_DES_CBC_SHA_7               0x001a
+
+#define OP_PCL_TLS10_RC4_128_MD5                 0x0024
+#define OP_PCL_TLS10_RC4_128_MD5_2               0x0004
+#define OP_PCL_TLS10_RC4_128_MD5_3               0x0018
+
+#define OP_PCL_TLS10_RC4_40_MD5                  0x002b
+#define OP_PCL_TLS10_RC4_40_MD5_2                0x0003
+#define OP_PCL_TLS10_RC4_40_MD5_3                0x0017
+
+#define OP_PCL_TLS10_RC4_128_SHA                 0x0020
+#define OP_PCL_TLS10_RC4_128_SHA_2               0x008a
+#define OP_PCL_TLS10_RC4_128_SHA_3               0x008e
+#define OP_PCL_TLS10_RC4_128_SHA_4               0x0092
+#define OP_PCL_TLS10_RC4_128_SHA_5               0x0005
+#define OP_PCL_TLS10_RC4_128_SHA_6               0xc002
+#define OP_PCL_TLS10_RC4_128_SHA_7               0xc007
+#define OP_PCL_TLS10_RC4_128_SHA_8               0xc00c
+#define OP_PCL_TLS10_RC4_128_SHA_9               0xc011
+#define OP_PCL_TLS10_RC4_128_SHA_10              0xc016
+
+#define OP_PCL_TLS10_RC4_40_SHA                  0x0028
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_MD5            0xff23
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160         0xff30
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224         0xff34
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256         0xff36
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384         0xff33
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512         0xff35
+#define OP_PCL_TLS10_AES_128_CBC_SHA160          0xff80
+#define OP_PCL_TLS10_AES_128_CBC_SHA224          0xff84
+#define OP_PCL_TLS10_AES_128_CBC_SHA256          0xff86
+#define OP_PCL_TLS10_AES_128_CBC_SHA384          0xff83
+#define OP_PCL_TLS10_AES_128_CBC_SHA512          0xff85
+#define OP_PCL_TLS10_AES_192_CBC_SHA160          0xff20
+#define OP_PCL_TLS10_AES_192_CBC_SHA224          0xff24
+#define OP_PCL_TLS10_AES_192_CBC_SHA256          0xff26
+#define OP_PCL_TLS10_AES_192_CBC_SHA384          0xff23
+#define OP_PCL_TLS10_AES_192_CBC_SHA512          0xff25
+#define OP_PCL_TLS10_AES_256_CBC_SHA160          0xff60
+#define OP_PCL_TLS10_AES_256_CBC_SHA224          0xff64
+#define OP_PCL_TLS10_AES_256_CBC_SHA256          0xff66
+#define OP_PCL_TLS10_AES_256_CBC_SHA384          0xff63
+#define OP_PCL_TLS10_AES_256_CBC_SHA512          0xff65
+
+/* For TLS 1.1 - OP_PCLID_TLS11 */
+#define OP_PCL_TLS11_AES_128_CBC_SHA             0x002f
+#define OP_PCL_TLS11_AES_128_CBC_SHA_2           0x0030
+#define OP_PCL_TLS11_AES_128_CBC_SHA_3           0x0031
+#define OP_PCL_TLS11_AES_128_CBC_SHA_4           0x0032
+#define OP_PCL_TLS11_AES_128_CBC_SHA_5           0x0033
+#define OP_PCL_TLS11_AES_128_CBC_SHA_6           0x0034
+#define OP_PCL_TLS11_AES_128_CBC_SHA_7           0x008c
+#define OP_PCL_TLS11_AES_128_CBC_SHA_8           0x0090
+#define OP_PCL_TLS11_AES_128_CBC_SHA_9           0x0094
+#define OP_PCL_TLS11_AES_128_CBC_SHA_10          0xc004
+#define OP_PCL_TLS11_AES_128_CBC_SHA_11          0xc009
+#define OP_PCL_TLS11_AES_128_CBC_SHA_12          0xc00e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_13          0xc013
+#define OP_PCL_TLS11_AES_128_CBC_SHA_14          0xc018
+#define OP_PCL_TLS11_AES_128_CBC_SHA_15          0xc01d
+#define OP_PCL_TLS11_AES_128_CBC_SHA_16          0xc01e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_17          0xc01f
+
+#define OP_PCL_TLS11_AES_256_CBC_SHA             0x0035
+#define OP_PCL_TLS11_AES_256_CBC_SHA_2           0x0036
+#define OP_PCL_TLS11_AES_256_CBC_SHA_3           0x0037
+#define OP_PCL_TLS11_AES_256_CBC_SHA_4           0x0038
+#define OP_PCL_TLS11_AES_256_CBC_SHA_5           0x0039
+#define OP_PCL_TLS11_AES_256_CBC_SHA_6           0x003a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_7           0x008d
+#define OP_PCL_TLS11_AES_256_CBC_SHA_8           0x0091
+#define OP_PCL_TLS11_AES_256_CBC_SHA_9           0x0095
+#define OP_PCL_TLS11_AES_256_CBC_SHA_10          0xc005
+#define OP_PCL_TLS11_AES_256_CBC_SHA_11          0xc00a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_12          0xc00f
+#define OP_PCL_TLS11_AES_256_CBC_SHA_13          0xc014
+#define OP_PCL_TLS11_AES_256_CBC_SHA_14          0xc019
+#define OP_PCL_TLS11_AES_256_CBC_SHA_15          0xc020
+#define OP_PCL_TLS11_AES_256_CBC_SHA_16          0xc021
+#define OP_PCL_TLS11_AES_256_CBC_SHA_17          0xc022
+
+/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5            0x0023 */
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA            0x001f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2          0x008b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3          0x008f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4          0x0093
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5          0x000a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6          0x000d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7          0x0010
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8          0x0013
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9          0x0016
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10         0x001b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11         0xc003
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12         0xc008
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13         0xc00d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14         0xc012
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15         0xc017
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16         0xc01a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17         0xc01b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18         0xc01c
+
+#define OP_PCL_TLS11_DES40_CBC_MD5               0x0029
+
+#define OP_PCL_TLS11_DES_CBC_MD5                 0x0022
+
+#define OP_PCL_TLS11_DES40_CBC_SHA               0x0008
+#define OP_PCL_TLS11_DES40_CBC_SHA_2             0x000b
+#define OP_PCL_TLS11_DES40_CBC_SHA_3             0x000e
+#define OP_PCL_TLS11_DES40_CBC_SHA_4             0x0011
+#define OP_PCL_TLS11_DES40_CBC_SHA_5             0x0014
+#define OP_PCL_TLS11_DES40_CBC_SHA_6             0x0019
+#define OP_PCL_TLS11_DES40_CBC_SHA_7             0x0026
+
+#define OP_PCL_TLS11_DES_CBC_SHA                 0x001e
+#define OP_PCL_TLS11_DES_CBC_SHA_2               0x0009
+#define OP_PCL_TLS11_DES_CBC_SHA_3               0x000c
+#define OP_PCL_TLS11_DES_CBC_SHA_4               0x000f
+#define OP_PCL_TLS11_DES_CBC_SHA_5               0x0012
+#define OP_PCL_TLS11_DES_CBC_SHA_6               0x0015
+#define OP_PCL_TLS11_DES_CBC_SHA_7               0x001a
+
+#define OP_PCL_TLS11_RC4_128_MD5                 0x0024
+#define OP_PCL_TLS11_RC4_128_MD5_2               0x0004
+#define OP_PCL_TLS11_RC4_128_MD5_3               0x0018
+
+#define OP_PCL_TLS11_RC4_40_MD5                  0x002b
+#define OP_PCL_TLS11_RC4_40_MD5_2                0x0003
+#define OP_PCL_TLS11_RC4_40_MD5_3                0x0017
+
+#define OP_PCL_TLS11_RC4_128_SHA                 0x0020
+#define OP_PCL_TLS11_RC4_128_SHA_2               0x008a
+#define OP_PCL_TLS11_RC4_128_SHA_3               0x008e
+#define OP_PCL_TLS11_RC4_128_SHA_4               0x0092
+#define OP_PCL_TLS11_RC4_128_SHA_5               0x0005
+#define OP_PCL_TLS11_RC4_128_SHA_6               0xc002
+#define OP_PCL_TLS11_RC4_128_SHA_7               0xc007
+#define OP_PCL_TLS11_RC4_128_SHA_8               0xc00c
+#define OP_PCL_TLS11_RC4_128_SHA_9               0xc011
+#define OP_PCL_TLS11_RC4_128_SHA_10              0xc016
+
+#define OP_PCL_TLS11_RC4_40_SHA                  0x0028
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_MD5            0xff23
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160         0xff30
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224         0xff34
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256         0xff36
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384         0xff33
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512         0xff35
+#define OP_PCL_TLS11_AES_128_CBC_SHA160          0xff80
+#define OP_PCL_TLS11_AES_128_CBC_SHA224          0xff84
+#define OP_PCL_TLS11_AES_128_CBC_SHA256          0xff86
+#define OP_PCL_TLS11_AES_128_CBC_SHA384          0xff83
+#define OP_PCL_TLS11_AES_128_CBC_SHA512          0xff85
+#define OP_PCL_TLS11_AES_192_CBC_SHA160          0xff20
+#define OP_PCL_TLS11_AES_192_CBC_SHA224          0xff24
+#define OP_PCL_TLS11_AES_192_CBC_SHA256          0xff26
+#define OP_PCL_TLS11_AES_192_CBC_SHA384          0xff23
+#define OP_PCL_TLS11_AES_192_CBC_SHA512          0xff25
+#define OP_PCL_TLS11_AES_256_CBC_SHA160          0xff60
+#define OP_PCL_TLS11_AES_256_CBC_SHA224          0xff64
+#define OP_PCL_TLS11_AES_256_CBC_SHA256          0xff66
+#define OP_PCL_TLS11_AES_256_CBC_SHA384          0xff63
+#define OP_PCL_TLS11_AES_256_CBC_SHA512          0xff65
+
+/* For TLS 1.2 - OP_PCLID_TLS12 */
+#define OP_PCL_TLS12_AES_128_CBC_SHA             0x002f
+#define OP_PCL_TLS12_AES_128_CBC_SHA_2           0x0030
+#define OP_PCL_TLS12_AES_128_CBC_SHA_3           0x0031
+#define OP_PCL_TLS12_AES_128_CBC_SHA_4           0x0032
+#define OP_PCL_TLS12_AES_128_CBC_SHA_5           0x0033
+#define OP_PCL_TLS12_AES_128_CBC_SHA_6           0x0034
+#define OP_PCL_TLS12_AES_128_CBC_SHA_7           0x008c
+#define OP_PCL_TLS12_AES_128_CBC_SHA_8           0x0090
+#define OP_PCL_TLS12_AES_128_CBC_SHA_9           0x0094
+#define OP_PCL_TLS12_AES_128_CBC_SHA_10          0xc004
+#define OP_PCL_TLS12_AES_128_CBC_SHA_11          0xc009
+#define OP_PCL_TLS12_AES_128_CBC_SHA_12          0xc00e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_13          0xc013
+#define OP_PCL_TLS12_AES_128_CBC_SHA_14          0xc018
+#define OP_PCL_TLS12_AES_128_CBC_SHA_15          0xc01d
+#define OP_PCL_TLS12_AES_128_CBC_SHA_16          0xc01e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_17          0xc01f
+
+#define OP_PCL_TLS12_AES_256_CBC_SHA             0x0035
+#define OP_PCL_TLS12_AES_256_CBC_SHA_2           0x0036
+#define OP_PCL_TLS12_AES_256_CBC_SHA_3           0x0037
+#define OP_PCL_TLS12_AES_256_CBC_SHA_4           0x0038
+#define OP_PCL_TLS12_AES_256_CBC_SHA_5           0x0039
+#define OP_PCL_TLS12_AES_256_CBC_SHA_6           0x003a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_7           0x008d
+#define OP_PCL_TLS12_AES_256_CBC_SHA_8           0x0091
+#define OP_PCL_TLS12_AES_256_CBC_SHA_9           0x0095
+#define OP_PCL_TLS12_AES_256_CBC_SHA_10          0xc005
+#define OP_PCL_TLS12_AES_256_CBC_SHA_11          0xc00a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_12          0xc00f
+#define OP_PCL_TLS12_AES_256_CBC_SHA_13          0xc014
+#define OP_PCL_TLS12_AES_256_CBC_SHA_14          0xc019
+#define OP_PCL_TLS12_AES_256_CBC_SHA_15          0xc020
+#define OP_PCL_TLS12_AES_256_CBC_SHA_16          0xc021
+#define OP_PCL_TLS12_AES_256_CBC_SHA_17          0xc022
+
+/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5            0x0023 */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA            0x001f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2          0x008b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3          0x008f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4          0x0093
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5          0x000a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6          0x000d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7          0x0010
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8          0x0013
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9          0x0016
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10         0x001b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11         0xc003
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12         0xc008
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13         0xc00d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14         0xc012
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15         0xc017
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16         0xc01a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17         0xc01b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18         0xc01c
+
+#define OP_PCL_TLS12_DES40_CBC_MD5               0x0029
+
+#define OP_PCL_TLS12_DES_CBC_MD5                 0x0022
+
+#define OP_PCL_TLS12_DES40_CBC_SHA               0x0008
+#define OP_PCL_TLS12_DES40_CBC_SHA_2             0x000b
+#define OP_PCL_TLS12_DES40_CBC_SHA_3             0x000e
+#define OP_PCL_TLS12_DES40_CBC_SHA_4             0x0011
+#define OP_PCL_TLS12_DES40_CBC_SHA_5             0x0014
+#define OP_PCL_TLS12_DES40_CBC_SHA_6             0x0019
+#define OP_PCL_TLS12_DES40_CBC_SHA_7             0x0026
+
+#define OP_PCL_TLS12_DES_CBC_SHA                 0x001e
+#define OP_PCL_TLS12_DES_CBC_SHA_2               0x0009
+#define OP_PCL_TLS12_DES_CBC_SHA_3               0x000c
+#define OP_PCL_TLS12_DES_CBC_SHA_4               0x000f
+#define OP_PCL_TLS12_DES_CBC_SHA_5               0x0012
+#define OP_PCL_TLS12_DES_CBC_SHA_6               0x0015
+#define OP_PCL_TLS12_DES_CBC_SHA_7               0x001a
+
+#define OP_PCL_TLS12_RC4_128_MD5                 0x0024
+#define OP_PCL_TLS12_RC4_128_MD5_2               0x0004
+#define OP_PCL_TLS12_RC4_128_MD5_3               0x0018
+
+#define OP_PCL_TLS12_RC4_40_MD5                  0x002b
+#define OP_PCL_TLS12_RC4_40_MD5_2                0x0003
+#define OP_PCL_TLS12_RC4_40_MD5_3                0x0017
+
+#define OP_PCL_TLS12_RC4_128_SHA                 0x0020
+#define OP_PCL_TLS12_RC4_128_SHA_2               0x008a
+#define OP_PCL_TLS12_RC4_128_SHA_3               0x008e
+#define OP_PCL_TLS12_RC4_128_SHA_4               0x0092
+#define OP_PCL_TLS12_RC4_128_SHA_5               0x0005
+#define OP_PCL_TLS12_RC4_128_SHA_6               0xc002
+#define OP_PCL_TLS12_RC4_128_SHA_7               0xc007
+#define OP_PCL_TLS12_RC4_128_SHA_8               0xc00c
+#define OP_PCL_TLS12_RC4_128_SHA_9               0xc011
+#define OP_PCL_TLS12_RC4_128_SHA_10              0xc016
+
+#define OP_PCL_TLS12_RC4_40_SHA                  0x0028
+
+/* #define OP_PCL_TLS12_AES_128_CBC_SHA256          0x003c */
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_2        0x003e
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_3        0x003f
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_4        0x0040
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_5        0x0067
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_6        0x006c
+
+/* #define OP_PCL_TLS12_AES_256_CBC_SHA256          0x003d */
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_2        0x0068
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_3        0x0069
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_4        0x006a
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_5        0x006b
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_6        0x006d
+
+/* AEAD_AES_xxx_CCM/GCM remain to be defined... */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_MD5            0xff23
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160         0xff30
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224         0xff34
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256         0xff36
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384         0xff33
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512         0xff35
+#define OP_PCL_TLS12_AES_128_CBC_SHA160          0xff80
+#define OP_PCL_TLS12_AES_128_CBC_SHA224          0xff84
+#define OP_PCL_TLS12_AES_128_CBC_SHA256          0xff86
+#define OP_PCL_TLS12_AES_128_CBC_SHA384          0xff83
+#define OP_PCL_TLS12_AES_128_CBC_SHA512          0xff85
+#define OP_PCL_TLS12_AES_192_CBC_SHA160          0xff20
+#define OP_PCL_TLS12_AES_192_CBC_SHA224          0xff24
+#define OP_PCL_TLS12_AES_192_CBC_SHA256          0xff26
+#define OP_PCL_TLS12_AES_192_CBC_SHA384          0xff23
+#define OP_PCL_TLS12_AES_192_CBC_SHA512          0xff25
+#define OP_PCL_TLS12_AES_256_CBC_SHA160          0xff60
+#define OP_PCL_TLS12_AES_256_CBC_SHA224          0xff64
+#define OP_PCL_TLS12_AES_256_CBC_SHA256          0xff66
+#define OP_PCL_TLS12_AES_256_CBC_SHA384          0xff63
+#define OP_PCL_TLS12_AES_256_CBC_SHA512          0xff65
+
+/* For DTLS - OP_PCLID_DTLS */
+
+#define OP_PCL_DTLS_AES_128_CBC_SHA              0x002f
+#define OP_PCL_DTLS_AES_128_CBC_SHA_2            0x0030
+#define OP_PCL_DTLS_AES_128_CBC_SHA_3            0x0031
+#define OP_PCL_DTLS_AES_128_CBC_SHA_4            0x0032
+#define OP_PCL_DTLS_AES_128_CBC_SHA_5            0x0033
+#define OP_PCL_DTLS_AES_128_CBC_SHA_6            0x0034
+#define OP_PCL_DTLS_AES_128_CBC_SHA_7            0x008c
+#define OP_PCL_DTLS_AES_128_CBC_SHA_8            0x0090
+#define OP_PCL_DTLS_AES_128_CBC_SHA_9            0x0094
+#define OP_PCL_DTLS_AES_128_CBC_SHA_10           0xc004
+#define OP_PCL_DTLS_AES_128_CBC_SHA_11           0xc009
+#define OP_PCL_DTLS_AES_128_CBC_SHA_12           0xc00e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_13           0xc013
+#define OP_PCL_DTLS_AES_128_CBC_SHA_14           0xc018
+#define OP_PCL_DTLS_AES_128_CBC_SHA_15           0xc01d
+#define OP_PCL_DTLS_AES_128_CBC_SHA_16           0xc01e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_17           0xc01f
+
+#define OP_PCL_DTLS_AES_256_CBC_SHA              0x0035
+#define OP_PCL_DTLS_AES_256_CBC_SHA_2            0x0036
+#define OP_PCL_DTLS_AES_256_CBC_SHA_3            0x0037
+#define OP_PCL_DTLS_AES_256_CBC_SHA_4            0x0038
+#define OP_PCL_DTLS_AES_256_CBC_SHA_5            0x0039
+#define OP_PCL_DTLS_AES_256_CBC_SHA_6            0x003a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_7            0x008d
+#define OP_PCL_DTLS_AES_256_CBC_SHA_8            0x0091
+#define OP_PCL_DTLS_AES_256_CBC_SHA_9            0x0095
+#define OP_PCL_DTLS_AES_256_CBC_SHA_10           0xc005
+#define OP_PCL_DTLS_AES_256_CBC_SHA_11           0xc00a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_12           0xc00f
+#define OP_PCL_DTLS_AES_256_CBC_SHA_13           0xc014
+#define OP_PCL_DTLS_AES_256_CBC_SHA_14           0xc019
+#define OP_PCL_DTLS_AES_256_CBC_SHA_15           0xc020
+#define OP_PCL_DTLS_AES_256_CBC_SHA_16           0xc021
+#define OP_PCL_DTLS_AES_256_CBC_SHA_17           0xc022
+
+/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5             0x0023 */
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA             0x001f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2           0x008b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3           0x008f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4           0x0093
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5           0x000a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6           0x000d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7           0x0010
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8           0x0013
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9           0x0016
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10          0x001b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11          0xc003
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12          0xc008
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13          0xc00d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14          0xc012
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15          0xc017
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16          0xc01a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17          0xc01b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18          0xc01c
+
+#define OP_PCL_DTLS_DES40_CBC_MD5                0x0029
+
+#define OP_PCL_DTLS_DES_CBC_MD5                  0x0022
+
+#define OP_PCL_DTLS_DES40_CBC_SHA                0x0008
+#define OP_PCL_DTLS_DES40_CBC_SHA_2              0x000b
+#define OP_PCL_DTLS_DES40_CBC_SHA_3              0x000e
+#define OP_PCL_DTLS_DES40_CBC_SHA_4              0x0011
+#define OP_PCL_DTLS_DES40_CBC_SHA_5              0x0014
+#define OP_PCL_DTLS_DES40_CBC_SHA_6              0x0019
+#define OP_PCL_DTLS_DES40_CBC_SHA_7              0x0026
+
+#define OP_PCL_DTLS_DES_CBC_SHA                  0x001e
+#define OP_PCL_DTLS_DES_CBC_SHA_2                0x0009
+#define OP_PCL_DTLS_DES_CBC_SHA_3                0x000c
+#define OP_PCL_DTLS_DES_CBC_SHA_4                0x000f
+#define OP_PCL_DTLS_DES_CBC_SHA_5                0x0012
+#define OP_PCL_DTLS_DES_CBC_SHA_6                0x0015
+#define OP_PCL_DTLS_DES_CBC_SHA_7                0x001a
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_MD5             0xff23
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160          0xff30
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224          0xff34
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256          0xff36
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384          0xff33
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512          0xff35
+#define OP_PCL_DTLS_AES_128_CBC_SHA160           0xff80
+#define OP_PCL_DTLS_AES_128_CBC_SHA224           0xff84
+#define OP_PCL_DTLS_AES_128_CBC_SHA256           0xff86
+#define OP_PCL_DTLS_AES_128_CBC_SHA384           0xff83
+#define OP_PCL_DTLS_AES_128_CBC_SHA512           0xff85
+#define OP_PCL_DTLS_AES_192_CBC_SHA160           0xff20
+#define OP_PCL_DTLS_AES_192_CBC_SHA224           0xff24
+#define OP_PCL_DTLS_AES_192_CBC_SHA256           0xff26
+#define OP_PCL_DTLS_AES_192_CBC_SHA384           0xff23
+#define OP_PCL_DTLS_AES_192_CBC_SHA512           0xff25
+#define OP_PCL_DTLS_AES_256_CBC_SHA160           0xff60
+#define OP_PCL_DTLS_AES_256_CBC_SHA224           0xff64
+#define OP_PCL_DTLS_AES_256_CBC_SHA256           0xff66
+#define OP_PCL_DTLS_AES_256_CBC_SHA384           0xff63
+#define OP_PCL_DTLS_AES_256_CBC_SHA512           0xff65
+
+/* 802.16 WiMAX protinfos */
+#define OP_PCL_WIMAX_OFDM                        0x0201
+#define OP_PCL_WIMAX_OFDMA                       0x0231
+
+/* 802.11 WiFi protinfos */
+#define OP_PCL_WIFI                              0xac04
+
+/* MacSec protinfos */
+#define OP_PCL_MACSEC                            0x0001
+
+/* PKI unidirectional protocol protinfo bits */
+#define OP_PCL_PKPROT_TEST                       0x0008
+#define OP_PCL_PKPROT_DECRYPT                    0x0004
+#define OP_PCL_PKPROT_ECC                        0x0002
+#define OP_PCL_PKPROT_F2M                        0x0001
+
+/* For non-protocol/alg-only op commands */
+#define OP_ALG_TYPE_SHIFT      24
+#define OP_ALG_TYPE_MASK       (0x7 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS1     (2 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS2     (4 << OP_ALG_TYPE_SHIFT)
+
+#define OP_ALG_ALGSEL_SHIFT    16
+#define OP_ALG_ALGSEL_MASK     (0xff << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SUBMASK  (0x0f << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_AES      (0x10 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_DES      (0x20 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_3DES     (0x21 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ARC4     (0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_MD5      (0x40 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA1     (0x41 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA224   (0x42 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA256   (0x43 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA384   (0x44 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA512   (0x45 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_RNG      (0x50 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW     (0x60 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F8  (0x60 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_KASUMI   (0x70 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_CRC      (0x90 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F9  (0xA0 << OP_ALG_ALGSEL_SHIFT)
+
+#define OP_ALG_AAI_SHIFT       4
+#define OP_ALG_AAI_MASK                (0x1ff << OP_ALG_AAI_SHIFT)
+
+/* blockcipher AAI set */
+#define OP_ALG_AAI_CTR_MOD128  (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD8    (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD16   (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD24   (0x03 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD32   (0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD40   (0x05 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD48   (0x06 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD56   (0x07 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD64   (0x08 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD72   (0x09 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD80   (0x0a << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD88   (0x0b << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD96   (0x0c << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD104  (0x0d << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD112  (0x0e << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD120  (0x0f << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC         (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_ECB         (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CFB         (0x30 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_OFB         (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XTS         (0x50 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CMAC                (0x60 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XCBC_MAC    (0x70 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CCM         (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GCM         (0x90 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC_XCBCMAC (0xa0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_XCBCMAC (0xb0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CHECKODD    (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DK          (0x100 << OP_ALG_AAI_SHIFT)
+
+/* randomizer AAI set */
+#define OP_ALG_AAI_RNG         (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_NOZERO  (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_ODD     (0x20 << OP_ALG_AAI_SHIFT)
+
+/* hmac/smac AAI set */
+#define OP_ALG_AAI_HASH                (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC                (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_SMAC                (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC_PRECOMP        (0x04 << OP_ALG_AAI_SHIFT)
+
+/* CRC AAI set */
+#define OP_ALG_AAI_802         (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_3385                (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CUST_POLY   (0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DIS         (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOS         (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOC         (0x40 << OP_ALG_AAI_SHIFT)
+
+/* Kasumi/SNOW AAI set */
+#define OP_ALG_AAI_F8          (0xc0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_F9          (0xc8 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GSM         (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_EDGE                (0x20 << OP_ALG_AAI_SHIFT)
+
+#define OP_ALG_AS_SHIFT                2
+#define OP_ALG_AS_MASK         (0x3 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_UPDATE       (0 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INIT         (1 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_FINALIZE     (2 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INITFINAL    (3 << OP_ALG_AS_SHIFT)
+
+#define OP_ALG_ICV_SHIFT       1
+#define OP_ALG_ICV_MASK                (1 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_ICV_OFF         (0 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_ICV_ON          (1 << OP_ALG_ICV_SHIFT)
+
+#define OP_ALG_DIR_SHIFT       0
+#define OP_ALG_DIR_MASK                1
+#define OP_ALG_DECRYPT         0
+#define OP_ALG_ENCRYPT         1
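+
+/*
+ * Illustrative example (a sketch, not from the SEC hardware
+ * reference): a single-pass class 1 AES-CBC encryption would combine
+ *
+ *   OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC |
+ *       OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT
+ */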
+
+/* PKHA algorithm type set */
+#define OP_ALG_PK                    0x00800000
+#define OP_ALG_PK_FUN_MASK           0x3f /* clrmem, modmath, or cpymem */
+
+/* PKHA mode clear memory functions */
+#define OP_ALG_PKMODE_A_RAM          0x80000
+#define OP_ALG_PKMODE_B_RAM          0x40000
+#define OP_ALG_PKMODE_E_RAM          0x20000
+#define OP_ALG_PKMODE_N_RAM          0x10000
+#define OP_ALG_PKMODE_CLEARMEM       0x00001
+
+/* PKHA mode modular-arithmetic functions */
+#define OP_ALG_PKMODE_MOD_IN_MONTY   0x80000
+#define OP_ALG_PKMODE_MOD_OUT_MONTY  0x40000
+#define OP_ALG_PKMODE_MOD_F2M        0x20000
+#define OP_ALG_PKMODE_MOD_R2_IN      0x10000
+#define OP_ALG_PKMODE_PRJECTV        0x00800
+#define OP_ALG_PKMODE_TIME_EQ        0x400
+#define OP_ALG_PKMODE_OUT_B          0x000
+#define OP_ALG_PKMODE_OUT_A          0x100
+#define OP_ALG_PKMODE_MOD_ADD        0x002
+#define OP_ALG_PKMODE_MOD_SUB_AB     0x003
+#define OP_ALG_PKMODE_MOD_SUB_BA     0x004
+#define OP_ALG_PKMODE_MOD_MULT       0x005
+#define OP_ALG_PKMODE_MOD_EXPO       0x006
+#define OP_ALG_PKMODE_MOD_REDUCT     0x007
+#define OP_ALG_PKMODE_MOD_INV        0x008
+#define OP_ALG_PKMODE_MOD_ECC_ADD    0x009
+#define OP_ALG_PKMODE_MOD_ECC_DBL    0x00a
+#define OP_ALG_PKMODE_MOD_ECC_MULT   0x00b
+#define OP_ALG_PKMODE_MOD_MONT_CNST  0x00c
+#define OP_ALG_PKMODE_MOD_CRT_CNST   0x00d
+#define OP_ALG_PKMODE_MOD_GCD        0x00e
+#define OP_ALG_PKMODE_MOD_PRIMALITY  0x00f
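+
+/*
+ * Illustrative example (a sketch, not from the SEC hardware
+ * reference): a PKHA modular exponentiation would combine
+ *
+ *   OP_ALG_PK | OP_ALG_PKMODE_MOD_EXPO | OP_ALG_PKMODE_OUT_B
+ *
+ * OP_ALG_PKMODE_OUT_B is the zero default; use OP_ALG_PKMODE_OUT_A to
+ * direct the result to register A instead.
+ */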
+
+/* PKHA mode copy-memory functions */
+#define OP_ALG_PKMODE_SRC_REG_SHIFT  13
+#define OP_ALG_PKMODE_SRC_REG_MASK   (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_SHIFT  10
+#define OP_ALG_PKMODE_DST_REG_MASK   (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_SHIFT  8
+#define OP_ALG_PKMODE_SRC_SEG_MASK   (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_SHIFT  6
+#define OP_ALG_PKMODE_DST_SEG_MASK   (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+#define OP_ALG_PKMODE_SRC_REG_A      (0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_B      (1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_N      (3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_A      (0 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_B      (1 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_E      (2 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_N      (3 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_0      (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_1      (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_2      (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_3      (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_0      (0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_1      (1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_2      (2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_3      (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_CPYMEM_N_SZ    0x80
+#define OP_ALG_PKMODE_CPYMEM_SRC_SZ  0x81
+
+/*
+ * SEQ_IN_PTR Command Constructs
+ */
+
+/* Release Buffers */
+#define SQIN_RBS               0x04000000
+
+/* Sequence pointer is really a descriptor */
+#define SQIN_INL               0x02000000
+
+/* Sequence pointer is a scatter-gather table */
+#define SQIN_SGF               0x01000000
+
+/* Appends to a previous pointer */
+#define SQIN_PRE               0x00800000
+
+/* Use extended length following pointer */
+#define SQIN_EXT               0x00400000
+
+/* Restore sequence with pointer/length */
+#define SQIN_RTO               0x00200000
+
+/* Replace job descriptor */
+#define SQIN_RJD               0x00100000
+
+#define SQIN_LEN_SHIFT           0
+#define SQIN_LEN_MASK           (0xffff << SQIN_LEN_SHIFT)
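+
+/*
+ * Illustrative example (a sketch, not from the SEC hardware
+ * reference): a SEQ_IN_PTR whose input is described by a
+ * scatter-gather table, with a length too large for the 16-bit field,
+ * would set
+ *
+ *   SQIN_SGF | SQIN_EXT
+ *
+ * and carry the 32-bit length in the extended-length word after the
+ * pointer.
+ */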
+
+/*
+ * SEQ_OUT_PTR Command Constructs
+ */
+
+/* Sequence pointer is a scatter-gather table */
+#define SQOUT_SGF              0x01000000
+
+/* Appends to a previous pointer */
+#define SQOUT_PRE              0x00800000
+
+/* Restore sequence with pointer/length */
+#define SQOUT_RTO              0x00200000
+
+/* Use extended length following pointer */
+#define SQOUT_EXT              0x00400000
+
+#define SQOUT_LEN_SHIFT           0
+#define SQOUT_LEN_MASK           (0xffff << SQOUT_LEN_SHIFT)
+
+/*
+ * SIGNATURE Command Constructs
+ */
+
+/* TYPE field is all that's relevant */
+#define SIGN_TYPE_SHIFT         16
+#define SIGN_TYPE_MASK          (0x0f << SIGN_TYPE_SHIFT)
+
+#define SIGN_TYPE_FINAL         (0x00 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_2         (0x0a << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_3         (0x0b << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_4         (0x0c << SIGN_TYPE_SHIFT)
+
+/*
+ * MOVE Command Constructs
+ */
+
+#define MOVE_AUX_SHIFT          25
+#define MOVE_AUX_MASK           (3 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_MS             (2 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_LS             (1 << MOVE_AUX_SHIFT)
+
+#define MOVE_WAITCOMP_SHIFT     24
+#define MOVE_WAITCOMP_MASK      (1 << MOVE_WAITCOMP_SHIFT)
+#define MOVE_WAITCOMP           (1 << MOVE_WAITCOMP_SHIFT)
+
+#define MOVE_SRC_SHIFT          20
+#define MOVE_SRC_MASK           (0x0f << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS1CTX      (0x00 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS2CTX      (0x01 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_OUTFIFO        (0x02 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_DESCBUF        (0x03 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH0          (0x04 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH1          (0x05 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH2          (0x06 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH3          (0x07 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO         (0x08 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_CL      (0x09 << MOVE_SRC_SHIFT)
+
+#define MOVE_DEST_SHIFT         16
+#define MOVE_DEST_MASK          (0x0f << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1CTX     (0x00 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2CTX     (0x01 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_OUTFIFO       (0x02 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_DESCBUF       (0x03 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH0         (0x04 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH1         (0x05 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH2         (0x06 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH3         (0x07 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1INFIFO  (0x08 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2INFIFO  (0x09 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_PK_A          (0x0c << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1KEY     (0x0d << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2KEY     (0x0e << MOVE_DEST_SHIFT)
+
+#define MOVE_OFFSET_SHIFT       8
+#define MOVE_OFFSET_MASK        (0xff << MOVE_OFFSET_SHIFT)
+
+#define MOVE_LEN_SHIFT          0
+#define MOVE_LEN_MASK           (0xff << MOVE_LEN_SHIFT)
+
+#define MOVELEN_MRSEL_SHIFT     0
+#define MOVELEN_MRSEL_MASK      (0x3 << MOVELEN_MRSEL_SHIFT)
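+
+/*
+ * Illustrative example (a sketch, not from the SEC hardware
+ * reference): moving 16 bytes from MATH register 0 into the class 1
+ * context, waiting for completion, would combine
+ *
+ *   MOVE_WAITCOMP | MOVE_SRC_MATH0 | MOVE_DEST_CLASS1CTX |
+ *       (0 << MOVE_OFFSET_SHIFT) | (16 << MOVE_LEN_SHIFT)
+ */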
+
+/*
+ * MATH Command Constructs
+ */
+
+#define MATH_IFB_SHIFT          26
+#define MATH_IFB_MASK           (1 << MATH_IFB_SHIFT)
+#define MATH_IFB                (1 << MATH_IFB_SHIFT)
+
+#define MATH_NFU_SHIFT          25
+#define MATH_NFU_MASK           (1 << MATH_NFU_SHIFT)
+#define MATH_NFU                (1 << MATH_NFU_SHIFT)
+
+#define MATH_STL_SHIFT          24
+#define MATH_STL_MASK           (1 << MATH_STL_SHIFT)
+#define MATH_STL                (1 << MATH_STL_SHIFT)
+
+/* Function selectors */
+#define MATH_FUN_SHIFT          20
+#define MATH_FUN_MASK           (0x0f << MATH_FUN_SHIFT)
+#define MATH_FUN_ADD            (0x00 << MATH_FUN_SHIFT)
+#define MATH_FUN_ADDC           (0x01 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUB            (0x02 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUBB           (0x03 << MATH_FUN_SHIFT)
+#define MATH_FUN_OR             (0x04 << MATH_FUN_SHIFT)
+#define MATH_FUN_AND            (0x05 << MATH_FUN_SHIFT)
+#define MATH_FUN_XOR            (0x06 << MATH_FUN_SHIFT)
+#define MATH_FUN_LSHIFT         (0x07 << MATH_FUN_SHIFT)
+#define MATH_FUN_RSHIFT         (0x08 << MATH_FUN_SHIFT)
+#define MATH_FUN_SHLD           (0x09 << MATH_FUN_SHIFT)
+#define MATH_FUN_ZBYT           (0x0a << MATH_FUN_SHIFT)
+
+/* Source 0 selectors */
+#define MATH_SRC0_SHIFT         16
+#define MATH_SRC0_MASK          (0x0f << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG0          (0x00 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG1          (0x01 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG2          (0x02 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG3          (0x03 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_IMM           (0x04 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQINLEN      (0x08 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQOUTLEN     (0x09 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQINLEN   (0x0a << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQOUTLEN  (0x0b << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ZERO          (0x0c << MATH_SRC0_SHIFT)
+
+/* Source 1 selectors */
+#define MATH_SRC1_SHIFT         12
+#define MATH_SRC1_MASK          (0x0f << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG0          (0x00 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG1          (0x01 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG2          (0x02 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG3          (0x03 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_IMM           (0x04 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_INFIFO        (0x0a << MATH_SRC1_SHIFT)
+#define MATH_SRC1_OUTFIFO       (0x0b << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ONE           (0x0c << MATH_SRC1_SHIFT)
+
+/* Destination selectors */
+#define MATH_DEST_SHIFT         8
+#define MATH_DEST_MASK          (0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_REG0          (0x00 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG1          (0x01 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG2          (0x02 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG3          (0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQINLEN      (0x08 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQOUTLEN     (0x09 << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQINLEN   (0x0a << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQOUTLEN  (0x0b << MATH_DEST_SHIFT)
+#define MATH_DEST_NONE          (0x0f << MATH_DEST_SHIFT)
+
+/* Length selectors */
+#define MATH_LEN_SHIFT          0
+#define MATH_LEN_MASK           (0x0f << MATH_LEN_SHIFT)
+#define MATH_LEN_1BYTE          0x01
+#define MATH_LEN_2BYTE          0x02
+#define MATH_LEN_4BYTE          0x04
+#define MATH_LEN_8BYTE          0x08
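+
+/*
+ * Illustrative example (a sketch, not from the SEC hardware
+ * reference): a 4-byte MATH operation subtracting an immediate from
+ * the remaining input sequence length, with the result going to the
+ * variable input length register, would combine
+ *
+ *   MATH_FUN_SUB | MATH_SRC0_SEQINLEN | MATH_SRC1_IMM |
+ *       MATH_DEST_VARSEQINLEN | MATH_LEN_4BYTE
+ */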
+
+/*
+ * JUMP Command Constructs
+ */
+
+#define JUMP_CLASS_SHIFT        25
+#define JUMP_CLASS_MASK                (3 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_NONE                0
+#define JUMP_CLASS_CLASS1      (1 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_CLASS2      (2 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_BOTH                (3 << JUMP_CLASS_SHIFT)
+
+#define JUMP_JSL_SHIFT          24
+#define JUMP_JSL_MASK           (1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL                (1 << JUMP_JSL_SHIFT)
+
+#define JUMP_TYPE_SHIFT         22
+#define JUMP_TYPE_MASK          (0x03 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL         (0x00 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_NONLOCAL      (0x01 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT          (0x02 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT_USER     (0x03 << JUMP_TYPE_SHIFT)
+
+#define JUMP_TEST_SHIFT         16
+#define JUMP_TEST_MASK          (0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ALL           (0x00 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVALL        (0x01 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ANY           (0x02 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVANY        (0x03 << JUMP_TEST_SHIFT)
+
+/* Condition codes. JSL bit is factored in */
+#define JUMP_COND_SHIFT         8
+#define JUMP_COND_MASK          (0x100ff << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_0          (0x80 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_GCD_1      (0x40 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_PRIME      (0x20 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_N        (0x08 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_Z        (0x04 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_C        (0x02 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_NV       (0x01 << JUMP_COND_SHIFT)
+
+#define JUMP_COND_JRP           ((0x80 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SHRD          ((0x40 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SELF          ((0x20 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_CALM          ((0x10 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIP           ((0x08 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIFP          ((0x04 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NOP           ((0x02 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NCP           ((0x01 << JUMP_COND_SHIFT) | JUMP_JSL)
+
+#define JUMP_OFFSET_SHIFT       0
+#define JUMP_OFFSET_MASK        (0xff << JUMP_OFFSET_SHIFT)
+
+/*
+ * NFIFO ENTRY
+ * Data Constructs
+ */
+#define NFIFOENTRY_DEST_SHIFT  30
+#define NFIFOENTRY_DEST_MASK   (3 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_DECO   (0 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS1 (1 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS2 (2 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_BOTH   (3 << NFIFOENTRY_DEST_SHIFT)
+
+#define NFIFOENTRY_LC2_SHIFT   29
+#define NFIFOENTRY_LC2_MASK            (1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2                 (1 << NFIFOENTRY_LC2_SHIFT)
+
+#define NFIFOENTRY_LC1_SHIFT   28
+#define NFIFOENTRY_LC1_MASK            (1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1                 (1 << NFIFOENTRY_LC1_SHIFT)
+
+#define NFIFOENTRY_FC2_SHIFT   27
+#define NFIFOENTRY_FC2_MASK            (1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2                 (1 << NFIFOENTRY_FC2_SHIFT)
+
+#define NFIFOENTRY_FC1_SHIFT   26
+#define NFIFOENTRY_FC1_MASK            (1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1                 (1 << NFIFOENTRY_FC1_SHIFT)
+
+#define NFIFOENTRY_STYPE_SHIFT 24
+#define NFIFOENTRY_STYPE_MASK  (3 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_DFIFO (0 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_OFIFO (1 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_PAD   (2 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_SNOOP (3 << NFIFOENTRY_STYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_SHIFT 20
+#define NFIFOENTRY_DTYPE_MASK  (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_SBOX      (0x0  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_AAD       (0x1  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_IV        (0x2  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SAD       (0x3  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_ICV       (0xA  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SKIP      (0xE  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_MSG       (0xF  << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_PK_A0     (0x0  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A1     (0x1  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A2     (0x2  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A3     (0x3  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B0     (0x4  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B1     (0x5  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B2     (0x6  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B3     (0x7  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_N      (0x8  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_E      (0x9  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A      (0xC  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B      (0xD  << NFIFOENTRY_DTYPE_SHIFT)
+
+
+#define NFIFOENTRY_BND_SHIFT   19
+#define NFIFOENTRY_BND_MASK            (1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND                 (1 << NFIFOENTRY_BND_SHIFT)
+
+#define NFIFOENTRY_PTYPE_SHIFT 16
+#define NFIFOENTRY_PTYPE_MASK  (0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_PTYPE_ZEROS         (0x0  << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NOZEROS   (0x1  << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_INCREMENT     (0x2  << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND           (0x3  << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS_NZ      (0x4  << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_LZ     (0x5  << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_N             (0x6  << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_N      (0x7  << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_OC_SHIFT            15
+#define NFIFOENTRY_OC_MASK             (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC                  (1 << NFIFOENTRY_OC_SHIFT)
+
+#define NFIFOENTRY_AST_SHIFT   14
+#define NFIFOENTRY_AST_MASK            (1 << NFIFOENTRY_AST_SHIFT)
+#define NFIFOENTRY_AST                 (1 << NFIFOENTRY_AST_SHIFT)
+
+#define NFIFOENTRY_BM_SHIFT            11
+#define NFIFOENTRY_BM_MASK             (1 << NFIFOENTRY_BM_SHIFT)
+#define NFIFOENTRY_BM                  (1 << NFIFOENTRY_BM_SHIFT)
+
+#define NFIFOENTRY_PS_SHIFT            10
+#define NFIFOENTRY_PS_MASK             (1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_PS                  (1 << NFIFOENTRY_PS_SHIFT)
+
+
+#define NFIFOENTRY_DLEN_SHIFT  0
+#define NFIFOENTRY_DLEN_MASK   (0xFFF << NFIFOENTRY_DLEN_SHIFT)
+
+#define NFIFOENTRY_PLEN_SHIFT  0
+#define NFIFOENTRY_PLEN_MASK   (0xFF << NFIFOENTRY_PLEN_SHIFT)
+
+/*
+ * PDB internal definitions
+ */
+
+/* IPSec ESP CBC Encap/Decap Options */
+#define PDBOPTS_ESPCBC_ARSNONE  0x00   /* no antireplay window              */
+#define PDBOPTS_ESPCBC_ARS32    0x40   /* 32-entry antireplay window        */
+#define PDBOPTS_ESPCBC_ARS64    0xc0   /* 64-entry antireplay window        */
+#define PDBOPTS_ESPCBC_IVSRC    0x20   /* IV comes from internal random gen */
+#define PDBOPTS_ESPCBC_ESN      0x10   /* extended sequence included        */
+#define PDBOPTS_ESPCBC_OUTFMT   0x08   /* output only decapsulation (decap) */
+#define PDBOPTS_ESPCBC_IPHDRSRC 0x08   /* IP header comes from PDB (encap)  */
+#define PDBOPTS_ESPCBC_INCIPHDR 0x04   /* Prepend IP header to output frame */
+#define PDBOPTS_ESPCBC_IPVSN    0x02   /* process IPv6 header               */
+#define PDBOPTS_ESPCBC_TUNNEL   0x01   /* tunnel mode next-header byte      */
+
+#endif /* DESC_H */
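
These selectors are designed to be OR'd together into a single 32-bit MATH or JUMP command word. A minimal sketch of two such words, assuming the CMD_MATH and CMD_JUMP opcode values defined earlier in this header (with a source of MATH_SRC1_IMM, the 32-bit immediate follows as the next descriptor word; the jump offset of 3 is arbitrary):

    /* VARSEQINLEN = SEQINLEN - immediate, on 4-byte operands */
    u32 math_cmd = CMD_MATH | MATH_FUN_SUB | MATH_SRC0_SEQINLEN |
                   MATH_SRC1_IMM | MATH_DEST_VARSEQINLEN | MATH_LEN_4BYTE;

    /* skip 3 command words ahead if the MATH zero flag is set */
    u32 jump_cmd = CMD_JUMP | JUMP_TYPE_LOCAL | JUMP_TEST_ALL |
                   JUMP_COND_MATH_Z | (3 << JUMP_OFFSET_SHIFT);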
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
new file mode 100644 (file)
index 0000000..4691580
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * caam descriptor construction helper functions
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#include "desc.h"
+
+#define IMMEDIATE (1 << 23)
+#define CAAM_CMD_SZ sizeof(u32)
+#define CAAM_PTR_SZ sizeof(dma_addr_t)
+#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * 64)
+
+#ifdef DEBUG
+#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
+                             &__func__[sizeof("append")]); } while (0)
+#else
+#define PRINT_POS
+#endif
+
+#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
+                               LDST_SRCDST_WORD_DECOCTRL | \
+                               (LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
+#define ENABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
+                              LDST_SRCDST_WORD_DECOCTRL | \
+                              (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
+
+static inline int desc_len(u32 *desc)
+{
+       return *desc & HDR_DESCLEN_MASK;
+}
+
+static inline int desc_bytes(void *desc)
+{
+       return desc_len(desc) * CAAM_CMD_SZ;
+}
+
+static inline u32 *desc_end(u32 *desc)
+{
+       return desc + desc_len(desc);
+}
+
+static inline void *sh_desc_pdb(u32 *desc)
+{
+       return desc + 1;
+}
+
+static inline void init_desc(u32 *desc, u32 options)
+{
+       *desc = options | HDR_ONE | 1;
+}
+
+static inline void init_sh_desc(u32 *desc, u32 options)
+{
+       PRINT_POS;
+       init_desc(desc, CMD_SHARED_DESC_HDR | options);
+}
+
+static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+{
+       u32 pdb_len = pdb_bytes / CAAM_CMD_SZ + 1;
+
+       init_sh_desc(desc, ((pdb_len << HDR_START_IDX_SHIFT) + pdb_len) |
+                    options);
+}
+
+static inline void init_job_desc(u32 *desc, u32 options)
+{
+       init_desc(desc, CMD_DESC_HDR | options);
+}
+
+static inline void append_ptr(u32 *desc, dma_addr_t ptr)
+{
+       dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
+
+       *offset = ptr;
+
+       (*desc) += CAAM_PTR_SZ / CAAM_CMD_SZ;
+}
+
+static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
+                                       u32 options)
+{
+       PRINT_POS;
+       init_job_desc(desc, HDR_SHARED | options |
+                     (len << HDR_START_IDX_SHIFT));
+       append_ptr(desc, ptr);
+}
+
+static inline void append_data(u32 *desc, void *data, int len)
+{
+       u32 *offset = desc_end(desc);
+
+       if (len) /* avoid sparse warning: memcpy with byte count of 0 */
+               memcpy(offset, data, len);
+
+       (*desc) += (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
+}
+
+static inline void append_cmd(u32 *desc, u32 command)
+{
+       u32 *cmd = desc_end(desc);
+
+       *cmd = command;
+
+       (*desc)++;
+}
+
+static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
+                                 u32 command)
+{
+       append_cmd(desc, command | len);
+       append_ptr(desc, ptr);
+}
+
+static inline void append_cmd_data(u32 *desc, void *data, int len,
+                                  u32 command)
+{
+       append_cmd(desc, command | IMMEDIATE | len);
+       append_data(desc, data, len);
+}
+
+static inline u32 *append_jump(u32 *desc, u32 options)
+{
+       u32 *cmd = desc_end(desc);
+
+       PRINT_POS;
+       append_cmd(desc, CMD_JUMP | options);
+
+       return cmd;
+}
+
+static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
+{
+       *jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc));
+}
+
+#define APPEND_CMD(cmd, op) \
+static inline void append_##cmd(u32 *desc, u32 options) \
+{ \
+       PRINT_POS; \
+       append_cmd(desc, CMD_##op | options); \
+}
+APPEND_CMD(operation, OPERATION)
+APPEND_CMD(move, MOVE)
+
+#define APPEND_CMD_LEN(cmd, op) \
+static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
+{ \
+       PRINT_POS; \
+       append_cmd(desc, CMD_##op | len | options); \
+}
+APPEND_CMD_LEN(seq_store, SEQ_STORE)
+APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
+APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
+
+#define APPEND_CMD_PTR(cmd, op) \
+static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
+                               u32 options) \
+{ \
+       PRINT_POS; \
+       append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
+}
+APPEND_CMD_PTR(key, KEY)
+APPEND_CMD_PTR(seq_in_ptr, SEQ_IN_PTR)
+APPEND_CMD_PTR(seq_out_ptr, SEQ_OUT_PTR)
+APPEND_CMD_PTR(load, LOAD)
+APPEND_CMD_PTR(store, STORE)
+APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
+APPEND_CMD_PTR(fifo_store, FIFO_STORE)
+
+#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
+static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+                                        unsigned int len, u32 options) \
+{ \
+       PRINT_POS; \
+       append_cmd_data(desc, data, len, CMD_##op | options); \
+}
+APPEND_CMD_PTR_TO_IMM(load, LOAD);
+APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
+
+/*
+ * 2nd variant for commands whose specified immediate length differs
+ * from length of immediate data provided, e.g., split keys
+ */
+#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
+static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+                                        unsigned int data_len, \
+                                        unsigned int len, u32 options) \
+{ \
+       PRINT_POS; \
+       append_cmd(desc, CMD_##op | IMMEDIATE | len | options); \
+       append_data(desc, data, data_len); \
+}
+APPEND_CMD_PTR_TO_IMM2(key, KEY);
+
+#define APPEND_CMD_RAW_IMM(cmd, op, type) \
+static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
+                                            u32 options) \
+{ \
+       PRINT_POS; \
+       append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
+       append_cmd(desc, immediate); \
+}
+APPEND_CMD_RAW_IMM(load, LOAD, u32);
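
Taken together, these helpers let a caller build a descriptor in place, using the length field of the header word itself as the write cursor. A rough sketch of an AES-CBC encrypt job descriptor built this way; key_dma, src_dma, dst_dma and the length variables are assumed DMA handles and sizes, and the KEY, OPERATION and FIFO option constants come from desc.h:

    u32 desc[CAAM_DESC_BYTES_MAX / CAAM_CMD_SZ];

    init_job_desc(desc, 0);
    append_key(desc, key_dma, keylen, CLASS_1 | KEY_DEST_CLASS_REG);
    append_seq_in_ptr(desc, src_dma, datalen, 0);
    append_seq_out_ptr(desc, dst_dma, datalen, 0);
    append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES |
                     OP_ALG_AAI_CBC | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
    append_seq_fifo_load(desc, datalen, FIFOLD_CLASS_CLASS1 |
                         FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
    append_seq_fifo_store(desc, datalen, FIFOST_TYPE_MESSAGE_DATA);

desc_bytes(desc) then gives the number of bytes to DMA-map when handing the descriptor to caam_jr_enqueue().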
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
new file mode 100644 (file)
index 0000000..7e2d54b
--- /dev/null
@@ -0,0 +1,248 @@
+/*
+ * CAAM Error Reporting
+ *
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ */
+
+#include "compat.h"
+#include "regs.h"
+#include "intern.h"
+#include "desc.h"
+#include "jr.h"
+#include "error.h"
+
+#define SPRINTFCAT(str, format, param, max_alloc)              \
+{                                                              \
+       char *tmp;                                              \
+                                                               \
+       tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC);  \
+       if (tmp) {                                              \
+               sprintf(tmp, format, param);                    \
+               strcat(str, tmp);                               \
+               kfree(tmp);                                     \
+       }                                                       \
+}
+
+static void report_jump_idx(u32 status, char *outstr)
+{
+       u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
+                 JRSTA_DECOERR_INDEX_SHIFT;
+
+       if (status & JRSTA_DECOERR_JUMP)
+               strcat(outstr, "jump tgt desc idx ");
+       else
+               strcat(outstr, "desc idx ");
+
+       SPRINTFCAT(outstr, "%d: ", idx, sizeof("255"));
+}
+
+static void report_ccb_status(u32 status, char *outstr)
+{
+       char *cha_id_list[] = {
+               "",
+               "AES",
+               "DES, 3DES",
+               "ARC4",
+               "MD5, SHA-1, SH-224, SHA-256, SHA-384, SHA-512",
+               "RNG",
+               "SNOW f8",
+               "Kasumi f8, f9",
+               "All Public Key Algorithms",
+               "CRC",
+               "SNOW f9",
+       };
+       char *err_id_list[] = {
+               "None. No error.",
+               "Mode error.",
+               "Data size error.",
+               "Key size error.",
+               "PKHA A memory size error.",
+               "PKHA B memory size error.",
+               "Data arrived out of sequence error.",
+               "PKHA divide-by-zero error.",
+               "PKHA modulus even error.",
+               "DES key parity error.",
+               "ICV check failed.",
+               "Hardware error.",
+               "Unsupported CCM AAD size.",
+               "Class 1 CHA is not reset",
+               "Invalid CHA combination was selected",
+               "Invalid CHA selected.",
+       };
+       u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
+                   JRSTA_CCBERR_CHAID_SHIFT;
+       u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+
+       report_jump_idx(status, outstr);
+
+       if (cha_id < ARRAY_SIZE(cha_id_list)) {
+               SPRINTFCAT(outstr, "%s: ", cha_id_list[cha_id],
+                          strlen(cha_id_list[cha_id]));
+       } else {
+               SPRINTFCAT(outstr, "unidentified cha_id value 0x%02x: ",
+                          cha_id, sizeof("ff"));
+       }
+
+       if (err_id < ARRAY_SIZE(err_id_list)) {
+               SPRINTFCAT(outstr, "%s", err_id_list[err_id],
+                          strlen(err_id_list[err_id]));
+       } else {
+               SPRINTFCAT(outstr, "unidentified err_id value 0x%02x",
+                          err_id, sizeof("ff"));
+       }
+}
+
+static void report_jump_status(u32 status, char *outstr)
+{
+       SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
+}
+
+static void report_deco_status(u32 status, char *outstr)
+{
+       const struct {
+               u8 value;
+               char *error_text;
+       } desc_error_list[] = {
+               { 0x00, "None. No error." },
+               { 0x01, "SGT Length Error. The descriptor is trying to read "
+                       "more data than is contained in the SGT table." },
+               { 0x02, "Reserved." },
+               { 0x03, "Job Ring Control Error. There is a bad value in the "
+                       "Job Ring Control register." },
+               { 0x04, "Invalid Descriptor Command. The Descriptor Command "
+                       "field is invalid." },
+               { 0x05, "Reserved." },
+               { 0x06, "Invalid KEY Command" },
+               { 0x07, "Invalid LOAD Command" },
+               { 0x08, "Invalid STORE Command" },
+               { 0x09, "Invalid OPERATION Command" },
+               { 0x0A, "Invalid FIFO LOAD Command" },
+               { 0x0B, "Invalid FIFO STORE Command" },
+               { 0x0C, "Invalid MOVE Command" },
+               { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is "
+                       "invalid because the target is not a Job Header "
+                       "Command, or the jump is from a Trusted Descriptor to "
+                       "a Job Descriptor, or because the target Descriptor "
+                       "contains a Shared Descriptor." },
+               { 0x0E, "Invalid MATH Command" },
+               { 0x0F, "Invalid SIGNATURE Command" },
+               { 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR "
+                       "Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO "
+                       "LOAD, or SEQ FIFO STORE decremented the input or "
+                       "output sequence length below 0. This error may result "
+                       "if a built-in PROTOCOL Command has encountered a "
+                       "malformed PDU." },
+               { 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
+               { 0x12, "Shared Descriptor Header Error" },
+               { 0x13, "Header Error. Invalid length or parity, or certain "
+                       "other problems." },
+               { 0x14, "Burster Error. Burster has gotten to an illegal "
+                       "state" },
+               { 0x15, "Context Register Length Error. The descriptor is "
+                       "trying to read or write past the end of the Context "
+                       "Register. A SEQ LOAD or SEQ STORE with the VLF bit "
+                       "set was executed with too large a length in the "
+                       "variable length register (VSOL for SEQ STORE or VSIL "
+                       "for SEQ LOAD)." },
+               { 0x16, "DMA Error" },
+               { 0x17, "Reserved." },
+               { 0x1A, "Job failed due to JR reset" },
+               { 0x1B, "Job failed due to Fail Mode" },
+               { 0x1C, "DECO Watchdog timer timeout error" },
+               { 0x1D, "DECO tried to copy a key from another DECO but the "
+                       "other DECO's Key Registers were locked" },
+               { 0x1E, "DECO attempted to copy data from a DECO that had an "
+                       "unmasked Descriptor error" },
+               { 0x1F, "LIODN error. DECO was trying to share from itself or "
+                       "from another DECO but the two Non-SEQ LIODN values "
+                       "didn't match or the 'shared from' DECO's Descriptor "
+                       "required that the SEQ LIODNs be the same and they "
+                       "aren't." },
+               { 0x20, "DECO has completed a reset initiated via the DRR "
+                       "register" },
+               { 0x21, "Nonce error. When using EKT (CCM) key encryption "
+                       "option in the FIFO STORE Command, the Nonce counter "
+                       "reached its maximum value and this encryption mode "
+                       "can no longer be used." },
+               { 0x22, "Meta data is too large (> 511 bytes) for TLS decap "
+                       "(input frame; block ciphers) and IPsec decap (output "
+                       "frame, when doing the next header byte update) and "
+                       "DCRC (output frame)." },
+               { 0x80, "DNR (do not run) error" },
+               { 0x81, "undefined protocol command" },
+               { 0x82, "invalid setting in PDB" },
+               { 0x83, "Anti-replay LATE error" },
+               { 0x84, "Anti-replay REPLAY error" },
+               { 0x85, "Sequence number overflow" },
+               { 0x86, "Sigver invalid signature" },
+               { 0x87, "DSA Sign Illegal test descriptor" },
+               { 0x88, "Protocol Format Error - A protocol has seen an error "
+                       "in the format of data received. When running RSA, "
+                       "this means that formatting with random padding was "
+                       "used, and did not follow the form: 0x00, 0x02, 8-to-N "
+                       "bytes of non-zero pad, 0x00, F data." },
+               { 0x89, "Protocol Size Error - A protocol has seen an error in "
+                       "size. When running RSA, pdb size N < (size of F) when "
+                       "no formatting is used; or pdb size N < (F + 11) when "
+                       "formatting is used." },
+               { 0xC1, "Blob Command error: Undefined mode" },
+               { 0xC2, "Blob Command error: Secure Memory Blob mode error" },
+               { 0xC4, "Blob Command error: Black Blob key or input size "
+                       "error" },
+               { 0xC5, "Blob Command error: Invalid key destination" },
+               { 0xC8, "Blob Command error: Trusted/Secure mode error" },
+               { 0xF0, "IPsec TTL or hop limit field either came in as 0, "
+                       "or was decremented to 0" },
+               { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
+       };
+       u8 desc_error = status & JRSTA_DECOERR_ERROR_MASK;
+       int i;
+
+       report_jump_idx(status, outstr);
+
+       for (i = 0; i < ARRAY_SIZE(desc_error_list); i++)
+               if (desc_error_list[i].value == desc_error)
+                       break;
+
+       if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text) {
+               SPRINTFCAT(outstr, "%s", desc_error_list[i].error_text,
+                          strlen(desc_error_list[i].error_text));
+       } else {
+               SPRINTFCAT(outstr, "unidentified error value 0x%02x",
+                          desc_error, sizeof("ff"));
+       }
+}
+
+static void report_jr_status(u32 status, char *outstr)
+{
+       SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
+}
+
+static void report_cond_code_status(u32 status, char *outstr)
+{
+       SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
+}
+
+char *caam_jr_strstatus(char *outstr, u32 status)
+{
+       struct stat_src {
+               void (*report_ssed)(u32 status, char *outstr);
+               char *error;
+       } status_src[] = {
+               { NULL, "No error" },
+               { NULL, NULL },
+               { report_ccb_status, "CCB" },
+               { report_jump_status, "Jump" },
+               { report_deco_status, "DECO" },
+               { NULL, NULL },
+               { report_jr_status, "Job Ring" },
+               { report_cond_code_status, "Condition Code" },
+       };
+       u32 ssrc = status >> JRSTA_SSRC_SHIFT;
+
+       if (ssrc >= ARRAY_SIZE(status_src) || !status_src[ssrc].error) {
+               sprintf(outstr, "unknown error source 0x%x: ", ssrc);
+               return outstr;
+       }
+
+       sprintf(outstr, "%s: ", status_src[ssrc].error);
+
+       if (status_src[ssrc].report_ssed)
+               status_src[ssrc].report_ssed(status, outstr);
+
+       return outstr;
+}
+EXPORT_SYMBOL(caam_jr_strstatus);
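
A sketch of the intended call site: a job-ring completion callback that decodes a nonzero status (the callback name is illustrative; CAAM_ERROR_STR_MAX is sized in error.h below to hold the longest composed string):

    static void my_job_done(struct device *dev, u32 *desc, u32 status,
                            void *arg)
    {
            char err[CAAM_ERROR_STR_MAX];

            if (status)
                    dev_err(dev, "job failed: %s\n",
                            caam_jr_strstatus(err, status));
    }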
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
new file mode 100644 (file)
index 0000000..02c7baa
--- /dev/null
@@ -0,0 +1,11 @@
+/*
+ * CAAM Error Reporting code header
+ *
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef CAAM_ERROR_H
+#define CAAM_ERROR_H
+#define CAAM_ERROR_STR_MAX 302
+extern char *caam_jr_strstatus(char *outstr, u32 status);
+#endif /* CAAM_ERROR_H */
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
new file mode 100644 (file)
index 0000000..a34be01
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ * CAAM/SEC 4.x driver backend
+ * Private/internal definitions between modules
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef INTERN_H
+#define INTERN_H
+
+#define JOBR_UNASSIGNED 0
+#define JOBR_ASSIGNED 1
+
+/* Currently comes from Kconfig param as a ^2 (driver-required) */
+#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
+
+/* Kconfig params for interrupt coalescing if selected (else zero) */
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_INTC
+#define JOBR_INTC JRCFG_ICEN
+#define JOBR_INTC_TIME_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
+#define JOBR_INTC_COUNT_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
+#else
+#define JOBR_INTC 0
+#define JOBR_INTC_TIME_THLD 0
+#define JOBR_INTC_COUNT_THLD 0
+#endif
+
+/*
+ * Storage for tracking each in-process entry moving across a ring
+ * Each entry on an output ring needs one of these
+ */
+struct caam_jrentry_info {
+       void (*callbk)(struct device *dev, u32 *desc, u32 status, void *arg);
+       void *cbkarg;   /* Argument per ring entry */
+       u32 *desc_addr_virt;    /* Stored virt addr for postprocessing */
+       dma_addr_t desc_addr_dma;       /* Stored bus addr for done matching */
+       u32 desc_size;  /* Stored size for postprocessing, header derived */
+};
+
+/* Private sub-storage for a single JobR */
+struct caam_drv_private_jr {
+       struct device *parentdev;       /* points back to controller dev */
+       int ridx;
+       struct caam_job_ring __iomem *rregs;    /* JobR's register space */
+       struct tasklet_struct irqtask[NR_CPUS];
+       int irq;                        /* One per queue */
+       int assign;                     /* busy/free */
+
+       /* Job ring info */
+       int ringsize;   /* Size of rings (assume input = output) */
+       struct caam_jrentry_info *entinfo;      /* Alloc'ed 1 per ring entry */
+       spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
+       int inp_ring_write_index;       /* Input index "tail" */
+       int head;                       /* entinfo (s/w ring) head index */
+       dma_addr_t *inpring;    /* Base of input ring, alloc DMA-safe */
+       spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
+       int out_ring_read_index;        /* Output index "tail" */
+       int tail;                       /* entinfo (s/w ring) tail index */
+       struct jr_outentry *outring;    /* Base of output ring, DMA-safe */
+};
+
+/*
+ * Driver-private storage for a single CAAM block instance
+ */
+struct caam_drv_private {
+
+       struct device *dev;
+       struct device **jrdev; /* Alloc'ed array per sub-device */
+       spinlock_t jr_alloc_lock;
+       struct platform_device *pdev;
+
+       /* Physical-presence section */
+       struct caam_ctrl *ctrl; /* controller region */
+       struct caam_deco **deco; /* DECO/CCB views */
+       struct caam_assurance *ac;
+       struct caam_queue_if *qi; /* QI control region */
+
+       /*
+        * Detected geometry block. Filled in from device tree if powerpc,
+        * or from register-based version detection code
+        */
+       u8 total_jobrs;         /* Total Job Rings in device */
+       u8 qi_present;          /* Nonzero if QI present in device */
+       int secvio_irq;         /* Security violation interrupt number */
+
+       /* which jr allocated to scatterlist crypto */
+       atomic_t tfm_count ____cacheline_aligned;
+       int num_jrs_for_algapi;
+       struct device **algapi_jr;
+       /* list of registered crypto algorithms (mk generic context handle?) */
+       struct list_head alg_list;
+
+       /*
+        * debugfs entries for developer view into driver/device
+        * variables at runtime.
+        */
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *dfs_root;
+       struct dentry *ctl; /* controller dir */
+       struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
+       struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
+       struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
+       struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
+
+       struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
+       struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
+#endif
+};
+
+void caam_jr_algapi_init(struct device *dev);
+void caam_jr_algapi_remove(struct device *dev);
+#endif /* INTERN_H */
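
Because JOBR_DEPTH is constrained to a power of two, the JobR code can wrap every ring index with a mask instead of a modulo. A one-line sketch, assuming a Kconfig ring size of 9:

    /* JOBR_DEPTH = 1 << 9 = 512 entries */
    next = (jrp->inp_ring_write_index + 1) & (JOBR_DEPTH - 1);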
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
new file mode 100644 (file)
index 0000000..340fa32
--- /dev/null
@@ -0,0 +1,517 @@
+/*
+ * CAAM/SEC 4.x transport/backend driver
+ * JobR backend functionality
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#include "compat.h"
+#include "regs.h"
+#include "jr.h"
+#include "desc.h"
+#include "intern.h"
+
+/* Main per-ring interrupt handler */
+static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
+{
+       struct device *dev = st_dev;
+       struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+       u32 irqstate;
+
+       /*
+        * Check the output ring for ready responses, kick
+        * tasklet if jobs done.
+        */
+       irqstate = rd_reg32(&jrp->rregs->jrintstatus);
+       if (!irqstate)
+               return IRQ_NONE;
+
+       /*
+        * If JobR error, we've got more development work to do.
+        * Flag a bug now, but we really need to shut down and
+        * restart the queue (and fix code).
+        */
+       if (irqstate & JRINT_JR_ERROR) {
+               dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
+               BUG();
+       }
+
+       /* mask valid interrupts */
+       setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+
+       /* Have valid interrupt at this point, just ACK and trigger */
+       wr_reg32(&jrp->rregs->jrintstatus, irqstate);
+
+       preempt_disable();
+       tasklet_schedule(&jrp->irqtask[smp_processor_id()]);
+       preempt_enable();
+
+       return IRQ_HANDLED;
+}
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void caam_jr_dequeue(unsigned long devarg)
+{
+       int hw_idx, sw_idx, i, head, tail;
+       struct device *dev = (struct device *)devarg;
+       struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+       void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
+       u32 *userdesc, userstatus;
+       void *userarg;
+       unsigned long flags;
+
+       spin_lock_irqsave(&jrp->outlock, flags);
+
+       head = ACCESS_ONCE(jrp->head);
+       sw_idx = tail = jrp->tail;
+
+       while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
+              rd_reg32(&jrp->rregs->outring_used)) {
+
+               hw_idx = jrp->out_ring_read_index;
+               for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
+                       sw_idx = (tail + i) & (JOBR_DEPTH - 1);
+
+                       smp_read_barrier_depends();
+
+                       if (jrp->outring[hw_idx].desc ==
+                           jrp->entinfo[sw_idx].desc_addr_dma)
+                               break; /* found */
+               }
+               /* we should never fail to find a matching descriptor */
+               BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
+
+               /* Unmap just-run descriptor so we can post-process */
+               dma_unmap_single(dev, jrp->outring[hw_idx].desc,
+                                jrp->entinfo[sw_idx].desc_size,
+                                DMA_TO_DEVICE);
+
+               /* mark completed, avoid matching on a recycled desc addr */
+               jrp->entinfo[sw_idx].desc_addr_dma = 0;
+
+               /* Stash callback params for use outside of lock */
+               usercall = jrp->entinfo[sw_idx].callbk;
+               userarg = jrp->entinfo[sw_idx].cbkarg;
+               userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
+               userstatus = jrp->outring[hw_idx].jrstatus;
+
+               smp_mb();
+
+               jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
+                                          (JOBR_DEPTH - 1);
+
+               /*
+                * if this job completed out-of-order, do not increment
+                * the tail.  Otherwise, increment tail by 1 plus the
+                * number of subsequent jobs already completed out-of-order
+                */
+               if (sw_idx == tail) {
+                       do {
+                               tail = (tail + 1) & (JOBR_DEPTH - 1);
+                               smp_read_barrier_depends();
+                       } while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
+                                jrp->entinfo[tail].desc_addr_dma == 0);
+
+                       jrp->tail = tail;
+               }
+
+               /* set done */
+               wr_reg32(&jrp->rregs->outring_rmvd, 1);
+
+               spin_unlock_irqrestore(&jrp->outlock, flags);
+
+               /* Finally, execute user's callback */
+               usercall(dev, userdesc, userstatus, userarg);
+
+               spin_lock_irqsave(&jrp->outlock, flags);
+
+               head = ACCESS_ONCE(jrp->head);
+               sw_idx = tail = jrp->tail;
+       }
+
+       spin_unlock_irqrestore(&jrp->outlock, flags);
+
+       /* reenable / unmask IRQs */
+       clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+}
+
+/**
+ * caam_jr_register() - Alloc a ring for someone to use as needed. Returns
+ * the ordinal of the ring allocated, else returns -ENODEV if no rings
+ * are available.
+ * @ctrldev: points to the controller level dev (parent) that
+ *           owns rings available for use.
+ * @rdev:    points to where a pointer to the newly allocated queue's
+ *           dev can be written to if successful.
+ **/
+int caam_jr_register(struct device *ctrldev, struct device **rdev)
+{
+       struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+       struct caam_drv_private_jr *jrpriv = NULL;
+       unsigned long flags;
+       int ring;
+
+       /* Lock, if free ring - assign, unlock */
+       spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
+       for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
+               jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
+               if (jrpriv->assign == JOBR_UNASSIGNED) {
+                       jrpriv->assign = JOBR_ASSIGNED;
+                       *rdev = ctrlpriv->jrdev[ring];
+                       spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
+                       return ring;
+               }
+       }
+
+       /* No unassigned ring was found; tell the caller */
+       spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
+       *rdev = NULL;
+
+       return -ENODEV;
+}
+EXPORT_SYMBOL(caam_jr_register);
+
+/**
+ * caam_jr_deregister() - Deregister an API and release the queue.
+ * Returns 0 if OK, -EBUSY if the queue still contains pending entries
+ * or unprocessed results at the time of the call.
+ * @rdev:    points to the dev that identifies the queue to
+ *           be released.
+ **/
+int caam_jr_deregister(struct device *rdev)
+{
+       struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
+       struct caam_drv_private *ctrlpriv;
+       unsigned long flags;
+
+       /* Get the owning controller's private space */
+       ctrlpriv = dev_get_drvdata(jrpriv->parentdev);
+
+       /*
+        * Make sure ring empty before release
+        */
+       if (rd_reg32(&jrpriv->rregs->outring_used) ||
+           (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH))
+               return -EBUSY;
+
+       /* Release ring */
+       spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
+       jrpriv->assign = JOBR_UNASSIGNED;
+       spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL(caam_jr_deregister);
+
+/**
+ * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
+ * -EBUSY if the queue is full, -EIO if it cannot map the caller's
+ * descriptor.
+ * @dev:  device of the job ring to be used. This device should have
+ *        been previously assigned by caam_jr_register().
+ * @desc: points to a job descriptor that executes our request. All
+ *        descriptors (and all referenced data) must be in a DMAable
+ *        region, and all data references must be physical addresses
+ *        accessible to CAAM (i.e. within a PAMU window granted
+ *        to it).
+ * @cbk:  pointer to a callback function to be invoked upon completion
+ *        of this request. This has the form:
+ *        callback(struct device *dev, u32 *desc, u32 stat, void *arg)
+ *        where:
+ *        @dev:    contains the job ring device that processed this
+ *                 response.
+ *        @desc:   descriptor that initiated the request, same as
+ *                 the "desc" argument passed to caam_jr_enqueue().
+ *        @status: untranslated status received from CAAM. See the
+ *                 reference manual for a detailed description of
+ *                 error meaning, or see the JRSTA definitions in the
+ *                 register header file.
+ *        @areq:   optional pointer to an argument passed with the
+ *                 original request
+ * @areq: optional pointer to a user argument for use at callback
+ *        time.
+ **/
+int caam_jr_enqueue(struct device *dev, u32 *desc,
+                   void (*cbk)(struct device *dev, u32 *desc,
+                               u32 status, void *areq),
+                   void *areq)
+{
+       struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+       struct caam_jrentry_info *head_entry;
+       unsigned long flags;
+       int head, tail, desc_size;
+       dma_addr_t desc_dma;
+
+       desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
+       desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, desc_dma)) {
+               dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
+               return -EIO;
+       }
+
+       spin_lock_irqsave(&jrp->inplock, flags);
+
+       head = jrp->head;
+       tail = ACCESS_ONCE(jrp->tail);
+
+       if (!rd_reg32(&jrp->rregs->inpring_avail) ||
+           CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
+               spin_unlock_irqrestore(&jrp->inplock, flags);
+               dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
+               return -EBUSY;
+       }
+
+       head_entry = &jrp->entinfo[head];
+       head_entry->desc_addr_virt = desc;
+       head_entry->desc_size = desc_size;
+       head_entry->callbk = (void *)cbk;
+       head_entry->cbkarg = areq;
+       head_entry->desc_addr_dma = desc_dma;
+
+       jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
+
+       smp_wmb();
+
+       jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
+                                   (JOBR_DEPTH - 1);
+       jrp->head = (head + 1) & (JOBR_DEPTH - 1);
+
+       wmb();
+
+       wr_reg32(&jrp->rregs->inpring_jobadd, 1);
+
+       spin_unlock_irqrestore(&jrp->inplock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL(caam_jr_enqueue);
+
+static int caam_reset_hw_jr(struct device *dev)
+{
+       struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+       unsigned int timeout = 100000;
+
+       /*
+        * mask interrupts since we are going to poll
+        * for reset completion status
+        */
+       setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+
+       /* initiate flush (required prior to reset) */
+       wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+       while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
+               JRINT_ERR_HALT_INPROGRESS) && --timeout)
+               cpu_relax();
+
+       if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
+           JRINT_ERR_HALT_COMPLETE || timeout == 0) {
+               dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
+               return -EIO;
+       }
+
+       /* initiate reset */
+       timeout = 100000;
+       wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+       while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
+               cpu_relax();
+
+       if (timeout == 0) {
+               dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
+               return -EIO;
+       }
+
+       /* unmask interrupts */
+       clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+
+       return 0;
+}
+
+/*
+ * Init JobR independent of platform property detection
+ */
+static int caam_jr_init(struct device *dev)
+{
+       struct caam_drv_private_jr *jrp;
+       dma_addr_t inpbusaddr, outbusaddr;
+       int i, error;
+
+       jrp = dev_get_drvdata(dev);
+
+       /* Connect job ring interrupt handler. */
+       for_each_possible_cpu(i)
+               tasklet_init(&jrp->irqtask[i], caam_jr_dequeue,
+                            (unsigned long)dev);
+
+       error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+                           "caam-jobr", dev);
+       if (error) {
+               dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
+                       jrp->ridx, jrp->irq);
+               irq_dispose_mapping(jrp->irq);
+               jrp->irq = 0;
+               return -EINVAL;
+       }
+
+       error = caam_reset_hw_jr(dev);
+       if (error)
+               return error;
+
+       jrp->inpring = kzalloc(sizeof(dma_addr_t) * JOBR_DEPTH,
+                              GFP_KERNEL | GFP_DMA);
+       jrp->outring = kzalloc(sizeof(struct jr_outentry) *
+                              JOBR_DEPTH, GFP_KERNEL | GFP_DMA);
+
+       jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
+                              GFP_KERNEL);
+
+       if ((jrp->inpring == NULL) || (jrp->outring == NULL) ||
+           (jrp->entinfo == NULL)) {
+               dev_err(dev, "can't allocate job rings for %d\n",
+                       jrp->ridx);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < JOBR_DEPTH; i++)
+               jrp->entinfo[i].desc_addr_dma = !0;
+
+       /* Setup rings */
+       inpbusaddr = dma_map_single(dev, jrp->inpring,
+                                   sizeof(dma_addr_t) * JOBR_DEPTH,
+                                   DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(dev, inpbusaddr)) {
+               dev_err(dev, "caam_jr_init(): can't map input ring\n");
+               kfree(jrp->inpring);
+               kfree(jrp->outring);
+               kfree(jrp->entinfo);
+               return -EIO;
+       }
+
+       outbusaddr = dma_map_single(dev, jrp->outring,
+                                   sizeof(struct jr_outentry) * JOBR_DEPTH,
+                                   DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(dev, outbusaddr)) {
+               dev_err(dev, "caam_jr_init(): can't map output ring\n");
+               dma_unmap_single(dev, inpbusaddr,
+                                sizeof(dma_addr_t) * JOBR_DEPTH,
+                                DMA_BIDIRECTIONAL);
+               kfree(jrp->inpring);
+               kfree(jrp->outring);
+               kfree(jrp->entinfo);
+               return -EIO;
+       }
+
+       jrp->inp_ring_write_index = 0;
+       jrp->out_ring_read_index = 0;
+       jrp->head = 0;
+       jrp->tail = 0;
+
+       wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
+       wr_reg64(&jrp->rregs->outring_base, outbusaddr);
+       wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
+       wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
+
+       jrp->ringsize = JOBR_DEPTH;
+
+       spin_lock_init(&jrp->inplock);
+       spin_lock_init(&jrp->outlock);
+
+       /* Select interrupt coalescing parameters */
+       setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
+                 (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
+                 (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
+
+       jrp->assign = JOBR_UNASSIGNED;
+       return 0;
+}
+
+/*
+ * Shutdown JobR independent of platform property code
+ */
+int caam_jr_shutdown(struct device *dev)
+{
+       struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+       dma_addr_t inpbusaddr, outbusaddr;
+       int ret, i;
+
+       ret = caam_reset_hw_jr(dev);
+
+       for_each_possible_cpu(i)
+               tasklet_kill(&jrp->irqtask[i]);
+
+       /* Release interrupt */
+       free_irq(jrp->irq, dev);
+
+       /* Free rings */
+       inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
+       outbusaddr = rd_reg64(&jrp->rregs->outring_base);
+       dma_unmap_single(dev, outbusaddr,
+                        sizeof(struct jr_outentry) * JOBR_DEPTH,
+                        DMA_BIDIRECTIONAL);
+       dma_unmap_single(dev, inpbusaddr, sizeof(dma_addr_t) * JOBR_DEPTH,
+                        DMA_BIDIRECTIONAL);
+       kfree(jrp->outring);
+       kfree(jrp->inpring);
+       kfree(jrp->entinfo);
+
+       return ret;
+}
+
+/*
+ * Probe routine for each detected JobR subsystem. It assumes that
+ * property detection was picked up externally.
+ */
+int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
+                 int ring)
+{
+       struct device *ctrldev, *jrdev;
+       struct platform_device *jr_pdev;
+       struct caam_drv_private *ctrlpriv;
+       struct caam_drv_private_jr *jrpriv;
+       u32 *jroffset;
+       int error;
+
+       ctrldev = &pdev->dev;
+       ctrlpriv = dev_get_drvdata(ctrldev);
+
+       jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
+                        GFP_KERNEL);
+       if (jrpriv == NULL) {
+               dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
+                       ring);
+               return -ENOMEM;
+       }
+       jrpriv->parentdev = ctrldev; /* point back to parent */
+       jrpriv->ridx = ring; /* save ring identity relative to detection */
+
+       /*
+        * Derive a pointer to the detected JobR's regs
+        * Driver has already iomapped the entire space, we just
+        * need to add in the offset to this JobR. Don't know if I
+        * like this long-term, but it'll run
+        */
+       jroffset = (u32 *)of_get_property(np, "reg", NULL);
+       jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
+                                                        + *jroffset);
+
+       /* Build a local dev for each detected queue */
+       jr_pdev = of_platform_device_create(np, NULL, ctrldev);
+       if (jr_pdev == NULL) {
+               kfree(jrpriv);
+               return -EINVAL;
+       }
+       jrdev = &jr_pdev->dev;
+       dev_set_drvdata(jrdev, jrpriv);
+       ctrlpriv->jrdev[ring] = jrdev;
+
+       /* Identify the interrupt */
+       jrpriv->irq = of_irq_to_resource(np, 0, NULL);
+
+       /* Now do the platform independent part */
+       error = caam_jr_init(jrdev); /* now turn on hardware */
+       if (error) {
+               kfree(jrpriv);
+               return error;
+       }
+
+       return error;
+}
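
Putting the exported entry points together: a caller claims a ring once, then enqueues descriptors against it. A hedged sketch, reusing the hypothetical my_job_done callback shown after error.c above (ctrldev, desc and my_ctx are assumed to exist, with desc in a DMA-able region per the kernel-doc):

    struct device *jrdev;
    int ring, err;

    ring = caam_jr_register(ctrldev, &jrdev);
    if (ring < 0)
            return ring;            /* -ENODEV: no free rings */

    err = caam_jr_enqueue(jrdev, desc, my_job_done, my_ctx);
    if (err)                        /* -EBUSY: ring full; -EIO: map failed */
            caam_jr_deregister(jrdev);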
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
new file mode 100644 (file)
index 0000000..c23df39
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * CAAM public-level include definitions for the JobR backend
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef JR_H
+#define JR_H
+
+/* Prototypes for backend-level services exposed to APIs */
+int caam_jr_register(struct device *ctrldev, struct device **rdev);
+int caam_jr_deregister(struct device *rdev);
+int caam_jr_enqueue(struct device *dev, u32 *desc,
+                   void (*cbk)(struct device *dev, u32 *desc, u32 status,
+                               void *areq),
+                   void *areq);
+
+extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
+                        int ring);
+extern int caam_jr_shutdown(struct device *dev);
+#endif /* JR_H */
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
new file mode 100644 (file)
index 0000000..aee394e
--- /dev/null
@@ -0,0 +1,663 @@
+/*
+ * CAAM hardware register-level view
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef REGS_H
+#define REGS_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+/*
+ * Architecture-specific register access methods
+ *
+ * CAAM's bus-addressable registers are 64 bits internally.
+ * They have been wired to be safely accessible on 32-bit
+ * architectures, however. Registers were organized such
+ * that (a) they can be contained in 32 bits, (b) if not, then they
+ * can be treated as two 32-bit entities, or finally (c) if they
+ * must be treated as a single 64-bit value, then this can safely
+ * be done with two 32-bit cycles.
+ *
+ * For 32-bit operations on 64-bit values, CAAM follows the same
+ * 64-bit register access conventions as its predecessors, in that
+ * writes are "triggered" by a write to the register at the numerically
+ * higher address; thus, a full 64-bit write cycle requires a write
+ * to the lower address, followed by a write to the higher address,
+ * which will latch/execute the write cycle.
+ *
+ * For example, let's assume a SW reset of CAAM through the master
+ * configuration register.
+ * - SWRST is in bit 31 of MCFG.
+ * - MCFG begins at base+0x0000.
+ * - Bits 63-32 are a 32-bit word at base+0x0000 (numerically-lower)
+ * - Bits 31-0 are a 32-bit word at base+0x0004 (numerically-higher)
+ *
+ * (and on Power, the convention is 0-31, 32-63, I know...)
+ *
+ * Assuming a 64-bit write to this MCFG to perform a software reset
+ * would then require a write of 0 to base+0x0000, followed by a
+ * write of 0x80000000 to base+0x0004, which would "execute" the
+ * reset.
+ *
+ * Of course, since MCFG 63-32 is all zero, we could cheat and simply
+ * write 0x80000000 to base+0x0004, and the reset would work fine.
+ * However, since CAAM does contain some write-and-read-intended
+ * 64-bit registers, this code defines 64-bit access methods for
+ * the sake of internal consistency and simplicity, and so that a
+ * clean transition to 64-bit is possible when it becomes necessary.
+ *
+ * There are limitations to this that the developer must recognize.
+ * 32-bit architectures cannot enforce an atomic-64 operation,
+ * Therefore:
+ *
+ * - On writes, since the HW is assumed to latch the cycle on the
+ *   write of the higher-numeric-address word, then ordered
+ *   writes work OK.
+ *
+ * - For reads, where a register contains a relevant value of more
+ *   than 32 bits, the hardware employs logic to latch the other
+ *   "half" of the data until read, ensuring an accurate value.
+ *   This is of particular relevance when dealing with CAAM's
+ *   performance counters.
+ *
+ */
+
+#ifdef __BIG_ENDIAN
+#define wr_reg32(reg, data) out_be32(reg, data)
+#define rd_reg32(reg) in_be32(reg)
+#ifdef CONFIG_64BIT
+#define wr_reg64(reg, data) out_be64(reg, data)
+#define rd_reg64(reg) in_be64(reg)
+#endif
+#else
+#ifdef __LITTLE_ENDIAN
+#define wr_reg32(reg, data) __raw_writel(data, reg)
+#define rd_reg32(reg) __raw_readl(reg)
+#ifdef CONFIG_64BIT
+#define wr_reg64(reg, data) __raw_writeq(data, reg)
+#define rd_reg64(reg) __raw_readq(reg)
+#endif
+#endif
+#endif
+
+#ifndef CONFIG_64BIT
+static inline void wr_reg64(u64 __iomem *reg, u64 data)
+{
+       wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32);
+       wr_reg32((u32 __iomem *)reg + 1, data & 0x00000000ffffffffull);
+}
+
+static inline u64 rd_reg64(u64 __iomem *reg)
+{
+       return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) |
+               ((u64)rd_reg32((u32 __iomem *)reg + 1));
+}
+#endif
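
Concretely, the software-reset example from the comment above reduces to one call in either configuration; on 32-bit platforms the helper issues the two ordered 32-bit writes the hardware expects. A sketch, where ctrl_base is an assumed ioremapped CAAM base address and MCFGR_SWRESET is defined later in this header:

    /* writes 0 to base+0x0000, then 0x80000000 to base+0x0004 */
    wr_reg64((u64 __iomem *)ctrl_base, (u64)MCFGR_SWRESET);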
+
+/*
+ * jr_outentry
+ * Represents each entry in a JobR output ring
+ */
+struct jr_outentry {
+       dma_addr_t desc;/* Pointer to completed descriptor */
+       u32 jrstatus;   /* Status for completed descriptor */
+} __packed;
+
+/*
+ * caam_perfmon - Performance Monitor/Secure Memory Status/
+ *                CAAM Global Status/Component Version IDs
+ *
+ * Spans f00-fff wherever instantiated
+ */
+
+/* Number of DECOs */
+#define CHA_NUM_DECONUM_SHIFT  56
+#define CHA_NUM_DECONUM_MASK   (0xfull << CHA_NUM_DECONUM_SHIFT)
+
+struct caam_perfmon {
+       /* Performance Monitor Registers                        f00-f9f */
+       u64 req_dequeued;       /* PC_REQ_DEQ - Dequeued Requests            */
+       u64 ob_enc_req; /* PC_OB_ENC_REQ - Outbound Encrypt Requests */
+       u64 ib_dec_req; /* PC_IB_DEC_REQ - Inbound Decrypt Requests  */
+       u64 ob_enc_bytes;       /* PC_OB_ENCRYPT - Outbound Bytes Encrypted  */
+       u64 ob_prot_bytes;      /* PC_OB_PROTECT - Outbound Bytes Protected  */
+       u64 ib_dec_bytes;       /* PC_IB_DECRYPT - Inbound Bytes Decrypted   */
+       u64 ib_valid_bytes;     /* PC_IB_VALIDATED - Inbound Bytes Validated */
+       u64 rsvd[13];
+
+       /* CAAM Hardware Instantiation Parameters               fa0-fbf */
+       u64 cha_rev;            /* CRNR - CHA Revision Number           */
+#define CTPR_QI_SHIFT          57
+#define CTPR_QI_MASK           (0x1ull << CTPR_QI_SHIFT)
+       u64 comp_parms; /* CTPR - Compile Parameters Register   */
+       u64 rsvd1[2];
+
+       /* CAAM Global Status                                   fc0-fdf */
+       u64 faultaddr;  /* FAR  - Fault Address         */
+       u32 faultliodn; /* FALR - Fault Address LIODN   */
+       u32 faultdetail;        /* FADR - Fault Addr Detail     */
+       u32 rsvd2;
+       u32 status;             /* CSTA - CAAM Status */
+       u64 rsvd3;
+
+       /* Component Instantiation Parameters                   fe0-fff */
+       u32 rtic_id;            /* RVID - RTIC Version ID       */
+       u32 ccb_id;             /* CCBVID - CCB Version ID      */
+       u64 cha_id;             /* CHAVID - CHA Version ID      */
+       u64 cha_num;            /* CHANUM - CHA Number          */
+       u64 caam_id;            /* CAAMVID - CAAM Version ID    */
+};
+
+/* LIODN programming for DMA configuration */
+#define MSTRID_LOCK_LIODN      0x80000000
+#define MSTRID_LOCK_MAKETRUSTED        0x00010000      /* only for JR masterid */
+
+#define MSTRID_LIODN_MASK      0x0fff
+struct masterid {
+       u32 liodn_ms;   /* lock and make-trusted control bits */
+       u32 liodn_ls;   /* LIODN for non-sequence and seq access */
+};
+
+/* Partition ID for DMA configuration */
+struct partid {
+       u32 rsvd1;
+       u32 pidr;       /* partition ID, DECO */
+};
+
+/* RNG test mode (replicated twice in some configurations) */
+/* Padded out to 0x100 */
+struct rngtst {
+       u32 mode;               /* RTSTMODEx - Test mode */
+       u32 rsvd1[3];
+       u32 reset;              /* RTSTRESETx - Test reset control */
+       u32 rsvd2[3];
+       u32 status;             /* RTSTSSTATUSx - Test status */
+       u32 rsvd3;
+       u32 errstat;            /* RTSTERRSTATx - Test error status */
+       u32 rsvd4;
+       u32 errctl;             /* RTSTERRCTLx - Test error control */
+       u32 rsvd5;
+       u32 entropy;            /* RTSTENTROPYx - Test entropy */
+       u32 rsvd6[15];
+       u32 verifctl;   /* RTSTVERIFCTLx - Test verification control */
+       u32 rsvd7;
+       u32 verifstat;  /* RTSTVERIFSTATx - Test verification status */
+       u32 rsvd8;
+       u32 verifdata;  /* RTSTVERIFDx - Test verification data */
+       u32 rsvd9;
+       u32 xkey;               /* RTSTXKEYx - Test XKEY */
+       u32 rsvd10;
+       u32 oscctctl;   /* RTSTOSCCTCTLx - Test osc. counter control */
+       u32 rsvd11;
+       u32 oscct;              /* RTSTOSCCTx - Test oscillator counter */
+       u32 rsvd12;
+       u32 oscctstat;  /* RTSTOSCCTSTATx - Test osc. counter status */
+       u32 rsvd13[2];
+       u32 ofifo[4];   /* RTSTOFIFOx - Test output FIFO */
+       u32 rsvd14[15];
+};
+
+/*
+ * caam_ctrl - basic core configuration
+ * starts base + 0x0000 padded out to 0x1000
+ */
+
+#define KEK_KEY_SIZE           8
+#define TKEK_KEY_SIZE          8
+#define TDSK_KEY_SIZE          8
+
+#define DECO_RESET     1       /* Use with DECO reset/availability regs */
+#define DECO_RESET_0   (DECO_RESET << 0)
+#define DECO_RESET_1   (DECO_RESET << 1)
+#define DECO_RESET_2   (DECO_RESET << 2)
+#define DECO_RESET_3   (DECO_RESET << 3)
+#define DECO_RESET_4   (DECO_RESET << 4)
+
+struct caam_ctrl {
+       /* Basic Configuration Section                          000-01f */
+       /* Read/Writable                                                */
+       u32 rsvd1;
+       u32 mcr;                /* MCFG      Master Config Register  */
+       u32 rsvd2[2];
+
+       /* Bus Access Configuration Section                     010-11f */
+       /* Read/Writable                                                */
+       struct masterid jr_mid[4];      /* JRxLIODNR - JobR LIODN setup */
+       u32 rsvd3[12];
+       struct masterid rtic_mid[4];    /* RTICxLIODNR - RTIC LIODN setup */
+       u32 rsvd4[7];
+       u32 deco_rq;                    /* DECORR - DECO Request */
+       struct partid deco_mid[5];      /* DECOxLIODNR - 1 per DECO */
+       u32 rsvd5[22];
+
+       /* DECO Availability/Reset Section                      120-3ff */
+       u32 deco_avail;         /* DAR - DECO availability */
+       u32 deco_reset;         /* DRR - DECO reset */
+       u32 rsvd6[182];
+
+       /* Key Encryption/Decryption Configuration              400-5ff */
+       /* Read/Writable only while in Non-secure mode                  */
+       u32 kek[KEK_KEY_SIZE];  /* JDKEKR - Key Encryption Key */
+       u32 tkek[TKEK_KEY_SIZE];        /* TDKEKR - Trusted Desc KEK */
+       u32 tdsk[TDSK_KEY_SIZE];        /* TDSKR - Trusted Desc Signing Key */
+       u32 rsvd7[32];
+       u64 sknonce;                    /* SKNR - Secure Key Nonce */
+       u32 rsvd8[70];
+
+       /* RNG Test/Verification/Debug Access                   600-7ff */
+       /* (Useful in Test/Debug modes only...)                         */
+       struct rngtst rtst[2];
+
+       u32 rsvd9[448];
+
+       /* Performance Monitor                                  f00-fff */
+       struct caam_perfmon perfmon;
+};
+
+/*
+ * Controller master config register defs
+ */
+#define MCFGR_SWRESET          0x80000000 /* software reset */
+#define MCFGR_WDENABLE         0x40000000 /* DECO watchdog enable */
+#define MCFGR_WDFAIL           0x20000000 /* DECO watchdog force-fail */
+#define MCFGR_DMA_RESET                0x10000000
+#define MCFGR_LONG_PTR         0x00010000 /* Use >32-bit desc addressing */
+
+/* AXI read cache control */
+#define MCFGR_ARCACHE_SHIFT    12
+#define MCFGR_ARCACHE_MASK     (0xf << MCFGR_ARCACHE_SHIFT)
+
+/* AXI write cache control */
+#define MCFGR_AWCACHE_SHIFT    8
+#define MCFGR_AWCACHE_MASK     (0xf << MCFGR_AWCACHE_SHIFT)
+
+/* AXI pipeline depth */
+#define MCFGR_AXIPIPE_SHIFT    4
+#define MCFGR_AXIPIPE_MASK     (0xf << MCFGR_AXIPIPE_SHIFT)
+
+#define MCFGR_AXIPRI           0x00000008 /* Assert AXI priority sideband */
+#define MCFGR_BURST_64         0x00000001 /* Max burst size */
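Aside (not part of the patch): a sketch of programming MCFG with the bits above,
assuming ctrl points at the mapped caam_ctrl block and the rd_reg32()/wr_reg32()
helpers from earlier in this header; the 0x1 cache-attribute values are purely
illustrative, real settings are platform-specific:

    u32 mcr = rd_reg32(&ctrl->mcr);

    mcr &= ~(MCFGR_ARCACHE_MASK | MCFGR_AWCACHE_MASK);
    mcr |= (0x1 << MCFGR_ARCACHE_SHIFT) |   /* example AXI read hint  */
           (0x1 << MCFGR_AWCACHE_SHIFT) |   /* example AXI write hint */
           MCFGR_BURST_64;                  /* 64-byte max bursts     */
    wr_reg32(&ctrl->mcr, mcr);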
+
+/*
+ * caam_job_ring - direct job ring setup
+ * 1-4 possible per instantiation, base + 1000/2000/3000/4000
+ * Padded out to 0x1000
+ */
+struct caam_job_ring {
+       /* Input ring */
+       u64 inpring_base;       /* IRBAx -  Input desc ring baseaddr */
+       u32 rsvd1;
+       u32 inpring_size;       /* IRSx - Input ring size */
+       u32 rsvd2;
+       u32 inpring_avail;      /* IRSAx - Input ring room remaining */
+       u32 rsvd3;
+       u32 inpring_jobadd;     /* IRJAx - Input ring jobs added */
+
+       /* Output Ring */
+       u64 outring_base;       /* ORBAx - Output status ring base addr */
+       u32 rsvd4;
+       u32 outring_size;       /* ORSx - Output ring size */
+       u32 rsvd5;
+       u32 outring_rmvd;       /* ORJRx - Output ring jobs removed */
+       u32 rsvd6;
+       u32 outring_used;       /* ORSFx - Output ring slots full */
+
+       /* Status/Configuration */
+       u32 rsvd7;
+       u32 jroutstatus;        /* JRSTAx - JobR output status */
+       u32 rsvd8;
+       u32 jrintstatus;        /* JRINTx - JobR interrupt status */
+       u32 rconfig_hi; /* JRxCFG - Ring configuration */
+       u32 rconfig_lo;
+
+       /* Indices. CAAM maintains as "heads" of each queue */
+       u32 rsvd9;
+       u32 inp_rdidx;  /* IRRIx - Input ring read index */
+       u32 rsvd10;
+       u32 out_wtidx;  /* ORWIx - Output ring write index */
+
+       /* Command/control */
+       u32 rsvd11;
+       u32 jrcommand;  /* JRCRx - JobR command */
+
+       u32 rsvd12[932];
+
+       /* Performance Monitor                                  f00-fff */
+       struct caam_perfmon perfmon;
+};
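Aside (not part of the patch): the added/removed counters above form the ring
protocol. A minimal polled sketch, assuming jr maps a caam_job_ring and a
descriptor address has already been written into the next input ring slot:

    wr_reg32(&jr->inpring_jobadd, 1);         /* IRJA: one job enqueued  */

    while (rd_reg32(&jr->outring_used) == 0)  /* ORSF: wait for a result */
            cpu_relax();

    /* ...consume the output ring entry here, then... */
    wr_reg32(&jr->outring_rmvd, 1);           /* ORJR: one job dequeued  */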
+
+#define JR_RINGSIZE_MASK       0x03ff
+/*
+ * jroutstatus - Job Ring Output Status
+ * All values in lo word
+ * Note: the same values are written out as status through the QI
+ * in the command/status field of a frame descriptor
+ */
+#define JRSTA_SSRC_SHIFT            28
+#define JRSTA_SSRC_MASK             0xf0000000
+
+#define JRSTA_SSRC_NONE             0x00000000
+#define JRSTA_SSRC_CCB_ERROR        0x20000000
+#define JRSTA_SSRC_JUMP_HALT_USER   0x30000000
+#define JRSTA_SSRC_DECO             0x40000000
+#define JRSTA_SSRC_JRERROR          0x60000000
+#define JRSTA_SSRC_JUMP_HALT_CC     0x70000000
+
+#define JRSTA_DECOERR_JUMP          0x08000000
+#define JRSTA_DECOERR_INDEX_SHIFT   8
+#define JRSTA_DECOERR_INDEX_MASK    0xff00
+#define JRSTA_DECOERR_ERROR_MASK    0x00ff
+
+#define JRSTA_DECOERR_NONE          0x00
+#define JRSTA_DECOERR_LINKLEN       0x01
+#define JRSTA_DECOERR_LINKPTR       0x02
+#define JRSTA_DECOERR_JRCTRL        0x03
+#define JRSTA_DECOERR_DESCCMD       0x04
+#define JRSTA_DECOERR_ORDER         0x05
+#define JRSTA_DECOERR_KEYCMD        0x06
+#define JRSTA_DECOERR_LOADCMD       0x07
+#define JRSTA_DECOERR_STORECMD      0x08
+#define JRSTA_DECOERR_OPCMD         0x09
+#define JRSTA_DECOERR_FIFOLDCMD     0x0a
+#define JRSTA_DECOERR_FIFOSTCMD     0x0b
+#define JRSTA_DECOERR_MOVECMD       0x0c
+#define JRSTA_DECOERR_JUMPCMD       0x0d
+#define JRSTA_DECOERR_MATHCMD       0x0e
+#define JRSTA_DECOERR_SHASHCMD      0x0f
+#define JRSTA_DECOERR_SEQCMD        0x10
+#define JRSTA_DECOERR_DECOINTERNAL  0x11
+#define JRSTA_DECOERR_SHDESCHDR     0x12
+#define JRSTA_DECOERR_HDRLEN        0x13
+#define JRSTA_DECOERR_BURSTER       0x14
+#define JRSTA_DECOERR_DESCSIGNATURE 0x15
+#define JRSTA_DECOERR_DMA           0x16
+#define JRSTA_DECOERR_BURSTFIFO     0x17
+#define JRSTA_DECOERR_JRRESET       0x1a
+#define JRSTA_DECOERR_JOBFAIL       0x1b
+#define JRSTA_DECOERR_DNRERR        0x80
+#define JRSTA_DECOERR_UNDEFPCL      0x81
+#define JRSTA_DECOERR_PDBERR        0x82
+#define JRSTA_DECOERR_ANRPLY_LATE   0x83
+#define JRSTA_DECOERR_ANRPLY_REPLAY 0x84
+#define JRSTA_DECOERR_SEQOVF        0x85
+#define JRSTA_DECOERR_INVSIGN       0x86
+#define JRSTA_DECOERR_DSASIGN       0x87
+
+#define JRSTA_CCBERR_JUMP           0x08000000
+#define JRSTA_CCBERR_INDEX_MASK     0xff00
+#define JRSTA_CCBERR_INDEX_SHIFT    8
+#define JRSTA_CCBERR_CHAID_MASK     0x00f0
+#define JRSTA_CCBERR_CHAID_SHIFT    4
+#define JRSTA_CCBERR_ERRID_MASK     0x000f
+
+#define JRSTA_CCBERR_CHAID_AES      (0x01 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_DES      (0x02 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_ARC4     (0x03 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_MD       (0x04 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_RNG      (0x05 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_SNOW     (0x06 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_KASUMI   (0x07 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_PK       (0x08 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_CRC      (0x09 << JRSTA_CCBERR_CHAID_SHIFT)
+
+#define JRSTA_CCBERR_ERRID_NONE     0x00
+#define JRSTA_CCBERR_ERRID_MODE     0x01
+#define JRSTA_CCBERR_ERRID_DATASIZ  0x02
+#define JRSTA_CCBERR_ERRID_KEYSIZ   0x03
+#define JRSTA_CCBERR_ERRID_PKAMEMSZ 0x04
+#define JRSTA_CCBERR_ERRID_PKBMEMSZ 0x05
+#define JRSTA_CCBERR_ERRID_SEQUENCE 0x06
+#define JRSTA_CCBERR_ERRID_PKDIVZRO 0x07
+#define JRSTA_CCBERR_ERRID_PKMODEVN 0x08
+#define JRSTA_CCBERR_ERRID_KEYPARIT 0x09
+#define JRSTA_CCBERR_ERRID_ICVCHK   0x0a
+#define JRSTA_CCBERR_ERRID_HARDWARE 0x0b
+#define JRSTA_CCBERR_ERRID_CCMAAD   0x0c
+#define JRSTA_CCBERR_ERRID_INVCHA   0x0f
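Aside (not part of the patch): a sketch of how a completion handler can
classify a JRSTA status word with the masks above:

    static void report_jr_status(u32 status)
    {
            switch (status & JRSTA_SSRC_MASK) {
            case JRSTA_SSRC_NONE:
                    break;          /* success */
            case JRSTA_SSRC_CCB_ERROR:
                    pr_err("CCB error: CHA id %u, error id %u\n",
                           (status & JRSTA_CCBERR_CHAID_MASK) >>
                           JRSTA_CCBERR_CHAID_SHIFT,
                           status & JRSTA_CCBERR_ERRID_MASK);
                    break;
            case JRSTA_SSRC_DECO:
                    pr_err("DECO error 0x%02x\n",
                           status & JRSTA_DECOERR_ERROR_MASK);
                    break;
            default:
                    pr_err("status 0x%08x from another source\n", status);
            }
    }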
+
+#define JRINT_ERR_INDEX_MASK        0x3fff0000
+#define JRINT_ERR_INDEX_SHIFT       16
+#define JRINT_ERR_TYPE_MASK         0xf00
+#define JRINT_ERR_TYPE_SHIFT        8
+#define JRINT_ERR_HALT_MASK         0xc
+#define JRINT_ERR_HALT_SHIFT        2
+#define JRINT_ERR_HALT_INPROGRESS   0x4
+#define JRINT_ERR_HALT_COMPLETE     0x8
+#define JRINT_JR_ERROR              0x02
+#define JRINT_JR_INT                0x01
+
+#define JRINT_ERR_TYPE_WRITE        1
+#define JRINT_ERR_TYPE_BAD_INPADDR  3
+#define JRINT_ERR_TYPE_BAD_OUTADDR  4
+#define JRINT_ERR_TYPE_INV_INPWRT   5
+#define JRINT_ERR_TYPE_INV_OUTWRT   6
+#define JRINT_ERR_TYPE_RESET        7
+#define JRINT_ERR_TYPE_REMOVE_OFL   8
+#define JRINT_ERR_TYPE_ADD_OFL      9
+
+#define JRCFG_SOE              0x04
+#define JRCFG_ICEN             0x02
+#define JRCFG_IMSK             0x01
+#define JRCFG_ICDCT_SHIFT      8
+#define JRCFG_ICTT_SHIFT       16
+
+#define JRCR_RESET                  0x01
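Aside (not part of the patch): resetting a ring is a two-step sequence tied to
the JRINT halt bits above; error and timeout handling are omitted in this sketch:

    /* first write flushes the ring and requests a halt */
    wr_reg32(&jr->jrcommand, JRCR_RESET);
    while ((rd_reg32(&jr->jrintstatus) & JRINT_ERR_HALT_MASK) ==
           JRINT_ERR_HALT_INPROGRESS)
            cpu_relax();

    /* second write performs the actual reset once the halt completes */
    wr_reg32(&jr->jrcommand, JRCR_RESET);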
+
+/*
+ * caam_assurance - Assurance Controller View
+ * base + 0x6000 padded out to 0x1000
+ */
+
+struct rtic_element {
+       u64 address;
+       u32 rsvd;
+       u32 length;
+};
+
+struct rtic_block {
+       struct rtic_element element[2];
+};
+
+struct rtic_memhash {
+       u32 memhash_be[32];
+       u32 memhash_le[32];
+};
+
+struct caam_assurance {
+    /* Status/Command/Watchdog */
+       u32 rsvd1;
+       u32 status;             /* RSTA - Status */
+       u32 rsvd2;
+       u32 cmd;                /* RCMD - Command */
+       u32 rsvd3;
+       u32 ctrl;               /* RCTL - Control */
+       u32 rsvd4;
+       u32 throttle;   /* RTHR - Throttle */
+       u32 rsvd5[2];
+       u64 watchdog;   /* RWDOG - Watchdog Timer */
+       u32 rsvd6;
+       u32 rend;               /* REND - Endian corrections */
+       u32 rsvd7[50];
+
+       /* Block access/configuration @ 100/110/120/130 */
+       struct rtic_block memblk[4];    /* Memory Blocks A-D */
+       u32 rsvd8[32];
+
+       /* Block hashes @ 200/300/400/500 */
+       struct rtic_memhash hash[4];    /* Block hash values A-D */
+       u32 rsvd9[640];
+};
+
+/*
+ * caam_queue_if - QI configuration and control
+ * starts base + 0x7000, padded out to 0x1000 long
+ */
+
+struct caam_queue_if {
+       u32 qi_control_hi;      /* QICTL  - QI Control */
+       u32 qi_control_lo;
+       u32 rsvd1;
+       u32 qi_status;  /* QISTA  - QI Status */
+       u32 qi_deq_cfg_hi;      /* QIDQC  - QI Dequeue Configuration */
+       u32 qi_deq_cfg_lo;
+       u32 qi_enq_cfg_hi;      /* QISEQC - QI Enqueue Command     */
+       u32 qi_enq_cfg_lo;
+       u32 rsvd2[1016];
+};
+
+/* QI control bits - low word */
+#define QICTL_DQEN      0x01              /* Enable frame pop          */
+#define QICTL_STOP      0x02              /* Stop dequeue/enqueue      */
+#define QICTL_SOE       0x04              /* Stop on error             */
+
+/* QI control bits - high word */
+#define QICTL_MBSI     0x01
+#define QICTL_MHWSI    0x02
+#define QICTL_MWSI     0x04
+#define QICTL_MDWSI    0x08
+#define QICTL_CBSI     0x10            /* CtrlDataByteSwapInput     */
+#define QICTL_CHWSI    0x20            /* CtrlDataHalfSwapInput     */
+#define QICTL_CWSI     0x40            /* CtrlDataWordSwapInput     */
+#define QICTL_CDWSI    0x80            /* CtrlDataDWordSwapInput    */
+#define QICTL_MBSO     0x0100
+#define QICTL_MHWSO    0x0200
+#define QICTL_MWSO     0x0400
+#define QICTL_MDWSO    0x0800
+#define QICTL_CBSO     0x1000          /* CtrlDataByteSwapOutput    */
+#define QICTL_CHWSO    0x2000          /* CtrlDataHalfSwapOutput    */
+#define QICTL_CWSO     0x4000          /* CtrlDataWordSwapOutput    */
+#define QICTL_CDWSO     0x8000         /* CtrlDataDWordSwapOutput   */
+#define QICTL_DMBS     0x010000
+#define QICTL_EPO      0x020000
+
+/* QI status bits */
+#define QISTA_PHRDERR   0x01              /* PreHeader Read Error      */
+#define QISTA_CFRDERR   0x02              /* Compound Frame Read Error */
+#define QISTA_OFWRERR   0x04              /* Output Frame Write Error  */
+#define QISTA_BPDERR    0x08              /* Buffer Pool Depleted      */
+#define QISTA_BTSERR    0x10              /* Buffer Undersize          */
+#define QISTA_CFWRERR   0x20              /* Compound Frame Write Err  */
+#define QISTA_STOPD     0x80000000        /* QI Stopped (see QICTL)    */
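Aside (not part of the patch): a sketch tying the QICTL and QISTA bits
together, assuming qi maps the caam_queue_if block:

    /* enable frame dequeue, stopping the QI on any error */
    wr_reg32(&qi->qi_control_lo, QICTL_DQEN | QICTL_SOE);

    if (rd_reg32(&qi->qi_status) & QISTA_STOPD)
            pr_warn("QI stopped; check the QISTA error bits\n");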
+
+/* deco_sg_table - DECO view of scatter/gather table */
+struct deco_sg_table {
+       u64 addr;               /* Segment Address */
+       u32 elen;               /* E, F bits + 30-bit length */
+       u32 bpid_offset;        /* Buffer Pool ID + 16-bit offset */
+};
+
+/*
+ * caam_deco - descriptor controller - CHA cluster block
+ *
+ * Only accessible when direct DECO access is turned on
+ * (done in DECORR, via MID programmed in DECOxMID)
+ *
+ * 5 typical, base + 0x8000/9000/a000/b000/c000
+ * Padded out to 0x1000 long
+ */
+struct caam_deco {
+       u32 rsvd1;
+       u32 cls1_mode;  /* CxC1MR -  Class 1 Mode */
+       u32 rsvd2;
+       u32 cls1_keysize;       /* CxC1KSR - Class 1 Key Size */
+       u32 cls1_datasize_hi;   /* CxC1DSR - Class 1 Data Size */
+       u32 cls1_datasize_lo;
+       u32 rsvd3;
+       u32 cls1_icvsize;       /* CxC1ICVSR - Class 1 ICV size */
+       u32 rsvd4[5];
+       u32 cha_ctrl;   /* CCTLR - CHA control */
+       u32 rsvd5;
+       u32 irq_crtl;   /* CxCIRQ - CCB interrupt done/error/clear */
+       u32 rsvd6;
+       u32 clr_written;        /* CxCWR - Clear-Written */
+       u32 ccb_status_hi;      /* CxCSTA - CCB Status/Error */
+       u32 ccb_status_lo;
+       u32 rsvd7[3];
+       u32 aad_size;   /* CxAADSZR - Current AAD Size */
+       u32 rsvd8;
+       u32 cls1_iv_size;       /* CxC1IVSZR - Current Class 1 IV Size */
+       u32 rsvd9[7];
+       u32 pkha_a_size;        /* PKASZRx - Size of PKHA A */
+       u32 rsvd10;
+       u32 pkha_b_size;        /* PKBSZRx - Size of PKHA B */
+       u32 rsvd11;
+       u32 pkha_n_size;        /* PKNSZRx - Size of PKHA N */
+       u32 rsvd12;
+       u32 pkha_e_size;        /* PKESZRx - Size of PKHA E */
+       u32 rsvd13[24];
+       u32 cls1_ctx[16];       /* CxC1CTXR - Class 1 Context @100 */
+       u32 rsvd14[48];
+       u32 cls1_key[8];        /* CxC1KEYR - Class 1 Key @200 */
+       u32 rsvd15[121];
+       u32 cls2_mode;  /* CxC2MR - Class 2 Mode */
+       u32 rsvd16;
+       u32 cls2_keysize;       /* CxC2KSR - Class 2 Key Size */
+       u32 cls2_datasize_hi;   /* CxC2DSR - Class 2 Data Size */
+       u32 cls2_datasize_lo;
+       u32 rsvd17;
+       u32 cls2_icvsize;       /* CxC2ICVSZR - Class 2 ICV Size */
+       u32 rsvd18[56];
+       u32 cls2_ctx[18];       /* CxC2CTXR - Class 2 Context @500 */
+       u32 rsvd19[46];
+       u32 cls2_key[32];       /* CxC2KEYR - Class2 Key @600 */
+       u32 rsvd20[84];
+       u32 inp_infofifo_hi;    /* CxIFIFO - Input Info FIFO @7d0 */
+       u32 inp_infofifo_lo;
+       u32 rsvd21[2];
+       u64 inp_datafifo;       /* CxDFIFO - Input Data FIFO */
+       u32 rsvd22[2];
+       u64 out_datafifo;       /* CxOFIFO - Output Data FIFO */
+       u32 rsvd23[2];
+       u32 jr_ctl_hi;  /* CxJRR - JobR Control Register      @800 */
+       u32 jr_ctl_lo;
+       u64 jr_descaddr;        /* CxDADR - JobR Descriptor Address */
+       u32 op_status_hi;       /* DxOPSTA - DECO Operation Status */
+       u32 op_status_lo;
+       u32 rsvd24[2];
+       u32 liodn;              /* DxLSR - DECO LIODN Status - non-seq */
+       u32 td_liodn;   /* DxLSR - DECO LIODN Status - trustdesc */
+       u32 rsvd26[6];
+       u64 math[4];            /* DxMTH - Math register */
+       u32 rsvd27[8];
+       struct deco_sg_table gthr_tbl[4];       /* DxGTR - Gather Tables */
+       u32 rsvd28[16];
+       struct deco_sg_table sctr_tbl[4];       /* DxSTR - Scatter Tables */
+       u32 rsvd29[48];
+       u32 descbuf[64];        /* DxDESB - Descriptor buffer */
+       u32 rsvd30[320];
+};
+
+/*
+ * Current top-level view of memory map is:
+ *
+ * 0x0000 - 0x0fff - CAAM Top-Level Control
+ * 0x1000 - 0x1fff - Job Ring 0
+ * 0x2000 - 0x2fff - Job Ring 1
+ * 0x3000 - 0x3fff - Job Ring 2
+ * 0x4000 - 0x4fff - Job Ring 3
+ * 0x5000 - 0x5fff - (unused)
+ * 0x6000 - 0x6fff - Assurance Controller
+ * 0x7000 - 0x7fff - Queue Interface
+ * 0x8000 - 0x8fff - DECO-CCB 0
+ * 0x9000 - 0x9fff - DECO-CCB 1
+ * 0xa000 - 0xafff - DECO-CCB 2
+ * 0xb000 - 0xbfff - DECO-CCB 3
+ * 0xc000 - 0xcfff - DECO-CCB 4
+ *
+ * caam_full describes the full register view of CAAM if useful,
+ * although many configurations may choose to implement parts of
+ * the register map separately, in differing privilege regions
+ */
+struct caam_full {
+       struct caam_ctrl __iomem ctrl;
+       struct caam_job_ring jr[4];
+       u64 rsvd[512];
+       struct caam_assurance assure;
+       struct caam_queue_if qi;
+       struct caam_deco *deco;
+};
+
+#endif /* REGS_H */
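Aside (not part of the patch): how the memory map above is typically consumed,
assuming base is the ioremap()ed controller address:

    struct caam_full __iomem *topregs = base;

    struct caam_ctrl __iomem *ctrl = &topregs->ctrl;           /* base + 0x0000 */
    struct caam_job_ring __iomem *jr0 = &topregs->jr[0];       /* base + 0x1000 */
    struct caam_assurance __iomem *assure = &topregs->assure;  /* base + 0x6000 */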
index c99305a..3cf303e 100644 (file)
@@ -133,7 +133,6 @@ struct mv_req_hash_ctx {
        int extra_bytes;        /* unprocessed bytes in buffer */
        enum hash_op op;
        int count_add;
-       struct scatterlist dummysg;
 };
 
 static void compute_aes_dec_key(struct mv_ctx *ctx)
@@ -187,9 +186,9 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
 {
        int ret;
        void *sbuf;
-       int copied = 0;
+       int copy_len;
 
-       while (1) {
+       while (len) {
                if (!p->sg_src_left) {
                        ret = sg_miter_next(&p->src_sg_it);
                        BUG_ON(!ret);
@@ -199,19 +198,14 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
 
                sbuf = p->src_sg_it.addr + p->src_start;
 
-               if (p->sg_src_left <= len - copied) {
-                       memcpy(dbuf + copied, sbuf, p->sg_src_left);
-                       copied += p->sg_src_left;
-                       p->sg_src_left = 0;
-                       if (copied >= len)
-                               break;
-               } else {
-                       int copy_len = len - copied;
-                       memcpy(dbuf + copied, sbuf, copy_len);
-                       p->src_start += copy_len;
-                       p->sg_src_left -= copy_len;
-                       break;
-               }
+               copy_len = min(p->sg_src_left, len);
+               memcpy(dbuf, sbuf, copy_len);
+
+               p->src_start += copy_len;
+               p->sg_src_left -= copy_len;
+
+               len -= copy_len;
+               dbuf += copy_len;
        }
 }
 
@@ -275,7 +269,6 @@ static void mv_process_current_q(int first_block)
        memcpy(cpg->sram + SRAM_CONFIG, &op,
                        sizeof(struct sec_accel_config));
 
-       writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
        /* GO */
        writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
 
@@ -302,6 +295,7 @@ static void mv_crypto_algo_completion(void)
 static void mv_process_hash_current(int first_block)
 {
        struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+       const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
        struct req_progress *p = &cpg->p;
        struct sec_accel_config op = { 0 };
@@ -314,6 +308,8 @@ static void mv_process_hash_current(int first_block)
                break;
        case COP_HMAC_SHA1:
                op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
+               memcpy(cpg->sram + SRAM_HMAC_IV_IN,
+                               tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
                break;
        }
 
@@ -345,11 +341,16 @@ static void mv_process_hash_current(int first_block)
                        op.config |= CFG_LAST_FRAG;
                else
                        op.config |= CFG_MID_FRAG;
+
+               writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+               writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+               writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+               writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+               writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
        }
 
        memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
 
-       writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
        /* GO */
        writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
 
@@ -409,12 +410,6 @@ static void mv_hash_algo_completion(void)
                copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
        sg_miter_stop(&cpg->p.src_sg_it);
 
-       ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
-       ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
-       ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
-       ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
-       ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
-
        if (likely(ctx->last_chunk)) {
                if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
                        memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
@@ -422,6 +417,12 @@ static void mv_hash_algo_completion(void)
                                                       (req)));
                } else
                        mv_hash_final_fallback(req);
+       } else {
+               ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
+               ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
+               ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
+               ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
+               ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
        }
 }
 
@@ -480,7 +481,7 @@ static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
        int i = 0;
        size_t cur_len;
 
-       while (1) {
+       while (sl) {
                cur_len = sl[i].length;
                ++i;
                if (total_bytes > cur_len)
@@ -517,29 +518,12 @@ static void mv_start_new_hash_req(struct ahash_request *req)
 {
        struct req_progress *p = &cpg->p;
        struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-       const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
        int num_sgs, hw_bytes, old_extra_bytes, rc;
        cpg->cur_req = &req->base;
        memset(p, 0, sizeof(struct req_progress));
        hw_bytes = req->nbytes + ctx->extra_bytes;
        old_extra_bytes = ctx->extra_bytes;
 
-       if (unlikely(ctx->extra_bytes)) {
-               memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
-                      ctx->extra_bytes);
-               p->crypt_len = ctx->extra_bytes;
-       }
-
-       memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
-
-       if (unlikely(!ctx->first_hash)) {
-               writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
-               writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
-               writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
-               writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
-               writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
-       }
-
        ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
        if (ctx->extra_bytes != 0
            && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
@@ -555,6 +539,12 @@ static void mv_start_new_hash_req(struct ahash_request *req)
                p->complete = mv_hash_algo_completion;
                p->process = mv_process_hash_current;
 
+               if (unlikely(old_extra_bytes)) {
+                       memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
+                              old_extra_bytes);
+                       p->crypt_len = old_extra_bytes;
+               }
+
                mv_process_hash_current(1);
        } else {
                copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
@@ -603,9 +593,7 @@ static int queue_manag(void *data)
                        if (async_req->tfm->__crt_alg->cra_type !=
                            &crypto_ahash_type) {
                                struct ablkcipher_request *req =
-                                   container_of(async_req,
-                                                struct ablkcipher_request,
-                                                base);
+                                   ablkcipher_request_cast(async_req);
                                mv_start_new_crypt_req(req);
                        } else {
                                struct ahash_request *req =
@@ -722,19 +710,13 @@ static int mv_hash_update(struct ahash_request *req)
 static int mv_hash_final(struct ahash_request *req)
 {
        struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-       /* dummy buffer of 4 bytes */
-       sg_init_one(&ctx->dummysg, ctx->buffer, 4);
-       /* I think I'm allowed to do that... */
-       ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
+
        mv_update_hash_req_ctx(ctx, 1, 0);
        return mv_handle_req(&req->base);
 }
 
 static int mv_hash_finup(struct ahash_request *req)
 {
-       if (!req->nbytes)
-               return mv_hash_final(req);
-
        mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
        return mv_handle_req(&req->base);
 }
@@ -1065,14 +1047,21 @@ static int mv_probe(struct platform_device *pdev)
 
        writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
        writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
+       writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
 
        ret = crypto_register_alg(&mv_aes_alg_ecb);
-       if (ret)
+       if (ret) {
+               printk(KERN_WARNING MV_CESA
+                      "Could not register aes-ecb driver\n");
                goto err_irq;
+       }
 
        ret = crypto_register_alg(&mv_aes_alg_cbc);
-       if (ret)
+       if (ret) {
+               printk(KERN_WARNING MV_CESA
+                      "Could not register aes-cbc driver\n");
                goto err_unreg_ecb;
+       }
 
        ret = crypto_register_ahash(&mv_sha1_alg);
        if (ret == 0)
index 465cde3..ba8f1ea 100644 (file)
@@ -78,7 +78,6 @@
 #define FLAGS_SHA1             0x0010
 #define FLAGS_DMA_ACTIVE       0x0020
 #define FLAGS_OUTPUT_READY     0x0040
-#define FLAGS_CLEAN            0x0080
 #define FLAGS_INIT             0x0100
 #define FLAGS_CPU              0x0200
 #define FLAGS_HMAC             0x0400
@@ -511,26 +510,6 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
        return 0;
 }
 
-static void omap_sham_cleanup(struct ahash_request *req)
-{
-       struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
-       struct omap_sham_dev *dd = ctx->dd;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dd->lock, flags);
-       if (ctx->flags & FLAGS_CLEAN) {
-               spin_unlock_irqrestore(&dd->lock, flags);
-               return;
-       }
-       ctx->flags |= FLAGS_CLEAN;
-       spin_unlock_irqrestore(&dd->lock, flags);
-
-       if (ctx->digcnt)
-               omap_sham_copy_ready_hash(req);
-
-       dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
-}
-
 static int omap_sham_init(struct ahash_request *req)
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -618,9 +597,8 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
        return err;
 }
 
-static int omap_sham_finish_req_hmac(struct ahash_request *req)
+static int omap_sham_finish_hmac(struct ahash_request *req)
 {
-       struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        struct omap_sham_hmac_ctx *bctx = tctx->base;
        int bs = crypto_shash_blocksize(bctx->shash);
@@ -635,7 +613,24 @@ static int omap_sham_finish_req_hmac(struct ahash_request *req)
 
        return crypto_shash_init(&desc.shash) ?:
               crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
-              crypto_shash_finup(&desc.shash, ctx->digest, ds, ctx->digest);
+              crypto_shash_finup(&desc.shash, req->result, ds, req->result);
+}
+
+static int omap_sham_finish(struct ahash_request *req)
+{
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+       struct omap_sham_dev *dd = ctx->dd;
+       int err = 0;
+
+       if (ctx->digcnt) {
+               omap_sham_copy_ready_hash(req);
+               if (ctx->flags & FLAGS_HMAC)
+                       err = omap_sham_finish_hmac(req);
+       }
+
+       dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
+
+       return err;
 }
 
 static void omap_sham_finish_req(struct ahash_request *req, int err)
@@ -645,15 +640,12 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
 
        if (!err) {
                omap_sham_copy_hash(ctx->dd->req, 1);
-               if (ctx->flags & FLAGS_HMAC)
-                       err = omap_sham_finish_req_hmac(req);
+               if (ctx->flags & FLAGS_FINAL)
+                       err = omap_sham_finish(req);
        } else {
                ctx->flags |= FLAGS_ERROR;
        }
 
-       if ((ctx->flags & FLAGS_FINAL) || err)
-               omap_sham_cleanup(req);
-
        clk_disable(dd->iclk);
        dd->flags &= ~FLAGS_BUSY;
 
@@ -809,22 +801,21 @@ static int omap_sham_final_shash(struct ahash_request *req)
 static int omap_sham_final(struct ahash_request *req)
 {
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
-       int err = 0;
 
        ctx->flags |= FLAGS_FINUP;
 
-       if (!(ctx->flags & FLAGS_ERROR)) {
-               /* OMAP HW accel works only with buffers >= 9 */
-               /* HMAC is always >= 9 because of ipad */
-               if ((ctx->digcnt + ctx->bufcnt) < 9)
-                       err = omap_sham_final_shash(req);
-               else if (ctx->bufcnt)
-                       return omap_sham_enqueue(req, OP_FINAL);
-       }
+       if (ctx->flags & FLAGS_ERROR)
+               return 0; /* an incomplete hash is not needed on error */
 
-       omap_sham_cleanup(req);
+       /* OMAP HW accel works only with buffers >= 9 bytes */
+       /* HMAC is always >= 9 bytes because ipad == block size */
+       if ((ctx->digcnt + ctx->bufcnt) < 9)
+               return omap_sham_final_shash(req);
+       else if (ctx->bufcnt)
+               return omap_sham_enqueue(req, OP_FINAL);
 
-       return err;
+       /* copy ready hash (+ finalize hmac) */
+       return omap_sham_finish(req);
 }
 
 static int omap_sham_finup(struct ahash_request *req)
@@ -835,7 +826,7 @@ static int omap_sham_finup(struct ahash_request *req)
        ctx->flags |= FLAGS_FINUP;
 
        err1 = omap_sham_update(req);
-       if (err1 == -EINPROGRESS)
+       if (err1 == -EINPROGRESS || err1 == -EBUSY)
                return err1;
        /*
         * final() has to be always called to cleanup resources
@@ -890,8 +881,6 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
        struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
        const char *alg_name = crypto_tfm_alg_name(tfm);
 
-       pr_info("enter\n");
-
        /* Allocate a fallback and abort if it failed. */
        tctx->fallback = crypto_alloc_shash(alg_name, 0,
                                            CRYPTO_ALG_NEED_FALLBACK);
@@ -1297,7 +1286,8 @@ static int __init omap_sham_mod_init(void)
        pr_info("loading %s driver\n", "omap-sham");
 
        if (!cpu_class_is_omap2() ||
-               omap_type() != OMAP2_DEVICE_TYPE_SEC) {
+               (omap_type() != OMAP2_DEVICE_TYPE_SEC &&
+                       omap_type() != OMAP2_DEVICE_TYPE_EMU)) {
                pr_err("Unsupported cpu\n");
                return -ENODEV;
        }
index adf075b..06bdb4b 100644 (file)
@@ -288,9 +288,250 @@ static struct shash_alg sha256_alg = {
        }
 };
 
+/* Add two shash_alg instances for the hardware-implemented *
+ * multi-part hash supported by the VIA Nano processor.     */
+static int padlock_sha1_init_nano(struct shash_desc *desc)
+{
+       struct sha1_state *sctx = shash_desc_ctx(desc);
+
+       *sctx = (struct sha1_state){
+               .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+       };
+
+       return 0;
+}
+
+static int padlock_sha1_update_nano(struct shash_desc *desc,
+                       const u8 *data, unsigned int len)
+{
+       struct sha1_state *sctx = shash_desc_ctx(desc);
+       unsigned int partial, done;
+       const u8 *src;
+       /* The PHE requires the output buffer to be 128 bytes and 16-byte aligned */
+       u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+               ((aligned(STACK_ALIGN)));
+       u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+       int ts_state;
+
+       partial = sctx->count & 0x3f;
+       sctx->count += len;
+       done = 0;
+       src = data;
+       memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);
+
+       if ((partial + len) >= SHA1_BLOCK_SIZE) {
+
+               /* Complete one block with the bytes already buffered in the state */
+               if (partial) {
+                       done = -partial;
+                       memcpy(sctx->buffer + partial, data,
+                               done + SHA1_BLOCK_SIZE);
+                       src = sctx->buffer;
+                       ts_state = irq_ts_save();
+                       asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
+                       : "+S"(src), "+D"(dst) \
+                       : "a"((long)-1), "c"((unsigned long)1));
+                       irq_ts_restore(ts_state);
+                       done += SHA1_BLOCK_SIZE;
+                       src = data + done;
+               }
+
+               /* Process the remaining full blocks of input data */
+               if (len - done >= SHA1_BLOCK_SIZE) {
+                       ts_state = irq_ts_save();
+                       asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
+                       : "+S"(src), "+D"(dst)
+                       : "a"((long)-1),
+                       "c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
+                       irq_ts_restore(ts_state);
+                       done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
+                       src = data + done;
+               }
+               partial = 0;
+       }
+       memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
+       memcpy(sctx->buffer + partial, src, len - done);
+
+       return 0;
+}
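Aside (not part of the patch): the .byte sequence above hand-assembles the
PadLock instruction REP XSHA1 (0xf3,0x0f,0xa6,0xc8); the 0xf3,0x0f,0xa6,0xd0
variant in the SHA-256 path below is REP XSHA256. The asm constraints spell
out the register contract; the note on EAX reflects this driver's usage:

    /* equivalent intent of the .byte encoding used above:
     *     rep xsha1    ; esi = source, edi = 16-byte-aligned digest,
     *                  ; ecx = block count, eax = -1 (multi-block form,
     *                  ; no implicit final padding, as used here)
     */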
+
+static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out)
+{
+       struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
+       unsigned int partial, padlen;
+       __be64 bits;
+       static const u8 padding[64] = { 0x80, };
+
+       bits = cpu_to_be64(state->count << 3);
+
+       /* Pad out to 56 mod 64 */
+       partial = state->count & 0x3f;
+       padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
+       padlock_sha1_update_nano(desc, padding, padlen);
+
+       /* Append length field bytes */
+       padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits));
+
+       /* Swap to output */
+       padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);
+
+       return 0;
+}
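Aside (not part of the patch): a worked check of the padding arithmetic above.
partial = count % 64 must land on 56 so that the 8 length-field bytes close
the final 64-byte block:

    partial = 10 -> padlen = 56 - 10  = 46;  10 + 46 + 8 = 64
    partial = 60 -> padlen = 120 - 60 = 60;  60 + 60 + 8 = 128  (two blocks)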
+
+static int padlock_sha256_init_nano(struct shash_desc *desc)
+{
+       struct sha256_state *sctx = shash_desc_ctx(desc);
+
+       *sctx = (struct sha256_state){
+               .state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, \
+                               SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7},
+       };
+
+       return 0;
+}
+
+static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
+                         unsigned int len)
+{
+       struct sha256_state *sctx = shash_desc_ctx(desc);
+       unsigned int partial, done;
+       const u8 *src;
+       /* The PHE requires the output buffer to be 128 bytes and 16-byte aligned */
+       u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+               ((aligned(STACK_ALIGN)));
+       u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+       int ts_state;
+
+       partial = sctx->count & 0x3f;
+       sctx->count += len;
+       done = 0;
+       src = data;
+       memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);
+
+       if ((partial + len) >= SHA256_BLOCK_SIZE) {
+
+               /* Complete one block with the bytes already buffered in the state */
+               if (partial) {
+                       done = -partial;
+                       memcpy(sctx->buf + partial, data,
+                               done + SHA256_BLOCK_SIZE);
+                       src = sctx->buf;
+                       ts_state = irq_ts_save();
+                       asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
+                       : "+S"(src), "+D"(dst)
+                       : "a"((long)-1), "c"((unsigned long)1));
+                       irq_ts_restore(ts_state);
+                       done += SHA256_BLOCK_SIZE;
+                       src = data + done;
+               }
+
+               /* Process the remaining full blocks of input data */
+               if (len - done >= SHA256_BLOCK_SIZE) {
+                       ts_state = irq_ts_save();
+                       asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
+                       : "+S"(src), "+D"(dst)
+                       : "a"((long)-1),
+                       "c"((unsigned long)((len - done) / 64)));
+                       irq_ts_restore(ts_state);
+                       done += ((len - done) - (len - done) % 64);
+                       src = data + done;
+               }
+               partial = 0;
+       }
+       memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
+       memcpy(sctx->buf + partial, src, len - done);
+
+       return 0;
+}
+
+static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out)
+{
+       struct sha256_state *state =
+               (struct sha256_state *)shash_desc_ctx(desc);
+       unsigned int partial, padlen;
+       __be64 bits;
+       static const u8 padding[64] = { 0x80, };
+
+       bits = cpu_to_be64(state->count << 3);
+
+       /* Pad out to 56 mod 64 */
+       partial = state->count & 0x3f;
+       padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
+       padlock_sha256_update_nano(desc, padding, padlen);
+
+       /* Append length field bytes */
+       padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits));
+
+       /* Swap to output */
+       padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8);
+
+       return 0;
+}
+
+static int padlock_sha_export_nano(struct shash_desc *desc,
+                               void *out)
+{
+       int statesize = crypto_shash_statesize(desc->tfm);
+       void *sctx = shash_desc_ctx(desc);
+
+       memcpy(out, sctx, statesize);
+       return 0;
+}
+
+static int padlock_sha_import_nano(struct shash_desc *desc,
+                               const void *in)
+{
+       int statesize = crypto_shash_statesize(desc->tfm);
+       void *sctx = shash_desc_ctx(desc);
+
+       memcpy(sctx, in, statesize);
+       return 0;
+}
+
+static struct shash_alg sha1_alg_nano = {
+       .digestsize     =       SHA1_DIGEST_SIZE,
+       .init           =       padlock_sha1_init_nano,
+       .update         =       padlock_sha1_update_nano,
+       .final          =       padlock_sha1_final_nano,
+       .export         =       padlock_sha_export_nano,
+       .import         =       padlock_sha_import_nano,
+       .descsize       =       sizeof(struct sha1_state),
+       .statesize      =       sizeof(struct sha1_state),
+       .base           =       {
+               .cra_name               =       "sha1",
+               .cra_driver_name        =       "sha1-padlock-nano",
+               .cra_priority           =       PADLOCK_CRA_PRIORITY,
+               .cra_flags              =       CRYPTO_ALG_TYPE_SHASH,
+               .cra_blocksize          =       SHA1_BLOCK_SIZE,
+               .cra_module             =       THIS_MODULE,
+       }
+};
+
+static struct shash_alg sha256_alg_nano = {
+       .digestsize     =       SHA256_DIGEST_SIZE,
+       .init           =       padlock_sha256_init_nano,
+       .update         =       padlock_sha256_update_nano,
+       .final          =       padlock_sha256_final_nano,
+       .export         =       padlock_sha_export_nano,
+       .import         =       padlock_sha_import_nano,
+       .descsize       =       sizeof(struct sha256_state),
+       .statesize      =       sizeof(struct sha256_state),
+       .base           =       {
+               .cra_name               =       "sha256",
+               .cra_driver_name        =       "sha256-padlock-nano",
+               .cra_priority           =       PADLOCK_CRA_PRIORITY,
+               .cra_flags              =       CRYPTO_ALG_TYPE_SHASH,
+               .cra_blocksize          =       SHA256_BLOCK_SIZE,
+               .cra_module             =       THIS_MODULE,
+       }
+};
+
 static int __init padlock_init(void)
 {
        int rc = -ENODEV;
+       struct cpuinfo_x86 *c = &cpu_data(0);
+       struct shash_alg *sha1;
+       struct shash_alg *sha256;
 
        if (!cpu_has_phe) {
                printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
@@ -302,11 +543,21 @@ static int __init padlock_init(void)
                return -ENODEV;
        }
 
-       rc = crypto_register_shash(&sha1_alg);
+       /* Register the Nano-specific algorithms when running on a *
+        * VIA Nano processor; otherwise register the originals.   */
+       if (c->x86_model < 0x0f) {
+               sha1 = &sha1_alg;
+               sha256 = &sha256_alg;
+       } else {
+               sha1 = &sha1_alg_nano;
+               sha256 = &sha256_alg_nano;
+       }
+
+       rc = crypto_register_shash(sha1);
        if (rc)
                goto out;
 
-       rc = crypto_register_shash(&sha256_alg);
+       rc = crypto_register_shash(sha256);
        if (rc)
                goto out_unreg1;
 
@@ -315,7 +566,8 @@ static int __init padlock_init(void)
        return 0;
 
 out_unreg1:
-       crypto_unregister_shash(&sha1_alg);
+       crypto_unregister_shash(sha1);
+
 out:
        printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
        return rc;
@@ -323,8 +575,15 @@ out:
 
 static void __exit padlock_fini(void)
 {
-       crypto_unregister_shash(&sha1_alg);
-       crypto_unregister_shash(&sha256_alg);
+       struct cpuinfo_x86 *c = &cpu_data(0);
+
+       if (c->x86_model >= 0x0f) {
+               crypto_unregister_shash(&sha1_alg_nano);
+               crypto_unregister_shash(&sha256_alg_nano);
+       } else {
+               crypto_unregister_shash(&sha1_alg);
+               crypto_unregister_shash(&sha256_alg);
+       }
 }
 
 module_init(padlock_init);
index b092d0a..230b5b8 100644 (file)
@@ -176,6 +176,8 @@ struct spacc_aead_ctx {
        u8                              salt[AES_BLOCK_SIZE];
 };
 
+static int spacc_ablk_submit(struct spacc_req *req);
+
 static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
 {
        return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
@@ -666,6 +668,24 @@ static int spacc_aead_submit(struct spacc_req *req)
        return -EINPROGRESS;
 }
 
+static int spacc_req_submit(struct spacc_req *req);
+
+static void spacc_push(struct spacc_engine *engine)
+{
+       struct spacc_req *req;
+
+       while (!list_empty(&engine->pending) &&
+              engine->in_flight + 1 <= engine->fifo_sz) {
+
+               ++engine->in_flight;
+               req = list_first_entry(&engine->pending, struct spacc_req,
+                                      list);
+               list_move_tail(&req->list, &engine->in_progress);
+
+               req->result = spacc_req_submit(req);
+       }
+}
+
 /*
  * Setup an AEAD request for processing. This will configure the engine, load
  * the context and then start the packet processing.
@@ -698,7 +718,8 @@ static int spacc_aead_setup(struct aead_request *req, u8 *giv,
 
        err = -EINPROGRESS;
        spin_lock_irqsave(&engine->hw_lock, flags);
-       if (unlikely(spacc_fifo_cmd_full(engine))) {
+       if (unlikely(spacc_fifo_cmd_full(engine)) ||
+           engine->in_flight + 1 > engine->fifo_sz) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        err = -EBUSY;
                        spin_unlock_irqrestore(&engine->hw_lock, flags);
@@ -706,9 +727,8 @@ static int spacc_aead_setup(struct aead_request *req, u8 *giv,
                }
                list_add_tail(&dev_req->list, &engine->pending);
        } else {
-               ++engine->in_flight;
-               list_add_tail(&dev_req->list, &engine->in_progress);
-               spacc_aead_submit(dev_req);
+               list_add_tail(&dev_req->list, &engine->pending);
+               spacc_push(engine);
        }
        spin_unlock_irqrestore(&engine->hw_lock, flags);
 
@@ -1041,7 +1061,8 @@ static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
         * we either stick it on the end of a pending list if we can backlog,
         * or bailout with an error if not.
         */
-       if (unlikely(spacc_fifo_cmd_full(engine))) {
+       if (unlikely(spacc_fifo_cmd_full(engine)) ||
+           engine->in_flight + 1 > engine->fifo_sz) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        err = -EBUSY;
                        spin_unlock_irqrestore(&engine->hw_lock, flags);
@@ -1049,9 +1070,8 @@ static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
                }
                list_add_tail(&dev_req->list, &engine->pending);
        } else {
-               ++engine->in_flight;
-               list_add_tail(&dev_req->list, &engine->in_progress);
-               spacc_ablk_submit(dev_req);
+               list_add_tail(&dev_req->list, &engine->pending);
+               spacc_push(engine);
        }
        spin_unlock_irqrestore(&engine->hw_lock, flags);
 
@@ -1139,6 +1159,7 @@ static void spacc_process_done(struct spacc_engine *engine)
                req = list_first_entry(&engine->in_progress, struct spacc_req,
                                       list);
                list_move_tail(&req->list, &engine->completed);
+               --engine->in_flight;
 
                /* POP the status register. */
                writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
@@ -1208,36 +1229,21 @@ static void spacc_spacc_complete(unsigned long data)
        struct spacc_engine *engine = (struct spacc_engine *)data;
        struct spacc_req *req, *tmp;
        unsigned long flags;
-       int num_removed = 0;
        LIST_HEAD(completed);
 
        spin_lock_irqsave(&engine->hw_lock, flags);
+
        list_splice_init(&engine->completed, &completed);
+       spacc_push(engine);
+       if (engine->in_flight)
+               mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
+
        spin_unlock_irqrestore(&engine->hw_lock, flags);
 
        list_for_each_entry_safe(req, tmp, &completed, list) {
-               ++num_removed;
                req->complete(req);
+               list_del(&req->list);
        }
-
-       /* Try and fill the engine back up again. */
-       spin_lock_irqsave(&engine->hw_lock, flags);
-
-       engine->in_flight -= num_removed;
-
-       list_for_each_entry_safe(req, tmp, &engine->pending, list) {
-               if (spacc_fifo_cmd_full(engine))
-                       break;
-
-               list_move_tail(&req->list, &engine->in_progress);
-               ++engine->in_flight;
-               req->result = spacc_req_submit(req);
-       }
-
-       if (engine->in_flight)
-               mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
-
-       spin_unlock_irqrestore(&engine->hw_lock, flags);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
new file mode 100644 (file)
index 0000000..8115417
--- /dev/null
@@ -0,0 +1,701 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for Samsung S5PV210 HW acceleration.
+ *
+ * Copyright (C) 2011 NetUP Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+
+#include <plat/cpu.h>
+#include <plat/dma.h>
+
+#define _SBF(s, v)                      ((v) << (s))
+#define _BIT(b)                         _SBF(b, 1)
+
+/* Feed control registers */
+#define SSS_REG_FCINTSTAT               0x0000
+#define SSS_FCINTSTAT_BRDMAINT          _BIT(3)
+#define SSS_FCINTSTAT_BTDMAINT          _BIT(2)
+#define SSS_FCINTSTAT_HRDMAINT          _BIT(1)
+#define SSS_FCINTSTAT_PKDMAINT          _BIT(0)
+
+#define SSS_REG_FCINTENSET              0x0004
+#define SSS_FCINTENSET_BRDMAINTENSET    _BIT(3)
+#define SSS_FCINTENSET_BTDMAINTENSET    _BIT(2)
+#define SSS_FCINTENSET_HRDMAINTENSET    _BIT(1)
+#define SSS_FCINTENSET_PKDMAINTENSET    _BIT(0)
+
+#define SSS_REG_FCINTENCLR              0x0008
+#define SSS_FCINTENCLR_BRDMAINTENCLR    _BIT(3)
+#define SSS_FCINTENCLR_BTDMAINTENCLR    _BIT(2)
+#define SSS_FCINTENCLR_HRDMAINTENCLR    _BIT(1)
+#define SSS_FCINTENCLR_PKDMAINTENCLR    _BIT(0)
+
+#define SSS_REG_FCINTPEND               0x000C
+#define SSS_FCINTPEND_BRDMAINTP         _BIT(3)
+#define SSS_FCINTPEND_BTDMAINTP         _BIT(2)
+#define SSS_FCINTPEND_HRDMAINTP         _BIT(1)
+#define SSS_FCINTPEND_PKDMAINTP         _BIT(0)
+
+#define SSS_REG_FCFIFOSTAT              0x0010
+#define SSS_FCFIFOSTAT_BRFIFOFUL        _BIT(7)
+#define SSS_FCFIFOSTAT_BRFIFOEMP        _BIT(6)
+#define SSS_FCFIFOSTAT_BTFIFOFUL        _BIT(5)
+#define SSS_FCFIFOSTAT_BTFIFOEMP        _BIT(4)
+#define SSS_FCFIFOSTAT_HRFIFOFUL        _BIT(3)
+#define SSS_FCFIFOSTAT_HRFIFOEMP        _BIT(2)
+#define SSS_FCFIFOSTAT_PKFIFOFUL        _BIT(1)
+#define SSS_FCFIFOSTAT_PKFIFOEMP        _BIT(0)
+
+#define SSS_REG_FCFIFOCTRL              0x0014
+#define SSS_FCFIFOCTRL_DESSEL           _BIT(2)
+#define SSS_HASHIN_INDEPENDENT          _SBF(0, 0x00)
+#define SSS_HASHIN_CIPHER_INPUT         _SBF(0, 0x01)
+#define SSS_HASHIN_CIPHER_OUTPUT        _SBF(0, 0x02)
+
+#define SSS_REG_FCBRDMAS                0x0020
+#define SSS_REG_FCBRDMAL                0x0024
+#define SSS_REG_FCBRDMAC                0x0028
+#define SSS_FCBRDMAC_BYTESWAP           _BIT(1)
+#define SSS_FCBRDMAC_FLUSH              _BIT(0)
+
+#define SSS_REG_FCBTDMAS                0x0030
+#define SSS_REG_FCBTDMAL                0x0034
+#define SSS_REG_FCBTDMAC                0x0038
+#define SSS_FCBTDMAC_BYTESWAP           _BIT(1)
+#define SSS_FCBTDMAC_FLUSH              _BIT(0)
+
+#define SSS_REG_FCHRDMAS                0x0040
+#define SSS_REG_FCHRDMAL                0x0044
+#define SSS_REG_FCHRDMAC                0x0048
+#define SSS_FCHRDMAC_BYTESWAP           _BIT(1)
+#define SSS_FCHRDMAC_FLUSH              _BIT(0)
+
+#define SSS_REG_FCPKDMAS                0x0050
+#define SSS_REG_FCPKDMAL                0x0054
+#define SSS_REG_FCPKDMAC                0x0058
+#define SSS_FCPKDMAC_BYTESWAP           _BIT(3)
+#define SSS_FCPKDMAC_DESCEND            _BIT(2)
+#define SSS_FCPKDMAC_TRANSMIT           _BIT(1)
+#define SSS_FCPKDMAC_FLUSH              _BIT(0)
+
+#define SSS_REG_FCPKDMAO                0x005C
+
+/* AES registers */
+#define SSS_REG_AES_CONTROL             0x4000
+#define SSS_AES_BYTESWAP_DI             _BIT(11)
+#define SSS_AES_BYTESWAP_DO             _BIT(10)
+#define SSS_AES_BYTESWAP_IV             _BIT(9)
+#define SSS_AES_BYTESWAP_CNT            _BIT(8)
+#define SSS_AES_BYTESWAP_KEY            _BIT(7)
+#define SSS_AES_KEY_CHANGE_MODE         _BIT(6)
+#define SSS_AES_KEY_SIZE_128            _SBF(4, 0x00)
+#define SSS_AES_KEY_SIZE_192            _SBF(4, 0x01)
+#define SSS_AES_KEY_SIZE_256            _SBF(4, 0x02)
+#define SSS_AES_FIFO_MODE               _BIT(3)
+#define SSS_AES_CHAIN_MODE_ECB          _SBF(1, 0x00)
+#define SSS_AES_CHAIN_MODE_CBC          _SBF(1, 0x01)
+#define SSS_AES_CHAIN_MODE_CTR          _SBF(1, 0x02)
+#define SSS_AES_MODE_DECRYPT            _BIT(0)
+
+#define SSS_REG_AES_STATUS              0x4004
+#define SSS_AES_BUSY                    _BIT(2)
+#define SSS_AES_INPUT_READY             _BIT(1)
+#define SSS_AES_OUTPUT_READY            _BIT(0)
+
+#define SSS_REG_AES_IN_DATA(s)          (0x4010 + (s << 2))
+#define SSS_REG_AES_OUT_DATA(s)         (0x4020 + (s << 2))
+#define SSS_REG_AES_IV_DATA(s)          (0x4030 + (s << 2))
+#define SSS_REG_AES_CNT_DATA(s)         (0x4040 + (s << 2))
+#define SSS_REG_AES_KEY_DATA(s)         (0x4080 + (s << 2))
+
+#define SSS_REG(dev, reg)               ((dev)->ioaddr + (SSS_REG_##reg))
+#define SSS_READ(dev, reg)              __raw_readl(SSS_REG(dev, reg))
+#define SSS_WRITE(dev, reg, val)        __raw_writel((val), SSS_REG(dev, reg))
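Aside (not part of the patch): the accessor macros expand against the register
offsets above, for example:

    SSS_WRITE(dev, FCINTENSET, SSS_FCINTENSET_BRDMAINTENSET |
                               SSS_FCINTENSET_BTDMAINTENSET);
    /* expands to __raw_writel(val, dev->ioaddr + SSS_REG_FCINTENSET) */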
+
+/* HW engine modes */
+#define FLAGS_AES_DECRYPT               _BIT(0)
+#define FLAGS_AES_MODE_MASK             _SBF(1, 0x03)
+#define FLAGS_AES_CBC                   _SBF(1, 0x01)
+#define FLAGS_AES_CTR                   _SBF(1, 0x02)
+
+#define AES_KEY_LEN         16
+#define CRYPTO_QUEUE_LEN    1
+
+struct s5p_aes_reqctx {
+       unsigned long mode;
+};
+
+struct s5p_aes_ctx {
+       struct s5p_aes_dev         *dev;
+
+       uint8_t                     aes_key[AES_MAX_KEY_SIZE];
+       uint8_t                     nonce[CTR_RFC3686_NONCE_SIZE];
+       int                         keylen;
+};
+
+struct s5p_aes_dev {
+       struct device              *dev;
+       struct clk                 *clk;
+       void __iomem               *ioaddr;
+       int                         irq_hash;
+       int                         irq_fc;
+
+       struct ablkcipher_request  *req;
+       struct s5p_aes_ctx         *ctx;
+       struct scatterlist         *sg_src;
+       struct scatterlist         *sg_dst;
+
+       struct tasklet_struct       tasklet;
+       struct crypto_queue         queue;
+       bool                        busy;
+       spinlock_t                  lock;
+};
+
+static struct s5p_aes_dev *s5p_dev;
+
+static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
+{
+       SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
+       SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
+}
+
+static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
+{
+       SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
+       SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
+}
+
+static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+{
+       /* the caller is expected to hold dev->lock */
+       dev->req->base.complete(&dev->req->base, err);
+       dev->busy = false;
+}
+
+static void s5p_unset_outdata(struct s5p_aes_dev *dev)
+{
+       dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
+}
+
+static void s5p_unset_indata(struct s5p_aes_dev *dev)
+{
+       dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
+}
+
+static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
+{
+       int err;
+
+       if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
+               err = -EINVAL;
+               goto exit;
+       }
+       if (!sg_dma_len(sg)) {
+               err = -EINVAL;
+               goto exit;
+       }
+
+       err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
+       if (!err) {
+               err = -ENOMEM;
+               goto exit;
+       }
+
+       dev->sg_dst = sg;
+       err = 0;
+
+ exit:
+       return err;
+}
+
+static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
+{
+       int err;
+
+       if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
+               err = -EINVAL;
+               goto exit;
+       }
+       if (!sg_dma_len(sg)) {
+               err = -EINVAL;
+               goto exit;
+       }
+
+       err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
+       if (!err) {
+               err = -ENOMEM;
+               goto exit;
+       }
+
+       dev->sg_src = sg;
+       err = 0;
+
+ exit:
+       return err;
+}
+
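+/*
+ * Output (transmit) DMA completed: unmap the finished dst entry and
+ * program the next scatterlist entry, or complete the request if it
+ * was the last one.
+ */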
+static void s5p_aes_tx(struct s5p_aes_dev *dev)
+{
+       int err = 0;
+
+       s5p_unset_outdata(dev);
+
+       if (!sg_is_last(dev->sg_dst)) {
+               err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
+               if (err) {
+                       s5p_aes_complete(dev, err);
+                       return;
+               }
+
+               s5p_set_dma_outdata(dev, dev->sg_dst);
+       } else {
+               s5p_aes_complete(dev, err);
+       }
+}
+
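+/*
+ * Input (receive) DMA completed: unmap the consumed src entry and
+ * feed the next scatterlist entry to the engine, if any.
+ */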
+static void s5p_aes_rx(struct s5p_aes_dev *dev)
+{
+       int err;
+
+       s5p_unset_indata(dev);
+
+       if (!sg_is_last(dev->sg_src)) {
+               err = s5p_set_indata(dev, sg_next(dev->sg_src));
+               if (err) {
+                       s5p_aes_complete(dev, err);
+                       return;
+               }
+
+               s5p_set_dma_indata(dev, dev->sg_src);
+       }
+}
+
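+/*
+ * Both the "hash" and "feed control" IRQ lines land here; only feed
+ * control (DMA completion) events are handled, the hash interrupt is
+ * requested but not acted upon.
+ */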
+static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
+{
+       struct platform_device *pdev = dev_id;
+       struct s5p_aes_dev     *dev  = platform_get_drvdata(pdev);
+       uint32_t                status;
+       unsigned long           flags;
+
+       spin_lock_irqsave(&dev->lock, flags);
+
+       if (irq == dev->irq_fc) {
+               status = SSS_READ(dev, FCINTSTAT);
+               if (status & SSS_FCINTSTAT_BRDMAINT)
+                       s5p_aes_rx(dev);
+               if (status & SSS_FCINTSTAT_BTDMAINT)
+                       s5p_aes_tx(dev);
+
+               SSS_WRITE(dev, FCINTPEND, status);
+       }
+
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+static void s5p_set_aes(struct s5p_aes_dev *dev,
+                       uint8_t *key, uint8_t *iv, unsigned int keylen)
+{
+       void __iomem *keystart;
+
+       memcpy(dev->ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
+
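+       /*
+        * The key is laid out to end at the last key register word:
+        * a 256-bit key fills words 0..7, a 192-bit key words 2..7
+        * and a 128-bit key words 4..7.
+        */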
+       if (keylen == AES_KEYSIZE_256)
+               keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(0);
+       else if (keylen == AES_KEYSIZE_192)
+               keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(2);
+       else
+               keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(4);
+
+       memcpy(keystart, key, keylen);
+}
+
+static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
+{
+       struct ablkcipher_request  *req = dev->req;
+
+       uint32_t                    aes_control;
+       int                         err;
+       unsigned long               flags;
+
+       aes_control = SSS_AES_KEY_CHANGE_MODE;
+       if (mode & FLAGS_AES_DECRYPT)
+               aes_control |= SSS_AES_MODE_DECRYPT;
+
+       if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
+               aes_control |= SSS_AES_CHAIN_MODE_CBC;
+       else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
+               aes_control |= SSS_AES_CHAIN_MODE_CTR;
+
+       if (dev->ctx->keylen == AES_KEYSIZE_192)
+               aes_control |= SSS_AES_KEY_SIZE_192;
+       else if (dev->ctx->keylen == AES_KEYSIZE_256)
+               aes_control |= SSS_AES_KEY_SIZE_256;
+
+       aes_control |= SSS_AES_FIFO_MODE;
+
+       /* alternatively, byte swapping could be done on the DMA side */
+       aes_control |= SSS_AES_BYTESWAP_DI
+                   |  SSS_AES_BYTESWAP_DO
+                   |  SSS_AES_BYTESWAP_IV
+                   |  SSS_AES_BYTESWAP_KEY
+                   |  SSS_AES_BYTESWAP_CNT;
+
+       spin_lock_irqsave(&dev->lock, flags);
+
+       SSS_WRITE(dev, FCINTENCLR,
+                 SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
+       SSS_WRITE(dev, FCFIFOCTRL, 0x00);
+
+       err = s5p_set_indata(dev, req->src);
+       if (err)
+               goto indata_error;
+
+       err = s5p_set_outdata(dev, req->dst);
+       if (err)
+               goto outdata_error;
+
+       SSS_WRITE(dev, AES_CONTROL, aes_control);
+       s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
+
+       s5p_set_dma_indata(dev,  req->src);
+       s5p_set_dma_outdata(dev, req->dst);
+
+       SSS_WRITE(dev, FCINTENSET,
+                 SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);
+
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       return;
+
+ outdata_error:
+       s5p_unset_indata(dev);
+
+ indata_error:
+       s5p_aes_complete(dev, err);
+       spin_unlock_irqrestore(&dev->lock, flags);
+}
+
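+/*
+ * Tasklet: dequeue the next request (notifying any backlog) and start
+ * the hardware on it.
+ */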
+static void s5p_tasklet_cb(unsigned long data)
+{
+       struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
+       struct crypto_async_request *async_req, *backlog;
+       struct s5p_aes_reqctx *reqctx;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       backlog   = crypto_get_backlog(&dev->queue);
+       async_req = crypto_dequeue_request(&dev->queue);
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       if (!async_req)
+               return;
+
+       if (backlog)
+               backlog->complete(backlog, -EINPROGRESS);
+
+       dev->req = ablkcipher_request_cast(async_req);
+       dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
+       reqctx   = ablkcipher_request_ctx(dev->req);
+
+       s5p_aes_crypt_start(dev, reqctx->mode);
+}
+
+static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
+                             struct ablkcipher_request *req)
+{
+       unsigned long flags;
+       int err;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       if (dev->busy) {
+               err = -EAGAIN;
+               spin_unlock_irqrestore(&dev->lock, flags);
+               goto exit;
+       }
+       dev->busy = true;
+
+       err = ablkcipher_enqueue_request(&dev->queue, req);
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       tasklet_schedule(&dev->tasklet);
+
+ exit:
+       return err;
+}
+
+static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+       struct crypto_ablkcipher   *tfm    = crypto_ablkcipher_reqtfm(req);
+       struct s5p_aes_ctx         *ctx    = crypto_ablkcipher_ctx(tfm);
+       struct s5p_aes_reqctx      *reqctx = ablkcipher_request_ctx(req);
+       struct s5p_aes_dev         *dev    = ctx->dev;
+
+       if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+               pr_err("request size is not exact amount of AES blocks\n");
+               return -EINVAL;
+       }
+
+       reqctx->mode = mode;
+
+       return s5p_aes_handle_req(dev, req);
+}
+
+static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
+                         const uint8_t *key, unsigned int keylen)
+{
+       struct crypto_tfm  *tfm = crypto_ablkcipher_tfm(cipher);
+       struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       if (keylen != AES_KEYSIZE_128 &&
+           keylen != AES_KEYSIZE_192 &&
+           keylen != AES_KEYSIZE_256)
+               return -EINVAL;
+
+       memcpy(ctx->aes_key, key, keylen);
+       ctx->keylen = keylen;
+
+       return 0;
+}
+
+static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+       return s5p_aes_crypt(req, 0);
+}
+
+static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+       return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
+}
+
+static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+       return s5p_aes_crypt(req, FLAGS_AES_CBC);
+}
+
+static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+       return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
+}
+
+static int s5p_aes_cra_init(struct crypto_tfm *tfm)
+{
+       struct s5p_aes_ctx  *ctx = crypto_tfm_ctx(tfm);
+
+       ctx->dev = s5p_dev;
+       tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);
+
+       return 0;
+}
+
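+/*
+ * Once registered, these transforms are reachable through the generic
+ * crypto API; a minimal sketch (standard crypto API calls, nothing
+ * driver specific):
+ *
+ *     struct crypto_ablkcipher *tfm;
+ *
+ *     tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
+ *     if (IS_ERR(tfm))
+ *             return PTR_ERR(tfm);
+ *     crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
+ */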
+static struct crypto_alg algs[] = {
+       {
+               .cra_name               = "ecb(aes)",
+               .cra_driver_name        = "ecb-aes-s5p",
+               .cra_priority           = 100,
+               .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
+                                         CRYPTO_ALG_ASYNC,
+               .cra_blocksize          = AES_BLOCK_SIZE,
+               .cra_ctxsize            = sizeof(struct s5p_aes_ctx),
+               .cra_alignmask          = 0x0f,
+               .cra_type               = &crypto_ablkcipher_type,
+               .cra_module             = THIS_MODULE,
+               .cra_init               = s5p_aes_cra_init,
+               .cra_u.ablkcipher = {
+                       .min_keysize    = AES_MIN_KEY_SIZE,
+                       .max_keysize    = AES_MAX_KEY_SIZE,
+                       .setkey         = s5p_aes_setkey,
+                       .encrypt        = s5p_aes_ecb_encrypt,
+                       .decrypt        = s5p_aes_ecb_decrypt,
+               }
+       },
+       {
+               .cra_name               = "cbc(aes)",
+               .cra_driver_name        = "cbc-aes-s5p",
+               .cra_priority           = 100,
+               .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
+                                         CRYPTO_ALG_ASYNC,
+               .cra_blocksize          = AES_BLOCK_SIZE,
+               .cra_ctxsize            = sizeof(struct s5p_aes_ctx),
+               .cra_alignmask          = 0x0f,
+               .cra_type               = &crypto_ablkcipher_type,
+               .cra_module             = THIS_MODULE,
+               .cra_init               = s5p_aes_cra_init,
+               .cra_u.ablkcipher = {
+                       .min_keysize    = AES_MIN_KEY_SIZE,
+                       .max_keysize    = AES_MAX_KEY_SIZE,
+                       .ivsize         = AES_BLOCK_SIZE,
+                       .setkey         = s5p_aes_setkey,
+                       .encrypt        = s5p_aes_cbc_encrypt,
+                       .decrypt        = s5p_aes_cbc_decrypt,
+               }
+       },
+};
+
+static int s5p_aes_probe(struct platform_device *pdev)
+{
+       int                 i, j, err = -ENODEV;
+       struct s5p_aes_dev *pdata;
+       struct device      *dev = &pdev->dev;
+       struct resource    *res;
+
+       if (s5p_dev)
+               return -EEXIST;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENODEV;
+
+       pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return -ENOMEM;
+
+       if (!devm_request_mem_region(dev, res->start,
+                                    resource_size(res), pdev->name))
+               return -EBUSY;
+
+       pdata->clk = clk_get(dev, "secss");
+       if (IS_ERR(pdata->clk)) {
+               dev_err(dev, "failed to find secss clock source\n");
+               return -ENOENT;
+       }
+
+       clk_enable(pdata->clk);
+
+       spin_lock_init(&pdata->lock);
+       pdata->ioaddr = devm_ioremap(dev, res->start,
+                                    resource_size(res));
+       if (!pdata->ioaddr) {
+               err = -ENOMEM;
+               goto err_irq;
+       }
+
+       pdata->irq_hash = platform_get_irq_byname(pdev, "hash");
+       if (pdata->irq_hash < 0) {
+               err = pdata->irq_hash;
+               dev_warn(dev, "hash interrupt is not available.\n");
+               goto err_irq;
+       }
+       err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
+                              IRQF_SHARED, pdev->name, pdev);
+       if (err < 0) {
+               dev_warn(dev, "hash interrupt is not available.\n");
+               goto err_irq;
+       }
+
+       pdata->irq_fc = platform_get_irq_byname(pdev, "feed control");
+       if (pdata->irq_fc < 0) {
+               err = pdata->irq_fc;
+               dev_warn(dev, "feed control interrupt is not available.\n");
+               goto err_irq;
+       }
+       err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
+                              IRQF_SHARED, pdev->name, pdev);
+       if (err < 0) {
+               dev_warn(dev, "feed control interrupt is not available.\n");
+               goto err_irq;
+       }
+
+       pdata->dev = dev;
+       platform_set_drvdata(pdev, pdata);
+       s5p_dev = pdata;
+
+       tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
+       crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
+
+       for (i = 0; i < ARRAY_SIZE(algs); i++) {
+               INIT_LIST_HEAD(&algs[i].cra_list);
+               err = crypto_register_alg(&algs[i]);
+               if (err)
+                       goto err_algs;
+       }
+
+       pr_info("s5p-sss driver registered\n");
+
+       return 0;
+
+ err_algs:
+       dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);
+
+       for (j = 0; j < i; j++)
+               crypto_unregister_alg(&algs[j]);
+
+       tasklet_kill(&pdata->tasklet);
+
+ err_irq:
+       clk_disable(pdata->clk);
+       clk_put(pdata->clk);
+
+       s5p_dev = NULL;
+       platform_set_drvdata(pdev, NULL);
+
+       return err;
+}
+
+static int s5p_aes_remove(struct platform_device *pdev)
+{
+       struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
+       int i;
+
+       if (!pdata)
+               return -ENODEV;
+
+       for (i = 0; i < ARRAY_SIZE(algs); i++)
+               crypto_unregister_alg(&algs[i]);
+
+       tasklet_kill(&pdata->tasklet);
+
+       clk_disable(pdata->clk);
+       clk_put(pdata->clk);
+
+       s5p_dev = NULL;
+       platform_set_drvdata(pdev, NULL);
+
+       return 0;
+}
+
+static struct platform_driver s5p_aes_crypto = {
+       .probe  = s5p_aes_probe,
+       .remove = s5p_aes_remove,
+       .driver = {
+               .owner  = THIS_MODULE,
+               .name   = "s5p-secss",
+       },
+};
+
+static int __init s5p_aes_mod_init(void)
+{
+       return platform_driver_register(&s5p_aes_crypto);
+}
+
+static void __exit s5p_aes_mod_exit(void)
+{
+       platform_driver_unregister(&s5p_aes_crypto);
+}
+
+module_init(s5p_aes_mod_init);
+module_exit(s5p_aes_mod_exit);
+
+MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
diff --git a/fs/namei.c b/fs/namei.c
index e3c4f11..6ff858c 100644 (file)
@@ -1378,12 +1378,12 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
 {
        int res;
 
-       BUG_ON(nd->depth >= MAX_NESTED_LINKS);
        if (unlikely(current->link_count >= MAX_NESTED_LINKS)) {
                path_put_conditional(path, nd);
                path_put(&nd->path);
                return -ELOOP;
        }
+       BUG_ON(nd->depth >= MAX_NESTED_LINKS);
 
        nd->depth++;
        current->link_count++;
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index ce4f624..a29d5cc 100644 (file)
@@ -1335,6 +1335,11 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
 
        list_add_tail (&f->list, frags);
 found:
+       if (rec >= f->num) {
+               ldm_error("REC value (%d) exceeds NUM value (%d)", rec, f->num);
+               return false;
+       }
+
        if (f->map & (1 << rec)) {
                ldm_error ("Duplicate VBLK, part %d.", rec);
                f->map &= 0x7F;                 /* Mark the group as broken */
diff --git a/mm/shmem.c b/mm/shmem.c
index dfc7069..ba4ad28 100644 (file)
@@ -916,11 +916,12 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        offset = shmem_find_swp(entry, ptr, ptr+size);
+                       shmem_swp_unmap(ptr);
                        if (offset >= 0) {
                                shmem_dir_unmap(dir);
+                               ptr = shmem_swp_map(subdir);
                                goto found;
                        }
-                       shmem_swp_unmap(ptr);
                }
        }
 lost1: