Diffstat (limited to 'drivers/crypto/caam')
-rw-r--r--  drivers/crypto/caam/Kconfig         121
-rw-r--r--  drivers/crypto/caam/Makefile         15
-rw-r--r--  drivers/crypto/caam/caamalg.c      4312
-rw-r--r--  drivers/crypto/caam/caamhash.c     1963
-rw-r--r--  drivers/crypto/caam/caamrng.c       362
-rw-r--r--  drivers/crypto/caam/compat.h         41
-rw-r--r--  drivers/crypto/caam/ctrl.c          729
-rw-r--r--  drivers/crypto/caam/ctrl.h           13
-rw-r--r--  drivers/crypto/caam/desc.h         1621
-rw-r--r--  drivers/crypto/caam/desc_constr.h   390
-rw-r--r--  drivers/crypto/caam/error.c         253
-rw-r--r--  drivers/crypto/caam/error.h          11
-rw-r--r--  drivers/crypto/caam/intern.h        113
-rw-r--r--  drivers/crypto/caam/jr.c            550
-rw-r--r--  drivers/crypto/caam/jr.h             18
-rw-r--r--  drivers/crypto/caam/key_gen.c       123
-rw-r--r--  drivers/crypto/caam/key_gen.h        17
-rw-r--r--  drivers/crypto/caam/pdb.h           402
-rw-r--r--  drivers/crypto/caam/regs.h          780
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h    118
20 files changed, 11952 insertions, 0 deletions
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
new file mode 100644
index 000000000..e7555ff4c
--- /dev/null
+++ b/drivers/crypto/caam/Kconfig
@@ -0,0 +1,121 @@
+config CRYPTO_DEV_FSL_CAAM
+ tristate "Freescale CAAM-Multicore driver backend"
+ depends on FSL_SOC
+ help
+ Enables the driver module for Freescale's Cryptographic Accelerator
+ and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
+ This module creates job ring devices, and configures h/w
+ to operate as a DPAA component automatically, depending
+ on h/w feature availability.
+
+ To compile this driver as a module, choose M here: the module
+ will be called caam.
+
+config CRYPTO_DEV_FSL_CAAM_JR
+ tristate "Freescale CAAM Job Ring driver backend"
+ depends on CRYPTO_DEV_FSL_CAAM
+ default y
+ help
+ Enables the driver module for Job Rings which are part of
+ Freescale's Cryptographic Accelerator
+ and Assurance Module (CAAM). This module adds a job ring operation
+ interface.
+
+ To compile this driver as a module, choose M here: the module
+ will be called caam_jr.
+
+config CRYPTO_DEV_FSL_CAAM_RINGSIZE
+ int "Job Ring size"
+ depends on CRYPTO_DEV_FSL_CAAM_JR
+ range 2 9
+ default "9"
+ help
+ Select size of Job Rings as a power of 2, within the
+ range 2-9 (ring size 4-512).
+ Examples:
+ 2 => 4
+ 3 => 8
+ 4 => 16
+ 5 => 32
+ 6 => 64
+ 7 => 128
+ 8 => 256
+ 9 => 512
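As a rough illustration of the power-of-2 mapping above, the standalone sketch below (not part of this patch; the driver's actual definition is expected to live in intern.h) shows how the selected value turns into a ring depth:

#include <stdio.h>

/* Stand-in for the Kconfig selection; 9 is the default above. */
#define CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE 9

/* Assumed mapping: ring depth is 2 raised to the configured value. */
#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)

int main(void)
{
	printf("job ring depth = %d entries\n", JOBR_DEPTH); /* 512 for the default */
	return 0;
}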
+
+config CRYPTO_DEV_FSL_CAAM_INTC
+ bool "Job Ring interrupt coalescing"
+ depends on CRYPTO_DEV_FSL_CAAM_JR
+ default n
+ help
+ Enable the Job Ring's interrupt coalescing feature.
+
+ Note: the driver already provides adequate
+ interrupt coalescing in software.
+
+config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
+ int "Job Ring interrupt coalescing count threshold"
+ depends on CRYPTO_DEV_FSL_CAAM_INTC
+ range 1 255
+ default 255
+ help
+ Select number of descriptor completions to queue before
+ raising an interrupt, in the range 1-255. Note that a selection
+ of 1 functionally defeats the coalescing feature, and a selection
+ equal to or greater than the job ring size will force timeouts.
+
+config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
+ int "Job Ring interrupt coalescing timer threshold"
+ depends on CRYPTO_DEV_FSL_CAAM_INTC
+ range 1 65535
+ default 2048
+ help
+ Select number of bus clocks/64 to timeout in the case that one or
+ more descriptor completions are queued without reaching the count
+ threshold. Range is 1-65535.
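To make the timer units concrete, here is a small standalone sketch (the 400 MHz bus clock is an assumption; the real CCB clock rate is platform dependent) converting the default threshold into an approximate timeout:

#include <stdio.h>

int main(void)
{
	unsigned long bus_hz = 400000000UL; /* assumed 400 MHz bus clock */
	unsigned int thld = 2048;           /* default timer threshold above */

	/* The threshold counts units of 64 bus clocks. */
	double timeout_us = (double)thld * 64.0 / (double)bus_hz * 1e6;

	printf("coalescing timeout ~= %.1f us\n", timeout_us); /* ~327.7 us */
	return 0;
}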
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+ tristate "Register algorithm implementations with the Crypto API"
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ default y
+ select CRYPTO_ALGAPI
+ select CRYPTO_AUTHENC
+ help
+ Selecting this will offload crypto for users of the
+ scatterlist crypto API (such as the linux native IPSec
+ stack) to the SEC4 via job ring.
+
+ To compile this as a module, choose M here: the module
+ will be called caamalg.
+
+config CRYPTO_DEV_FSL_CAAM_AHASH_API
+ tristate "Register hash algorithm implementations with Crypto API"
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ default y
+ select CRYPTO_HASH
+ help
+ Selecting this will offload ahash for users of the
+ scatterlist crypto API to the SEC4 via job ring.
+
+ To compile this as a module, choose M here: the module
+ will be called caamhash.
+
+config CRYPTO_DEV_FSL_CAAM_RNG_API
+ tristate "Register caam device for hwrng API"
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ default y
+ select CRYPTO_RNG
+ select HW_RANDOM
+ help
+ Selecting this will register the SEC4 hardware rng to
+ the hw_random API for supplying the kernel entropy pool.
+
+ To compile this as a module, choose M here: the module
+ will be called caamrng.
+
+config CRYPTO_DEV_FSL_CAAM_DEBUG
+ bool "Enable debug output in CAAM driver"
+ depends on CRYPTO_DEV_FSL_CAAM
+ default n
+ help
+ Selecting this will enable printing of various debug
+ information in the CAAM driver.
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
new file mode 100644
index 000000000..550758a33
--- /dev/null
+++ b/drivers/crypto/caam/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for the CAAM backend and dependent components
+#
+ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
+ EXTRA_CFLAGS := -DDEBUG
+endif
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+
+caam-objs := ctrl.o
+caam_jr-objs := jr.o key_gen.o error.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
new file mode 100644
index 000000000..29071a156
--- /dev/null
+++ b/drivers/crypto/caam/caamalg.c
@@ -0,0 +1,4312 @@
+/*
+ * caam - Freescale FSL CAAM support for crypto API
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Based on talitos crypto API driver.
+ *
+ * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
+ *
+ * --------------- ---------------
+ * | JobDesc #1 |-------------------->| ShareDesc |
+ * | *(packet 1) | | (PDB) |
+ * --------------- |------------->| (hashKey) |
+ * . | | (cipherKey) |
+ * . | |-------->| (operation) |
+ * --------------- | | ---------------
+ * | JobDesc #2 |------| |
+ * | *(packet 2) | |
+ * --------------- |
+ * . |
+ * . |
+ * --------------- |
+ * | JobDesc #3 |------------
+ * | *(packet 3) |
+ * ---------------
+ *
+ * The SharedDesc never changes for a connection unless rekeyed, but
+ * each packet will likely be in a different place. So all we need
+ * to know to process the packet is where the input is, where the
+ * output goes, and what context we want to process with. Context is
+ * in the SharedDesc, packet references in the JobDesc.
+ *
+ * So, a job desc looks like:
+ *
+ * ---------------------
+ * | Header |
+ * | ShareDesc Pointer |
+ * | SEQ_OUT_PTR |
+ * | (output buffer) |
+ * | (output length) |
+ * | SEQ_IN_PTR |
+ * | (input buffer) |
+ * | (input length) |
+ * ---------------------
+ */
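The job descriptor layout sketched above can also be pictured as a struct. The type below is purely conceptual and is not part of this patch: the driver builds descriptors word by word with the append_*() helpers from desc_constr.h, and the field names here are invented for illustration.

#include <linux/types.h>

/* Conceptual sketch of the job descriptor words listed above. */
struct caam_job_desc_sketch {
	u32        header;       /* HDR command: descriptor length, share mode */
	dma_addr_t shared_desc;  /* pointer to the per-session ShareDesc */
	u32        seq_out_ptr;  /* SEQ_OUT_PTR command word */
	dma_addr_t out_buf;      /* output buffer address */
	u32        out_len;      /* output length */
	u32        seq_in_ptr;   /* SEQ_IN_PTR command word */
	dma_addr_t in_buf;       /* input buffer address */
	u32        in_len;       /* input length */
};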
+
+#include "compat.h"
+
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "jr.h"
+#include "error.h"
+#include "sg_sw_sec4.h"
+#include "key_gen.h"
+
+/*
+ * crypto alg
+ */
+#define CAAM_CRA_PRIORITY 3000
+/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
+#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
+ CTR_RFC3686_NONCE_SIZE + \
+ SHA512_DIGEST_SIZE * 2)
+/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+#define CAAM_MAX_IV_LENGTH 16
+
+/* length of descriptors text */
+#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+
+/* Note: Nonce is counted in enckeylen */
+#define DESC_AEAD_CTR_RFC3686_LEN (6 * CAAM_CMD_SZ)
+
+#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
+
+#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
+#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 23 * CAAM_CMD_SZ)
+#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 19 * CAAM_CMD_SZ)
+
+#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ)
+#define DESC_RFC4106_GIVENC_LEN (DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ)
+
+#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 25 * CAAM_CMD_SZ)
+#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 27 * CAAM_CMD_SZ)
+#define DESC_RFC4543_GIVENC_LEN (DESC_RFC4543_BASE + 30 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
+ 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
+ 15 * CAAM_CMD_SZ)
+
+#define DESC_MAX_USED_BYTES (DESC_RFC4543_GIVENC_LEN + \
+ CAAM_MAX_KEY_SIZE)
+#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
+
+#ifdef DEBUG
+/* for print_hex_dumps with line references */
+#define debug(format, arg...) printk(format, arg)
+#else
+#define debug(format, arg...)
+#endif
+static struct list_head alg_list;
+
+/* Set DK bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+ u32 *jump_cmd, *uncond_jump_cmd;
+
+ /* DK bit is valid only for AES */
+ if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ return;
+ }
+
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+ set_jump_tgt_here(desc, uncond_jump_cmd);
+}
+
+/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+ KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+}
+
+/*
+ * For aead encrypt and decrypt, read iv for both classes
+ */
+static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
+{
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ivoffset << LDST_OFFSET_SHIFT));
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+ (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
+}
+
+/*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+ KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/*
+ * If all data, including src (with assoc and iv) or dst (with iv only) are
+ * contiguous
+ */
+#define GIV_SRC_CONTIG 1
+#define GIV_DST_CONTIG (1 << 1)
+
+/*
+ * per-session context
+ */
+struct caam_ctx {
+ struct device *jrdev;
+ u32 sh_desc_enc[DESC_MAX_USED_LEN];
+ u32 sh_desc_dec[DESC_MAX_USED_LEN];
+ u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+ dma_addr_t sh_desc_enc_dma;
+ dma_addr_t sh_desc_dec_dma;
+ dma_addr_t sh_desc_givenc_dma;
+ u32 class1_alg_type;
+ u32 class2_alg_type;
+ u32 alg_op;
+ u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t key_dma;
+ unsigned int enckeylen;
+ unsigned int split_key_len;
+ unsigned int split_key_pad_len;
+ unsigned int authsize;
+};
+
+static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
+ int keys_fit_inline, bool is_rfc3686)
+{
+ u32 *nonce;
+ unsigned int enckeylen = ctx->enckeylen;
+
+ /*
+ * RFC3686 specific:
+ * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
+ * | enckeylen = encryption key size + nonce size
+ */
+ if (is_rfc3686)
+ enckeylen -= CTR_RFC3686_NONCE_SIZE;
+
+ if (keys_fit_inline) {
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key_as_imm(desc, (void *)ctx->key +
+ ctx->split_key_pad_len, enckeylen,
+ enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ } else {
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
+ enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ }
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686) {
+ nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
+ enckeylen);
+ append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc,
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+}
+
+static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
+ int keys_fit_inline, bool is_rfc3686)
+{
+ u32 *key_jump_cmd;
+
+ /* Note: Context registers are saved. */
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+}
+
+static int aead_null_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline = false;
+ u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
+ u32 *desc;
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ /* aead_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /*
+ * NULL encryption; IV is zero
+ * assoclen = (assoclen + cryptlen) - cryptlen
+ */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ /* Prepare to read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null enc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_dec;
+
+ /* aead_decrypt shared descriptor */
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqinlen - ivsize - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+ ctx->authsize + tfm->ivsize);
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ /* Prepare to read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH2 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /*
+ * Insert a NOP here, since we need at least 4 instructions between
+ * code patching the descriptor buffer and the location being patched.
+ */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null dec shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
+ const char *alg_name = crypto_tfm_alg_name(ctfm);
+ struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline;
+ u32 geniv, moveiv;
+ u32 ctx1_iv_off = 0;
+ u32 *desc;
+ const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = (ctr_mode &&
+ (strstr(alg_name, "rfc3686") != NULL));
+
+ if (!ctx->authsize)
+ return 0;
+
+ /* NULL encryption / decryption */
+ if (!ctx->enckeylen)
+ return aead_null_set_sh_desc(aead);
+
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128 bits (16 bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ */
+ if (is_rfc3686)
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ /* aead_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen + cryptlen = seqinlen - ivsize */
+ append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+ aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ /* aead_decrypt shared descriptor */
+ desc = ctx->sh_desc_dec;
+
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqinlen - ivsize - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+ ctx->authsize + tfm->ivsize);
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Choose operation */
+ if (ctr_mode)
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, ctx->class1_alg_type);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ /* aead_givencrypt shared descriptor */
+ desc = ctx->sh_desc_givenc;
+
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Copy IV to class 1 context */
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Return to encryption */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* ivsize + cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen = seqinlen - (ivsize + cryptlen) */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ /* Copy iv from outfifo to class 2 fifo */
+ moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Will write ivsize + cryptlen */
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* No need to reload iv */
+ append_seq_fifo_load(desc, tfm->ivsize,
+ FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
+
+static int aead_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ aead_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline = false;
+ u32 *key_jump_cmd, *zero_payload_jump_cmd,
+ *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
+ u32 *desc;
+
+ if (!ctx->enckeylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * AES GCM encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_enc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if they are loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD | JUMP_COND_SELF);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen + cryptlen = seqinlen - ivsize */
+ append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ);
+
+ /* if cryptlen is ZERO jump to zero-payload commands */
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+ /* read IV */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* jump over the zero-payload commands */
+ append_jump(desc, JUMP_TEST_ALL | 7);
+
+ /* zero-payload commands */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* if assoclen is ZERO, jump to IV reading - IV is the only input data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+ /* read IV */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+
+ /* jump to ICV writing */
+ append_jump(desc, JUMP_TEST_ALL | 2);
+
+ /* read IV - it is the only input data */
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
+ FIFOLD_TYPE_LAST1);
+
+ /* write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_dec;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if they are loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD |
+ JUMP_COND_SELF);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqinlen - ivsize - icvsize */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+ ctx->authsize + tfm->ivsize);
+
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ);
+
+ /* read IV */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+ /* jump to zero-payload command if cryptlen is zero */
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+ /* if assoclen is ZERO, skip reading assoc data */
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /* store encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* jump over the zero-payload commands */
+ append_jump(desc, JUMP_TEST_ALL | 4);
+
+ /* zero-payload command */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* if assoclen is ZERO, jump to ICV reading */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+
+ /* read ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ gcm_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline = false;
+ u32 *key_jump_cmd, *move_cmd, *write_iv_cmd;
+ u32 *desc;
+ u32 geniv;
+
+ if (!ctx->enckeylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * RFC4106 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_enc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* assoclen + cryptlen = seqinlen - ivsize */
+ append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+
+ /* Read Salt */
+ append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
+ 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
+ /* Read AES-GCM-ESP IV */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Will read cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_dec;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqinlen - ivsize - icvsize */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+ ctx->authsize + tfm->ivsize);
+
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+ /* Will write cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read Salt */
+ append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
+ 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
+ /* Read AES-GCM-ESP IV */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Will read cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read encrypted data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ /* rfc4106_givencrypt shared descriptor */
+ desc = ctx->sh_desc_givenc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Copy generated IV to OFIFO */
+ write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* ivsize + cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen = seqinlen - (ivsize + cryptlen) */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* Will write ivsize + cryptlen */
+ append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);
+
+ /* Read Salt and generated IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV |
+ FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12);
+ /* Append Salt */
+ append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+ set_move_tgt_here(desc, move_cmd);
+ set_move_tgt_here(desc, write_iv_cmd);
+ /* Blank commands. Will be overwritten by generated IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* No need to reload iv */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Store generated IV and encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4106 givenc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4106_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline = false;
+ u32 *key_jump_cmd, *write_iv_cmd, *write_aad_cmd;
+ u32 *read_move_cmd, *write_move_cmd;
+ u32 *desc;
+ u32 geniv;
+
+ if (!ctx->enckeylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * RFC4543 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_RFC4543_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_enc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Load AES-GMAC ESP IV into Math1 register */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
+ LDST_CLASS_DECO | tfm->ivsize);
+
+ /* Wait for the DMA transaction to finish */
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
+ (1 << JUMP_OFFSET_SHIFT));
+
+ /* Overwrite blank immediate AES-GMAC ESP IV data */
+ write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Overwrite blank immediate AAD data */
+ write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen = (seqinlen - ivsize) - cryptlen */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* Read Salt and AES-GMAC ESP IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
+ /* Append Salt */
+ append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+ set_move_tgt_here(desc, write_iv_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD);
+
+ /* Will read cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Will write cryptlen bytes */
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Authenticate AES-GMAC ESP IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_AAD | tfm->ivsize);
+ set_move_tgt_here(desc, write_aad_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_RFC4543_DEC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_dec;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* Load AES-GMAC ESP IV into Math1 register */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
+ LDST_CLASS_DECO | tfm->ivsize);
+
+ /* Wait for the DMA transaction to finish */
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
+ (1 << JUMP_OFFSET_SHIFT));
+
+ /* assoclen + cryptlen = (seqinlen - ivsize) - icvsize */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize);
+
+ /* Overwrite blank immediate AES-GMAC ESP IV data */
+ write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Overwrite blank immediate AAD data */
+ write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Read Salt and AES-GMAC ESP IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
+ /* Append Salt */
+ append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+ set_move_tgt_here(desc, write_iv_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD);
+
+ /* Will read cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /* Will write cryptlen bytes */
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /* Authenticate AES-GMAC ESP IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_AAD | tfm->ivsize);
+ set_move_tgt_here(desc, write_aad_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* In-snoop cryptlen data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_RFC4543_GIVENC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ /* rfc4543_givencrypt shared descriptor */
+ desc = ctx->sh_desc_givenc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move generated IV to Math1 register */
+ append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Overwrite blank immediate AES-GMAC IV data */
+ write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Overwrite blank immediate AAD data */
+ write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Copy generated IV to OFIFO */
+ append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* ivsize + cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen = seqinlen - (ivsize + cryptlen) */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* Will write ivsize + cryptlen */
+ append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Read Salt and AES-GMAC generated IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
+ /* Append Salt */
+ append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+ set_move_tgt_here(desc, write_iv_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC generated IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* No need to reload iv */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD);
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Authenticate AES-GMAC IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_AAD | tfm->ivsize);
+ set_move_tgt_here(desc, write_aad_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4543 givenc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
+
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4543_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
+ u32 authkeylen)
+{
+ return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
+ ctx->split_key_pad_len, key_in, authkeylen,
+ ctx->alg_op);
+}
+
+static int aead_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ struct crypto_authenc_keys keys;
+ int ret = 0;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
+ /* Pick class 2 key length from algorithm submask */
+ ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT] * 2;
+ ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
+
+ if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+#ifdef DEBUG
+ printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
+ printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
+ ctx->split_key_len, ctx->split_key_pad_len);
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
+ if (ret)
+ goto badkey;
+
+ /* append encryption key to auth split key */
+ memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
+
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+ keys.enckeylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->split_key_pad_len + keys.enckeylen, 1);
+#endif
+
+ ctx->enckeylen = keys.enckeylen;
+
+ ret = aead_set_sh_desc(aead);
+ if (ret) {
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
+ keys.enckeylen, DMA_TO_DEVICE);
+ }
+
+ return ret;
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+ ctx->enckeylen = keylen;
+
+ ret = gcm_set_sh_desc(aead);
+ if (ret) {
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ DMA_TO_DEVICE);
+ }
+
+ return ret;
+}
+
+static int rfc4106_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->enckeylen = keylen - 4;
+
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+
+ ret = rfc4106_set_sh_desc(aead);
+ if (ret) {
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ DMA_TO_DEVICE);
+ }
+
+ return ret;
+}
+
+static int rfc4543_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->enckeylen = keylen - 4;
+
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+
+ ret = rfc4543_set_sh_desc(aead);
+ if (ret) {
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ DMA_TO_DEVICE);
+ }
+
+ return ret;
+}
+
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
+ u32 *key_jump_cmd;
+ u32 *desc;
+ u32 *nonce;
+ u32 geniv;
+ u32 ctx1_iv_off = 0;
+ const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = (ctr_mode &&
+ (strstr(alg_name, "rfc3686") != NULL));
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+ /*
+ * AES-CTR needs to load the IV into the CONTEXT1 reg
+ * at an offset of 128 bits (16 bytes):
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ * | *key = {KEY, NONCE}
+ */
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+ ctx->enckeylen = keylen;
+
+ /* ablkcipher_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
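+ /* keylen was trimmed above, so key + keylen points at the nonce */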
+ nonce = (u32 *)(key + keylen);
+ append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Load IV */
+ append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
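+ /* (RFC 3686 requires the initial block counter value to be 1) */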
+ if (is_rfc3686)
+ append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Load operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ /* ablkcipher_decrypt shared descriptor */
+ desc = ctx->sh_desc_dec;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ nonce = (u32 *)(key + keylen);
+ append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Load IV */
+ append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Choose operation */
+ if (ctr_mode)
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, ctx->class1_alg_type);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ /* ablkcipher_givencrypt shared descriptor */
+ desc = ctx->sh_desc_givenc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ /* Load Nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ nonce = (u32 *)(key + keylen);
+ append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_INFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (crt->ivsize << MOVE_LEN_SHIFT) |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Copy generated IV to memory */
+ append_seq_store(desc, crt->ivsize,
+ LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_u32(desc, (u32)1, LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ if (ctx1_iv_off)
+ append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
+ (1 << JUMP_OFFSET_SHIFT));
+
+ /* Load operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+ ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return ret;
+}
+
+/*
+ * aead_edesc - s/w-extended aead descriptor
+ * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
+ * @assoc_chained: if associated data is chained
+ * @src_nents: number of segments in input scatterlist
+ * @src_chained: if source is chained
+ * @dst_nents: number of segments in output scatterlist
+ * @dst_chained: if destination is chained
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ *           (variable length; must not exceed MAX_CAAM_DESCSIZE)
+ */
+struct aead_edesc {
+ int assoc_nents;
+ bool assoc_chained;
+ int src_nents;
+ bool src_chained;
+ int dst_nents;
+ bool dst_chained;
+ dma_addr_t iv_dma;
+ int sec4_sg_bytes;
+ dma_addr_t sec4_sg_dma;
+ struct sec4_sg_entry *sec4_sg;
+ u32 hw_desc[0];
+};
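+
+/*
+ * Each extended descriptor is allocated as one contiguous block: the
+ * struct itself, then the h/w job descriptor (hw_desc), then the sec4
+ * S/G link table (see the *_edesc_alloc() routines below).
+ */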
+
+/*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @src_chained: if source is chained
+ * @dst_nents: number of segments in output scatterlist
+ * @dst_chained: if destination is chained
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ *           (variable length; must not exceed MAX_CAAM_DESCSIZE)
+ */
+struct ablkcipher_edesc {
+ int src_nents;
+ bool src_chained;
+ int dst_nents;
+ bool dst_chained;
+ dma_addr_t iv_dma;
+ int sec4_sg_bytes;
+ dma_addr_t sec4_sg_dma;
+ struct sec4_sg_entry *sec4_sg;
+ u32 hw_desc[0];
+};
+
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+ struct scatterlist *dst, int src_nents,
+ bool src_chained, int dst_nents, bool dst_chained,
+ dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
+ int sec4_sg_bytes)
+{
+ if (dst != src) {
+ dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
+ src_chained);
+ dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
+ dst_chained);
+ } else {
+ dma_unmap_sg_chained(dev, src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
+ }
+
+ if (iv_dma)
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+ if (sec4_sg_bytes)
+ dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
+}
+
+static void aead_unmap(struct device *dev,
+ struct aead_edesc *edesc,
+ struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ivsize = crypto_aead_ivsize(aead);
+
+ dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
+ DMA_TO_DEVICE, edesc->assoc_chained);
+
+ caam_unmap(dev, req->src, req->dst,
+ edesc->src_nents, edesc->src_chained, edesc->dst_nents,
+ edesc->dst_chained, edesc->iv_dma, ivsize,
+ edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
+}
+
+static void ablkcipher_unmap(struct device *dev,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ caam_unmap(dev, req->src, req->dst,
+ edesc->src_nents, edesc->src_chained, edesc->dst_nents,
+ edesc->dst_chained, edesc->iv_dma, ivsize,
+ edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
+}
+
+static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct aead_request *req = context;
+ struct aead_edesc *edesc;
+#ifdef DEBUG
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct aead_edesc *)((char *)desc -
+ offsetof(struct aead_edesc, hw_desc));
+
+ if (err)
+ caam_jr_strstatus(jrdev, err);
+
+ aead_unmap(jrdev, edesc, req);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen, 1);
+ print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
+ edesc->src_nents ? 100 : ivsize, 1);
+ print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->cryptlen +
+ ctx->authsize + 4, 1);
+#endif
+
+ kfree(edesc);
+
+ aead_request_complete(req, err);
+}
+
+static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct aead_request *req = context;
+ struct aead_edesc *edesc;
+#ifdef DEBUG
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct aead_edesc *)((char *)desc -
+ offsetof(struct aead_edesc, hw_desc));
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
+ req->cryptlen - ctx->authsize, 1);
+#endif
+
+ if (err)
+ caam_jr_strstatus(jrdev, err);
+
+ aead_unmap(jrdev, edesc, req);
+
+ /* verify hw auth check passed; else return -EBADMSG */
+ if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
+ err = -EBADMSG;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4,
+ ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
+ sizeof(struct iphdr) + req->assoclen +
+ ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
+ ctx->authsize + 36, 1);
+ if (!err && edesc->sec4_sg_bytes) {
+ struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
+ print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
+ sg->length + ctx->authsize + 16, 1);
+ }
+#endif
+
+ kfree(edesc);
+
+ aead_request_complete(req, err);
+}
+
+static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ablkcipher_request *req = context;
+ struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ablkcipher_edesc *)((char *)desc -
+ offsetof(struct ablkcipher_edesc, hw_desc));
+
+ if (err)
+ caam_jr_strstatus(jrdev, err);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+
+ ablkcipher_request_complete(req, err);
+}
+
+static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ablkcipher_request *req = context;
+ struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ablkcipher_edesc *)((char *)desc -
+ offsetof(struct ablkcipher_edesc, hw_desc));
+ if (err)
+ caam_jr_strstatus(jrdev, err);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+
+ ablkcipher_request_complete(req, err);
+}
+
+/*
+ * Fill in aead job descriptor
+ */
+static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
+ struct aead_edesc *edesc,
+ struct aead_request *req,
+ bool all_contig, bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
+ int authsize = ctx->authsize;
+ u32 *desc = edesc->hw_desc;
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, sec4_sg_index = 0;
+ bool is_gcm = false;
+
+#ifdef DEBUG
+ debug("assoclen %d cryptlen %d authsize %d\n",
+ req->assoclen, req->cryptlen, authsize);
+ print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen, 1);
+ print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents ? 100 : ivsize, 1);
+ print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->cryptlen, 1);
+ print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
+ desc_bytes(sh_desc), 1);
+#endif
+
+ if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_AES) &&
+ ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+ is_gcm = true;
+
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
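+ /*
+ * HDR_REVERSE: run this job descriptor first, then the referenced
+ * shared descriptor (reverse execution order).
+ */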
+
+ if (all_contig) {
+ if (is_gcm)
+ src_dma = edesc->iv_dma;
+ else
+ src_dma = sg_dma_address(req->assoc);
+ in_options = 0;
+ } else {
+ src_dma = edesc->sec4_sg_dma;
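+ /* dst entries follow the assoc, IV and src entries in the table */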
+ sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
+ (edesc->src_nents ? : 1);
+ in_options = LDST_SGF;
+ }
+
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+ in_options);
+
+ if (likely(req->src == req->dst)) {
+ if (all_contig) {
+ dst_dma = sg_dma_address(req->src);
+ } else {
+ dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
+ ((edesc->assoc_nents ? : 1) + 1);
+ out_options = LDST_SGF;
+ }
+ } else {
+ if (!edesc->dst_nents) {
+ dst_dma = sg_dma_address(req->dst);
+ } else {
+ dst_dma = edesc->sec4_sg_dma +
+ sec4_sg_index *
+ sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
+ }
+ }
+ if (encrypt)
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
+ out_options);
+ else
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
+ out_options);
+}
+
+/*
+ * Fill in aead givencrypt job descriptor
+ */
+static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
+ struct aead_edesc *edesc,
+ struct aead_request *req,
+ int contig)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
+ int authsize = ctx->authsize;
+ u32 *desc = edesc->hw_desc;
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, sec4_sg_index = 0;
+ bool is_gcm = false;
+
+#ifdef DEBUG
+ debug("assoclen %d cryptlen %d authsize %d\n",
+ req->assoclen, req->cryptlen, authsize);
+ print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen, 1);
+ print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+ print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
+ print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
+ desc_bytes(sh_desc), 1);
+#endif
+
+ if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_AES) &&
+ ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+ is_gcm = true;
+
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ if (contig & GIV_SRC_CONTIG) {
+ if (is_gcm)
+ src_dma = edesc->iv_dma;
+ else
+ src_dma = sg_dma_address(req->assoc);
+ in_options = 0;
+ } else {
+ src_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
+ in_options = LDST_SGF;
+ }
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+ in_options);
+
+ if (contig & GIV_DST_CONTIG) {
+ dst_dma = edesc->iv_dma;
+ } else {
+ if (likely(req->src == req->dst)) {
+ dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
+ (edesc->assoc_nents +
+ (is_gcm ? 1 + edesc->src_nents : 0));
+ out_options = LDST_SGF;
+ } else {
+ dst_dma = edesc->sec4_sg_dma +
+ sec4_sg_index *
+ sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
+ }
+ }
+
+ append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
+ out_options);
+}
+
+/*
+ * Fill in ablkcipher job descriptor
+ */
+static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req,
+ bool iv_contig)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ u32 *desc = edesc->hw_desc;
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, sec4_sg_index = 0;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->nbytes, 1);
+#endif
+
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ if (iv_contig) {
+ src_dma = edesc->iv_dma;
+ in_options = 0;
+ } else {
+ src_dma = edesc->sec4_sg_dma;
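+ /* any dst entries follow the IV entry and src entries in the table */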
+ sec4_sg_index += edesc->src_nents + 1;
+ in_options = LDST_SGF;
+ }
+ append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
+
+ if (likely(req->src == req->dst)) {
+ if (!edesc->src_nents && iv_contig) {
+ dst_dma = sg_dma_address(req->src);
+ } else {
+ dst_dma = edesc->sec4_sg_dma +
+ sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
+ }
+ } else {
+ if (!edesc->dst_nents) {
+ dst_dma = sg_dma_address(req->dst);
+ } else {
+ dst_dma = edesc->sec4_sg_dma +
+ sec4_sg_index * sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
+ }
+ }
+ append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
+}
+
+/*
+ * Fill in ablkcipher givencrypt job descriptor
+ */
+static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req,
+ bool iv_contig)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ u32 *desc = edesc->hw_desc;
+ u32 out_options, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, sec4_sg_index = 0;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->nbytes, 1);
+#endif
+
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ if (!edesc->src_nents) {
+ src_dma = sg_dma_address(req->src);
+ in_options = 0;
+ } else {
+ src_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += edesc->src_nents;
+ in_options = LDST_SGF;
+ }
+ append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
+
+ if (iv_contig) {
+ dst_dma = edesc->iv_dma;
+ out_options = 0;
+ } else {
+ dst_dma = edesc->sec4_sg_dma +
+ sec4_sg_index * sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
+ }
+ append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
+}
+
+/*
+ * allocate and map the aead extended descriptor
+ */
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ int desc_bytes, bool *all_contig_ptr,
+ bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ int assoc_nents, src_nents, dst_nents = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ int sgc;
+ bool all_contig = true;
+ bool assoc_chained = false, src_chained = false, dst_chained = false;
+ int ivsize = crypto_aead_ivsize(aead);
+ int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+ unsigned int authsize = ctx->authsize;
+ bool is_gcm = false;
+
+ assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
+
+ if (unlikely(req->dst != req->src)) {
+ src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+ dst_nents = sg_count(req->dst,
+ req->cryptlen +
+ (encrypt ? authsize : (-authsize)),
+ &dst_chained);
+ } else {
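+ /* in-place op: on encrypt the ICV lands right after the ciphertext */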
+ src_nents = sg_count(req->src,
+ req->cryptlen +
+ (encrypt ? authsize : 0),
+ &src_chained);
+ }
+
+ sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
+ DMA_TO_DEVICE, assoc_chained);
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
+ } else {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE, src_chained);
+ sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE, dst_chained);
+ }
+
+ iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_AES) &&
+ ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+ is_gcm = true;
+
+ /*
+ * Check if data are contiguous.
+ * GCM expected input sequence: IV, AAD, text
+ * All others - expected input sequence: AAD, IV, text
+ */
+ if (is_gcm)
+ all_contig = (!assoc_nents &&
+ iv_dma + ivsize == sg_dma_address(req->assoc) &&
+ !src_nents && sg_dma_address(req->assoc) +
+ req->assoclen == sg_dma_address(req->src));
+ else
+ all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
+ req->assoclen == iv_dma && !src_nents &&
+ iv_dma + ivsize == sg_dma_address(req->src));
+ if (!all_contig) {
+ assoc_nents = assoc_nents ? : 1;
+ src_nents = src_nents ? : 1;
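+ /* link table holds the assoc entries, one IV entry and the src entries */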
+ sec4_sg_len = assoc_nents + 1 + src_nents;
+ }
+
+ sec4_sg_len += dst_nents;
+
+ sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->assoc_nents = assoc_nents;
+ edesc->assoc_chained = assoc_chained;
+ edesc->src_nents = src_nents;
+ edesc->src_chained = src_chained;
+ edesc->dst_nents = dst_nents;
+ edesc->dst_chained = dst_chained;
+ edesc->iv_dma = iv_dma;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
+ desc_bytes;
+ *all_contig_ptr = all_contig;
+
+ sec4_sg_index = 0;
+ if (!all_contig) {
+ if (!is_gcm) {
+ sg_to_sec4_sg(req->assoc,
+ assoc_nents,
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+ sec4_sg_index += assoc_nents;
+ }
+
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+ iv_dma, ivsize, 0);
+ sec4_sg_index += 1;
+
+ if (is_gcm) {
+ sg_to_sec4_sg(req->assoc,
+ assoc_nents,
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+ sec4_sg_index += assoc_nents;
+ }
+
+ sg_to_sec4_sg_last(req->src,
+ src_nents,
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+ sec4_sg_index += src_nents;
+ }
+ if (dst_nents) {
+ sg_to_sec4_sg_last(req->dst, dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ }
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return edesc;
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool all_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &all_contig, true);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* Create and submit job descriptor */
+ init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
+ all_contig, true);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool all_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &all_contig, false);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ req->cryptlen, 1);
+#endif
+
+ /* Create and submit job descriptor */
+ init_aead_job(ctx->sh_desc_dec,
+ ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+/*
+ * allocate and map the aead extended descriptor for aead givencrypt
+ */
+static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
+ *greq, int desc_bytes,
+ u32 *contig_ptr)
+{
+ struct aead_request *req = &greq->areq;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ int assoc_nents, src_nents, dst_nents = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ int sgc;
+ u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
+ int ivsize = crypto_aead_ivsize(aead);
+ bool assoc_chained = false, src_chained = false, dst_chained = false;
+ int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+ bool is_gcm = false;
+
+ assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
+ src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+
+ if (unlikely(req->dst != req->src))
+ dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
+ &dst_chained);
+
+ sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
+ DMA_TO_DEVICE, assoc_chained);
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
+ } else {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE, src_chained);
+ sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE, dst_chained);
+ }
+
+ iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_AES) &&
+ ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+ is_gcm = true;
+
+ /*
+ * Check if data are contiguous.
+ * GCM expected input sequence: IV, AAD, text
+ * All others - expected input sequence: AAD, IV, text
+ */
+
+ if (is_gcm) {
+ if (assoc_nents || iv_dma + ivsize !=
+ sg_dma_address(req->assoc) || src_nents ||
+ sg_dma_address(req->assoc) + req->assoclen !=
+ sg_dma_address(req->src))
+ contig &= ~GIV_SRC_CONTIG;
+ } else {
+ if (assoc_nents ||
+ sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
+ src_nents || iv_dma + ivsize != sg_dma_address(req->src))
+ contig &= ~GIV_SRC_CONTIG;
+ }
+
+ if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
+ contig &= ~GIV_DST_CONTIG;
+
+ if (!(contig & GIV_SRC_CONTIG)) {
+ assoc_nents = assoc_nents ? : 1;
+ src_nents = src_nents ? : 1;
+ sec4_sg_len += assoc_nents + 1 + src_nents;
+ if (req->src == req->dst &&
+ (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
+ contig &= ~GIV_DST_CONTIG;
+ }
+
+ /*
+ * Add new sg entries for GCM output sequence.
+ * Expected output sequence: IV, encrypted text.
+ */
+ if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
+ sec4_sg_len += 1 + src_nents;
+
+ if (unlikely(req->src != req->dst)) {
+ dst_nents = dst_nents ? : 1;
+ sec4_sg_len += 1 + dst_nents;
+ }
+
+ sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->assoc_nents = assoc_nents;
+ edesc->assoc_chained = assoc_chained;
+ edesc->src_nents = src_nents;
+ edesc->src_chained = src_chained;
+ edesc->dst_nents = dst_nents;
+ edesc->dst_chained = dst_chained;
+ edesc->iv_dma = iv_dma;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
+ desc_bytes;
+ *contig_ptr = contig;
+
+ sec4_sg_index = 0;
+ if (!(contig & GIV_SRC_CONTIG)) {
+ if (!is_gcm) {
+ sg_to_sec4_sg(req->assoc, assoc_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ sec4_sg_index += assoc_nents;
+ }
+
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+ iv_dma, ivsize, 0);
+ sec4_sg_index += 1;
+
+ if (is_gcm) {
+ sg_to_sec4_sg(req->assoc, assoc_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ sec4_sg_index += assoc_nents;
+ }
+
+ sg_to_sec4_sg_last(req->src, src_nents,
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+ sec4_sg_index += src_nents;
+ }
+
+ if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+ iv_dma, ivsize, 0);
+ sec4_sg_index += 1;
+ sg_to_sec4_sg_last(req->src, src_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ }
+
+ if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+ iv_dma, ivsize, 0);
+ sec4_sg_index += 1;
+ sg_to_sec4_sg_last(req->dst, dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ }
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return edesc;
+}
+
+static int aead_givencrypt(struct aead_givcrypt_request *areq)
+{
+ struct aead_request *req = &areq->areq;
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ u32 contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &contig);
+
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ req->cryptlen, 1);
+#endif
+
+ /* Create and submit job descriptor */
+ init_aead_giv_job(ctx->sh_desc_givenc,
+ ctx->sh_desc_givenc_dma, edesc, req, contig);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
+{
+ return aead_encrypt(&areq->areq);
+}
+
+/*
+ * allocate and map the ablkcipher extended descriptor
+ */
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ *req, int desc_bytes,
+ bool *iv_contig_out)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, dst_nents = 0, sec4_sg_bytes;
+ struct ablkcipher_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ bool iv_contig = false;
+ int sgc;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ bool src_chained = false, dst_chained = false;
+ int sec4_sg_index;
+
+ src_nents = sg_count(req->src, req->nbytes, &src_chained);
+
+ if (req->dst != req->src)
+ dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
+
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
+ } else {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE, src_chained);
+ sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE, dst_chained);
+ }
+
+ iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * Check if iv can be contiguous with source and destination.
+ * If so, include it. If not, create scatterlist.
+ */
+ if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
+ iv_contig = true;
+ else
+ src_nents = src_nents ? : 1;
+ sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+ sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->src_chained = src_chained;
+ edesc->dst_nents = dst_nents;
+ edesc->dst_chained = dst_chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+ desc_bytes;
+
+ sec4_sg_index = 0;
+ if (!iv_contig) {
+ dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
+ sg_to_sec4_sg_last(req->src, src_nents,
+ edesc->sec4_sg + 1, 0);
+ sec4_sg_index += 1 + src_nents;
+ }
+
+ if (dst_nents) {
+ sg_to_sec4_sg_last(req->dst, dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ }
+
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+ sec4_sg_bytes, 1);
+#endif
+
+ *iv_contig_out = iv_contig;
+ return edesc;
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ bool iv_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &iv_contig);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* Create and submit job descriptor */
+ init_ablkcipher_job(ctx->sh_desc_enc,
+ ctx->sh_desc_enc_dma, edesc, req, iv_contig);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ bool iv_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &iv_contig);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* Create and submit job descriptor */
+ init_ablkcipher_job(ctx->sh_desc_dec,
+ ctx->sh_desc_dec_dma, edesc, req, iv_contig);
+ desc = edesc->hw_desc;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+/*
+ * allocate and map the ablkcipher extended descriptor
+ * for ablkcipher givencrypt
+ */
+static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ struct skcipher_givcrypt_request *greq,
+ int desc_bytes,
+ bool *iv_contig_out)
+{
+ struct ablkcipher_request *req = &greq->creq;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, dst_nents = 0, sec4_sg_bytes;
+ struct ablkcipher_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ bool iv_contig = false;
+ int sgc;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ bool src_chained = false, dst_chained = false;
+ int sec4_sg_index;
+
+ src_nents = sg_count(req->src, req->nbytes, &src_chained);
+
+ if (unlikely(req->dst != req->src))
+ dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
+
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
+ } else {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE, src_chained);
+ sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE, dst_chained);
+ }
+
+ /*
+ * Check if iv can be contiguous with source and destination.
+ * If so, include it. If not, create scatterlist.
+ */
+ iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
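+ /* the generated IV is written out, so check contiguity against dst */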
+ if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
+ iv_contig = true;
+ else
+ dst_nents = dst_nents ? : 1;
+ sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+ sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(*edesc) + desc_bytes +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->src_chained = src_chained;
+ edesc->dst_nents = dst_nents;
+ edesc->dst_chained = dst_chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+ desc_bytes;
+
+ sec4_sg_index = 0;
+ if (src_nents) {
+ sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+ sec4_sg_index += src_nents;
+ }
+
+ if (!iv_contig) {
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+ iv_dma, ivsize, 0);
+ sec4_sg_index += 1;
+ sg_to_sec4_sg_last(req->dst, dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ }
+
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+ sec4_sg_bytes, 1);
+#endif
+
+ *iv_contig_out = iv_contig;
+ return edesc;
+}
+
+static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
+{
+ struct ablkcipher_request *req = &creq->creq;
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ bool iv_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &iv_contig);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* Create and submit job descriptor */
+ init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
+ edesc, req, iv_contig);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+#define template_aead template_u.aead
+#define template_ablkcipher template_u.ablkcipher
+struct caam_alg_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ u32 type;
+ union {
+ struct ablkcipher_alg ablkcipher;
+ struct aead_alg aead;
+ struct blkcipher_alg blkcipher;
+ struct cipher_alg cipher;
+ struct compress_alg compress;
+ struct rng_alg rng;
+ } template_u;
+ u32 class1_alg_type;
+ u32 class2_alg_type;
+ u32 alg_op;
+};
+
+static struct caam_alg_template driver_algs[] = {
+ /* single-pass ipsec_esp descriptor */
+ {
+ .name = "authenc(hmac(md5),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha1),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha224),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha256),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha384),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha512),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(md5),cbc(aes))",
+ .driver_name = "authenc-hmac-md5-cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha1),cbc(aes))",
+ .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha224),cbc(aes))",
+ .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha256),cbc(aes))",
+ .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha384),cbc(aes))",
+ .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha512),cbc(aes))",
+ .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(md5),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha224),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha384),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha512),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(md5),cbc(des))",
+ .driver_name = "authenc-hmac-md5-cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha1),cbc(des))",
+ .driver_name = "authenc-hmac-sha1-cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha224),cbc(des))",
+ .driver_name = "authenc-hmac-sha224-cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha256),cbc(des))",
+ .driver_name = "authenc-hmac-sha256-cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha384),cbc(des))",
+ .driver_name = "authenc-hmac-sha384-cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha512),cbc(des))",
+ .driver_name = "authenc-hmac-sha512-cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "rfc4106(gcm(aes))",
+ .driver_name = "rfc4106-gcm-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = rfc4106_setkey,
+ .setauthsize = rfc4106_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ {
+ .name = "rfc4543(gcm(aes))",
+ .driver_name = "rfc4543-gcm-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = rfc4543_setkey,
+ .setauthsize = rfc4543_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ /* Galois Counter Mode */
+ {
+ .name = "gcm(aes)",
+ .driver_name = "gcm-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = gcm_setkey,
+ .setauthsize = gcm_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = NULL,
+ .geniv = "<built-in>",
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ /* ablkcipher descriptor */
+ {
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des3_ede)",
+ .driver_name = "cbc-3des-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des)",
+ .driver_name = "cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "ctr(aes)",
+ .driver_name = "ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "chainiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ },
+ {
+ .name = "rfc3686(ctr(aes))",
+ .driver_name = "rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = AES_MIN_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ }
+};
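+
+/*
+ * Illustration only (not part of the driver): once registered, the
+ * templates above are reached through the regular kernel crypto API by
+ * name; an AEAD user, for instance, might do
+ *
+ *	struct crypto_aead *tfm;
+ *
+ *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
+ *	if (!IS_ERR(tfm))
+ *		crypto_aead_setkey(tfm, key, keylen);
+ *
+ * and this implementation is chosen when its cra_priority wins.
+ */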
+
+struct caam_crypto_alg {
+ struct list_head entry;
+ int class1_alg_type;
+ int class2_alg_type;
+ int alg_op;
+ struct crypto_alg crypto_alg;
+};
+
+static int caam_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct caam_crypto_alg *caam_alg =
+ container_of(alg, struct caam_crypto_alg, crypto_alg);
+ struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->jrdev = caam_jr_alloc();
+ if (IS_ERR(ctx->jrdev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->jrdev);
+ }
+
+ /* copy descriptor header template value */
+ ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
+ ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
+ ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
+
+ return 0;
+}
+
+static void caam_cra_exit(struct crypto_tfm *tfm)
+{
+ struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->sh_desc_enc_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
+ if (ctx->sh_desc_dec_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
+ if (ctx->sh_desc_givenc_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
+ desc_bytes(ctx->sh_desc_givenc),
+ DMA_TO_DEVICE);
+ if (ctx->key_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->key_dma))
+ dma_unmap_single(ctx->jrdev, ctx->key_dma,
+ ctx->enckeylen + ctx->split_key_pad_len,
+ DMA_TO_DEVICE);
+
+ caam_jr_free(ctx->jrdev);
+}
+
+static void __exit caam_algapi_exit(void)
+{
+ struct caam_crypto_alg *t_alg, *n;
+
+ if (!alg_list.next)
+ return;
+
+ list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
+ crypto_unregister_alg(&t_alg->crypto_alg);
+ list_del(&t_alg->entry);
+ kfree(t_alg);
+ }
+}
+
+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
+ *template)
+{
+ struct caam_crypto_alg *t_alg;
+ struct crypto_alg *alg;
+
+ t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
+ if (!t_alg) {
+ pr_err("failed to allocate t_alg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ alg = &t_alg->crypto_alg;
+
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ alg->cra_module = THIS_MODULE;
+ alg->cra_init = caam_cra_init;
+ alg->cra_exit = caam_cra_exit;
+ alg->cra_priority = CAAM_CRA_PRIORITY;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_ctxsize = sizeof(struct caam_ctx);
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+ template->type;
+ switch (template->type) {
+ case CRYPTO_ALG_TYPE_GIVCIPHER:
+ alg->cra_type = &crypto_givcipher_type;
+ alg->cra_ablkcipher = template->template_ablkcipher;
+ break;
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ alg->cra_type = &crypto_ablkcipher_type;
+ alg->cra_ablkcipher = template->template_ablkcipher;
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ alg->cra_type = &crypto_aead_type;
+ alg->cra_aead = template->template_aead;
+ break;
+ }
+
+ t_alg->class1_alg_type = template->class1_alg_type;
+ t_alg->class2_alg_type = template->class2_alg_type;
+ t_alg->alg_op = template->alg_op;
+
+ return t_alg;
+}
+
+static int __init caam_algapi_init(void)
+{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+ void *priv;
+ int i = 0, err = 0;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev) {
+ of_node_put(dev_node);
+ return -ENODEV;
+ }
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+ of_node_put(dev_node);
+
+ /*
+ * If priv is NULL, it's probably because the caam driver wasn't
+ * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+ */
+ if (!priv)
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&alg_list);
+
+ /* register crypto algorithms the device supports */
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ /* TODO: check if h/w supports alg */
+ struct caam_crypto_alg *t_alg;
+
+ t_alg = caam_alg_alloc(&driver_algs[i]);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ pr_warn("%s alg allocation failed\n",
+ driver_algs[i].driver_name);
+ continue;
+ }
+
+ err = crypto_register_alg(&t_alg->crypto_alg);
+ if (err) {
+ pr_warn("%s alg registration failed\n",
+ t_alg->crypto_alg.cra_driver_name);
+ kfree(t_alg);
+ } else
+ list_add_tail(&t_alg->entry, &alg_list);
+ }
+ if (!list_empty(&alg_list))
+ pr_info("caam algorithms registered in /proc/crypto\n");
+
+ return err;
+}
+
+module_init(caam_algapi_init);
+module_exit(caam_algapi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM support for crypto API");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
new file mode 100644
index 000000000..332c8ef8d
--- /dev/null
+++ b/drivers/crypto/caam/caamhash.c
@@ -0,0 +1,1963 @@
+/*
+ * caam - Freescale FSL CAAM support for ahash functions of crypto API
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ *
+ * Based on caamalg.c crypto API driver.
+ *
+ * relationship of digest job descriptor or first job descriptor after init to
+ * shared descriptors:
+ *
+ * --------------- ---------------
+ * | JobDesc #1 |-------------------->| ShareDesc |
+ * | *(packet 1) | | (hashKey) |
+ * --------------- | (operation) |
+ * ---------------
+ *
+ * relationship of subsequent job descriptors to shared descriptors:
+ *
+ * --------------- ---------------
+ * | JobDesc #2 |-------------------->| ShareDesc |
+ * | *(packet 2) | |------------->| (hashKey) |
+ * --------------- | |-------->| (operation) |
+ * . | | | (load ctx2) |
+ * . | | ---------------
+ * --------------- | |
+ * | JobDesc #3 |------| |
+ * | *(packet 3) | |
+ * --------------- |
+ * . |
+ * . |
+ * --------------- |
+ * | JobDesc #4 |------------
+ * | *(packet 4) |
+ * ---------------
+ *
+ * The SharedDesc never changes for a connection unless rekeyed, but
+ * each packet will likely be in a different place. So all we need
+ * to know to process the packet is where the input is, where the
+ * output goes, and what context we want to process with. Context is
+ * in the SharedDesc, packet references in the JobDesc.
+ *
+ * So, a job desc looks like:
+ *
+ * ---------------------
+ * | Header |
+ * | ShareDesc Pointer |
+ * | SEQ_OUT_PTR |
+ * | (output buffer) |
+ * | (output length) |
+ * | SEQ_IN_PTR |
+ * | (input buffer) |
+ * | (input length) |
+ * ---------------------
+ */
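+
+/*
+ * A minimal sketch (illustration only, not driver code) of how a job
+ * descriptor of the shape above is built with the desc_constr.h helpers
+ * used throughout this file; DMA mapping, error checking and the edesc
+ * allocation are omitted, and src_dma/dst_dma/in_len/out_len are assumed
+ * to be valid:
+ *
+ *	u32 *desc = edesc->hw_desc;
+ *	int sh_len = desc_len(ctx->sh_desc_update);
+ *
+ *	init_job_desc_shared(desc, ctx->sh_desc_update_dma, sh_len,
+ *			     HDR_SHARE_DEFER | HDR_REVERSE);
+ *	append_seq_in_ptr(desc, src_dma, in_len, 0);
+ *	append_seq_out_ptr(desc, dst_dma, out_len, 0);
+ *
+ *	caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
+ */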
+
+#include "compat.h"
+
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "jr.h"
+#include "error.h"
+#include "sg_sw_sec4.h"
+#include "key_gen.h"
+
+#define CAAM_CRA_PRIORITY 3000
+
+/* max hash key is max split key size */
+#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
+
+#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
+#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+
+/* length of descriptors text */
+#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+
+#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
+ CAAM_MAX_HASH_KEY_SIZE)
+#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
+
+/* caam context sizes for hashes: running digest + 8 */
+#define HASH_MSG_LEN 8
+#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
+
+#ifdef DEBUG
+/* for print_hex_dumps with line references */
+#define debug(format, arg...) printk(format, arg)
+#else
+#define debug(format, arg...)
+#endif
+
+static struct list_head hash_list;
+
+/* ahash per-session context */
+struct caam_hash_ctx {
+ struct device *jrdev;
+ u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
+ u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
+ u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
+ u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
+ u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
+ dma_addr_t sh_desc_update_dma;
+ dma_addr_t sh_desc_update_first_dma;
+ dma_addr_t sh_desc_fin_dma;
+ dma_addr_t sh_desc_digest_dma;
+ dma_addr_t sh_desc_finup_dma;
+ u32 alg_type;
+ u32 alg_op;
+ u8 key[CAAM_MAX_HASH_KEY_SIZE];
+ dma_addr_t key_dma;
+ int ctx_len;
+ unsigned int split_key_len;
+ unsigned int split_key_pad_len;
+};
+
+/* ahash state */
+struct caam_hash_state {
+ dma_addr_t buf_dma;
+ dma_addr_t ctx_dma;
+ u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen_0;
+ u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen_1;
+ u8 caam_ctx[MAX_CTX_LEN];
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*finup)(struct ahash_request *req);
+ int current_buf;
+};
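+
+/*
+ * Illustration only (not driver code): buf_0/buf_1 form a flip-flop pair
+ * selected by current_buf.  The update paths below use the current buffer
+ * for bytes carried over from earlier requests and the other buffer for
+ * the residue of this request, then toggle once the residue is copied:
+ *
+ *	u8 *buf      = state->current_buf ? state->buf_1 : state->buf_0;
+ *	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
+ *	...
+ *	scatterwalk_map_and_copy(next_buf, req->src, offset, residue, 0);
+ *	state->current_buf = !state->current_buf;
+ */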
+
+/* Common job descriptor seq in/out ptr routines */
+
+/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
+static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
+ struct caam_hash_state *state,
+ int ctx_len)
+{
+ state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
+ ctx_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, state->ctx_dma)) {
+ dev_err(jrdev, "unable to map ctx\n");
+ return -ENOMEM;
+ }
+
+ append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
+
+ return 0;
+}
+
+/* Map req->result, and append seq_out_ptr command that points to it */
+static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
+ u8 *result, int digestsize)
+{
+ dma_addr_t dst_dma;
+
+ dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
+ append_seq_out_ptr(desc, dst_dma, digestsize, 0);
+
+ return dst_dma;
+}
+
+/* Map current buffer in state and put it in link table */
+static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
+ struct sec4_sg_entry *sec4_sg,
+ u8 *buf, int buflen)
+{
+ dma_addr_t buf_dma;
+
+ buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+ dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
+
+ return buf_dma;
+}
+
+/* Map req->src and put it in link table */
+static inline void src_map_to_sec4_sg(struct device *jrdev,
+ struct scatterlist *src, int src_nents,
+ struct sec4_sg_entry *sec4_sg,
+ bool chained)
+{
+ dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
+ sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
+}
+
+/*
+ * Only put the buffer in the link table if it contains data; either way,
+ * a previously used buffer may still be mapped and needs to be unmapped
+ * first.
+ */
+static inline dma_addr_t
+try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
+ u8 *buf, dma_addr_t buf_dma, int buflen,
+ int last_buflen)
+{
+ if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
+ dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
+ if (buflen)
+ buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
+ else
+ buf_dma = 0;
+
+ return buf_dma;
+}
+
+/* Map state->caam_ctx, and add it to link table */
+static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
+ struct caam_hash_state *state, int ctx_len,
+ struct sec4_sg_entry *sec4_sg, u32 flag)
+{
+ state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
+ if (dma_mapping_error(jrdev, state->ctx_dma)) {
+ dev_err(jrdev, "unable to map ctx\n");
+ return -ENOMEM;
+ }
+
+ dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
+
+ return 0;
+}
+
+/* Common shared descriptor commands */
+static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
+{
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+}
+
+/* Append key if it has been set */
+static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ if (ctx->split_key_len) {
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ append_key_ahash(desc, ctx);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+ }
+
+ /* Propagate errors from shared to job descriptor */
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+}
+
+/*
+ * For ahash, read data from seqin following state->caam_ctx, and write the
+ * resulting class2 context to seqout, which may be state->caam_ctx or
+ * req->result
+ */
+static inline void ahash_append_load_str(u32 *desc, int digestsize)
+{
+ /* Calculate remaining bytes to read */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+ FIFOLD_TYPE_MSG | KEY_VLF);
+
+ /* Store class2 context bytes */
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+}
+
+/*
+ * For ahash update, final and finup, import context, read and write to seqout
+ */
+static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
+ int digestsize,
+ struct caam_hash_ctx *ctx)
+{
+ init_sh_desc_key_ahash(desc, ctx);
+
+ /* Import context from software */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_2_CCB | ctx->ctx_len);
+
+ /* Class 2 operation */
+ append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ */
+ ahash_append_load_str(desc, digestsize);
+}
+
+/* For ahash update_first and digest, read and write to seqout */
+static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
+ int digestsize, struct caam_hash_ctx *ctx)
+{
+ init_sh_desc_key_ahash(desc, ctx);
+
+ /* Class 2 operation */
+ append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ */
+ ahash_append_load_str(desc, digestsize);
+}
+
+static int ahash_set_sh_desc(struct crypto_ahash *ahash)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct device *jrdev = ctx->jrdev;
+ u32 have_key = 0;
+ u32 *desc;
+
+ if (ctx->split_key_len)
+ have_key = OP_ALG_AAI_HMAC_PRECOMP;
+
+ /* ahash_update shared descriptor */
+ desc = ctx->sh_desc_update;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Import context from software */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_2_CCB | ctx->ctx_len);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
+ OP_ALG_ENCRYPT);
+
+ /* Load data and write to result or context */
+ ahash_append_load_str(desc, ctx->ctx_len);
+
+ ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ahash update shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ /* ahash_update_first shared descriptor */
+ desc = ctx->sh_desc_update_first;
+
+ ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
+ ctx->ctx_len, ctx);
+
+ ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ahash update first shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ /* ahash_final shared descriptor */
+ desc = ctx->sh_desc_fin;
+
+ ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
+ OP_ALG_AS_FINALIZE, digestsize, ctx);
+
+ ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /* ahash_finup shared descriptor */
+ desc = ctx->sh_desc_finup;
+
+ ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
+ OP_ALG_AS_FINALIZE, digestsize, ctx);
+
+ ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /* ahash_digest shared descriptor */
+ desc = ctx->sh_desc_digest;
+
+ ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
+ digestsize, ctx);
+
+ ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ahash digest shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
+
+static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+ u32 keylen)
+{
+ return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
+ ctx->split_key_pad_len, key_in, keylen,
+ ctx->alg_op);
+}
+
+/* Digest the hash key if it is too large (longer than the block size) */
+static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+ u32 *keylen, u8 *key_out, u32 digestsize)
+{
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc;
+ struct split_key_result result;
+ dma_addr_t src_dma, dst_dma;
+ int ret = 0;
+
+ desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+ if (!desc) {
+ dev_err(jrdev, "unable to allocate key input memory\n");
+ return -ENOMEM;
+ }
+
+ init_job_desc(desc, 0);
+
+ src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, src_dma)) {
+ dev_err(jrdev, "unable to map key input memory\n");
+ kfree(desc);
+ return -ENOMEM;
+ }
+ dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, dst_dma)) {
+ dev_err(jrdev, "unable to map key output memory\n");
+ dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+ kfree(desc);
+ return -ENOMEM;
+ }
+
+ /* Job descriptor to perform unkeyed hash on key_in */
+ append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
+ OP_ALG_AS_INITFINAL);
+ append_seq_in_ptr(desc, src_dma, *keylen, 0);
+ append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
+ append_seq_out_ptr(desc, dst_dma, digestsize, 0);
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ result.err = 0;
+ init_completion(&result.completion);
+
+ ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+ if (!ret) {
+ /* in progress */
+ wait_for_completion_interruptible(&result.completion);
+ ret = result.err;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "digested key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in,
+ digestsize, 1);
+#endif
+ }
+ dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+ dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
+
+ *keylen = digestsize;
+
+ kfree(desc);
+
+ return ret;
+}
+
+static int ahash_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *jrdev = ctx->jrdev;
+ int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int ret = 0;
+ u8 *hashed_key = NULL;
+
+#ifdef DEBUG
+ printk(KERN_ERR "keylen %d\n", keylen);
+#endif
+
+ if (keylen > blocksize) {
+ hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
+ GFP_DMA);
+ if (!hashed_key)
+ return -ENOMEM;
+ ret = hash_digest_key(ctx, key, &keylen, hashed_key,
+ digestsize);
+ if (ret)
+ goto badkey;
+ key = hashed_key;
+ }
+
+ /* Pick class 2 key length from algorithm submask */
+ ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT] * 2;
+ ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
+
+#ifdef DEBUG
+ printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
+ ctx->split_key_len, ctx->split_key_pad_len);
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ ret = gen_split_hash_key(ctx, key, keylen);
+ if (ret)
+ goto badkey;
+
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ ret = -ENOMEM;
+ goto map_err;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->split_key_pad_len, 1);
+#endif
+
+ ret = ahash_set_sh_desc(ahash);
+ if (ret) {
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
+ DMA_TO_DEVICE);
+ }
+
+map_err:
+ kfree(hashed_key);
+ return ret;
+badkey:
+ kfree(hashed_key);
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+/*
+ * ahash_edesc - s/w-extended ahash descriptor
+ * @dst_dma: physical mapped address of req->result
+ * @sec4_sg_dma: physical mapped address of h/w link table
+ * @chained: if source is chained
+ * @src_nents: number of segments in input scatterlist
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg: pointer to h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ */
+struct ahash_edesc {
+ dma_addr_t dst_dma;
+ dma_addr_t sec4_sg_dma;
+ bool chained;
+ int src_nents;
+ int sec4_sg_bytes;
+ struct sec4_sg_entry *sec4_sg;
+ u32 hw_desc[0];
+};
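+
+/*
+ * Illustration only (not driver code): the request paths below allocate
+ * the extended descriptor, the h/w job descriptor and the link table as
+ * one contiguous block,
+ *
+ *	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ *			sec4_sg_bytes, GFP_DMA | flags);
+ *	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ *			 DESC_JOB_IO_LEN;
+ *
+ * i.e. [ struct ahash_edesc | hw_desc (DESC_JOB_IO_LEN) | sec4_sg ].
+ */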
+
+static inline void ahash_unmap(struct device *dev,
+ struct ahash_edesc *edesc,
+ struct ahash_request *req, int dst_len)
+{
+ if (edesc->src_nents)
+ dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
+ DMA_TO_DEVICE, edesc->chained);
+ if (edesc->dst_dma)
+ dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
+
+ if (edesc->sec4_sg_bytes)
+ dma_unmap_single(dev, edesc->sec4_sg_dma,
+ edesc->sec4_sg_bytes, DMA_TO_DEVICE);
+}
+
+static inline void ahash_unmap_ctx(struct device *dev,
+ struct ahash_edesc *edesc,
+ struct ahash_request *req, int dst_len, u32 flag)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ if (state->ctx_dma)
+ dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+ ahash_unmap(dev, edesc, req, dst_len);
+}
+
+static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ahash_request *req = context;
+ struct ahash_edesc *edesc;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ int digestsize = crypto_ahash_digestsize(ahash);
+#ifdef DEBUG
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ahash_edesc *)((char *)desc -
+ offsetof(struct ahash_edesc, hw_desc));
+ if (err)
+ caam_jr_strstatus(jrdev, err);
+
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+#endif
+
+ req->base.complete(&req->base, err);
+}
+
+static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ahash_request *req = context;
+ struct ahash_edesc *edesc;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+#ifdef DEBUG
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ int digestsize = crypto_ahash_digestsize(ahash);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ahash_edesc *)((char *)desc -
+ offsetof(struct ahash_edesc, hw_desc));
+ if (err)
+ caam_jr_strstatus(jrdev, err);
+
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ kfree(edesc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+#endif
+
+ req->base.complete(&req->base, err);
+}
+
+static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ahash_request *req = context;
+ struct ahash_edesc *edesc;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ int digestsize = crypto_ahash_digestsize(ahash);
+#ifdef DEBUG
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ahash_edesc *)((char *)desc -
+ offsetof(struct ahash_edesc, hw_desc));
+ if (err)
+ caam_jr_strstatus(jrdev, err);
+
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
+ kfree(edesc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+#endif
+
+ req->base.complete(&req->base, err);
+}
+
+static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ahash_request *req = context;
+ struct ahash_edesc *edesc;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+#ifdef DEBUG
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ int digestsize = crypto_ahash_digestsize(ahash);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ahash_edesc *)((char *)desc -
+ offsetof(struct ahash_edesc, hw_desc));
+ if (err)
+ caam_jr_strstatus(jrdev, err);
+
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+ kfree(edesc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+#endif
+
+ req->base.complete(&req->base, err);
+}
+
+/* submit update job descriptor */
+static int ahash_update_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
+ u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
+ int *next_buflen = state->current_buf ? &state->buflen_0 :
+ &state->buflen_1, last_buflen;
+ int in_len = *buflen + req->nbytes, to_hash;
+ u32 *sh_desc = ctx->sh_desc_update, *desc;
+ dma_addr_t ptr = ctx->sh_desc_update_dma;
+ int src_nents, sec4_sg_bytes, sec4_sg_src_index;
+ struct ahash_edesc *edesc;
+ bool chained = false;
+ int ret = 0;
+ int sh_len;
+
+ last_buflen = *next_buflen;
+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ to_hash = in_len - *next_buflen;
+
+ if (to_hash) {
+ src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
+ &chained);
+ sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
+ sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+ sizeof(struct sec4_sg_entry);
+
+ /*
+ * allocate space for base edesc and hw desc commands,
+ * link tables
+ */
+ edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev,
+ "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+
+ ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ edesc->sec4_sg, DMA_BIDIRECTIONAL);
+ if (ret)
+ return ret;
+
+ state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
+ edesc->sec4_sg + 1,
+ buf, state->buf_dma,
+ *buflen, last_buflen);
+
+ if (src_nents) {
+ src_map_to_sec4_sg(jrdev, req->src, src_nents,
+ edesc->sec4_sg + sec4_sg_src_index,
+ chained);
+ if (*next_buflen) {
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
+ state->current_buf = !state->current_buf;
+ }
+ } else {
+ (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
+ SEC4_SG_LEN_FIN;
+ }
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
+ HDR_REVERSE);
+
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
+ to_hash, LDST_SGF);
+
+ append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
+ DMA_BIDIRECTIONAL);
+ kfree(edesc);
+ }
+ } else if (*next_buflen) {
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+ req->nbytes, 0);
+ *buflen = *next_buflen;
+ *next_buflen = last_buflen;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+ *next_buflen, 1);
+#endif
+
+ return ret;
+}
+
+static int ahash_final_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
+ int last_buflen = state->current_buf ? state->buflen_0 :
+ state->buflen_1;
+ u32 *sh_desc = ctx->sh_desc_fin, *desc;
+ dma_addr_t ptr = ctx->sh_desc_fin_dma;
+ int sec4_sg_bytes;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ int ret = 0;
+ int sh_len;
+
+ sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->src_nents = 0;
+
+ ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ edesc->sec4_sg, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
+ buf, state->buf_dma, buflen,
+ last_buflen);
+ (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
+ LDST_SGF);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+ if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+static int ahash_finup_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
+ int last_buflen = state->current_buf ? state->buflen_0 :
+ state->buflen_1;
+ u32 *sh_desc = ctx->sh_desc_finup, *desc;
+ dma_addr_t ptr = ctx->sh_desc_finup_dma;
+ int sec4_sg_bytes, sec4_sg_src_index;
+ int src_nents;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ bool chained = false;
+ int ret = 0;
+ int sh_len;
+
+ src_nents = __sg_count(req->src, req->nbytes, &chained);
+ sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+ sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+ sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+
+ ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ edesc->sec4_sg, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
+ buf, state->buf_dma, buflen,
+ last_buflen);
+
+ src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
+ sec4_sg_src_index, chained);
+
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
+ buflen + req->nbytes, LDST_SGF);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+ if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+static int ahash_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u32 *sh_desc = ctx->sh_desc_digest, *desc;
+ dma_addr_t ptr = ctx->sh_desc_digest_dma;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int src_nents, sec4_sg_bytes;
+ dma_addr_t src_dma;
+ struct ahash_edesc *edesc;
+ bool chained = false;
+ int ret = 0;
+ u32 options;
+ int sh_len;
+
+ src_nents = sg_count(req->src, req->nbytes, &chained);
+ dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
+ chained);
+ sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
+ DESC_JOB_IO_LEN, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ if (src_nents) {
+ sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+ src_dma = edesc->sec4_sg_dma;
+ options = LDST_SGF;
+ } else {
+ src_dma = sg_dma_address(req->src);
+ options = 0;
+ }
+ append_seq_in_ptr(desc, src_dma, req->nbytes, options);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+ if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+/* submit ahash final if it is the first job descriptor */
+static int ahash_final_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
+ u32 *sh_desc = ctx->sh_desc_digest, *desc;
+ dma_addr_t ptr = ctx->sh_desc_digest_dma;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ int ret = 0;
+ int sh_len;
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
+ GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ edesc->sec4_sg_bytes = 0;
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+
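+	/* the only remaining input is the buffered partial block, mapped as one contiguous region */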
+ state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, state->buf_dma)) {
+ dev_err(jrdev, "unable to map src\n");
+ return -ENOMEM;
+ }
+
+ append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+ if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
+ edesc->src_nents = 0;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+/* submit ahash update if it is the first job descriptor after update */
+static int ahash_update_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
+ u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
+ int *next_buflen = state->current_buf ? &state->buflen_0 :
+ &state->buflen_1;
+ int in_len = *buflen + req->nbytes, to_hash;
+ int sec4_sg_bytes, src_nents;
+ struct ahash_edesc *edesc;
+ u32 *desc, *sh_desc = ctx->sh_desc_update_first;
+ dma_addr_t ptr = ctx->sh_desc_update_first_dma;
+ bool chained = false;
+ int ret = 0;
+ int sh_len;
+
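+	/* hash only whole blocks; any remainder is carried over in the next buffer */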
+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ to_hash = in_len - *next_buflen;
+
+ if (to_hash) {
+ src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
+ &chained);
+ sec4_sg_bytes = (1 + src_nents) *
+ sizeof(struct sec4_sg_entry);
+
+ /*
+ * allocate space for base edesc and hw desc commands,
+ * link tables
+ */
+ edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev,
+ "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->dst_dma = 0;
+
+ state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
+ buf, *buflen);
+ src_map_to_sec4_sg(jrdev, req->src, src_nents,
+ edesc->sec4_sg + 1, chained);
+ if (*next_buflen) {
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
+ state->current_buf = !state->current_buf;
+ }
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
+ HDR_REVERSE);
+
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
+
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+ if (ret)
+ return ret;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
+ } else {
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
+ DMA_TO_DEVICE);
+ kfree(edesc);
+ }
+ } else if (*next_buflen) {
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+ req->nbytes, 0);
+ *buflen = *next_buflen;
+ *next_buflen = 0;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+ *next_buflen, 1);
+#endif
+
+ return ret;
+}
+
+/* submit ahash finup if it is the first job descriptor after update */
+static int ahash_finup_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
+ int last_buflen = state->current_buf ? state->buflen_0 :
+ state->buflen_1;
+ u32 *sh_desc = ctx->sh_desc_digest, *desc;
+ dma_addr_t ptr = ctx->sh_desc_digest_dma;
+ int sec4_sg_bytes, sec4_sg_src_index, src_nents;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ bool chained = false;
+ int sh_len;
+ int ret = 0;
+
+ src_nents = __sg_count(req->src, req->nbytes, &chained);
+ sec4_sg_src_index = 2;
+ sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+ sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+
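+	/* S/G entry 0 covers the buffered partial block, the remaining entries cover req->src */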
+ state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
+ state->buf_dma, buflen,
+ last_buflen);
+
+ src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
+ chained);
+
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
+ req->nbytes, LDST_SGF);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+ if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+/* submit first update job descriptor after init */
+static int ahash_update_first(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int *next_buflen = state->current_buf ?
+ &state->buflen_1 : &state->buflen_0;
+ int to_hash;
+ u32 *sh_desc = ctx->sh_desc_update_first, *desc;
+ dma_addr_t ptr = ctx->sh_desc_update_first_dma;
+ int sec4_sg_bytes, src_nents;
+ dma_addr_t src_dma;
+ u32 options;
+ struct ahash_edesc *edesc;
+ bool chained = false;
+ int ret = 0;
+ int sh_len;
+
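+	/* defer any partial trailing block to the next update */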
+ *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
+ 1);
+ to_hash = req->nbytes - *next_buflen;
+
+ if (to_hash) {
+ src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
+ &chained);
+ dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE, chained);
+ sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
+
+ /*
+ * allocate space for base edesc and hw desc commands,
+ * link tables
+ */
+ edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev,
+ "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->dst_dma = 0;
+
+ if (src_nents) {
+ sg_to_sec4_sg_last(req->src, src_nents,
+ edesc->sec4_sg, 0);
+ edesc->sec4_sg_dma = dma_map_single(jrdev,
+ edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+ src_dma = edesc->sec4_sg_dma;
+ options = LDST_SGF;
+ } else {
+ src_dma = sg_dma_address(req->src);
+ options = 0;
+ }
+
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src, to_hash,
+ *next_buflen, 0);
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
+ HDR_REVERSE);
+
+ append_seq_in_ptr(desc, src_dma, to_hash, options);
+
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+ if (ret)
+ return ret;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
+ req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
+ } else {
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
+ DMA_TO_DEVICE);
+ kfree(edesc);
+ }
+ } else if (*next_buflen) {
+ state->update = ahash_update_no_ctx;
+ state->finup = ahash_finup_no_ctx;
+ state->final = ahash_final_no_ctx;
+ scatterwalk_map_and_copy(next_buf, req->src, 0,
+ req->nbytes, 0);
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+ *next_buflen, 1);
+#endif
+
+ return ret;
+}
+
+static int ahash_finup_first(struct ahash_request *req)
+{
+ return ahash_digest(req);
+}
+
+static int ahash_init(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ state->update = ahash_update_first;
+ state->finup = ahash_finup_first;
+ state->final = ahash_final_no_ctx;
+
+ state->current_buf = 0;
+ state->buf_dma = 0;
+ state->buflen_0 = 0;
+ state->buflen_1 = 0;
+
+ return 0;
+}
+
+static int ahash_update(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->update(req);
+}
+
+static int ahash_finup(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->finup(req);
+}
+
+static int ahash_final(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->final(req);
+}
+
+static int ahash_export(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
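+	/* the exported partial state is the raw tfm context followed by the request state */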
+ memcpy(out, ctx, sizeof(struct caam_hash_ctx));
+ memcpy(out + sizeof(struct caam_hash_ctx), state,
+ sizeof(struct caam_hash_state));
+ return 0;
+}
+
+static int ahash_import(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ memcpy(ctx, in, sizeof(struct caam_hash_ctx));
+ memcpy(state, in + sizeof(struct caam_hash_ctx),
+ sizeof(struct caam_hash_state));
+ return 0;
+}
+
+struct caam_hash_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ struct ahash_alg template_ahash;
+ u32 alg_type;
+ u32 alg_op;
+};
+
+/* ahash algorithm templates */
+static struct caam_hash_template driver_hash[] = {
+ {
+ .name = "sha1",
+ .driver_name = "sha1-caam",
+ .hmac_name = "hmac(sha1)",
+ .hmac_driver_name = "hmac-sha1-caam",
+ .blocksize = SHA1_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA1,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "sha224",
+ .driver_name = "sha224-caam",
+ .hmac_name = "hmac(sha224)",
+ .hmac_driver_name = "hmac-sha224-caam",
+ .blocksize = SHA224_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA224,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "sha256",
+ .driver_name = "sha256-caam",
+ .hmac_name = "hmac(sha256)",
+ .hmac_driver_name = "hmac-sha256-caam",
+ .blocksize = SHA256_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA256,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "sha384",
+ .driver_name = "sha384-caam",
+ .hmac_name = "hmac(sha384)",
+ .hmac_driver_name = "hmac-sha384-caam",
+ .blocksize = SHA384_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA384,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "sha512",
+ .driver_name = "sha512-caam",
+ .hmac_name = "hmac(sha512)",
+ .hmac_driver_name = "hmac-sha512-caam",
+ .blocksize = SHA512_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA512,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "md5",
+ .driver_name = "md5-caam",
+ .hmac_name = "hmac(md5)",
+ .hmac_driver_name = "hmac-md5-caam",
+ .blocksize = MD5_BLOCK_WORDS * 4,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_MD5,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+};
+
+struct caam_hash_alg {
+ struct list_head entry;
+ int alg_type;
+ int alg_op;
+ struct ahash_alg ahash_alg;
+};
+
+static int caam_hash_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct crypto_alg *base = tfm->__crt_alg;
+ struct hash_alg_common *halg =
+ container_of(base, struct hash_alg_common, base);
+ struct ahash_alg *alg =
+ container_of(halg, struct ahash_alg, halg);
+ struct caam_hash_alg *caam_hash =
+ container_of(alg, struct caam_hash_alg, ahash_alg);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
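+	/* (SHA-224 and SHA-384 keep the full SHA-256/SHA-512 running digest internally) */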
+ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
+ HASH_MSG_LEN + SHA1_DIGEST_SIZE,
+ HASH_MSG_LEN + 32,
+ HASH_MSG_LEN + SHA256_DIGEST_SIZE,
+ HASH_MSG_LEN + 64,
+ HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+ int ret = 0;
+
+ /*
+ * Get a Job ring from Job Ring driver to ensure in-order
+ * crypto request processing per tfm
+ */
+ ctx->jrdev = caam_jr_alloc();
+ if (IS_ERR(ctx->jrdev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->jrdev);
+ }
+ /* copy descriptor header template value */
+ ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+ ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
+
+ ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT];
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct caam_hash_state));
+
+ ret = ahash_set_sh_desc(ahash);
+
+ return ret;
+}
+
+static void caam_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->sh_desc_update_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
+ desc_bytes(ctx->sh_desc_update),
+ DMA_TO_DEVICE);
+ if (ctx->sh_desc_update_first_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
+ desc_bytes(ctx->sh_desc_update_first),
+ DMA_TO_DEVICE);
+ if (ctx->sh_desc_fin_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
+ desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
+ if (ctx->sh_desc_digest_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
+ desc_bytes(ctx->sh_desc_digest),
+ DMA_TO_DEVICE);
+ if (ctx->sh_desc_finup_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
+ desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
+
+ caam_jr_free(ctx->jrdev);
+}
+
+static void __exit caam_algapi_hash_exit(void)
+{
+ struct caam_hash_alg *t_alg, *n;
+
+ if (!hash_list.next)
+ return;
+
+ list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
+ crypto_unregister_ahash(&t_alg->ahash_alg);
+ list_del(&t_alg->entry);
+ kfree(t_alg);
+ }
+}
+
+static struct caam_hash_alg *
+caam_hash_alloc(struct caam_hash_template *template,
+ bool keyed)
+{
+ struct caam_hash_alg *t_alg;
+ struct ahash_alg *halg;
+ struct crypto_alg *alg;
+
+ t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
+ if (!t_alg) {
+ pr_err("failed to allocate t_alg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ t_alg->ahash_alg = template->template_ahash;
+ halg = &t_alg->ahash_alg;
+ alg = &halg->halg.base;
+
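+	/* keyed (hmac) and unkeyed variants share one template; only the algorithm names differ */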
+ if (keyed) {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_driver_name);
+ } else {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ }
+ alg->cra_module = THIS_MODULE;
+ alg->cra_init = caam_hash_cra_init;
+ alg->cra_exit = caam_hash_cra_exit;
+ alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+ alg->cra_priority = CAAM_CRA_PRIORITY;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
+ alg->cra_type = &crypto_ahash_type;
+
+ t_alg->alg_type = template->alg_type;
+ t_alg->alg_op = template->alg_op;
+
+ return t_alg;
+}
+
+static int __init caam_algapi_hash_init(void)
+{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+ void *priv;
+ int i = 0, err = 0;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev) {
+ of_node_put(dev_node);
+ return -ENODEV;
+ }
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+ of_node_put(dev_node);
+
+ /*
+ * If priv is NULL, it's probably because the caam driver wasn't
+ * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+ */
+ if (!priv)
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&hash_list);
+
+ /* register crypto algorithms the device supports */
+ for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
+ /* TODO: check if h/w supports alg */
+ struct caam_hash_alg *t_alg;
+
+ /* register hmac version */
+ t_alg = caam_hash_alloc(&driver_hash[i], true);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ pr_warn("%s alg allocation failed\n",
+ driver_hash[i].driver_name);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+ pr_warn("%s alg registration failed\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name);
+ kfree(t_alg);
+ } else
+ list_add_tail(&t_alg->entry, &hash_list);
+
+ /* register unkeyed version */
+ t_alg = caam_hash_alloc(&driver_hash[i], false);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ pr_warn("%s alg allocation failed\n",
+ driver_hash[i].driver_name);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+ pr_warn("%s alg registration failed\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name);
+ kfree(t_alg);
+ } else
+ list_add_tail(&t_alg->entry, &hash_list);
+ }
+
+ return err;
+}
+
+module_init(caam_algapi_hash_init);
+module_exit(caam_algapi_hash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
+MODULE_AUTHOR("Freescale Semiconductor - NMG");
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
new file mode 100644
index 000000000..509533720
--- /dev/null
+++ b/drivers/crypto/caam/caamrng.c
@@ -0,0 +1,362 @@
+/*
+ * caam - Freescale FSL CAAM support for hw_random
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ *
+ * Based on caamalg.c crypto API driver.
+ *
+ * relationship of job descriptors to shared descriptors:
+ *
+ * --------------- --------------
+ * | JobDesc #0 |-------------------->| ShareDesc |
+ * | *(buffer 0) | |------------->| (generate) |
+ * --------------- | | (move) |
+ * | | (store) |
+ * --------------- | --------------
+ * | JobDesc #1 |------|
+ * | *(buffer 1) |
+ * ---------------
+ *
+ * A job desc looks like this:
+ *
+ * ---------------------
+ * | Header |
+ * | ShareDesc Pointer |
+ * | SEQ_OUT_PTR |
+ * | (output buffer) |
+ * ---------------------
+ *
+ * The SharedDesc never changes, and each job descriptor points to one of two
+ * buffers for each device, from which the data will be copied into the
+ * requested destination.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/completion.h>
+#include <linux/atomic.h>
+
+#include "compat.h"
+
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "jr.h"
+#include "error.h"
+
+/*
+ * Maximum buffer size: maximum number of random, cache-aligned bytes that
+ * will be generated and moved to seq out ptr (extlen not allowed)
+ */
+#define RN_BUF_SIZE (0xffff / L1_CACHE_BYTES * \
+ L1_CACHE_BYTES)
+
+/* length of descriptors */
+#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
+#define DESC_RNG_LEN (4 * CAAM_CMD_SZ)
+
+/* Buffer, its dma address and lock */
+struct buf_data {
+ u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
+ dma_addr_t addr;
+ struct completion filled;
+ u32 hw_desc[DESC_JOB_O_LEN];
+#define BUF_NOT_EMPTY 0
+#define BUF_EMPTY 1
+#define BUF_PENDING 2 /* Empty, but with a job pending; don't submit another */
+ atomic_t empty;
+};
+
+/* rng per-device context */
+struct caam_rng_ctx {
+ struct device *jrdev;
+ dma_addr_t sh_desc_dma;
+ u32 sh_desc[DESC_RNG_LEN];
+ unsigned int cur_buf_idx;
+ int current_buf;
+ struct buf_data bufs[2];
+};
+
+static struct caam_rng_ctx *rng_ctx;
+
+static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
+{
+ if (bd->addr)
+ dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
+ DMA_FROM_DEVICE);
+}
+
+static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
+{
+ struct device *jrdev = ctx->jrdev;
+
+ if (ctx->sh_desc_dma)
+ dma_unmap_single(jrdev, ctx->sh_desc_dma,
+ desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
+ rng_unmap_buf(jrdev, &ctx->bufs[0]);
+ rng_unmap_buf(jrdev, &ctx->bufs[1]);
+}
+
+static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
+{
+ struct buf_data *bd;
+
+ bd = (struct buf_data *)((char *)desc -
+ offsetof(struct buf_data, hw_desc));
+
+ if (err)
+ caam_jr_strstatus(jrdev, err);
+
+ atomic_set(&bd->empty, BUF_NOT_EMPTY);
+ complete(&bd->filled);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
+ DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
+#endif
+}
+
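+/* submit a refill job; to_current selects the buffer currently being read, otherwise the other one */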
+static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
+{
+ struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)];
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc = bd->hw_desc;
+ int err;
+
+ dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
+ init_completion(&bd->filled);
+ err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
+ if (err)
+		complete(&bd->filled); /* don't wait on failed job */
+ else
+ atomic_inc(&bd->empty); /* note if pending */
+
+ return err;
+}
+
+static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct caam_rng_ctx *ctx = rng_ctx;
+ struct buf_data *bd = &ctx->bufs[ctx->current_buf];
+ int next_buf_idx, copied_idx;
+ int err;
+
+ if (atomic_read(&bd->empty)) {
+ /* try to submit job if there wasn't one */
+ if (atomic_read(&bd->empty) == BUF_EMPTY) {
+ err = submit_job(ctx, 1);
+ /* if can't submit job, can't even wait */
+ if (err)
+ return 0;
+ }
+ /* no immediate data, so exit if not waiting */
+ if (!wait)
+ return 0;
+
+ /* waiting for pending job */
+ if (atomic_read(&bd->empty))
+ wait_for_completion(&bd->filled);
+ }
+
+ next_buf_idx = ctx->cur_buf_idx + max;
+ dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n",
+ __func__, ctx->current_buf, ctx->cur_buf_idx);
+
+ /* if enough data in current buffer */
+ if (next_buf_idx < RN_BUF_SIZE) {
+ memcpy(data, bd->buf + ctx->cur_buf_idx, max);
+ ctx->cur_buf_idx = next_buf_idx;
+ return max;
+ }
+
+ /* else, copy what's left... */
+ copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx;
+ memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx);
+ ctx->cur_buf_idx = 0;
+ atomic_set(&bd->empty, BUF_EMPTY);
+
+ /* ...refill... */
+ submit_job(ctx, 1);
+
+ /* and use next buffer */
+ ctx->current_buf = !ctx->current_buf;
+ dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf);
+
+ /* since there already is some data read, don't wait */
+ return copied_idx + caam_read(rng, data + copied_idx,
+ max - copied_idx, false);
+}
+
+static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
+{
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc = ctx->sh_desc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Propagate errors from shared to job descriptor */
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+
+ /* Generate random bytes */
+ append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
+
+ /* Store bytes */
+ append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE);
+
+ ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ desc, desc_bytes(desc), 1);
+#endif
+ return 0;
+}
+
+static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
+{
+ struct device *jrdev = ctx->jrdev;
+ struct buf_data *bd = &ctx->bufs[buf_id];
+ u32 *desc = bd->hw_desc;
+ int sh_len = desc_len(ctx->sh_desc);
+
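+	/* run the job descriptor first (HDR_REVERSE), then the shared generate/store descriptor */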
+ init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER |
+ HDR_REVERSE);
+
+ bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, bd->addr)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
+
+ append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ desc, desc_bytes(desc), 1);
+#endif
+ return 0;
+}
+
+static void caam_cleanup(struct hwrng *rng)
+{
+ int i;
+ struct buf_data *bd;
+
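+	/* wait for any in-flight refill jobs to finish before unmapping the buffers */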
+ for (i = 0; i < 2; i++) {
+ bd = &rng_ctx->bufs[i];
+ if (atomic_read(&bd->empty) == BUF_PENDING)
+ wait_for_completion(&bd->filled);
+ }
+
+ rng_unmap_ctx(rng_ctx);
+}
+
+static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
+{
+ struct buf_data *bd = &ctx->bufs[buf_id];
+ int err;
+
+ err = rng_create_job_desc(ctx, buf_id);
+ if (err)
+ return err;
+
+ atomic_set(&bd->empty, BUF_EMPTY);
+ submit_job(ctx, buf_id == ctx->current_buf);
+ wait_for_completion(&bd->filled);
+
+ return 0;
+}
+
+static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
+{
+ int err;
+
+ ctx->jrdev = jrdev;
+
+ err = rng_create_sh_desc(ctx);
+ if (err)
+ return err;
+
+ ctx->current_buf = 0;
+ ctx->cur_buf_idx = 0;
+
+ err = caam_init_buf(ctx, 0);
+ if (err)
+ return err;
+
+ err = caam_init_buf(ctx, 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static struct hwrng caam_rng = {
+ .name = "rng-caam",
+ .cleanup = caam_cleanup,
+ .read = caam_read,
+};
+
+static void __exit caam_rng_exit(void)
+{
+ caam_jr_free(rng_ctx->jrdev);
+ hwrng_unregister(&caam_rng);
+ kfree(rng_ctx);
+}
+
+static int __init caam_rng_init(void)
+{
+ struct device *dev;
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+ void *priv;
+ int err;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev) {
+ of_node_put(dev_node);
+ return -ENODEV;
+ }
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+ of_node_put(dev_node);
+
+ /*
+ * If priv is NULL, it's probably because the caam driver wasn't
+ * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+ */
+ if (!priv)
+ return -ENODEV;
+
+ dev = caam_jr_alloc();
+ if (IS_ERR(dev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(dev);
+ }
+ rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
+ if (!rng_ctx)
+ return -ENOMEM;
+ err = caam_init_rng(rng_ctx, dev);
+ if (err)
+ return err;
+
+ dev_info(dev, "registering rng-caam\n");
+ return hwrng_register(&caam_rng);
+}
+
+module_init(caam_rng_init);
+module_exit(caam_rng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
+MODULE_AUTHOR("Freescale Semiconductor - NMG");
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
new file mode 100644
index 000000000..acd7743e2
--- /dev/null
+++ b/drivers/crypto/caam/compat.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef CAAM_COMPAT_H
+#define CAAM_COMPAT_H
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/crypto.h>
+#include <linux/hash.h>
+#include <linux/hw_random.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/rtnetlink.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/circ_buf.h>
+#include <net/xfrm.h>
+
+#include <crypto/algapi.h>
+#include <crypto/null.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/internal/hash.h>
+
+#endif /* !defined(CAAM_COMPAT_H) */
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
new file mode 100644
index 000000000..efba4ccd4
--- /dev/null
+++ b/drivers/crypto/caam/ctrl.c
@@ -0,0 +1,729 @@
+/*
+ * CAAM control-plane driver backend
+ * Controller-level driver, kernel property detection, initialization
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include "compat.h"
+#include "regs.h"
+#include "intern.h"
+#include "jr.h"
+#include "desc_constr.h"
+#include "error.h"
+
+/*
+ * Descriptor to instantiate RNG State Handle 0 in normal mode and
+ * load the JDKEK, TDKEK and TDSK registers
+ */
+static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
+{
+ u32 *jump_cmd, op_flags;
+
+ init_job_desc(desc, 0);
+
+ op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
+
+ /* INIT RNG in non-test mode */
+ append_operation(desc, op_flags);
+
+ if (!handle && do_sk) {
+ /*
+ * For SH0, Secure Keys must be generated as well
+ */
+
+ /* wait for done */
+ jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ /*
+ * load 1 to clear written reg:
+	 * resets the done interrupt and returns the RNG to idle.
+ */
+ append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
+
+ /* Initialize State Handle */
+ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ OP_ALG_AAI_RNG4_SK);
+ }
+
+ append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
+}
+
+/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
+static void build_deinstantiation_desc(u32 *desc, int handle)
+{
+ init_job_desc(desc, 0);
+
+ /* Uninstantiate State Handle 0 */
+ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);
+
+ append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
+}
+
+/*
+ * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
+ * the software (no JR/QI used).
+ * @ctrldev - pointer to device
+ * @status - descriptor status, after being run
+ *
+ * Return: - 0 if no error occurred
+ * - -ENODEV if the DECO couldn't be acquired
+ * - -EAGAIN if an error occurred while executing the descriptor
+ */
+static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
+ u32 *status)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+ struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+ struct caam_deco __iomem *deco = ctrlpriv->deco;
+ unsigned int timeout = 100000;
+ u32 deco_dbg_reg, flags;
+ int i;
+
+
+ if (ctrlpriv->virt_en == 1) {
+ setbits32(&ctrl->deco_rsr, DECORSR_JR0);
+
+ while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
+ --timeout)
+ cpu_relax();
+
+ timeout = 100000;
+ }
+
+ setbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
+
+ while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
+ --timeout)
+ cpu_relax();
+
+ if (!timeout) {
+ dev_err(ctrldev, "failed to acquire DECO 0\n");
+ clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < desc_len(desc); i++)
+ wr_reg32(&deco->descbuf[i], *(desc + i));
+
+ flags = DECO_JQCR_WHL;
+ /*
+	 * If the descriptor is at least 4 words long, then the
+ * FOUR bit in JRCTRL register must be set.
+ */
+ if (desc_len(desc) >= 4)
+ flags |= DECO_JQCR_FOUR;
+
+ /* Instruct the DECO to execute it */
+ wr_reg32(&deco->jr_ctl_hi, flags);
+
+ timeout = 10000000;
+ do {
+ deco_dbg_reg = rd_reg32(&deco->desc_dbg);
+ /*
+		 * If an error occurred in the descriptor, then
+ * the DECO status field will be set to 0x0D
+ */
+ if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
+ DESC_DBG_DECO_STAT_HOST_ERR)
+ break;
+ cpu_relax();
+ } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
+
+ *status = rd_reg32(&deco->op_status_hi) &
+ DECO_OP_STATUS_HI_ERR_MASK;
+
+ if (ctrlpriv->virt_en == 1)
+ clrbits32(&ctrl->deco_rsr, DECORSR_JR0);
+
+ /* Mark the DECO as free */
+ clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
+
+ if (!timeout)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/*
+ * instantiate_rng - builds and executes a descriptor on DECO0,
+ * which initializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ * for the RNG4 state handles which exist in
+ * the RNG4 block: 1 if it's been instantiated
+ *			      by an external entity, 0 otherwise.
+ * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
+ * Caution: this can be done only once; if the keys need to be
+ * regenerated, a POR is required
+ *
+ * Return: - 0 if no error occurred
+ * - -ENOMEM if there isn't enough memory to allocate the descriptor
+ * - -ENODEV if DECO0 couldn't be acquired
+ * - -EAGAIN if an error occurred when executing the descriptor
+ *	     e.g. there was an RNG hardware error due to not "good enough"
+ *	     entropy being acquired.
+ */
+static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
+ int gen_sk)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+ struct caam_ctrl __iomem *ctrl;
+ u32 *desc, status, rdsta_val;
+ int ret = 0, sh_idx;
+
+ ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+ desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ /*
+ * If the corresponding bit is set, this state handle
+ * was initialized by somebody else, so it's left alone.
+ */
+ if ((1 << sh_idx) & state_handle_mask)
+ continue;
+
+ /* Create the descriptor for instantiating RNG State Handle */
+ build_instantiation_desc(desc, sh_idx, gen_sk);
+
+ /* Try to run it through DECO0 */
+ ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+ /*
+ * If ret is not 0, or descriptor status is not 0, then
+ * something went wrong. No need to try the next state
+ * handle (if available), bail out here.
+ * Also, if for some reason, the State Handle didn't get
+ * instantiated although the descriptor has finished
+ * without any error (HW optimizations for later
+ * CAAM eras), then try again.
+ */
+ rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
+ if (status || !(rdsta_val & (1 << sh_idx)))
+ ret = -EAGAIN;
+ if (ret)
+ break;
+ dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
+ /* Clear the contents before recreating the descriptor */
+ memset(desc, 0x00, CAAM_CMD_SZ * 7);
+ }
+
+ kfree(desc);
+
+ return ret;
+}
+
+/*
+ * deinstantiate_rng - builds and executes a descriptor on DECO0,
+ * which deinitializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ * for the RNG4 state handles which exist in
+ * the RNG4 block: 1 if it's been instantiated
+ *
+ * Return: - 0 if no error occurred
+ * - -ENOMEM if there isn't enough memory to allocate the descriptor
+ * - -ENODEV if DECO0 couldn't be acquired
+ * - -EAGAIN if an error occurred when executing the descriptor
+ */
+static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
+{
+ u32 *desc, status;
+ int sh_idx, ret = 0;
+
+ desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ /*
+ * If the corresponding bit is set, then it means the state
+ * handle was initialized by us, and thus it needs to be
+		 * deinitialized as well
+ */
+ if ((1 << sh_idx) & state_handle_mask) {
+ /*
+			 * Create the descriptor for deinstantiating this state
+ * handle
+ */
+ build_deinstantiation_desc(desc, sh_idx);
+
+ /* Try to run it through DECO0 */
+ ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+ if (ret || status) {
+ dev_err(ctrldev,
+ "Failed to deinstantiate RNG4 SH%d\n",
+ sh_idx);
+ break;
+ }
+ dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
+ }
+ }
+
+ kfree(desc);
+
+ return ret;
+}
+
+static int caam_remove(struct platform_device *pdev)
+{
+ struct device *ctrldev;
+ struct caam_drv_private *ctrlpriv;
+ struct caam_ctrl __iomem *ctrl;
+ int ring, ret = 0;
+
+ ctrldev = &pdev->dev;
+ ctrlpriv = dev_get_drvdata(ctrldev);
+ ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+
+ /* Remove platform devices for JobRs */
+ for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
+ if (ctrlpriv->jrpdev[ring])
+ of_device_unregister(ctrlpriv->jrpdev[ring]);
+ }
+
+ /* De-initialize RNG state handles initialized by this driver. */
+ if (ctrlpriv->rng4_sh_init)
+ deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
+
+ /* Shut down debug views */
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(ctrlpriv->dfs_root);
+#endif
+
+ /* Unmap controller region */
+ iounmap(&ctrl);
+
+ return ret;
+}
+
+/*
+ * kick_trng - sets the various parameters for enabling the initialization
+ * of the RNG4 block in CAAM
+ * @pdev - pointer to the platform device
+ * @ent_delay - Defines the length (in system clocks) of each entropy sample.
+ */
+static void kick_trng(struct platform_device *pdev, int ent_delay)
+{
+ struct device *ctrldev = &pdev->dev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+ struct caam_ctrl __iomem *ctrl;
+ struct rng4tst __iomem *r4tst;
+ u32 val;
+
+ ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+ r4tst = &ctrl->r4tst[0];
+
+ /* put RNG4 into program mode */
+ setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+
+ /*
+ * Performance-wise, it does not make sense to
+ * set the delay to a value that is lower
+ * than the last one that worked (i.e. the state handles
+	 * were instantiated properly). Thus, instead of wasting
+ * time trying to set the values controlling the sample
+ * frequency, the function simply returns.
+ */
+ val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
+ >> RTSDCTL_ENT_DLY_SHIFT;
+ if (ent_delay <= val) {
+ /* put RNG4 into run mode */
+ clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+ return;
+ }
+
+ val = rd_reg32(&r4tst->rtsdctl);
+ val = (val & ~RTSDCTL_ENT_DLY_MASK) |
+ (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
+ wr_reg32(&r4tst->rtsdctl, val);
+ /* min. freq. count, equal to 1/4 of the entropy sample length */
+ wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
+ /* disable maximum frequency count */
+ wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
+ /* read the control register */
+ val = rd_reg32(&r4tst->rtmctl);
+ /*
+ * select raw sampling in both entropy shifter
+ * and statistical checker
+ */
+ setbits32(&val, RTMCTL_SAMP_MODE_RAW_ES_SC);
+ /* put RNG4 into run mode */
+ clrbits32(&val, RTMCTL_PRGM);
+ /* write back the control register */
+ wr_reg32(&r4tst->rtmctl, val);
+}
+
+/**
+ * caam_get_era() - Return the ERA of the SEC on SoC, based
+ * on the "fsl,sec-era" property in the DTS. This property is updated by u-boot.
+ **/
+int caam_get_era(void)
+{
+ struct device_node *caam_node;
+ for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
+ const uint32_t *prop = (uint32_t *)of_get_property(caam_node,
+ "fsl,sec-era",
+ NULL);
+ return prop ? *prop : -ENOTSUPP;
+ }
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(caam_get_era);
+
+/* Probe routine for CAAM top (controller) level */
+static int caam_probe(struct platform_device *pdev)
+{
+ int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
+ u64 caam_id;
+ struct device *dev;
+ struct device_node *nprop, *np;
+ struct caam_ctrl __iomem *ctrl;
+ struct caam_drv_private *ctrlpriv;
+#ifdef CONFIG_DEBUG_FS
+ struct caam_perfmon *perfmon;
+#endif
+ u32 scfgr, comp_params;
+ u32 cha_vid_ls;
+ int pg_size;
+ int BLOCK_OFFSET = 0;
+
+ ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
+ GFP_KERNEL);
+ if (!ctrlpriv)
+ return -ENOMEM;
+
+ dev = &pdev->dev;
+ dev_set_drvdata(dev, ctrlpriv);
+ ctrlpriv->pdev = pdev;
+ nprop = pdev->dev.of_node;
+
+ /* Get configuration properties from device tree */
+ /* First, get register page */
+ ctrl = of_iomap(nprop, 0);
+ if (ctrl == NULL) {
+ dev_err(dev, "caam: of_iomap() failed\n");
+ return -ENOMEM;
+ }
+	/* Find the page size by reading the CTPR_MS register */
+ comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
+ pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
+
+	/* Select BLOCK_OFFSET based on the page size supported on
+ * the platform
+ */
+ if (pg_size == 0)
+ BLOCK_OFFSET = PG_SIZE_4K;
+ else
+ BLOCK_OFFSET = PG_SIZE_64K;
+
+ ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
+ ctrlpriv->assure = (struct caam_assurance __force *)
+ ((uint8_t *)ctrl +
+ BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
+ );
+ ctrlpriv->deco = (struct caam_deco __force *)
+ ((uint8_t *)ctrl +
+ BLOCK_OFFSET * DECO_BLOCK_NUMBER
+ );
+
+ /* Get the IRQ of the controller (for security violations only) */
+ ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
+
+ /*
+ * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
+ * long pointers in master configuration register
+ */
+ setbits32(&ctrl->mcr, MCFGR_WDENABLE |
+ (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+
+ /*
+	 * Read the Compile Time parameters and SCFGR to determine
+ * if Virtualization is enabled for this platform
+ */
+ scfgr = rd_reg32(&ctrl->scfgr);
+
+ ctrlpriv->virt_en = 0;
+ if (comp_params & CTPR_MS_VIRT_EN_INCL) {
+ /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
+ * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
+ */
+ if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
+ (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
+ (scfgr & SCFGR_VIRT_EN)))
+ ctrlpriv->virt_en = 1;
+ } else {
+ /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
+ if (comp_params & CTPR_MS_VIRT_EN_POR)
+ ctrlpriv->virt_en = 1;
+ }
+
+ if (ctrlpriv->virt_en == 1)
+ setbits32(&ctrl->jrstart, JRSTART_JR0_START |
+ JRSTART_JR1_START | JRSTART_JR2_START |
+ JRSTART_JR3_START);
+
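+	/* 40-bit DMA on SEC v5.x, 36-bit on older SECs, 32-bit on 32-bit kernels */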
+ if (sizeof(dma_addr_t) == sizeof(u64))
+ if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ else
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
+ else
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
+ /*
+ * Detect and enable JobRs
+	 * First, find out how many rings are spec'ed, allocate references
+ * for all, then go probe each one.
+ */
+ rspec = 0;
+ for_each_available_child_of_node(nprop, np)
+ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+ of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
+ rspec++;
+
+ ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
+ sizeof(struct platform_device *) * rspec,
+ GFP_KERNEL);
+ if (ctrlpriv->jrpdev == NULL) {
+ iounmap(&ctrl);
+ return -ENOMEM;
+ }
+
+ ring = 0;
+ ctrlpriv->total_jobrs = 0;
+ for_each_available_child_of_node(nprop, np)
+ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+ of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+ ctrlpriv->jrpdev[ring] =
+ of_platform_device_create(np, NULL, dev);
+ if (!ctrlpriv->jrpdev[ring]) {
+ pr_warn("JR%d Platform device creation error\n",
+ ring);
+ continue;
+ }
+ ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
+ ((uint8_t *)ctrl +
+ (ring + JR_BLOCK_NUMBER) *
+ BLOCK_OFFSET
+ );
+ ctrlpriv->total_jobrs++;
+ ring++;
+ }
+
+ /* Check to see if QI present. If so, enable */
+ ctrlpriv->qi_present =
+ !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
+ CTPR_MS_QI_MASK);
+ if (ctrlpriv->qi_present) {
+ ctrlpriv->qi = (struct caam_queue_if __force *)
+ ((uint8_t *)ctrl +
+ BLOCK_OFFSET * QI_BLOCK_NUMBER
+ );
+ /* This is all that's required to physically enable QI */
+ wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
+ }
+
+ /* If no QI and no rings specified, quit and go home */
+ if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
+ dev_err(dev, "no queues configured, terminating\n");
+ caam_remove(pdev);
+ return -ENOMEM;
+ }
+
+ cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
+
+ /*
+ * If SEC has RNG version >= 4 and RNG state handle has not been
+ * already instantiated, do RNG instantiation
+ */
+ if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
+ ctrlpriv->rng4_sh_init =
+ rd_reg32(&ctrl->r4tst[0].rdsta);
+ /*
+ * If the secure keys (TDKEK, JDKEK, TDSK), were already
+ * generated, signal this to the function that is instantiating
+ * the state handles. An error would occur if RNG4 attempts
+ * to regenerate these keys before the next POR.
+ */
+ gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
+ ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
+ do {
+ int inst_handles =
+ rd_reg32(&ctrl->r4tst[0].rdsta) &
+ RDSTA_IFMASK;
+ /*
+ * If either SH were instantiated by somebody else
+ * (e.g. u-boot) then it is assumed that the entropy
+ * parameters are properly set and thus the function
+ * setting these (kick_trng(...)) is skipped.
+ * Also, if a handle was instantiated, do not change
+ * the TRNG parameters.
+ */
+ if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+ dev_info(dev,
+ "Entropy delay = %u\n",
+ ent_delay);
+ kick_trng(pdev, ent_delay);
+ ent_delay += 400;
+ }
+ /*
+ * if instantiate_rng(...) fails, the loop will rerun
+		 * and the kick_trng(...) function will modify the
+		 * upper and lower limits of the entropy sampling
+		 * interval, leading to a successful initialization of
+ * the RNG.
+ */
+ ret = instantiate_rng(dev, inst_handles,
+ gen_sk);
+ if (ret == -EAGAIN)
+ /*
+ * if here, the loop will rerun,
+ * so don't hog the CPU
+ */
+ cpu_relax();
+ } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
+ if (ret) {
+ dev_err(dev, "failed to instantiate RNG");
+ caam_remove(pdev);
+ return ret;
+ }
+ /*
+ * Set handles init'ed by this module as the complement of the
+ * already initialized ones
+ */
+ ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
+
+ /* Enable RDB bit so that RNG works faster */
+ setbits32(&ctrl->scfgr, SCFGR_RDBENABLE);
+ }
+
+ /* NOTE: RTIC detection ought to go here, around Si time */
+
+ caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
+ (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);
+
+ /* Report "alive" for developer to see */
+ dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
+ caam_get_era());
+ dev_info(dev, "job rings = %d, qi = %d\n",
+ ctrlpriv->total_jobrs, ctrlpriv->qi_present);
+
+#ifdef CONFIG_DEBUG_FS
+ /*
+ * FIXME: needs better naming distinction, as some amalgamation of
+ * "caam" and nprop->full_name. The OF name isn't distinctive,
+ * but does separate instances
+ */
+ perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
+
+ ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
+ ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
+
+ /* Controller-level - performance monitor counters */
+ ctrlpriv->ctl_rq_dequeued =
+ debugfs_create_u64("rq_dequeued",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->req_dequeued);
+ ctrlpriv->ctl_ob_enc_req =
+ debugfs_create_u64("ob_rq_encrypted",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_enc_req);
+ ctrlpriv->ctl_ib_dec_req =
+ debugfs_create_u64("ib_rq_decrypted",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_dec_req);
+ ctrlpriv->ctl_ob_enc_bytes =
+ debugfs_create_u64("ob_bytes_encrypted",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_enc_bytes);
+ ctrlpriv->ctl_ob_prot_bytes =
+ debugfs_create_u64("ob_bytes_protected",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_prot_bytes);
+ ctrlpriv->ctl_ib_dec_bytes =
+ debugfs_create_u64("ib_bytes_decrypted",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_dec_bytes);
+ ctrlpriv->ctl_ib_valid_bytes =
+ debugfs_create_u64("ib_bytes_validated",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_valid_bytes);
+
+ /* Controller level - global status values */
+ ctrlpriv->ctl_faultaddr =
+ debugfs_create_u64("fault_addr",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->faultaddr);
+ ctrlpriv->ctl_faultdetail =
+ debugfs_create_u32("fault_detail",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->faultdetail);
+ ctrlpriv->ctl_faultstatus =
+ debugfs_create_u32("fault_status",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->status);
+
+ /* Internal covering keys (useful in non-secure mode only) */
+ ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
+ ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+ ctrlpriv->ctl_kek = debugfs_create_blob("kek",
+ S_IRUSR |
+ S_IRGRP | S_IROTH,
+ ctrlpriv->ctl,
+ &ctrlpriv->ctl_kek_wrap);
+
+ ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
+ ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+ ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
+ S_IRUSR |
+ S_IRGRP | S_IROTH,
+ ctrlpriv->ctl,
+ &ctrlpriv->ctl_tkek_wrap);
+
+ ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
+ ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+ ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
+ S_IRUSR |
+ S_IRGRP | S_IROTH,
+ ctrlpriv->ctl,
+ &ctrlpriv->ctl_tdsk_wrap);
+#endif
+ return 0;
+}
+
+static struct of_device_id caam_match[] = {
+ {
+ .compatible = "fsl,sec-v4.0",
+ },
+ {
+ .compatible = "fsl,sec4.0",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, caam_match);
+
+static struct platform_driver caam_driver = {
+ .driver = {
+ .name = "caam",
+ .of_match_table = caam_match,
+ },
+ .probe = caam_probe,
+ .remove = caam_remove,
+};
+
+module_platform_driver(caam_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM request backend");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
new file mode 100644
index 000000000..cac5402a4
--- /dev/null
+++ b/drivers/crypto/caam/ctrl.h
@@ -0,0 +1,13 @@
+/*
+ * CAAM control-plane driver backend public-level include definitions
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ */
+
+#ifndef CTRL_H
+#define CTRL_H
+
+/* Prototypes for backend-level services exposed to APIs */
+int caam_get_era(void);
+
+#endif /* CTRL_H */
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
new file mode 100644
index 000000000..d397ff9d5
--- /dev/null
+++ b/drivers/crypto/caam/desc.h
@@ -0,0 +1,1621 @@
+/*
+ * CAAM descriptor composition header
+ * Definitions to support CAAM descriptor instruction generation
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef DESC_H
+#define DESC_H
+
+struct sec4_sg_entry {
+ u64 ptr;
+#define SEC4_SG_LEN_FIN 0x40000000
+#define SEC4_SG_LEN_EXT 0x80000000
+ u32 len;
+ u8 reserved;
+ u8 buf_pool_id;
+ u16 offset;
+};
+
+/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
+#define MAX_CAAM_DESCSIZE 64
+
+/* Block size of any entity covered/uncovered with a KEK/TKEK */
+#define KEK_BLOCKSIZE 16
+
+/*
+ * Supported descriptor command types as they show up
+ * inside a descriptor command word.
+ */
+#define CMD_SHIFT 27
+#define CMD_MASK 0xf8000000
+
+#define CMD_KEY (0x00 << CMD_SHIFT)
+#define CMD_SEQ_KEY (0x01 << CMD_SHIFT)
+#define CMD_LOAD (0x02 << CMD_SHIFT)
+#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
+#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
+#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
+#define CMD_STORE (0x0a << CMD_SHIFT)
+#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
+#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
+#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT)
+#define CMD_MOVE_LEN (0x0e << CMD_SHIFT)
+#define CMD_MOVE (0x0f << CMD_SHIFT)
+#define CMD_OPERATION (0x10 << CMD_SHIFT)
+#define CMD_SIGNATURE (0x12 << CMD_SHIFT)
+#define CMD_JUMP (0x14 << CMD_SHIFT)
+#define CMD_MATH (0x15 << CMD_SHIFT)
+#define CMD_DESC_HDR (0x16 << CMD_SHIFT)
+#define CMD_SHARED_DESC_HDR (0x17 << CMD_SHIFT)
+#define CMD_SEQ_IN_PTR (0x1e << CMD_SHIFT)
+#define CMD_SEQ_OUT_PTR (0x1f << CMD_SHIFT)
+
+/* General-purpose class selector for all commands */
+#define CLASS_SHIFT 25
+#define CLASS_MASK (0x03 << CLASS_SHIFT)
+
+#define CLASS_NONE (0x00 << CLASS_SHIFT)
+#define CLASS_1 (0x01 << CLASS_SHIFT)
+#define CLASS_2 (0x02 << CLASS_SHIFT)
+#define CLASS_BOTH (0x03 << CLASS_SHIFT)
+
+/*
+ * Descriptor header command constructs
+ * Covers shared, job, and trusted descriptor headers
+ */
+
+/*
+ * Do Not Run - marks a descriptor inexecutable if there was
+ * a preceding error somewhere
+ */
+#define HDR_DNR 0x01000000
+
+/*
+ * ONE - should always be set. Combination of ONE (always
+ * set) and ZRO (always clear) forms an endianness sanity check
+ */
+#define HDR_ONE 0x00800000
+#define HDR_ZRO 0x00008000
+
+/* Start Index or SharedDesc Length */
+#define HDR_START_IDX_MASK 0x3f
+#define HDR_START_IDX_SHIFT 16
+
+/* If shared descriptor header, 6-bit length */
+#define HDR_DESCLEN_SHR_MASK 0x3f
+
+/* If non-shared header, 7-bit length */
+#define HDR_DESCLEN_MASK 0x7f
+
+/* This is a TrustedDesc (if not SharedDesc) */
+#define HDR_TRUSTED 0x00004000
+
+/* Make into TrustedDesc (if not SharedDesc) */
+#define HDR_MAKE_TRUSTED 0x00002000
+
+/* Save context if self-shared (if SharedDesc) */
+#define HDR_SAVECTX 0x00001000
+
+/* Next item points to SharedDesc */
+#define HDR_SHARED 0x00001000
+
+/*
+ * Reverse Execution Order - execute JobDesc first, then
+ * execute SharedDesc (normally SharedDesc goes first).
+ */
+#define HDR_REVERSE 0x00000800
+
+/* Propagate DNR property to SharedDesc */
+#define HDR_PROP_DNR 0x00000800
+
+/* JobDesc/SharedDesc share property */
+#define HDR_SD_SHARE_MASK 0x03
+#define HDR_SD_SHARE_SHIFT 8
+#define HDR_JD_SHARE_MASK 0x07
+#define HDR_JD_SHARE_SHIFT 8
+
+#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT)
+
+/* JobDesc/SharedDesc descriptor length */
+#define HDR_JD_LENGTH_MASK 0x7f
+#define HDR_SD_LENGTH_MASK 0x3f
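+
+/*
+ * Illustrative sketch of how the header constructs above can combine into a
+ * job descriptor header word that points at a shared descriptor. The helper
+ * name and parameters are hypothetical.
+ */
+static inline u32 example_job_desc_hdr(unsigned int sh_len, unsigned int len)
+{
+	return CMD_DESC_HDR | HDR_ONE | HDR_SHARED |
+	       ((sh_len & HDR_START_IDX_MASK) << HDR_START_IDX_SHIFT) |
+	       (len & HDR_JD_LENGTH_MASK);
+}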
+
+/*
+ * KEY/SEQ_KEY Command Constructs
+ */
+
+/* Key Destination Class: 01 = Class 1, 02 = Class 2 */
+#define KEY_DEST_CLASS_SHIFT 25 /* use CLASS_1 or CLASS_2 */
+#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define KEY_SGF 0x01000000
+#define KEY_VLF 0x01000000
+
+/* Immediate - Key follows command in the descriptor */
+#define KEY_IMM 0x00800000
+
+/*
+ * Encrypted - Key is encrypted either with the KEK, or
+ * with the TDKEK if TK is set
+ */
+#define KEY_ENC 0x00400000
+
+/*
+ * No Write Back - Do not allow key to be FIFO STOREd
+ */
+#define KEY_NWB 0x00200000
+
+/*
+ * Enhanced Encryption of Key
+ */
+#define KEY_EKT 0x00100000
+
+/*
+ * Encrypted with Trusted Key
+ */
+#define KEY_TK 0x00008000
+
+/*
+ * KDEST - Key Destination: 0 - class key register,
+ * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split-key
+ */
+#define KEY_DEST_SHIFT 16
+#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT)
+
+#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT)
+#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT)
+#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT)
+#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT)
+
+/* Length in bytes */
+#define KEY_LENGTH_MASK 0x000003ff
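+
+/*
+ * Illustrative sketch: a KEY command word loading a class 1 key register
+ * from immediate data (the key bytes follow the command word in the
+ * descriptor). The helper name is hypothetical.
+ */
+static inline u32 example_key_cmd(unsigned int keylen)
+{
+	return CMD_KEY | CLASS_1 | KEY_DEST_CLASS_REG | KEY_IMM |
+	       (keylen & KEY_LENGTH_MASK);
+}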
+
+/*
+ * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
+ */
+
+/*
+ * Load/Store Destination: 0 = class independent CCB,
+ * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
+ */
+#define LDST_CLASS_SHIFT 25
+#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define LDST_SGF 0x01000000
+#define LDST_VLF LDST_SGF
+
+/* Immediate - Data follows this command in the descriptor */
+#define LDST_IMM_MASK 1
+#define LDST_IMM_SHIFT 23
+#define LDST_IMM (LDST_IMM_MASK << LDST_IMM_SHIFT)
+
+/* SRC/DST - Destination for LOAD, Source for STORE */
+#define LDST_SRCDST_SHIFT 16
+#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_BYTE_CONTEXT (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_KEY (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_INFIFO (0x7c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_OUTFIFO (0x7e << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_WORD_MODE_REG (0x00 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_KEYSZ_REG (0x01 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DATASZ_REG (0x02 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ICVSZ_REG (0x03 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CHACTRL (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_IRQCTRL (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLRW (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_STAT (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS_CTX (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB (0x41 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
+
+/* Offset in source/destination */
+#define LDST_OFFSET_SHIFT 8
+#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT)
+
+/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
+/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
+#define LDOFF_CHG_SHARE_SHIFT 0
+#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_NO_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT)
+
+#define LDOFF_ENABLE_AUTO_NFIFO (1 << 2)
+#define LDOFF_DISABLE_AUTO_NFIFO (1 << 3)
+
+#define LDOFF_CHG_NONSEQLIODN_SHIFT 4
+#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+
+#define LDOFF_CHG_SEQLIODN_SHIFT 6
+#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+
+/* Data length in bytes */
+#define LDST_LEN_SHIFT 0
+#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT)
+
+/* Special Length definitions when dst=deco-ctrl */
+#define LDLEN_ENABLE_OSL_COUNT (1 << 7)
+#define LDLEN_RST_CHA_OFIFO_PTR (1 << 6)
+#define LDLEN_RST_OFIFO (1 << 5)
+#define LDLEN_SET_OFIFO_OFF_VALID (1 << 4)
+#define LDLEN_SET_OFIFO_OFF_RSVD (1 << 3)
+#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
+#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
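+
+/*
+ * Illustrative sketch: a LOAD command word writing 'len' bytes (e.g. an IV)
+ * into the class 1 context register at byte offset 'off'; the data pointer
+ * or immediate data follows the command word. Helper name is hypothetical.
+ */
+static inline u32 example_load_ctx_cmd(unsigned int off, unsigned int len)
+{
+	return CMD_LOAD | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT |
+	       ((off << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK) |
+	       (len & LDST_LEN_MASK);
+}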
+
+/*
+ * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
+ * Command Constructs
+ */
+
+/*
+ * Load Destination: 0 = skip (SEQ_FIFO_LOAD only),
+ * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
+ * Store Source: 0 = normal, 1 = Class1key, 2 = Class2key
+ */
+#define FIFOLD_CLASS_SHIFT 25
+#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT)
+
+#define FIFOST_CLASS_SHIFT 25
+#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT)
+
+/*
+ * Scatter-Gather Table/Variable Length Field
+ * If set for FIFO_LOAD, refers to an SG table. Within
+ * SEQ_FIFO_LOAD, it indicates a variable-length input sequence
+ */
+#define FIFOLDST_SGF_SHIFT 24
+#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF (1 << FIFOLDST_SGF_SHIFT)
+
+/* Immediate - Data follows command in descriptor */
+#define FIFOLD_IMM_SHIFT 23
+#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM (1 << FIFOLD_IMM_SHIFT)
+
+/* Continue - Not the last FIFO store to come */
+#define FIFOST_CONT_SHIFT 23
+#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
+
+/*
+ * Extended Length - use 32-bit extended length that
+ * follows the pointer field. Illegal with IMM set
+ */
+#define FIFOLDST_EXT_SHIFT 22
+#define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT (1 << FIFOLDST_EXT_SHIFT)
+
+/* Input data type. */
+#define FIFOLD_TYPE_SHIFT 16
+#define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */
+#define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT)
+
+/* PK types */
+#define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
+
+/* Other types. Need to OR in last/flush bits as desired */
+#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT)
+
+/* Last/Flush bits for use with "other" types above */
+#define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOINFOFIFO (0x0F << FIFOLD_TYPE_SHIFT)
+
+#define FIFOLDST_LEN_MASK 0xffff
+#define FIFOLDST_EXT_LEN_MASK 0xffffffff
+
+/* Output data types */
+#define FIFOST_TYPE_SHIFT 16
+#define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT)
+
+#define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
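+
+/*
+ * Illustrative sketch: a FIFO_LOAD command word feeding 'len' bytes of
+ * message data to class 1 and marking them as the last class 1 data.
+ * Helper name is hypothetical.
+ */
+static inline u32 example_fifo_load_msg_cmd(unsigned int len)
+{
+	return CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
+	       FIFOLD_TYPE_LAST1 | (len & FIFOLDST_LEN_MASK);
+}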
+
+/*
+ * OPERATION Command Constructs
+ */
+
+/* Operation type selectors - OP TYPE */
+#define OP_TYPE_SHIFT 24
+#define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT)
+
+#define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT)
+#define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT)
+#define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT)
+#define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT)
+
+/* ProtocolID selectors - PROTID */
+#define OP_PCLID_SHIFT 16
+#define OP_PCLID_MASK (0xff << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
+#define OP_PCLID_IKEV1_PRF (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV2_PRF (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30_PRF (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_PRF (0x06 << OP_PCLID_SHIFT)
+#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
+#define OP_PCLID_SECRETKEY (0x11 << OP_PCLID_SHIFT)
+#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
+#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_SRTP (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_MACSEC (0x03 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIFI (0x04 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIMAX (0x05 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30 (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS (0x0c << OP_PCLID_SHIFT)
+
+/*
+ * ProtocolInfo selectors
+ */
+#define OP_PCLINFO_MASK 0xffff
+
+/* for OP_PCLID_IPSEC */
+#define OP_PCL_IPSEC_CIPHER_MASK 0xff00
+#define OP_PCL_IPSEC_AUTH_MASK 0x00ff
+
+#define OP_PCL_IPSEC_DES_IV64 0x0100
+#define OP_PCL_IPSEC_DES 0x0200
+#define OP_PCL_IPSEC_3DES 0x0300
+#define OP_PCL_IPSEC_AES_CBC 0x0c00
+#define OP_PCL_IPSEC_AES_CTR 0x0d00
+#define OP_PCL_IPSEC_AES_XTS 0x1600
+#define OP_PCL_IPSEC_AES_CCM8 0x0e00
+#define OP_PCL_IPSEC_AES_CCM12 0x0f00
+#define OP_PCL_IPSEC_AES_CCM16 0x1000
+#define OP_PCL_IPSEC_AES_GCM8 0x1200
+#define OP_PCL_IPSEC_AES_GCM12 0x1300
+#define OP_PCL_IPSEC_AES_GCM16 0x1400
+
+#define OP_PCL_IPSEC_HMAC_NULL 0x0000
+#define OP_PCL_IPSEC_HMAC_MD5_96 0x0001
+#define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002
+#define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005
+#define OP_PCL_IPSEC_HMAC_MD5_128 0x0006
+#define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007
+#define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c
+#define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d
+#define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e
+
+/* For SRTP - OP_PCLID_SRTP */
+#define OP_PCL_SRTP_CIPHER_MASK 0xff00
+#define OP_PCL_SRTP_AUTH_MASK 0x00ff
+
+#define OP_PCL_SRTP_AES_CTR 0x0d00
+
+#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007
+
+/* For SSL 3.0 - OP_PCLID_SSL30 */
+#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f
+#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035
+#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_SSL30_DES_CBC_MD5 0x0022
+
+#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008
+#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_SSL30_DES_CBC_SHA 0x001e
+#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009
+#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c
+#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f
+#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012
+#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015
+#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_SSL30_RC4_128_MD5 0x0024
+#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004
+#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_SSL30_RC4_40_MD5 0x002b
+#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003
+#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_SSL30_RC4_128_SHA 0x0020
+#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a
+#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e
+#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092
+#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005
+#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002
+#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007
+#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c
+#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011
+#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_SSL30_RC4_40_SHA 0x0028
+
+
+/* For TLS 1.0 - OP_PCLID_TLS10 */
+#define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS10_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026
+
+
+#define OP_PCL_TLS10_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS10_RC4_128_MD5 0x0024
+#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS10_RC4_40_MD5 0x002b
+#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS10_RC4_128_SHA 0x0020
+#define OP_PCL_TLS10_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS10_RC4_40_SHA 0x0028
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65
+
+
+
+/* For TLS 1.1 - OP_PCLID_TLS11 */
+#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS11_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS11_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS11_RC4_128_MD5 0x0024
+#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS11_RC4_40_MD5 0x002b
+#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS11_RC4_128_SHA 0x0020
+#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS11_RC4_40_SHA 0x0028
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65
+
+
+/* For TLS 1.2 - OP_PCLID_TLS12 */
+#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS12_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS12_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS12_RC4_128_MD5 0x0024
+#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS12_RC4_40_MD5 0x002b
+#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS12_RC4_128_SHA 0x0020
+#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS12_RC4_40_SHA 0x0028
+
+/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c
+
+/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d
+
+/* AEAD_AES_xxx_CCM/GCM remain to be defined... */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
+
+/* For DTLS - OP_PCLID_DTLS */
+
+#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
+#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035
+#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_DTLS_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_DTLS_DES_CBC_MD5 0x0022
+
+#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008
+#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026
+
+
+#define OP_PCL_DTLS_DES_CBC_SHA 0x001e
+#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009
+#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c
+#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f
+#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012
+#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015
+#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a
+
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65
+
+/* 802.16 WiMAX protinfos */
+#define OP_PCL_WIMAX_OFDM 0x0201
+#define OP_PCL_WIMAX_OFDMA 0x0231
+
+/* 802.11 WiFi protinfos */
+#define OP_PCL_WIFI 0xac04
+
+/* MacSec protinfos */
+#define OP_PCL_MACSEC 0x0001
+
+/* PKI unidirectional protocol protinfo bits */
+#define OP_PCL_PKPROT_TEST 0x0008
+#define OP_PCL_PKPROT_DECRYPT 0x0004
+#define OP_PCL_PKPROT_ECC 0x0002
+#define OP_PCL_PKPROT_F2M 0x0001
+
+/* For non-protocol/alg-only op commands */
+#define OP_ALG_TYPE_SHIFT 24
+#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS1 2
+#define OP_ALG_TYPE_CLASS2 4
+
+#define OP_ALG_ALGSEL_SHIFT 16
+#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_AES (0x10 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA256 (0x43 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA384 (0x44 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA512 (0x45 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_RNG (0x50 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW (0x60 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F8 (0x60 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
+
+#define OP_ALG_AAI_SHIFT 4
+#define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT)
+
+/* blockcipher AAI set */
+#define OP_ALG_AAI_CTR_MOD128 (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD8 (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD16 (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD24 (0x03 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD32 (0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD40 (0x05 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD48 (0x06 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD56 (0x07 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD64 (0x08 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD72 (0x09 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD80 (0x0a << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD88 (0x0b << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD96 (0x0c << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD104 (0x0d << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD112 (0x0e << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD120 (0x0f << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_ECB (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CFB (0x30 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_OFB (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XTS (0x50 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CMAC (0x60 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XCBC_MAC (0x70 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CCM (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GCM (0x90 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC_XCBCMAC (0xa0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_XCBCMAC (0xb0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CHECKODD (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DK (0x100 << OP_ALG_AAI_SHIFT)
+
+/* randomizer AAI set */
+#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT)
+
+/* RNG4 AAI set */
+#define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
+
+/* hmac/smac AAI set */
+#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_SMAC (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC_PRECOMP (0x04 << OP_ALG_AAI_SHIFT)
+
+/* CRC AAI set */
+#define OP_ALG_AAI_802 (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_3385 (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CUST_POLY (0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DIS (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOS (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOC (0x40 << OP_ALG_AAI_SHIFT)
+
+/* Kasumi/SNOW AAI set */
+#define OP_ALG_AAI_F8 (0xc0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_F9 (0xc8 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
+
+#define OP_ALG_AS_SHIFT 2
+#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INIT (1 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_FINALIZE (2 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INITFINAL (3 << OP_ALG_AS_SHIFT)
+
+#define OP_ALG_ICV_SHIFT 1
+#define OP_ALG_ICV_MASK (1 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_ICV_OFF (0 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_ICV_ON (1 << OP_ALG_ICV_SHIFT)
+
+#define OP_ALG_DIR_SHIFT 0
+#define OP_ALG_DIR_MASK 1
+#define OP_ALG_DECRYPT 0
+#define OP_ALG_ENCRYPT 1
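+
+/*
+ * Illustrative sketch: an algorithm OPERATION command word selecting class 1
+ * AES-CBC encryption in a single init/finalize pass. Helper name is
+ * hypothetical.
+ */
+static inline u32 example_aes_cbc_encrypt_op(void)
+{
+	return CMD_OPERATION | OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES |
+	       OP_ALG_AAI_CBC | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT;
+}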
+
+/* PKHA algorithm type set */
+#define OP_ALG_PK 0x00800000
+#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */
+
+/* PKHA mode clear memory functions */
+#define OP_ALG_PKMODE_A_RAM 0x80000
+#define OP_ALG_PKMODE_B_RAM 0x40000
+#define OP_ALG_PKMODE_E_RAM 0x20000
+#define OP_ALG_PKMODE_N_RAM 0x10000
+#define OP_ALG_PKMODE_CLEARMEM 0x00001
+
+/* PKHA mode modular-arithmetic functions */
+#define OP_ALG_PKMODE_MOD_IN_MONTY 0x80000
+#define OP_ALG_PKMODE_MOD_OUT_MONTY 0x40000
+#define OP_ALG_PKMODE_MOD_F2M 0x20000
+#define OP_ALG_PKMODE_MOD_R2_IN 0x10000
+#define OP_ALG_PKMODE_PRJECTV 0x00800
+#define OP_ALG_PKMODE_TIME_EQ 0x400
+#define OP_ALG_PKMODE_OUT_B 0x000
+#define OP_ALG_PKMODE_OUT_A 0x100
+#define OP_ALG_PKMODE_MOD_ADD 0x002
+#define OP_ALG_PKMODE_MOD_SUB_AB 0x003
+#define OP_ALG_PKMODE_MOD_SUB_BA 0x004
+#define OP_ALG_PKMODE_MOD_MULT 0x005
+#define OP_ALG_PKMODE_MOD_EXPO 0x006
+#define OP_ALG_PKMODE_MOD_REDUCT 0x007
+#define OP_ALG_PKMODE_MOD_INV 0x008
+#define OP_ALG_PKMODE_MOD_ECC_ADD 0x009
+#define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a
+#define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b
+#define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c
+#define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d
+#define OP_ALG_PKMODE_MOD_GCD 0x00e
+#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
+
+/* PKHA mode copy-memory functions */
+#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
+#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_SHIFT 10
+#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_SHIFT 8
+#define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_SHIFT 6
+#define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+#define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_CPYMEM_N_SZ 0x80
+#define OP_ALG_PKMODE_CPYMEM_SRC_SZ 0x81
+
+/*
+ * SEQ_IN_PTR Command Constructs
+ */
+
+/* Release Buffers */
+#define SQIN_RBS 0x04000000
+
+/* Sequence pointer is really a descriptor */
+#define SQIN_INL 0x02000000
+
+/* Sequence pointer is a scatter-gather table */
+#define SQIN_SGF 0x01000000
+
+/* Appends to a previous pointer */
+#define SQIN_PRE 0x00800000
+
+/* Use extended length following pointer */
+#define SQIN_EXT 0x00400000
+
+/* Restore sequence with pointer/length */
+#define SQIN_RTO 0x00200000
+
+/* Replace job descriptor */
+#define SQIN_RJD 0x00100000
+
+#define SQIN_LEN_SHIFT 0
+#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT)
+
+/*
+ * SEQ_OUT_PTR Command Constructs
+ */
+
+/* Sequence pointer is a scatter-gather table */
+#define SQOUT_SGF 0x01000000
+
+/* Appends to a previous pointer */
+#define SQOUT_PRE SQIN_PRE
+
+/* Restore sequence with pointer/length */
+#define SQOUT_RTO SQIN_RTO
+
+/* Use extended length following pointer */
+#define SQOUT_EXT 0x00400000
+
+#define SQOUT_LEN_SHIFT 0
+#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT)
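+
+/*
+ * Illustrative sketch: a SEQ_IN_PTR command word declaring the input
+ * sequence as a scatter-gather table of 'len' bytes; the table pointer
+ * follows the command word. Helper name is hypothetical.
+ */
+static inline u32 example_seq_in_ptr_cmd(unsigned int len)
+{
+	return CMD_SEQ_IN_PTR | SQIN_SGF | (len & SQIN_LEN_MASK);
+}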
+
+
+/*
+ * SIGNATURE Command Constructs
+ */
+
+/* TYPE field is all that's relevant */
+#define SIGN_TYPE_SHIFT 16
+#define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT)
+
+#define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT)
+
+/*
+ * MOVE Command Constructs
+ */
+
+#define MOVE_AUX_SHIFT 25
+#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT)
+
+#define MOVE_WAITCOMP_SHIFT 24
+#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT)
+#define MOVE_WAITCOMP (1 << MOVE_WAITCOMP_SHIFT)
+
+#define MOVE_SRC_SHIFT 20
+#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
+
+#define MOVE_DEST_SHIFT 16
+#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_INFIFO_NOINFO (0x0a << MOVE_DEST_SHIFT)
+#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
+
+#define MOVE_OFFSET_SHIFT 8
+#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT)
+
+#define MOVE_LEN_SHIFT 0
+#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT)
+
+#define MOVELEN_MRSEL_SHIFT 0
+#define MOVELEN_MRSEL_MASK (0x3 << MOVELEN_MRSEL_SHIFT)
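+
+/*
+ * Illustrative sketch: a MOVE command word copying 'len' bytes from MATH
+ * register 0 into the descriptor buffer at byte offset 'off', waiting for
+ * the transfer to complete. Helper name is hypothetical.
+ */
+static inline u32 example_move_math0_cmd(unsigned int off, unsigned int len)
+{
+	return CMD_MOVE | MOVE_WAITCOMP | MOVE_SRC_MATH0 | MOVE_DEST_DESCBUF |
+	       ((off << MOVE_OFFSET_SHIFT) & MOVE_OFFSET_MASK) |
+	       (len & MOVE_LEN_MASK);
+}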
+
+/*
+ * MATH Command Constructs
+ */
+
+#define MATH_IFB_SHIFT 26
+#define MATH_IFB_MASK (1 << MATH_IFB_SHIFT)
+#define MATH_IFB (1 << MATH_IFB_SHIFT)
+
+#define MATH_NFU_SHIFT 25
+#define MATH_NFU_MASK (1 << MATH_NFU_SHIFT)
+#define MATH_NFU (1 << MATH_NFU_SHIFT)
+
+#define MATH_STL_SHIFT 24
+#define MATH_STL_MASK (1 << MATH_STL_SHIFT)
+#define MATH_STL (1 << MATH_STL_SHIFT)
+
+/* Function selectors */
+#define MATH_FUN_SHIFT 20
+#define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT)
+#define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT)
+#define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT)
+#define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT)
+#define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT)
+#define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT)
+#define MATH_FUN_LSHIFT (0x07 << MATH_FUN_SHIFT)
+#define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT)
+#define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT)
+#define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT)
+
+/* Source 0 selectors */
+#define MATH_SRC0_SHIFT 16
+#define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_DPOVRD (0x07 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT)
+
+/* Source 1 selectors */
+#define MATH_SRC1_SHIFT 12
+#define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
+#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
+
+/* Destination selectors */
+#define MATH_DEST_SHIFT 8
+#define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT)
+#define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT)
+
+/* Length selectors */
+#define MATH_LEN_SHIFT 0
+#define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT)
+#define MATH_LEN_1BYTE 0x01
+#define MATH_LEN_2BYTE 0x02
+#define MATH_LEN_4BYTE 0x04
+#define MATH_LEN_8BYTE 0x08
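+
+/*
+ * Illustrative sketch: a MATH command word computing
+ * VARSEQINLEN = SEQINLEN - <imm> over 4 bytes; the 32-bit immediate
+ * follows the command word. Helper name is hypothetical.
+ */
+static inline u32 example_math_sub_cmd(void)
+{
+	return CMD_MATH | MATH_FUN_SUB | MATH_SRC0_SEQINLEN | MATH_SRC1_IMM |
+	       MATH_DEST_VARSEQINLEN | MATH_LEN_4BYTE;
+}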
+
+/*
+ * JUMP Command Constructs
+ */
+
+#define JUMP_CLASS_SHIFT 25
+#define JUMP_CLASS_MASK (3 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_NONE 0
+#define JUMP_CLASS_CLASS1 (1 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_CLASS2 (2 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_BOTH (3 << JUMP_CLASS_SHIFT)
+
+#define JUMP_JSL_SHIFT 24
+#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL (1 << JUMP_JSL_SHIFT)
+
+#define JUMP_TYPE_SHIFT 22
+#define JUMP_TYPE_MASK (0x03 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_NONLOCAL (0x01 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT (0x02 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT_USER (0x03 << JUMP_TYPE_SHIFT)
+
+#define JUMP_TEST_SHIFT 16
+#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT)
+
+/* Condition codes. JSL bit is factored in */
+#define JUMP_COND_SHIFT 8
+#define JUMP_COND_MASK (0x100ff << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_0 (0x80 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_GCD_1 (0x40 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_PRIME (0x20 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_N (0x08 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_Z (0x04 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_C (0x02 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_NV (0x01 << JUMP_COND_SHIFT)
+
+#define JUMP_COND_JRP ((0x80 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SHRD ((0x40 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SELF ((0x20 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_CALM ((0x10 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIP ((0x08 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIFP ((0x04 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NOP ((0x02 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NCP ((0x01 << JUMP_COND_SHIFT) | JUMP_JSL)
+
+#define JUMP_OFFSET_SHIFT 0
+#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT)
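+
+/*
+ * Illustrative encoding (not from the original source): a local JUMP that
+ * skips the next n descriptor words when the SHRD flag is set could be
+ * composed as
+ *
+ *	CMD_JUMP | JUMP_TYPE_LOCAL | JUMP_TEST_ALL | JUMP_COND_SHRD | (n + 1)
+ *
+ * (the offset is counted from the JUMP command itself). JUMP_COND_SHRD,
+ * like the other JSL conditions above, already carries the JUMP_JSL bit.
+ */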
+
+/*
+ * NFIFO ENTRY Data Constructs
+ */
+#define NFIFOENTRY_DEST_SHIFT 30
+#define NFIFOENTRY_DEST_MASK (3 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_DECO (0 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS1 (1 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS2 (2 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_BOTH (3 << NFIFOENTRY_DEST_SHIFT)
+
+#define NFIFOENTRY_LC2_SHIFT 29
+#define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2 (1 << NFIFOENTRY_LC2_SHIFT)
+
+#define NFIFOENTRY_LC1_SHIFT 28
+#define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1 (1 << NFIFOENTRY_LC1_SHIFT)
+
+#define NFIFOENTRY_FC2_SHIFT 27
+#define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2 (1 << NFIFOENTRY_FC2_SHIFT)
+
+#define NFIFOENTRY_FC1_SHIFT 26
+#define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1 (1 << NFIFOENTRY_FC1_SHIFT)
+
+#define NFIFOENTRY_STYPE_SHIFT 24
+#define NFIFOENTRY_STYPE_MASK (3 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_DFIFO (0 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_OFIFO (1 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_PAD (2 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_SNOOP (3 << NFIFOENTRY_STYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_SHIFT 20
+#define NFIFOENTRY_DTYPE_MASK (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT)
+
+
+#define NFIFOENTRY_BND_SHIFT 19
+#define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND (1 << NFIFOENTRY_BND_SHIFT)
+
+#define NFIFOENTRY_PTYPE_SHIFT 16
+#define NFIFOENTRY_PTYPE_MASK (0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_OC_SHIFT 15
+#define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC (1 << NFIFOENTRY_OC_SHIFT)
+
+#define NFIFOENTRY_AST_SHIFT 14
+#define NFIFOENTRY_AST_MASK (1 << NFIFOENTRY_AST_SHIFT)
+#define NFIFOENTRY_AST (1 << NFIFOENTRY_AST_SHIFT)
+
+#define NFIFOENTRY_BM_SHIFT 11
+#define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT)
+#define NFIFOENTRY_BM (1 << NFIFOENTRY_BM_SHIFT)
+
+#define NFIFOENTRY_PS_SHIFT 10
+#define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_PS (1 << NFIFOENTRY_PS_SHIFT)
+
+#define NFIFOENTRY_DLEN_SHIFT 0
+#define NFIFOENTRY_DLEN_MASK (0xFFF << NFIFOENTRY_DLEN_SHIFT)
+
+#define NFIFOENTRY_PLEN_SHIFT 0
+#define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT)
+
+/* Append Load Immediate Command */
+#define FD_CMD_APPEND_LOAD_IMMEDIATE 0x80000000
+
+/* Set SEQ LIODN equal to the Non-SEQ LIODN for the job */
+#define FD_CMD_SET_SEQ_LIODN_EQUAL_NONSEQ_LIODN 0x40000000
+
+/* Frame Descriptor Command for Replacement Job Descriptor */
+#define FD_CMD_REPLACE_JOB_DESC 0x20000000
+
+#endif /* DESC_H */
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
new file mode 100644
index 000000000..9f79fd7bd
--- /dev/null
+++ b/drivers/crypto/caam/desc_constr.h
@@ -0,0 +1,390 @@
+/*
+ * caam descriptor construction helper functions
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ */
+
+#include "desc.h"
+
+#define IMMEDIATE (1 << 23)
+#define CAAM_CMD_SZ sizeof(u32)
+#define CAAM_PTR_SZ sizeof(dma_addr_t)
+#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
+
+#ifdef DEBUG
+#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
+ &__func__[sizeof("append")]); } while (0)
+#else
+#define PRINT_POS
+#endif
+
+#define SET_OK_NO_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
+ LDST_SRCDST_WORD_DECOCTRL | \
+ (LDOFF_CHG_SHARE_OK_NO_PROP << \
+ LDST_OFFSET_SHIFT))
+#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
+ LDST_SRCDST_WORD_DECOCTRL | \
+ (LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
+#define ENABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
+ LDST_SRCDST_WORD_DECOCTRL | \
+ (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
+
+static inline int desc_len(u32 *desc)
+{
+ return *desc & HDR_DESCLEN_MASK;
+}
+
+static inline int desc_bytes(void *desc)
+{
+ return desc_len(desc) * CAAM_CMD_SZ;
+}
+
+static inline u32 *desc_end(u32 *desc)
+{
+ return desc + desc_len(desc);
+}
+
+static inline void *sh_desc_pdb(u32 *desc)
+{
+ return desc + 1;
+}
+
+static inline void init_desc(u32 *desc, u32 options)
+{
+ *desc = (options | HDR_ONE) + 1;
+}
+
+static inline void init_sh_desc(u32 *desc, u32 options)
+{
+ PRINT_POS;
+ init_desc(desc, CMD_SHARED_DESC_HDR | options);
+}
+
+static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+{
+ u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
+
+ init_sh_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT) + pdb_len) |
+ options);
+}
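+
+/*
+ * Illustrative usage (not from the original source): a protocol shared
+ * descriptor typically reserves room for its PDB right behind the header
+ * and then fills it in through sh_desc_pdb(); struct and option names are
+ * the pdb.h ones, everything else is a placeholder:
+ *
+ *	struct ipsec_encap_pdb *pdb;
+ *
+ *	init_sh_desc_pdb(desc, HDR_SHARE_SERIAL, sizeof(*pdb));
+ *	pdb = sh_desc_pdb(desc);
+ *	pdb->options = PDBOPTS_ESP_TUNNEL | PDBOPTS_ESP_INCIPHDR;
+ */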
+
+static inline void init_job_desc(u32 *desc, u32 options)
+{
+ init_desc(desc, CMD_DESC_HDR | options);
+}
+
+static inline void append_ptr(u32 *desc, dma_addr_t ptr)
+{
+ dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
+
+ *offset = ptr;
+
+ (*desc) += CAAM_PTR_SZ / CAAM_CMD_SZ;
+}
+
+static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
+ u32 options)
+{
+ PRINT_POS;
+ init_job_desc(desc, HDR_SHARED | options |
+ (len << HDR_START_IDX_SHIFT));
+ append_ptr(desc, ptr);
+}
+
+static inline void append_data(u32 *desc, void *data, int len)
+{
+ u32 *offset = desc_end(desc);
+
+ if (len) /* avoid sparse warning: memcpy with byte count of 0 */
+ memcpy(offset, data, len);
+
+ (*desc) += (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
+}
+
+static inline void append_cmd(u32 *desc, u32 command)
+{
+ u32 *cmd = desc_end(desc);
+
+ *cmd = command;
+
+ (*desc)++;
+}
+
+#define append_u32 append_cmd
+
+static inline void append_u64(u32 *desc, u64 data)
+{
+ u32 *offset = desc_end(desc);
+
+ *offset = upper_32_bits(data);
+ *(++offset) = lower_32_bits(data);
+
+ (*desc) += 2;
+}
+
+/* Write command without affecting header, and return pointer to next word */
+static inline u32 *write_cmd(u32 *desc, u32 command)
+{
+ *desc = command;
+
+ return desc + 1;
+}
+
+static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
+ u32 command)
+{
+ append_cmd(desc, command | len);
+ append_ptr(desc, ptr);
+}
+
+/* Write length after pointer, rather than inside command */
+static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
+ unsigned int len, u32 command)
+{
+ append_cmd(desc, command);
+ if (!(command & (SQIN_RTO | SQIN_PRE)))
+ append_ptr(desc, ptr);
+ append_cmd(desc, len);
+}
+
+static inline void append_cmd_data(u32 *desc, void *data, int len,
+ u32 command)
+{
+ append_cmd(desc, command | IMMEDIATE | len);
+ append_data(desc, data, len);
+}
+
+#define APPEND_CMD_RET(cmd, op) \
+static inline u32 *append_##cmd(u32 *desc, u32 options) \
+{ \
+ u32 *cmd = desc_end(desc); \
+ PRINT_POS; \
+ append_cmd(desc, CMD_##op | options); \
+ return cmd; \
+}
+APPEND_CMD_RET(jump, JUMP)
+APPEND_CMD_RET(move, MOVE)
+
+static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
+{
+ *jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc));
+}
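+
+/*
+ * Illustrative usage (not from the original source): append_jump() returns
+ * the location of the JUMP command so that its offset can be patched once
+ * the target is known, e.g. to skip key loading when the shared descriptor
+ * state is already valid (key_dma/keylen are placeholders):
+ *
+ *	u32 *key_jump_cmd;
+ *
+ *	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ *				   JUMP_COND_SHRD);
+ *	append_key(desc, key_dma, keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ *	set_jump_tgt_here(desc, key_jump_cmd);
+ */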
+
+static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
+{
+ *move_cmd &= ~MOVE_OFFSET_MASK;
+ *move_cmd = *move_cmd | ((desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) &
+ MOVE_OFFSET_MASK);
+}
+
+#define APPEND_CMD(cmd, op) \
+static inline void append_##cmd(u32 *desc, u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd(desc, CMD_##op | options); \
+}
+APPEND_CMD(operation, OPERATION)
+
+#define APPEND_CMD_LEN(cmd, op) \
+static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd(desc, CMD_##op | len | options); \
+}
+
+APPEND_CMD_LEN(seq_load, SEQ_LOAD)
+APPEND_CMD_LEN(seq_store, SEQ_STORE)
+APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
+APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
+
+#define APPEND_CMD_PTR(cmd, op) \
+static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
+ u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
+}
+APPEND_CMD_PTR(key, KEY)
+APPEND_CMD_PTR(load, LOAD)
+APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
+APPEND_CMD_PTR(fifo_store, FIFO_STORE)
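+
+/*
+ * Illustrative usage (not from the original source): a caller builds a job
+ * descriptor by initializing the header and appending commands in order,
+ * e.g. an AES-CBC encrypt job; key_dma/iv_dma/src_dma/dst_dma and the
+ * lengths are placeholders:
+ *
+ *	u32 desc[MAX_CAAM_DESCSIZE];
+ *
+ *	init_job_desc(desc, 0);
+ *	append_key(desc, key_dma, keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ *	append_load(desc, iv_dma, ivsize, LDST_CLASS_1_CCB |
+ *		    LDST_SRCDST_BYTE_CONTEXT);
+ *	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES |
+ *			 OP_ALG_AAI_CBC | OP_ALG_AS_INITFINAL |
+ *			 OP_ALG_ENCRYPT);
+ *	append_fifo_load(desc, src_dma, len, LDST_CLASS_1_CCB |
+ *			 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+ *	append_fifo_store(desc, dst_dma, len, FIFOST_TYPE_MESSAGE_DATA);
+ */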
+
+static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
+ u32 options)
+{
+ u32 cmd_src;
+
+ cmd_src = options & LDST_SRCDST_MASK;
+
+ append_cmd(desc, CMD_STORE | options | len);
+
+ /* The following options do not require pointer */
+ if (!(cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED ||
+ cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB ||
+ cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB_WE ||
+ cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED_WE))
+ append_ptr(desc, ptr);
+}
+
+#define APPEND_SEQ_PTR_INTLEN(cmd, op) \
+static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
+ unsigned int len, \
+ u32 options) \
+{ \
+ PRINT_POS; \
+ if (options & (SQIN_RTO | SQIN_PRE)) \
+ append_cmd(desc, CMD_SEQ_##op##_PTR | len | options); \
+ else \
+ append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \
+}
+APPEND_SEQ_PTR_INTLEN(in, IN)
+APPEND_SEQ_PTR_INTLEN(out, OUT)
+
+#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
+static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+ unsigned int len, u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd_data(desc, data, len, CMD_##op | options); \
+}
+APPEND_CMD_PTR_TO_IMM(load, LOAD);
+APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
+
+#define APPEND_CMD_PTR_EXTLEN(cmd, op) \
+static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
+ unsigned int len, u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd_ptr_extlen(desc, ptr, len, CMD_##op | SQIN_EXT | options); \
+}
+APPEND_CMD_PTR_EXTLEN(seq_in_ptr, SEQ_IN_PTR)
+APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
+
+/*
+ * Determine whether to store length internally or externally depending on
+ * the size of its type
+ */
+#define APPEND_CMD_PTR_LEN(cmd, op, type) \
+static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
+ type len, u32 options) \
+{ \
+ PRINT_POS; \
+ if (sizeof(type) > sizeof(u16)) \
+ append_##cmd##_extlen(desc, ptr, len, options); \
+ else \
+ append_##cmd##_intlen(desc, ptr, len, options); \
+}
+APPEND_CMD_PTR_LEN(seq_in_ptr, SEQ_IN_PTR, u32)
+APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
+
+/*
+ * 2nd variant for commands whose specified immediate length differs
+ * from length of immediate data provided, e.g., split keys
+ */
+#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
+static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+ unsigned int data_len, \
+ unsigned int len, u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd(desc, CMD_##op | IMMEDIATE | len | options); \
+ append_data(desc, data, data_len); \
+}
+APPEND_CMD_PTR_TO_IMM2(key, KEY);
+
+#define APPEND_CMD_RAW_IMM(cmd, op, type) \
+static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
+ u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
+ append_cmd(desc, immediate); \
+}
+APPEND_CMD_RAW_IMM(load, LOAD, u32);
+
+/*
+ * Append math command. Only the last part of destination and source need to
+ * be specified
+ */
+#define APPEND_MATH(op, desc, dest, src_0, src_1, len) \
+append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
+ MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32)len);
+
+#define append_math_add(desc, dest, src0, src1, len) \
+ APPEND_MATH(ADD, desc, dest, src0, src1, len)
+#define append_math_sub(desc, dest, src0, src1, len) \
+ APPEND_MATH(SUB, desc, dest, src0, src1, len)
+#define append_math_add_c(desc, dest, src0, src1, len) \
+ APPEND_MATH(ADDC, desc, dest, src0, src1, len)
+#define append_math_sub_b(desc, dest, src0, src1, len) \
+ APPEND_MATH(SUBB, desc, dest, src0, src1, len)
+#define append_math_and(desc, dest, src0, src1, len) \
+ APPEND_MATH(AND, desc, dest, src0, src1, len)
+#define append_math_or(desc, dest, src0, src1, len) \
+ APPEND_MATH(OR, desc, dest, src0, src1, len)
+#define append_math_xor(desc, dest, src0, src1, len) \
+ APPEND_MATH(XOR, desc, dest, src0, src1, len)
+#define append_math_lshift(desc, dest, src0, src1, len) \
+ APPEND_MATH(LSHIFT, desc, dest, src0, src1, len)
+#define append_math_rshift(desc, dest, src0, src1, len) \
+ APPEND_MATH(RSHIFT, desc, dest, src0, src1, len)
+#define append_math_ldshift(desc, dest, src0, src1, len) \
+ APPEND_MATH(SHLD, desc, dest, src0, src1, len)
+
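+/*
+ * Illustrative usage (not from the original source), e.g. computing
+ * VARSEQOUTLEN = SEQINLEN - REG0 on 4-byte operands:
+ *
+ *	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ */
+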
+/* Exactly one source is IMM. Data is passed in as u32 value */
+#define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \
+do { \
+ APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \
+ append_cmd(desc, data); \
+} while (0)
+
+#define append_math_add_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data)
+#define append_math_sub_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data)
+#define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data)
+#define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data)
+#define append_math_and_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data)
+#define append_math_or_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data)
+#define append_math_xor_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data)
+#define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data)
+#define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data)
+
+/* Exactly one source is IMM. Data is passed in as u64 value */
+#define APPEND_MATH_IMM_u64(op, desc, dest, src_0, src_1, data) \
+do { \
+ u32 upper = (data >> 16) >> 16; \
+ APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ * 2 | \
+ (upper ? 0 : MATH_IFB)); \
+ if (upper) \
+ append_u64(desc, data); \
+ else \
+ append_u32(desc, data); \
+} while (0)
+
+#define append_math_add_imm_u64(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u64(ADD, desc, dest, src0, src1, data)
+#define append_math_sub_imm_u64(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u64(SUB, desc, dest, src0, src1, data)
+#define append_math_add_c_imm_u64(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u64(ADDC, desc, dest, src0, src1, data)
+#define append_math_sub_b_imm_u64(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u64(SUBB, desc, dest, src0, src1, data)
+#define append_math_and_imm_u64(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u64(AND, desc, dest, src0, src1, data)
+#define append_math_or_imm_u64(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u64(OR, desc, dest, src0, src1, data)
+#define append_math_xor_imm_u64(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u64(XOR, desc, dest, src0, src1, data)
+#define append_math_lshift_imm_u64(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
+#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
new file mode 100644
index 000000000..33e41ea83
--- /dev/null
+++ b/drivers/crypto/caam/error.c
@@ -0,0 +1,253 @@
+/*
+ * CAAM Error Reporting
+ *
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ */
+
+#include "compat.h"
+#include "regs.h"
+#include "intern.h"
+#include "desc.h"
+#include "jr.h"
+#include "error.h"
+
+static const struct {
+ u8 value;
+ const char *error_text;
+} desc_error_list[] = {
+ { 0x00, "No error." },
+ { 0x01, "SGT Length Error. The descriptor is trying to read more data than is contained in the SGT table." },
+ { 0x02, "SGT Null Entry Error." },
+ { 0x03, "Job Ring Control Error. There is a bad value in the Job Ring Control register." },
+ { 0x04, "Invalid Descriptor Command. The Descriptor Command field is invalid." },
+ { 0x05, "Reserved." },
+ { 0x06, "Invalid KEY Command" },
+ { 0x07, "Invalid LOAD Command" },
+ { 0x08, "Invalid STORE Command" },
+ { 0x09, "Invalid OPERATION Command" },
+ { 0x0A, "Invalid FIFO LOAD Command" },
+ { 0x0B, "Invalid FIFO STORE Command" },
+ { 0x0C, "Invalid MOVE/MOVE_LEN Command" },
+ { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is invalid because the target is not a Job Header Command, or the jump is from a Trusted Descriptor to a Job Descriptor, or because the target Descriptor contains a Shared Descriptor." },
+ { 0x0E, "Invalid MATH Command" },
+ { 0x0F, "Invalid SIGNATURE Command" },
+ { 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO LOAD, or SEQ FIFO STORE decremented the input or output sequence length below 0. This error may result if a built-in PROTOCOL Command has encountered a malformed PDU." },
+ { 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
+ { 0x12, "Shared Descriptor Header Error" },
+ { 0x13, "Header Error. Invalid length or parity, or certain other problems." },
+ { 0x14, "Burster Error. Burster has gotten to an illegal state" },
+ { 0x15, "Context Register Length Error. The descriptor is trying to read or write past the end of the Context Register. A SEQ LOAD or SEQ STORE with the VLF bit set was executed with too large a length in the variable length register (VSOL for SEQ STORE or VSIL for SEQ LOAD)." },
+ { 0x16, "DMA Error" },
+ { 0x17, "Reserved." },
+ { 0x1A, "Job failed due to JR reset" },
+ { 0x1B, "Job failed due to Fail Mode" },
+ { 0x1C, "DECO Watchdog timer timeout error" },
+ { 0x1D, "DECO tried to copy a key from another DECO but the other DECO's Key Registers were locked" },
+ { 0x1E, "DECO attempted to copy data from a DECO that had an unmasked Descriptor error" },
+ { 0x1F, "LIODN error. DECO was trying to share from itself or from another DECO but the two Non-SEQ LIODN values didn't match or the 'shared from' DECO's Descriptor required that the SEQ LIODNs be the same and they aren't." },
+ { 0x20, "DECO has completed a reset initiated via the DRR register" },
+ { 0x21, "Nonce error. When using EKT (CCM) key encryption option in the FIFO STORE Command, the Nonce counter reached its maximum value and this encryption mode can no longer be used." },
+ { 0x22, "Meta data is too large (> 511 bytes) for TLS decap (input frame; block ciphers) and IPsec decap (output frame, when doing the next header byte update) and DCRC (output frame)." },
+ { 0x23, "Read Input Frame error" },
+ { 0x24, "JDKEK, TDKEK or TDSK not loaded error" },
+ { 0x80, "DNR (do not run) error" },
+ { 0x81, "undefined protocol command" },
+ { 0x82, "invalid setting in PDB" },
+ { 0x83, "Anti-replay LATE error" },
+ { 0x84, "Anti-replay REPLAY error" },
+ { 0x85, "Sequence number overflow" },
+ { 0x86, "Sigver invalid signature" },
+ { 0x87, "DSA Sign Illegal test descriptor" },
+ { 0x88, "Protocol Format Error - A protocol has seen an error in the format of data received. When running RSA, this means that formatting with random padding was used, and did not follow the form: 0x00, 0x02, 8-to-N bytes of non-zero pad, 0x00, F data." },
+ { 0x89, "Protocol Size Error - A protocol has seen an error in size. When running RSA, pdb size N < (size of F) when no formatting is used; or pdb size N < (F + 11) when formatting is used." },
+ { 0xC1, "Blob Command error: Undefined mode" },
+ { 0xC2, "Blob Command error: Secure Memory Blob mode error" },
+ { 0xC4, "Blob Command error: Black Blob key or input size error" },
+ { 0xC5, "Blob Command error: Invalid key destination" },
+ { 0xC8, "Blob Command error: Trusted/Secure mode error" },
+ { 0xF0, "IPsec TTL or hop limit field either came in as 0, or was decremented to 0" },
+ { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
+};
+
+static const char * const cha_id_list[] = {
+ "",
+ "AES",
+ "DES",
+ "ARC4",
+ "MDHA",
+ "RNG",
+ "SNOW f8",
+ "Kasumi f8/9",
+ "PKHA",
+ "CRCA",
+ "SNOW f9",
+ "ZUCE",
+ "ZUCA",
+};
+
+static const char * const err_id_list[] = {
+ "No error.",
+ "Mode error.",
+ "Data size error.",
+ "Key size error.",
+ "PKHA A memory size error.",
+ "PKHA B memory size error.",
+ "Data arrived out of sequence error.",
+ "PKHA divide-by-zero error.",
+ "PKHA modulus even error.",
+ "DES key parity error.",
+ "ICV check failed.",
+ "Hardware error.",
+ "Unsupported CCM AAD size.",
+ "Class 1 CHA is not reset",
+ "Invalid CHA combination was selected",
+ "Invalid CHA selected.",
+};
+
+static const char * const rng_err_id_list[] = {
+ "",
+ "",
+ "",
+ "Instantiate",
+ "Not instantiated",
+ "Test instantiate",
+ "Prediction resistance",
+ "Prediction resistance and test request",
+ "Uninstantiate",
+ "Secure key generation",
+};
+
+static void report_ccb_status(struct device *jrdev, const u32 status,
+ const char *error)
+{
+ u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
+ JRSTA_CCBERR_CHAID_SHIFT;
+ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+ u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
+ JRSTA_DECOERR_INDEX_SHIFT;
+ char *idx_str;
+ const char *cha_str = "unidentified cha_id value 0x";
+ char cha_err_code[3] = { 0 };
+ const char *err_str = "unidentified err_id value 0x";
+ char err_err_code[3] = { 0 };
+
+ if (status & JRSTA_DECOERR_JUMP)
+ idx_str = "jump tgt desc idx";
+ else
+ idx_str = "desc idx";
+
+ if (cha_id < ARRAY_SIZE(cha_id_list))
+ cha_str = cha_id_list[cha_id];
+ else
+ snprintf(cha_err_code, sizeof(cha_err_code), "%02x", cha_id);
+
+ if ((cha_id << JRSTA_CCBERR_CHAID_SHIFT) == JRSTA_CCBERR_CHAID_RNG &&
+ err_id < ARRAY_SIZE(rng_err_id_list) &&
+ strlen(rng_err_id_list[err_id])) {
+ /* RNG-only error */
+ err_str = rng_err_id_list[err_id];
+ } else if (err_id < ARRAY_SIZE(err_id_list))
+ err_str = err_id_list[err_id];
+ else
+ snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+
+ /*
+ * CCB ICV check failures are part of normal operation life;
+ * we leave the upper layers to do what they want with them.
+ */
+ if (err_id != JRSTA_CCBERR_ERRID_ICVCHK)
+ dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",
+ status, error, idx_str, idx,
+ cha_str, cha_err_code,
+ err_str, err_err_code);
+}
+
+static void report_jump_status(struct device *jrdev, const u32 status,
+ const char *error)
+{
+ dev_err(jrdev, "%08x: %s: %s() not implemented\n",
+ status, error, __func__);
+}
+
+static void report_deco_status(struct device *jrdev, const u32 status,
+ const char *error)
+{
+ u8 err_id = status & JRSTA_DECOERR_ERROR_MASK;
+ u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
+ JRSTA_DECOERR_INDEX_SHIFT;
+ char *idx_str;
+ const char *err_str = "unidentified error value 0x";
+ char err_err_code[3] = { 0 };
+ int i;
+
+ if (status & JRSTA_DECOERR_JUMP)
+ idx_str = "jump tgt desc idx";
+ else
+ idx_str = "desc idx";
+
+ for (i = 0; i < ARRAY_SIZE(desc_error_list); i++)
+ if (desc_error_list[i].value == err_id)
+ break;
+
+ if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text)
+ err_str = desc_error_list[i].error_text;
+ else
+ snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+
+ dev_err(jrdev, "%08x: %s: %s %d: %s%s\n",
+ status, error, idx_str, idx, err_str, err_err_code);
+}
+
+static void report_jr_status(struct device *jrdev, const u32 status,
+ const char *error)
+{
+ dev_err(jrdev, "%08x: %s: %s() not implemented\n",
+ status, error, __func__);
+}
+
+static void report_cond_code_status(struct device *jrdev, const u32 status,
+ const char *error)
+{
+ dev_err(jrdev, "%08x: %s: %s() not implemented\n",
+ status, error, __func__);
+}
+
+void caam_jr_strstatus(struct device *jrdev, u32 status)
+{
+ static const struct stat_src {
+ void (*report_ssed)(struct device *jrdev, const u32 status,
+ const char *error);
+ const char *error;
+ } status_src[16] = {
+ { NULL, "No error" },
+ { NULL, NULL },
+ { report_ccb_status, "CCB" },
+ { report_jump_status, "Jump" },
+ { report_deco_status, "DECO" },
+ { NULL, "Queue Manager Interface" },
+ { report_jr_status, "Job Ring" },
+ { report_cond_code_status, "Condition Code" },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
+ };
+ u32 ssrc = status >> JRSTA_SSRC_SHIFT;
+ const char *error = status_src[ssrc].error;
+
+ /*
+ * If there is an error handling function, call it to report the error.
+ * Otherwise print the error source name.
+ */
+ if (status_src[ssrc].report_ssed)
+ status_src[ssrc].report_ssed(jrdev, status, error);
+ else if (error)
+ dev_err(jrdev, "%d: %s\n", ssrc, error);
+ else
+ dev_err(jrdev, "%d: unknown error source\n", ssrc);
+}
+EXPORT_SYMBOL(caam_jr_strstatus);
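+
+/*
+ * Illustrative usage (not from the original source; my_done/context are
+ * placeholders): a job completion callback typically translates a nonzero
+ * CAAM status into a log message before acting on it:
+ *
+ *	static void my_done(struct device *jrdev, u32 *desc, u32 status,
+ *			    void *context)
+ *	{
+ *		if (status)
+ *			caam_jr_strstatus(jrdev, status);
+ *		...
+ *	}
+ */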
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
new file mode 100644
index 000000000..b6350b0d9
--- /dev/null
+++ b/drivers/crypto/caam/error.h
@@ -0,0 +1,11 @@
+/*
+ * CAAM Error Reporting code header
+ *
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef CAAM_ERROR_H
+#define CAAM_ERROR_H
+#define CAAM_ERROR_STR_MAX 302
+void caam_jr_strstatus(struct device *jrdev, u32 status);
+#endif /* CAAM_ERROR_H */
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
new file mode 100644
index 000000000..89b94cc9e
--- /dev/null
+++ b/drivers/crypto/caam/intern.h
@@ -0,0 +1,113 @@
+/*
+ * CAAM/SEC 4.x driver backend
+ * Private/internal definitions between modules
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef INTERN_H
+#define INTERN_H
+
+/* Currently comes from Kconfig param as a power of 2 (driver-required) */
+#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
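+/* e.g. CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE = 9 gives JOBR_DEPTH = 512 entries */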
+
+/* Kconfig params for interrupt coalescing if selected (else zero) */
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_INTC
+#define JOBR_INTC JRCFG_ICEN
+#define JOBR_INTC_TIME_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
+#define JOBR_INTC_COUNT_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
+#else
+#define JOBR_INTC 0
+#define JOBR_INTC_TIME_THLD 0
+#define JOBR_INTC_COUNT_THLD 0
+#endif
+
+/*
+ * Storage for tracking each in-process entry moving across a ring
+ * Each entry on an output ring needs one of these
+ */
+struct caam_jrentry_info {
+ void (*callbk)(struct device *dev, u32 *desc, u32 status, void *arg);
+ void *cbkarg; /* Argument per ring entry */
+ u32 *desc_addr_virt; /* Stored virt addr for postprocessing */
+ dma_addr_t desc_addr_dma; /* Stored bus addr for done matching */
+ u32 desc_size; /* Stored size for postprocessing, header derived */
+};
+
+/* Private sub-storage for a single JobR */
+struct caam_drv_private_jr {
+ struct list_head list_node; /* Job Ring device list */
+ struct device *dev;
+ int ridx;
+ struct caam_job_ring __iomem *rregs; /* JobR's register space */
+ struct tasklet_struct irqtask;
+ int irq; /* One per queue */
+
+ /* Number of scatterlist crypt transforms active on the JobR */
+ atomic_t tfm_count ____cacheline_aligned;
+
+ /* Job ring info */
+ int ringsize; /* Size of rings (assume input = output) */
+ struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */
+ spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
+ int inp_ring_write_index; /* Input index "tail" */
+ int head; /* entinfo (s/w ring) head index */
+ dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */
+ spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
+ int out_ring_read_index; /* Output index "tail" */
+ int tail; /* entinfo (s/w ring) tail index */
+ struct jr_outentry *outring; /* Base of output ring, DMA-safe */
+};
+
+/*
+ * Driver-private storage for a single CAAM block instance
+ */
+struct caam_drv_private {
+
+ struct device *dev;
+ struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
+ struct platform_device *pdev;
+
+ /* Physical-presence section */
+ struct caam_ctrl __iomem *ctrl; /* controller region */
+ struct caam_deco __iomem *deco; /* DECO/CCB views */
+ struct caam_assurance __iomem *assure;
+ struct caam_queue_if __iomem *qi; /* QI control region */
+ struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
+
+ /*
+ * Detected geometry block. Filled in from device tree if powerpc,
+ * or from register-based version detection code
+ */
+ u8 total_jobrs; /* Total Job Rings in device */
+ u8 qi_present; /* Nonzero if QI present in device */
+ int secvio_irq; /* Security violation interrupt number */
+ int virt_en; /* Virtualization enabled in CAAM */
+
+#define RNG4_MAX_HANDLES 2
+ /* RNG4 block */
+ u32 rng4_sh_init; /* This bitmap shows which of the State
+ Handles of the RNG4 block are initialized
+ by this driver */
+
+ /*
+ * debugfs entries for developer view into driver/device
+ * variables at runtime.
+ */
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dfs_root;
+ struct dentry *ctl; /* controller dir */
+ struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
+ struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
+ struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
+ struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
+
+ struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
+ struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
+#endif
+};
+
+void caam_jr_algapi_init(struct device *dev);
+void caam_jr_algapi_remove(struct device *dev);
+#endif /* INTERN_H */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
new file mode 100644
index 000000000..b8b5d47ac
--- /dev/null
+++ b/drivers/crypto/caam/jr.c
@@ -0,0 +1,550 @@
+/*
+ * CAAM/SEC 4.x transport/backend driver
+ * JobR backend functionality
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include "compat.h"
+#include "regs.h"
+#include "jr.h"
+#include "desc.h"
+#include "intern.h"
+
+struct jr_driver_data {
+ /* List of Physical JobR's with the Driver */
+ struct list_head jr_list;
+ spinlock_t jr_alloc_lock; /* jr_list lock */
+} ____cacheline_aligned;
+
+static struct jr_driver_data driver_data;
+
+static int caam_reset_hw_jr(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ unsigned int timeout = 100000;
+
+ /*
+ * mask interrupts since we are going to poll
+ * for reset completion status
+ */
+ setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+
+ /* initiate flush (required prior to reset) */
+ wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+ while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
+ JRINT_ERR_HALT_INPROGRESS) && --timeout)
+ cpu_relax();
+
+ if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
+ JRINT_ERR_HALT_COMPLETE || timeout == 0) {
+ dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
+ return -EIO;
+ }
+
+ /* initiate reset */
+ timeout = 100000;
+ wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+ while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
+ cpu_relax();
+
+ if (timeout == 0) {
+ dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
+ return -EIO;
+ }
+
+ /* unmask interrupts */
+ clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+
+ return 0;
+}
+
+/*
+ * Shutdown JobR independent of platform property code
+ */
+int caam_jr_shutdown(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ dma_addr_t inpbusaddr, outbusaddr;
+ int ret;
+
+ ret = caam_reset_hw_jr(dev);
+
+ tasklet_kill(&jrp->irqtask);
+
+ /* Release interrupt */
+ free_irq(jrp->irq, dev);
+
+ /* Free rings */
+ inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
+ outbusaddr = rd_reg64(&jrp->rregs->outring_base);
+ dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+ jrp->inpring, inpbusaddr);
+ dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
+ jrp->outring, outbusaddr);
+ kfree(jrp->entinfo);
+
+ return ret;
+}
+
+static int caam_jr_remove(struct platform_device *pdev)
+{
+ int ret;
+ struct device *jrdev;
+ struct caam_drv_private_jr *jrpriv;
+
+ jrdev = &pdev->dev;
+ jrpriv = dev_get_drvdata(jrdev);
+
+ /*
+	 * Return -EBUSY if the job ring is still in use (allocated to a tfm).
+ */
+ if (atomic_read(&jrpriv->tfm_count)) {
+ dev_err(jrdev, "Device is busy\n");
+ return -EBUSY;
+ }
+
+ /* Remove the node from Physical JobR list maintained by driver */
+ spin_lock(&driver_data.jr_alloc_lock);
+ list_del(&jrpriv->list_node);
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ /* Release ring */
+ ret = caam_jr_shutdown(jrdev);
+ if (ret)
+ dev_err(jrdev, "Failed to shut down job ring\n");
+ irq_dispose_mapping(jrpriv->irq);
+
+ return ret;
+}
+
+/* Main per-ring interrupt handler */
+static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
+{
+ struct device *dev = st_dev;
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ u32 irqstate;
+
+ /*
+ * Check the output ring for ready responses, kick
+ * tasklet if jobs done.
+ */
+ irqstate = rd_reg32(&jrp->rregs->jrintstatus);
+ if (!irqstate)
+ return IRQ_NONE;
+
+ /*
+	 * If we see a JobR error, there is more development work to do:
+	 * flag a bug for now, but we really need to shut down and
+	 * restart the queue (and fix the code).
+ */
+ if (irqstate & JRINT_JR_ERROR) {
+ dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
+ BUG();
+ }
+
+ /* mask valid interrupts */
+ setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+
+ /* Have valid interrupt at this point, just ACK and trigger */
+ wr_reg32(&jrp->rregs->jrintstatus, irqstate);
+
+ preempt_disable();
+ tasklet_schedule(&jrp->irqtask);
+ preempt_enable();
+
+ return IRQ_HANDLED;
+}
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void caam_jr_dequeue(unsigned long devarg)
+{
+ int hw_idx, sw_idx, i, head, tail;
+ struct device *dev = (struct device *)devarg;
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
+ u32 *userdesc, userstatus;
+ void *userarg;
+
+ while (rd_reg32(&jrp->rregs->outring_used)) {
+
+ head = ACCESS_ONCE(jrp->head);
+
+ spin_lock(&jrp->outlock);
+
+ sw_idx = tail = jrp->tail;
+ hw_idx = jrp->out_ring_read_index;
+
+ for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
+ sw_idx = (tail + i) & (JOBR_DEPTH - 1);
+
+ if (jrp->outring[hw_idx].desc ==
+ jrp->entinfo[sw_idx].desc_addr_dma)
+ break; /* found */
+ }
+ /* we should never fail to find a matching descriptor */
+ BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
+
+ /* Unmap just-run descriptor so we can post-process */
+ dma_unmap_single(dev, jrp->outring[hw_idx].desc,
+ jrp->entinfo[sw_idx].desc_size,
+ DMA_TO_DEVICE);
+
+ /* mark completed, avoid matching on a recycled desc addr */
+ jrp->entinfo[sw_idx].desc_addr_dma = 0;
+
+ /* Stash callback params for use outside of lock */
+ usercall = jrp->entinfo[sw_idx].callbk;
+ userarg = jrp->entinfo[sw_idx].cbkarg;
+ userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
+ userstatus = jrp->outring[hw_idx].jrstatus;
+
+ /* set done */
+ wr_reg32(&jrp->rregs->outring_rmvd, 1);
+
+ jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
+ (JOBR_DEPTH - 1);
+
+ /*
+ * if this job completed out-of-order, do not increment
+ * the tail. Otherwise, increment tail by 1 plus the
+ * number of subsequent jobs already completed out-of-order
+ */
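+		/*
+		 * Example (illustrative): with jobs at s/w indices 2, 3 and 4
+		 * outstanding, if 3 and 4 complete before 2, tail stays at 2;
+		 * once 2 completes, the loop below advances tail past the
+		 * already-retired entries (desc_addr_dma == 0) straight to 5.
+		 */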
+ if (sw_idx == tail) {
+ do {
+ tail = (tail + 1) & (JOBR_DEPTH - 1);
+ } while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
+ jrp->entinfo[tail].desc_addr_dma == 0);
+
+ jrp->tail = tail;
+ }
+
+ spin_unlock(&jrp->outlock);
+
+ /* Finally, execute user's callback */
+ usercall(dev, userdesc, userstatus, userarg);
+ }
+
+ /* reenable / unmask IRQs */
+ clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+}
+
+/**
+ * caam_jr_alloc() - Alloc a job ring for someone to use as needed.
+ *
+ * returns :	pointer to the job ring device with the fewest current
+ *		users on success, or ERR_PTR(-ENODEV) if no job rings
+ *		are registered.
+ **/
+struct device *caam_jr_alloc(void)
+{
+ struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
+ struct device *dev = NULL;
+ int min_tfm_cnt = INT_MAX;
+ int tfm_cnt;
+
+ spin_lock(&driver_data.jr_alloc_lock);
+
+ if (list_empty(&driver_data.jr_list)) {
+ spin_unlock(&driver_data.jr_alloc_lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
+ tfm_cnt = atomic_read(&jrpriv->tfm_count);
+ if (tfm_cnt < min_tfm_cnt) {
+ min_tfm_cnt = tfm_cnt;
+ min_jrpriv = jrpriv;
+ }
+ if (!min_tfm_cnt)
+ break;
+ }
+
+ if (min_jrpriv) {
+ atomic_inc(&min_jrpriv->tfm_count);
+ dev = min_jrpriv->dev;
+ }
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ return dev;
+}
+EXPORT_SYMBOL(caam_jr_alloc);
+
+/**
+ * caam_jr_free() - Free the Job Ring
+ * @rdev: points to the dev that identifies the Job ring to
+ *	  be released.
+ **/
+void caam_jr_free(struct device *rdev)
+{
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
+
+ atomic_dec(&jrpriv->tfm_count);
+}
+EXPORT_SYMBOL(caam_jr_free);
+
+/**
+ * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
+ * -EBUSY if the queue is full, -EIO if it cannot map the caller's
+ * descriptor.
+ * @dev: device of the job ring to be used. This device should have
+ *        been assigned prior by caam_jr_alloc().
+ * @desc: points to a job descriptor that executes our request. All
+ * descriptors (and all referenced data) must be in a DMAable
+ * region, and all data references must be physical addresses
+ * accessible to CAAM (i.e. within a PAMU window granted
+ * to it).
+ * @cbk: pointer to a callback function to be invoked upon completion
+ * of this request. This has the form:
+ * callback(struct device *dev, u32 *desc, u32 stat, void *arg)
+ * where:
+ * @dev: contains the job ring device that processed this
+ * response.
+ * @desc: descriptor that initiated the request, same as
+ *		the "desc" passed to caam_jr_enqueue().
+ * @status: untranslated status received from CAAM. See the
+ * reference manual for a detailed description of
+ * error meaning, or see the JRSTA definitions in the
+ * register header file
+ * @areq: optional pointer to an argument passed with the
+ * original request
+ * @areq: optional pointer to a user argument that is handed back to
+ *	 @cbk at completion time.
+ **/
+int caam_jr_enqueue(struct device *dev, u32 *desc,
+ void (*cbk)(struct device *dev, u32 *desc,
+ u32 status, void *areq),
+ void *areq)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ struct caam_jrentry_info *head_entry;
+ int head, tail, desc_size;
+ dma_addr_t desc_dma;
+
+ desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
+ desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, desc_dma)) {
+ dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
+ return -EIO;
+ }
+
+ spin_lock_bh(&jrp->inplock);
+
+ head = jrp->head;
+ tail = ACCESS_ONCE(jrp->tail);
+
+ if (!rd_reg32(&jrp->rregs->inpring_avail) ||
+ CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
+ spin_unlock_bh(&jrp->inplock);
+ dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
+ return -EBUSY;
+ }
+
+ head_entry = &jrp->entinfo[head];
+ head_entry->desc_addr_virt = desc;
+ head_entry->desc_size = desc_size;
+ head_entry->callbk = (void *)cbk;
+ head_entry->cbkarg = areq;
+ head_entry->desc_addr_dma = desc_dma;
+
+ jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
+
+ smp_wmb();
+
+ jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
+ (JOBR_DEPTH - 1);
+ jrp->head = (head + 1) & (JOBR_DEPTH - 1);
+
+ wr_reg32(&jrp->rregs->inpring_jobadd, 1);
+
+ spin_unlock_bh(&jrp->inplock);
+
+ return 0;
+}
+EXPORT_SYMBOL(caam_jr_enqueue);
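+
+/*
+ * Illustrative usage (not from the original source; my_done_cb/req/unmap
+ * are placeholders): a typical caller obtains a ring once with
+ * caam_jr_alloc(), then submits DMA-safe descriptors and completes
+ * asynchronously in the callback; on -EBUSY/-EIO the caller unmaps and
+ * frees the descriptor itself:
+ *
+ *	struct device *jrdev = caam_jr_alloc();
+ *
+ *	if (IS_ERR(jrdev))
+ *		return PTR_ERR(jrdev);
+ *
+ *	ret = caam_jr_enqueue(jrdev, desc, my_done_cb, req);
+ *	if (ret)
+ *		goto unmap;
+ */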
+
+/*
+ * Init JobR independent of platform property detection
+ */
+static int caam_jr_init(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp;
+ dma_addr_t inpbusaddr, outbusaddr;
+ int i, error;
+
+ jrp = dev_get_drvdata(dev);
+
+ tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+
+ /* Connect job ring interrupt handler. */
+ error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+ dev_name(dev), dev);
+ if (error) {
+ dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
+ jrp->ridx, jrp->irq);
+ goto out_kill_deq;
+ }
+
+ error = caam_reset_hw_jr(dev);
+ if (error)
+ goto out_free_irq;
+
+ error = -ENOMEM;
+ jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+ &inpbusaddr, GFP_KERNEL);
+ if (!jrp->inpring)
+ goto out_free_irq;
+
+ jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
+ JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
+ if (!jrp->outring)
+ goto out_free_inpring;
+
+ jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
+ GFP_KERNEL);
+ if (!jrp->entinfo)
+ goto out_free_outring;
+
+ for (i = 0; i < JOBR_DEPTH; i++)
+ jrp->entinfo[i].desc_addr_dma = !0;
+
+ /* Setup rings */
+ jrp->inp_ring_write_index = 0;
+ jrp->out_ring_read_index = 0;
+ jrp->head = 0;
+ jrp->tail = 0;
+
+ wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
+ wr_reg64(&jrp->rregs->outring_base, outbusaddr);
+ wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
+ wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
+
+ jrp->ringsize = JOBR_DEPTH;
+
+ spin_lock_init(&jrp->inplock);
+ spin_lock_init(&jrp->outlock);
+
+ /* Select interrupt coalescing parameters */
+ setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
+ (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
+ (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
+
+ return 0;
+
+out_free_outring:
+ dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
+ jrp->outring, outbusaddr);
+out_free_inpring:
+ dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+ jrp->inpring, inpbusaddr);
+ dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx);
+out_free_irq:
+ free_irq(jrp->irq, dev);
+out_kill_deq:
+ tasklet_kill(&jrp->irqtask);
+ return error;
+}
+
+
+/*
+ * Probe routine for each detected JobR subsystem.
+ */
+static int caam_jr_probe(struct platform_device *pdev)
+{
+ struct device *jrdev;
+ struct device_node *nprop;
+ struct caam_job_ring __iomem *ctrl;
+ struct caam_drv_private_jr *jrpriv;
+ static int total_jobrs;
+ int error;
+
+ jrdev = &pdev->dev;
+ jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr),
+ GFP_KERNEL);
+ if (!jrpriv)
+ return -ENOMEM;
+
+ dev_set_drvdata(jrdev, jrpriv);
+
+ /* save ring identity relative to detection */
+ jrpriv->ridx = total_jobrs++;
+
+ nprop = pdev->dev.of_node;
+ /* Get configuration properties from device tree */
+ /* First, get register page */
+ ctrl = of_iomap(nprop, 0);
+ if (!ctrl) {
+ dev_err(jrdev, "of_iomap() failed\n");
+ return -ENOMEM;
+ }
+
+ jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
+
+ if (sizeof(dma_addr_t) == sizeof(u64))
+ if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
+ dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
+ else
+ dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
+ else
+ dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
+
+ /* Identify the interrupt */
+ jrpriv->irq = irq_of_parse_and_map(nprop, 0);
+
+ /* Now do the platform independent part */
+ error = caam_jr_init(jrdev); /* now turn on hardware */
+ if (error) {
+ irq_dispose_mapping(jrpriv->irq);
+ return error;
+ }
+
+ jrpriv->dev = jrdev;
+ spin_lock(&driver_data.jr_alloc_lock);
+ list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ atomic_set(&jrpriv->tfm_count, 0);
+
+ return 0;
+}
+
+static struct of_device_id caam_jr_match[] = {
+ {
+ .compatible = "fsl,sec-v4.0-job-ring",
+ },
+ {
+ .compatible = "fsl,sec4.0-job-ring",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, caam_jr_match);
+
+static struct platform_driver caam_jr_driver = {
+ .driver = {
+ .name = "caam_jr",
+ .of_match_table = caam_jr_match,
+ },
+ .probe = caam_jr_probe,
+ .remove = caam_jr_remove,
+};
+
+static int __init jr_driver_init(void)
+{
+ spin_lock_init(&driver_data.jr_alloc_lock);
+ INIT_LIST_HEAD(&driver_data.jr_list);
+ return platform_driver_register(&caam_jr_driver);
+}
+
+static void __exit jr_driver_exit(void)
+{
+ platform_driver_unregister(&caam_jr_driver);
+}
+
+module_init(jr_driver_init);
+module_exit(jr_driver_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM JR request backend");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
new file mode 100644
index 000000000..97113a6d6
--- /dev/null
+++ b/drivers/crypto/caam/jr.h
@@ -0,0 +1,18 @@
+/*
+ * CAAM public-level include definitions for the JobR backend
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef JR_H
+#define JR_H
+
+/* Prototypes for backend-level services exposed to APIs */
+struct device *caam_jr_alloc(void);
+void caam_jr_free(struct device *rdev);
+int caam_jr_enqueue(struct device *dev, u32 *desc,
+ void (*cbk)(struct device *dev, u32 *desc, u32 status,
+ void *areq),
+ void *areq);
+
+#endif /* JR_H */
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
new file mode 100644
index 000000000..e1eaf4ff9
--- /dev/null
+++ b/drivers/crypto/caam/key_gen.c
@@ -0,0 +1,123 @@
+/*
+ * CAAM/SEC 4.x functions for handling key-generation jobs
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ */
+#include "compat.h"
+#include "jr.h"
+#include "error.h"
+#include "desc_constr.h"
+#include "key_gen.h"
+
+void split_key_done(struct device *dev, u32 *desc, u32 err,
+ void *context)
+{
+ struct split_key_result *res = context;
+
+#ifdef DEBUG
+ dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ if (err)
+ caam_jr_strstatus(dev, err);
+
+ res->err = err;
+
+ complete(&res->completion);
+}
+EXPORT_SYMBOL(split_key_done);
+/*
+get a split ipad/opad key
+
+Split key generation-----------------------------------------------
+
+[00] 0xb0810008 jobdesc: stidx=1 share=never len=8
+[01] 0x04000014 key: class2->keyreg len=20
+ @0xffe01000
+[03] 0x84410014 operation: cls2-op sha1 hmac init dec
+[04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm
+[05] 0xa4000001 jump: class2 local all ->1 [06]
+[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
+ @0xffe04000
+*/
+int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+ int split_key_pad_len, const u8 *key_in, u32 keylen,
+ u32 alg_op)
+{
+ u32 *desc;
+ struct split_key_result result;
+ dma_addr_t dma_addr_in, dma_addr_out;
+ int ret = -ENOMEM;
+
+ desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+ if (!desc) {
+ dev_err(jrdev, "unable to allocate key input memory\n");
+ return ret;
+ }
+
+ dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, dma_addr_in)) {
+ dev_err(jrdev, "unable to map key input memory\n");
+ goto out_free;
+ }
+
+ dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, dma_addr_out)) {
+ dev_err(jrdev, "unable to map key output memory\n");
+ goto out_unmap_in;
+ }
+
+ init_job_desc(desc, 0);
+ append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
+
+ /* Sets MDHA up into an HMAC-INIT */
+ append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
+
+ /*
+ * do a FIFO_LOAD of zero, this will trigger the internal key expansion
+ * into both pads inside MDHA
+ */
+ append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
+
+ /*
+ * FIFO_STORE with the explicit split-key content store
+ * (0x26 output type)
+ */
+ append_fifo_store(desc, dma_addr_out, split_key_len,
+ LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ result.err = 0;
+ init_completion(&result.completion);
+
+ ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+ if (!ret) {
+ /* in progress */
+ wait_for_completion_interruptible(&result.completion);
+ ret = result.err;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_out,
+ split_key_pad_len, 1);
+#endif
+ }
+
+ dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
+ DMA_FROM_DEVICE);
+out_unmap_in:
+ dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
+out_free:
+ kfree(desc);
+ return ret;
+}
+EXPORT_SYMBOL(gen_split_key);
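+
+/*
+ * Illustrative usage (not from the original source; the ctx->* fields are
+ * placeholders): an HMAC setkey path would typically derive the split key
+ * and padded split key lengths from the digest size, then call:
+ *
+ *	ret = gen_split_key(jrdev, ctx->key, ctx->split_key_len,
+ *			    ctx->split_key_pad_len, key, keylen,
+ *			    ctx->alg_op);
+ */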
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
new file mode 100644
index 000000000..c5588f6d8
--- /dev/null
+++ b/drivers/crypto/caam/key_gen.h
@@ -0,0 +1,17 @@
+/*
+ * CAAM/SEC 4.x definitions for handling key-generation jobs
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ */
+
+struct split_key_result {
+ struct completion completion;
+ int err;
+};
+
+void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
+
+int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+ int split_key_pad_len, const u8 *key_in, u32 keylen,
+ u32 alg_op);
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
new file mode 100644
index 000000000..3a87c0cf8
--- /dev/null
+++ b/drivers/crypto/caam/pdb.h
@@ -0,0 +1,402 @@
+/*
+ * CAAM Protocol Data Block (PDB) definition header file
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef CAAM_PDB_H
+#define CAAM_PDB_H
+
+/*
+ * PDB- IPSec ESP Header Modification Options
+ */
+#define PDBHMO_ESP_DECAP_SHIFT 12
+#define PDBHMO_ESP_ENCAP_SHIFT 4
+/*
+ * Encap and Decap - Decrement TTL (Hop Limit) - Based on the value of the
+ * Options Byte IP version (IPvsn) field:
+ * if IPv4, decrement the inner IP header TTL field (byte 8);
+ * if IPv6, decrement the inner IP header Hop Limit field (byte 7).
+ */
+#define PDBHMO_ESP_DECAP_DEC_TTL (0x02 << PDBHMO_ESP_DECAP_SHIFT)
+#define PDBHMO_ESP_ENCAP_DEC_TTL (0x02 << PDBHMO_ESP_ENCAP_SHIFT)
+/*
+ * Decap - DiffServ Copy - Copy the IPv4 TOS or IPv6 Traffic Class byte
+ * from the outer IP header to the inner IP header.
+ */
+#define PDBHMO_ESP_DIFFSERV (0x01 << PDBHMO_ESP_DECAP_SHIFT)
+/*
+ * Encap - Copy DF bit - if an IPv4 tunnel mode outer IP header is coming from
+ * the PDB, copy the DF bit from the inner IP header to the outer IP header.
+ */
+#define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT)
+
+/*
+ * PDB - IPSec ESP Encap/Decap Options
+ */
+#define PDBOPTS_ESP_ARSNONE 0x00 /* no antireplay window */
+#define PDBOPTS_ESP_ARS32 0x40 /* 32-entry antireplay window */
+#define PDBOPTS_ESP_ARS64 0xc0 /* 64-entry antireplay window */
+#define PDBOPTS_ESP_IVSRC 0x20 /* IV comes from internal random gen */
+#define PDBOPTS_ESP_ESN 0x10 /* extended sequence included */
+#define PDBOPTS_ESP_OUTFMT 0x08 /* output only decapsulation (decap) */
+#define PDBOPTS_ESP_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
+#define PDBOPTS_ESP_INCIPHDR 0x04 /* Prepend IP header to output frame */
+#define PDBOPTS_ESP_IPVSN 0x02 /* process IPv6 header */
+#define PDBOPTS_ESP_AOFL 0x04 /* adjust out frame len (decap, SEC>=5.3)*/
+#define PDBOPTS_ESP_TUNNEL 0x01 /* tunnel mode next-header byte */
+#define PDBOPTS_ESP_IPV6 0x02 /* ip header version is V6 */
+#define PDBOPTS_ESP_DIFFSERV 0x40 /* copy TOS/TC from inner iphdr */
+#define PDBOPTS_ESP_UPDATE_CSUM 0x80 /* encap-update ip header checksum */
+#define PDBOPTS_ESP_VERIFY_CSUM 0x20 /* decap-validate ip header checksum */
+
+/*
+ * General IPSec encap/decap PDB definitions
+ */
+struct ipsec_encap_cbc {
+ u32 iv[4];
+};
+
+struct ipsec_encap_ctr {
+ u32 ctr_nonce;
+ u32 ctr_initial;
+ u32 iv[2];
+};
+
+struct ipsec_encap_ccm {
+ u32 salt; /* lower 24 bits */
+ u8 b0_flags;
+ u8 ctr_flags;
+ u16 ctr_initial;
+ u32 iv[2];
+};
+
+struct ipsec_encap_gcm {
+ u32 salt; /* lower 24 bits */
+ u32 rsvd1;
+ u32 iv[2];
+};
+
+struct ipsec_encap_pdb {
+ u8 hmo_rsvd;
+ u8 ip_nh;
+ u8 ip_nh_offset;
+ u8 options;
+ u32 seq_num_ext_hi;
+ u32 seq_num;
+ union {
+ struct ipsec_encap_cbc cbc;
+ struct ipsec_encap_ctr ctr;
+ struct ipsec_encap_ccm ccm;
+ struct ipsec_encap_gcm gcm;
+ };
+ u32 spi;
+ u16 rsvd1;
+ u16 ip_hdr_len;
+ u32 ip_hdr[0]; /* optional IP Header content */
+};
+
+struct ipsec_decap_cbc {
+ u32 rsvd[2];
+};
+
+struct ipsec_decap_ctr {
+ u32 salt;
+ u32 ctr_initial;
+};
+
+struct ipsec_decap_ccm {
+ u32 salt;
+ u8 iv_flags;
+ u8 ctr_flags;
+ u16 ctr_initial;
+};
+
+struct ipsec_decap_gcm {
+ u32 salt;
+ u32 resvd;
+};
+
+struct ipsec_decap_pdb {
+ u16 hmo_ip_hdr_len;
+ u8 ip_nh_offset;
+ u8 options;
+ union {
+ struct ipsec_decap_cbc cbc;
+ struct ipsec_decap_ctr ctr;
+ struct ipsec_decap_ccm ccm;
+ struct ipsec_decap_gcm gcm;
+ };
+ u32 seq_num_ext_hi;
+ u32 seq_num;
+ u32 anti_replay[2];
+ u32 end_index[0];
+};
+
+/*
+ * IPSec ESP Datapath Protocol Override Register (DPOVRD)
+ */
+struct ipsec_deco_dpovrd {
+#define IPSEC_ENCAP_DECO_DPOVRD_USE 0x80
+ u8 ovrd_ecn;
+ u8 ip_hdr_len;
+ u8 nh_offset;
+ u8 next_header; /* reserved if decap */
+};
+
+/*
+ * IEEE 802.11i WiFi Protocol Data Block
+ */
+#define WIFI_PDBOPTS_FCS 0x01
+#define WIFI_PDBOPTS_AR 0x40
+
+struct wifi_encap_pdb {
+ u16 mac_hdr_len;
+ u8 rsvd;
+ u8 options;
+ u8 iv_flags;
+ u8 pri;
+ u16 pn1;
+ u32 pn2;
+ u16 frm_ctrl_mask;
+ u16 seq_ctrl_mask;
+ u8 rsvd1[2];
+ u8 cnst;
+ u8 key_id;
+ u8 ctr_flags;
+ u8 rsvd2;
+ u16 ctr_init;
+};
+
+struct wifi_decap_pdb {
+ u16 mac_hdr_len;
+ u8 rsvd;
+ u8 options;
+ u8 iv_flags;
+ u8 pri;
+ u16 pn1;
+ u32 pn2;
+ u16 frm_ctrl_mask;
+ u16 seq_ctrl_mask;
+ u8 rsvd1[4];
+ u8 ctr_flags;
+ u8 rsvd2;
+ u16 ctr_init;
+};
+
+/*
+ * IEEE 802.16 WiMAX Protocol Data Block
+ */
+#define WIMAX_PDBOPTS_FCS 0x01
+#define WIMAX_PDBOPTS_AR 0x40 /* decap only */
+
+struct wimax_encap_pdb {
+ u8 rsvd[3];
+ u8 options;
+ u32 nonce;
+ u8 b0_flags;
+ u8 ctr_flags;
+ u16 ctr_init;
+ /* begin DECO writeback region */
+ u32 pn;
+ /* end DECO writeback region */
+};
+
+struct wimax_decap_pdb {
+ u8 rsvd[3];
+ u8 options;
+ u32 nonce;
+ u8 iv_flags;
+ u8 ctr_flags;
+ u16 ctr_init;
+ /* begin DECO writeback region */
+ u32 pn;
+ u8 rsvd1[2];
+ u16 antireplay_len;
+ u64 antireplay_scorecard;
+ /* end DECO writeback region */
+};
+
+/*
+ * IEEE 801.AE MacSEC Protocol Data Block
+ */
+#define MACSEC_PDBOPTS_FCS 0x01
+#define MACSEC_PDBOPTS_AR 0x40 /* used in decap only */
+
+struct macsec_encap_pdb {
+ u16 aad_len;
+ u8 rsvd;
+ u8 options;
+ u64 sci;
+ u16 ethertype;
+ u8 tci_an;
+ u8 rsvd1;
+ /* begin DECO writeback region */
+ u32 pn;
+ /* end DECO writeback region */
+};
+
+struct macsec_decap_pdb {
+ u16 aad_len;
+ u8 rsvd;
+ u8 options;
+ u64 sci;
+ u8 rsvd1[3];
+ /* begin DECO writeback region */
+ u8 antireplay_len;
+ u32 pn;
+ u64 antireplay_scorecard;
+ /* end DECO writeback region */
+};
+
+/*
+ * SSL/TLS/DTLS Protocol Data Blocks
+ */
+
+#define TLS_PDBOPTS_ARS32 0x40
+#define TLS_PDBOPTS_ARS64 0xc0
+#define TLS_PDBOPTS_OUTFMT 0x08
+#define TLS_PDBOPTS_IV_WRTBK 0x02 /* 1.1/1.2/DTLS only */
+#define TLS_PDBOPTS_EXP_RND_IV 0x01 /* 1.1/1.2/DTLS only */
+
+struct tls_block_encap_pdb {
+ u8 type;
+ u8 version[2];
+ u8 options;
+ u64 seq_num;
+ u32 iv[4];
+};
+
+struct tls_stream_encap_pdb {
+ u8 type;
+ u8 version[2];
+ u8 options;
+ u64 seq_num;
+ u8 i;
+ u8 j;
+ u8 rsvd1[2];
+};
+
+struct dtls_block_encap_pdb {
+ u8 type;
+ u8 version[2];
+ u8 options;
+ u16 epoch;
+ u16 seq_num[3];
+ u32 iv[4];
+};
+
+struct tls_block_decap_pdb {
+ u8 rsvd[3];
+ u8 options;
+ u64 seq_num;
+ u32 iv[4];
+};
+
+struct tls_stream_decap_pdb {
+ u8 rsvd[3];
+ u8 options;
+ u64 seq_num;
+ u8 i;
+ u8 j;
+ u8 rsvd1[2];
+};
+
+struct dtls_block_decap_pdb {
+ u8 rsvd[3];
+ u8 options;
+ u16 epoch;
+ u16 seq_num[3];
+ u32 iv[4];
+ u64 antireplay_scorecard;
+};
+
+/*
+ * SRTP Protocol Data Blocks
+ */
+#define SRTP_PDBOPTS_MKI 0x08
+#define SRTP_PDBOPTS_AR 0x40
+
+struct srtp_encap_pdb {
+ u8 x_len;
+ u8 mki_len;
+ u8 n_tag;
+ u8 options;
+ u32 cnst0;
+ u8 rsvd[2];
+ u16 cnst1;
+ u16 salt[7];
+ u16 cnst2;
+ u32 rsvd1;
+ u32 roc;
+ u32 opt_mki;
+};
+
+struct srtp_decap_pdb {
+ u8 x_len;
+ u8 mki_len;
+ u8 n_tag;
+ u8 options;
+ u32 cnst0;
+ u8 rsvd[2];
+ u16 cnst1;
+ u16 salt[7];
+ u16 cnst2;
+ u16 rsvd1;
+ u16 seq_num;
+ u32 roc;
+ u64 antireplay_scorecard;
+};
+
+/*
+ * DSA/ECDSA Protocol Data Blocks
+ * Two of these exist: DSA-SIGN, and DSA-VERIFY. They are similar
+ * except for the treatment of "w" for verify, "s" for sign,
+ * and the placement of "a,b".
+ */
+#define DSA_PDB_SGF_SHIFT 24
+#define DSA_PDB_SGF_MASK (0xff << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_Q (0x80 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_R (0x40 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_G (0x20 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_W (0x10 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_S (0x10 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_F (0x08 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_C (0x04 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_D (0x02 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_AB_SIGN (0x02 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_AB_VERIFY (0x01 << DSA_PDB_SGF_SHIFT)
+
+#define DSA_PDB_L_SHIFT 7
+#define DSA_PDB_L_MASK (0x3ff << DSA_PDB_L_SHIFT)
+
+#define DSA_PDB_N_MASK 0x7f
+
+struct dsa_sign_pdb {
+ u32 sgf_ln; /* Use DSA_PDB_ definitions per above */
+ u8 *q;
+ u8 *r;
+ u8 *g; /* or Gx,y */
+ u8 *s;
+ u8 *f;
+ u8 *c;
+ u8 *d;
+ u8 *ab; /* ECC only */
+ u8 *u;
+};
+
+struct dsa_verify_pdb {
+ u32 sgf_ln;
+ u8 *q;
+ u8 *r;
+ u8 *g; /* or Gx,y */
+ u8 *w; /* or Wx,y */
+ u8 *f;
+ u8 *c;
+ u8 *d;
+ u8 *tmp; /* temporary data block */
+ u8 *ab; /* only used if ECC processing */
+};
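As a hedged illustration (not part of the patch), the sgf_ln word of these PDBs could be composed from the field definitions above like this; the length arguments are hypothetical byte counts for L and N:

static inline u32 dsa_pdb_pack_sgf_ln(u32 sgf_flags, u32 l_bytes, u32 n_bytes)
{
	/* scatter/gather flags live in bits 31-24, L in bits 16-7, N in 6-0 */
	return (sgf_flags & DSA_PDB_SGF_MASK) |
	       ((l_bytes << DSA_PDB_L_SHIFT) & DSA_PDB_L_MASK) |
	       (n_bytes & DSA_PDB_N_MASK);
}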
+
+#endif
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
new file mode 100644
index 000000000..378ddc17f
--- /dev/null
+++ b/drivers/crypto/caam/regs.h
@@ -0,0 +1,780 @@
+/*
+ * CAAM hardware register-level view
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef REGS_H
+#define REGS_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+/*
+ * Architecture-specific register access methods
+ *
+ * CAAM's bus-addressable registers are 64 bits internally.
+ * They have been wired to be safely accessible on 32-bit
+ * architectures, however. Registers were organized such
+ * that (a) they can be contained in 32 bits, (b) if not, then they
+ * can be treated as two 32-bit entities, or finally (c) if they
+ * must be treated as a single 64-bit value, then this can safely
+ * be done with two 32-bit cycles.
+ *
+ * For 32-bit operations on 64-bit values, CAAM follows the same
+ * 64-bit register access conventions as its predecessors, in that
+ * writes are "triggered" by a write to the register at the numerically
+ * higher address. Thus, a full 64-bit write cycle requires a write
+ * to the lower address, followed by a write to the higher address,
+ * which will latch/execute the write cycle.
+ *
+ * For example, let's assume a SW reset of CAAM through the master
+ * configuration register.
+ * - SWRST is in bit 31 of MCFG.
+ * - MCFG begins at base+0x0000.
+ * - Bits 63-32 are a 32-bit word at base+0x0000 (numerically-lower)
+ * - Bits 31-0 are a 32-bit word at base+0x0004 (numerically-higher)
+ *
+ * (and on Power, the convention is 0-31, 32-63, I know...)
+ *
+ * Assuming a 64-bit write to this MCFG to perform a software reset
+ * would then require a write of 0 to base+0x0000, followed by a
+ * write of 0x80000000 to base+0x0004, which would "execute" the
+ * reset.
+ *
+ * Of course, since MCFG 63-32 is all zero, we could cheat and simply
+ * write 0x80000000 to base+0x0004, and the reset would work fine.
+ * However, since CAAM does contain some write-and-read-intended
+ * 64-bit registers, this code defines 64-bit access methods for
+ * the sake of internal consistency and simplicity, and so that a
+ * clean transition to 64-bit is possible when it becomes necessary.
+ *
+ * There are limitations to this that the developer must recognize.
+ * 32-bit architectures cannot enforce an atomic 64-bit operation.
+ * Therefore:
+ *
+ * - On writes, since the HW is assumed to latch the cycle on the
+ * write of the higher-numeric-address word, then ordered
+ * writes work OK.
+ *
+ * - For reads, where a register contains a relevant value of more
+ * than 32 bits, the hardware employs logic to latch the other
+ * "half" of the data until read, ensuring an accurate value.
+ * This is of particular relevance when dealing with CAAM's
+ * performance counters.
+ *
+ */
+
+#ifdef __BIG_ENDIAN
+#define wr_reg32(reg, data) out_be32(reg, data)
+#define rd_reg32(reg) in_be32(reg)
+#ifdef CONFIG_64BIT
+#define wr_reg64(reg, data) out_be64(reg, data)
+#define rd_reg64(reg) in_be64(reg)
+#endif
+#else
+#ifdef __LITTLE_ENDIAN
+#define wr_reg32(reg, data) __raw_writel(data, reg)
+#define rd_reg32(reg) __raw_readl(reg)
+#ifdef CONFIG_64BIT
+#define wr_reg64(reg, data) __raw_writeq(data, reg)
+#define rd_reg64(reg) __raw_readq(reg)
+#endif
+#endif
+#endif
+
+#ifndef CONFIG_64BIT
+#ifdef __BIG_ENDIAN
+static inline void wr_reg64(u64 __iomem *reg, u64 data)
+{
+ wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32);
+ wr_reg32((u32 __iomem *)reg + 1, data & 0x00000000ffffffffull);
+}
+
+static inline u64 rd_reg64(u64 __iomem *reg)
+{
+ return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) |
+ ((u64)rd_reg32((u32 __iomem *)reg + 1));
+}
+#else
+#ifdef __LITTLE_ENDIAN
+static inline void wr_reg64(u64 __iomem *reg, u64 data)
+{
+ wr_reg32((u32 __iomem *)reg + 1, (data & 0xffffffff00000000ull) >> 32);
+ wr_reg32((u32 __iomem *)reg, data & 0x00000000ffffffffull);
+}
+
+static inline u64 rd_reg64(u64 __iomem *reg)
+{
+ return (((u64)rd_reg32((u32 __iomem *)reg + 1)) << 32) |
+ ((u64)rd_reg32((u32 __iomem *)reg));
+}
+#endif
+#endif
+#endif
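To make the convention concrete, here is a hedged sketch (not part of the patch) of the MCFG software-reset walkthrough described in the comment above; base is a hypothetical ioremap()ed CAAM register base:

static inline void caam_example_sw_reset(void __iomem *base)
{
	/* MCFG bits 63-32 sit at base+0x0000, bits 31-0 at base+0x0004;
	 * the write to the numerically-higher word latches the cycle.
	 */
	wr_reg32((u32 __iomem *)(base + 0x0000), 0);
	wr_reg32((u32 __iomem *)(base + 0x0004), 0x80000000);
}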
+
+/*
+ * jr_outentry
+ * Represents each entry in a JobR output ring
+ */
+struct jr_outentry {
+ dma_addr_t desc; /* Pointer to completed descriptor */
+ u32 jrstatus; /* Status for completed descriptor */
+} __packed;
+
+/*
+ * caam_perfmon - Performance Monitor/Secure Memory Status/
+ * CAAM Global Status/Component Version IDs
+ *
+ * Spans f00-fff wherever instantiated
+ */
+
+/* Number of DECOs */
+#define CHA_NUM_MS_DECONUM_SHIFT 24
+#define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
+
+/* CHA Version IDs */
+#define CHA_ID_LS_AES_SHIFT 0
+#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
+
+#define CHA_ID_LS_DES_SHIFT 4
+#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
+
+#define CHA_ID_LS_ARC4_SHIFT 8
+#define CHA_ID_LS_ARC4_MASK (0xfull << CHA_ID_LS_ARC4_SHIFT)
+
+#define CHA_ID_LS_MD_SHIFT 12
+#define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
+
+#define CHA_ID_LS_RNG_SHIFT 16
+#define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
+
+#define CHA_ID_LS_SNW8_SHIFT 20
+#define CHA_ID_LS_SNW8_MASK (0xfull << CHA_ID_LS_SNW8_SHIFT)
+
+#define CHA_ID_LS_KAS_SHIFT 24
+#define CHA_ID_LS_KAS_MASK (0xfull << CHA_ID_LS_KAS_SHIFT)
+
+#define CHA_ID_LS_PK_SHIFT 28
+#define CHA_ID_LS_PK_MASK (0xfull << CHA_ID_LS_PK_SHIFT)
+
+#define CHA_ID_MS_CRC_SHIFT 0
+#define CHA_ID_MS_CRC_MASK (0xfull << CHA_ID_MS_CRC_SHIFT)
+
+#define CHA_ID_MS_SNW9_SHIFT 4
+#define CHA_ID_MS_SNW9_MASK (0xfull << CHA_ID_MS_SNW9_SHIFT)
+
+#define CHA_ID_MS_DECO_SHIFT 24
+#define CHA_ID_MS_DECO_MASK (0xfull << CHA_ID_MS_DECO_SHIFT)
+
+#define CHA_ID_MS_JR_SHIFT 28
+#define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT)
+
+struct sec_vid {
+ u16 ip_id;
+ u8 maj_rev;
+ u8 min_rev;
+};
+
+struct caam_perfmon {
+ /* Performance Monitor Registers f00-f9f */
+ u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */
+ u64 ob_enc_req; /* PC_OB_ENC_REQ - Outbound Encrypt Requests */
+ u64 ib_dec_req; /* PC_IB_DEC_REQ - Inbound Decrypt Requests */
+ u64 ob_enc_bytes; /* PC_OB_ENCRYPT - Outbound Bytes Encrypted */
+ u64 ob_prot_bytes; /* PC_OB_PROTECT - Outbound Bytes Protected */
+ u64 ib_dec_bytes; /* PC_IB_DECRYPT - Inbound Bytes Decrypted */
+ u64 ib_valid_bytes; /* PC_IB_VALIDATED - Inbound Bytes Validated */
+ u64 rsvd[13];
+
+ /* CAAM Hardware Instantiation Parameters fa0-fbf */
+ u32 cha_rev_ms; /* CRNR - CHA Rev No. Most significant half */
+ u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half */
+#define CTPR_MS_QI_SHIFT 25
+#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
+#define CTPR_MS_VIRT_EN_INCL 0x00000001
+#define CTPR_MS_VIRT_EN_POR 0x00000002
+#define CTPR_MS_PG_SZ_MASK 0x10
+#define CTPR_MS_PG_SZ_SHIFT 4
+ u32 comp_parms_ms; /* CTPR - Compile Parameters Register */
+ u32 comp_parms_ls; /* CTPR - Compile Parameters Register */
+ u64 rsvd1[2];
+
+ /* CAAM Global Status fc0-fdf */
+ u64 faultaddr; /* FAR - Fault Address */
+ u32 faultliodn; /* FALR - Fault Address LIODN */
+ u32 faultdetail; /* FADR - Fault Addr Detail */
+ u32 rsvd2;
+ u32 status; /* CSTA - CAAM Status */
+ u64 rsvd3;
+
+ /* Component Instantiation Parameters fe0-fff */
+ u32 rtic_id; /* RVID - RTIC Version ID */
+ u32 ccb_id; /* CCBVID - CCB Version ID */
+ u32 cha_id_ms; /* CHAVID - CHA Version ID Most Significant */
+ u32 cha_id_ls; /* CHAVID - CHA Version ID Least Significant */
+ u32 cha_num_ms; /* CHANUM - CHA Number Most Significant */
+ u32 cha_num_ls; /* CHANUM - CHA Number Least Significant */
+ u32 caam_id_ms; /* CAAMVID - CAAM Version ID MS */
+ u32 caam_id_ls; /* CAAMVID - CAAM Version ID LS */
+};
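As a hedged sketch (not part of the patch), a probe routine might read the instantiation parameters above like this; perfmon is assumed to be an ioremap()ed pointer to this block:

static inline u32 caam_example_num_decos(struct caam_perfmon __iomem *perfmon)
{
	/* the DECO count lives in bits 27-24 of CHANUM_MS */
	return (rd_reg32(&perfmon->cha_num_ms) & CHA_NUM_MS_DECONUM_MASK) >>
	       CHA_NUM_MS_DECONUM_SHIFT;
}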
+
+/* LIODN programming for DMA configuration */
+#define MSTRID_LOCK_LIODN 0x80000000
+#define MSTRID_LOCK_MAKETRUSTED 0x00010000 /* only for JR masterid */
+
+#define MSTRID_LIODN_MASK 0x0fff
+struct masterid {
+ u32 liodn_ms; /* lock and make-trusted control bits */
+ u32 liodn_ls; /* LIODN for non-sequence and seq access */
+};
+
+/* Partition ID for DMA configuration */
+struct partid {
+ u32 rsvd1;
+ u32 pidr; /* partition ID, DECO */
+};
+
+/* RNGB test mode (replicated twice in some configurations) */
+/* Padded out to 0x100 */
+struct rngtst {
+ u32 mode; /* RTSTMODEx - Test mode */
+ u32 rsvd1[3];
+ u32 reset; /* RTSTRESETx - Test reset control */
+ u32 rsvd2[3];
+ u32 status; /* RTSTSSTATUSx - Test status */
+ u32 rsvd3;
+ u32 errstat; /* RTSTERRSTATx - Test error status */
+ u32 rsvd4;
+ u32 errctl; /* RTSTERRCTLx - Test error control */
+ u32 rsvd5;
+ u32 entropy; /* RTSTENTROPYx - Test entropy */
+ u32 rsvd6[15];
+ u32 verifctl; /* RTSTVERIFCTLx - Test verification control */
+ u32 rsvd7;
+ u32 verifstat; /* RTSTVERIFSTATx - Test verification status */
+ u32 rsvd8;
+ u32 verifdata; /* RTSTVERIFDx - Test verification data */
+ u32 rsvd9;
+ u32 xkey; /* RTSTXKEYx - Test XKEY */
+ u32 rsvd10;
+ u32 oscctctl; /* RTSTOSCCTCTLx - Test osc. counter control */
+ u32 rsvd11;
+ u32 oscct; /* RTSTOSCCTx - Test oscillator counter */
+ u32 rsvd12;
+ u32 oscctstat; /* RTSTODCCTSTATx - Test osc counter status */
+ u32 rsvd13[2];
+ u32 ofifo[4]; /* RTSTOFIFOx - Test output FIFO */
+ u32 rsvd14[15];
+};
+
+/* RNG4 TRNG test registers */
+struct rng4tst {
+#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
+#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_SC 0 /* use von Neumann data in
+ both entropy shifter and
+ statistical checker */
+#define RTMCTL_SAMP_MODE_RAW_ES_SC 1 /* use raw data in both
+ entropy shifter and
+ statistical checker */
+#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_RAW_SC 2 /* use von Neumann data in
+ entropy shifter, raw data
+ in statistical checker */
+#define RTMCTL_SAMP_MODE_INVALID 3 /* invalid combination */
+ u32 rtmctl; /* misc. control register */
+ u32 rtscmisc; /* statistical check misc. register */
+ u32 rtpkrrng; /* poker range register */
+ union {
+ u32 rtpkrmax; /* PRGM=1: poker max. limit register */
+ u32 rtpkrsq; /* PRGM=0: poker square calc. result register */
+ };
+#define RTSDCTL_ENT_DLY_SHIFT 16
+#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
+#define RTSDCTL_ENT_DLY_MIN 3200
+#define RTSDCTL_ENT_DLY_MAX 12800
+ u32 rtsdctl; /* seed control register */
+ union {
+ u32 rtsblim; /* PRGM=1: sparse bit limit register */
+ u32 rttotsam; /* PRGM=0: total samples register */
+ };
+ u32 rtfrqmin; /* frequency count min. limit register */
+#define RTFRQMAX_DISABLE (1 << 20)
+ union {
+ u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */
+ u32 rtfrqcnt; /* PRGM=0: freq. count register */
+ };
+ u32 rsvd1[40];
+#define RDSTA_SKVT 0x80000000
+#define RDSTA_SKVN 0x40000000
+#define RDSTA_IF0 0x00000001
+#define RDSTA_IF1 0x00000002
+#define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0)
+ u32 rdsta;
+ u32 rsvd2[15];
+};
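For illustration only, a hedged sketch of how the RDSTA flags above might be consulted before (re)instantiating RNG4 state handle 0; r4tst is a hypothetical pointer into the mapped test-register block:

static inline bool caam_example_rng4_sh0_instantiated(struct rng4tst __iomem *r4tst)
{
	/* IF0 set means state handle 0 has already been instantiated */
	return rd_reg32(&r4tst->rdsta) & RDSTA_IF0;
}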
+
+/*
+ * caam_ctrl - basic core configuration
+ * starts base + 0x0000 padded out to 0x1000
+ */
+
+#define KEK_KEY_SIZE 8
+#define TKEK_KEY_SIZE 8
+#define TDSK_KEY_SIZE 8
+
+#define DECO_RESET 1 /* Use with DECO reset/availability regs */
+#define DECO_RESET_0 (DECO_RESET << 0)
+#define DECO_RESET_1 (DECO_RESET << 1)
+#define DECO_RESET_2 (DECO_RESET << 2)
+#define DECO_RESET_3 (DECO_RESET << 3)
+#define DECO_RESET_4 (DECO_RESET << 4)
+
+struct caam_ctrl {
+ /* Basic Configuration Section 000-01f */
+ /* Read/Writable */
+ u32 rsvd1;
+ u32 mcr; /* MCFG Master Config Register */
+ u32 rsvd2;
+ u32 scfgr; /* SCFGR, Security Config Register */
+
+ /* Bus Access Configuration Section 010-11f */
+ /* Read/Writable */
+ struct masterid jr_mid[4]; /* JRxLIODNR - JobR LIODN setup */
+ u32 rsvd3[11];
+ u32 jrstart; /* JRSTART - Job Ring Start Register */
+ struct masterid rtic_mid[4]; /* RTICxLIODNR - RTIC LIODN setup */
+ u32 rsvd4[5];
+ u32 deco_rsr; /* DECORSR - Deco Request Source */
+ u32 rsvd11;
+ u32 deco_rq; /* DECORR - DECO Request */
+ struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */
+ u32 rsvd5[22];
+
+ /* DECO Availability/Reset Section 120-3ff */
+ u32 deco_avail; /* DAR - DECO availability */
+ u32 deco_reset; /* DRR - DECO reset */
+ u32 rsvd6[182];
+
+ /* Key Encryption/Decryption Configuration 400-5ff */
+ /* Read/Writable only while in Non-secure mode */
+ u32 kek[KEK_KEY_SIZE]; /* JDKEKR - Key Encryption Key */
+ u32 tkek[TKEK_KEY_SIZE]; /* TDKEKR - Trusted Desc KEK */
+ u32 tdsk[TDSK_KEY_SIZE]; /* TDSKR - Trusted Desc Signing Key */
+ u32 rsvd7[32];
+ u64 sknonce; /* SKNR - Secure Key Nonce */
+ u32 rsvd8[70];
+
+ /* RNG Test/Verification/Debug Access 600-7ff */
+ /* (Useful in Test/Debug modes only...) */
+ union {
+ struct rngtst rtst[2];
+ struct rng4tst r4tst[2];
+ };
+
+ u32 rsvd9[448];
+
+ /* Performance Monitor f00-fff */
+ struct caam_perfmon perfmon;
+};
+
+/*
+ * Controller master config register defs
+ */
+#define MCFGR_SWRESET 0x80000000 /* software reset */
+#define MCFGR_WDENABLE 0x40000000 /* DECO watchdog enable */
+#define MCFGR_WDFAIL 0x20000000 /* DECO watchdog force-fail */
+#define MCFGR_DMA_RESET 0x10000000
+#define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */
+#define SCFGR_RDBENABLE 0x00000400
+#define SCFGR_VIRT_EN 0x00008000
+#define DECORR_RQD0ENABLE 0x00000001 /* Enable DECO0 for direct access */
+#define DECORSR_JR0 0x00000001 /* JR to supply TZ, SDID, ICID */
+#define DECORSR_VALID 0x80000000
+#define DECORR_DEN0 0x00010000 /* DECO0 available for access */
+
+/* AXI read cache control */
+#define MCFGR_ARCACHE_SHIFT 12
+#define MCFGR_ARCACHE_MASK (0xf << MCFGR_ARCACHE_SHIFT)
+
+/* AXI write cache control */
+#define MCFGR_AWCACHE_SHIFT 8
+#define MCFGR_AWCACHE_MASK (0xf << MCFGR_AWCACHE_SHIFT)
+
+/* AXI pipeline depth */
+#define MCFGR_AXIPIPE_SHIFT 4
+#define MCFGR_AXIPIPE_MASK (0xf << MCFGR_AXIPIPE_SHIFT)
+
+#define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */
+#define MCFGR_BURST_64 0x00000001 /* Max burst size */
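A hedged sketch (not part of the patch) of a typical master-configuration update using the bits above; mcr is assumed to point at the MCFG register inside struct caam_ctrl:

static inline void caam_example_mcfgr_setup(u32 __iomem *mcr)
{
	u32 val = rd_reg32(mcr);

	val |= MCFGR_WDENABLE;			/* enable the DECO watchdog */
	if (sizeof(dma_addr_t) == sizeof(u64))
		val |= MCFGR_LONG_PTR;		/* >32-bit descriptor addressing */
	wr_reg32(mcr, val);
}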
+
+/* JRSTART register offsets */
+#define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */
+#define JRSTART_JR1_START 0x00000002 /* Start Job ring 1 */
+#define JRSTART_JR2_START 0x00000004 /* Start Job ring 2 */
+#define JRSTART_JR3_START 0x00000008 /* Start Job ring 3 */
+
+/*
+ * caam_job_ring - direct job ring setup
+ * 1-4 possible per instantiation, base + 1000/2000/3000/4000
+ * Padded out to 0x1000
+ */
+struct caam_job_ring {
+ /* Input ring */
+ u64 inpring_base; /* IRBAx - Input desc ring baseaddr */
+ u32 rsvd1;
+ u32 inpring_size; /* IRSx - Input ring size */
+ u32 rsvd2;
+ u32 inpring_avail; /* IRSAx - Input ring room remaining */
+ u32 rsvd3;
+ u32 inpring_jobadd; /* IRJAx - Input ring jobs added */
+
+ /* Output Ring */
+ u64 outring_base; /* ORBAx - Output status ring base addr */
+ u32 rsvd4;
+ u32 outring_size; /* ORSx - Output ring size */
+ u32 rsvd5;
+ u32 outring_rmvd; /* ORJRx - Output ring jobs removed */
+ u32 rsvd6;
+ u32 outring_used; /* ORSFx - Output ring slots full */
+
+ /* Status/Configuration */
+ u32 rsvd7;
+ u32 jroutstatus; /* JRSTAx - JobR output status */
+ u32 rsvd8;
+ u32 jrintstatus; /* JRINTx - JobR interrupt status */
+ u32 rconfig_hi; /* JRxCFG - Ring configuration */
+ u32 rconfig_lo;
+
+ /* Indices. CAAM maintains as "heads" of each queue */
+ u32 rsvd9;
+ u32 inp_rdidx; /* IRRIx - Input ring read index */
+ u32 rsvd10;
+ u32 out_wtidx; /* ORWIx - Output ring write index */
+
+ /* Command/control */
+ u32 rsvd11;
+ u32 jrcommand; /* JRCRx - JobR command */
+
+ u32 rsvd12[932];
+
+ /* Performance Monitor f00-fff */
+ struct caam_perfmon perfmon;
+};
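To show how the input-ring registers above are typically driven, here is a hedged sketch (not part of the patch); the in-memory ring inpring and index head are hypothetical driver bookkeeping:

static inline void caam_example_enqueue(struct caam_job_ring __iomem *rregs,
					dma_addr_t *inpring, int head,
					dma_addr_t desc_dma)
{
	inpring[head] = desc_dma;	/* place descriptor in next free slot */
	wmb();				/* ring entry visible before doorbell */
	wr_reg32(&rregs->inpring_jobadd, 1);	/* report one job added */
}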
+
+#define JR_RINGSIZE_MASK 0x03ff
+/*
+ * jrstatus - Job Ring Output Status
+ * All values in lo word
+ * Note that the same values are written out as status through QI
+ * in the command/status field of a frame descriptor
+ */
+#define JRSTA_SSRC_SHIFT 28
+#define JRSTA_SSRC_MASK 0xf0000000
+
+#define JRSTA_SSRC_NONE 0x00000000
+#define JRSTA_SSRC_CCB_ERROR 0x20000000
+#define JRSTA_SSRC_JUMP_HALT_USER 0x30000000
+#define JRSTA_SSRC_DECO 0x40000000
+#define JRSTA_SSRC_JRERROR 0x60000000
+#define JRSTA_SSRC_JUMP_HALT_CC 0x70000000
+
+#define JRSTA_DECOERR_JUMP 0x08000000
+#define JRSTA_DECOERR_INDEX_SHIFT 8
+#define JRSTA_DECOERR_INDEX_MASK 0xff00
+#define JRSTA_DECOERR_ERROR_MASK 0x00ff
+
+#define JRSTA_DECOERR_NONE 0x00
+#define JRSTA_DECOERR_LINKLEN 0x01
+#define JRSTA_DECOERR_LINKPTR 0x02
+#define JRSTA_DECOERR_JRCTRL 0x03
+#define JRSTA_DECOERR_DESCCMD 0x04
+#define JRSTA_DECOERR_ORDER 0x05
+#define JRSTA_DECOERR_KEYCMD 0x06
+#define JRSTA_DECOERR_LOADCMD 0x07
+#define JRSTA_DECOERR_STORECMD 0x08
+#define JRSTA_DECOERR_OPCMD 0x09
+#define JRSTA_DECOERR_FIFOLDCMD 0x0a
+#define JRSTA_DECOERR_FIFOSTCMD 0x0b
+#define JRSTA_DECOERR_MOVECMD 0x0c
+#define JRSTA_DECOERR_JUMPCMD 0x0d
+#define JRSTA_DECOERR_MATHCMD 0x0e
+#define JRSTA_DECOERR_SHASHCMD 0x0f
+#define JRSTA_DECOERR_SEQCMD 0x10
+#define JRSTA_DECOERR_DECOINTERNAL 0x11
+#define JRSTA_DECOERR_SHDESCHDR 0x12
+#define JRSTA_DECOERR_HDRLEN 0x13
+#define JRSTA_DECOERR_BURSTER 0x14
+#define JRSTA_DECOERR_DESCSIGNATURE 0x15
+#define JRSTA_DECOERR_DMA 0x16
+#define JRSTA_DECOERR_BURSTFIFO 0x17
+#define JRSTA_DECOERR_JRRESET 0x1a
+#define JRSTA_DECOERR_JOBFAIL 0x1b
+#define JRSTA_DECOERR_DNRERR 0x80
+#define JRSTA_DECOERR_UNDEFPCL 0x81
+#define JRSTA_DECOERR_PDBERR 0x82
+#define JRSTA_DECOERR_ANRPLY_LATE 0x83
+#define JRSTA_DECOERR_ANRPLY_REPLAY 0x84
+#define JRSTA_DECOERR_SEQOVF 0x85
+#define JRSTA_DECOERR_INVSIGN 0x86
+#define JRSTA_DECOERR_DSASIGN 0x87
+
+#define JRSTA_CCBERR_JUMP 0x08000000
+#define JRSTA_CCBERR_INDEX_MASK 0xff00
+#define JRSTA_CCBERR_INDEX_SHIFT 8
+#define JRSTA_CCBERR_CHAID_MASK 0x00f0
+#define JRSTA_CCBERR_CHAID_SHIFT 4
+#define JRSTA_CCBERR_ERRID_MASK 0x000f
+
+#define JRSTA_CCBERR_CHAID_AES (0x01 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_DES (0x02 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_ARC4 (0x03 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_MD (0x04 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_RNG (0x05 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_SNOW (0x06 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_KASUMI (0x07 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_PK (0x08 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_CRC (0x09 << JRSTA_CCBERR_CHAID_SHIFT)
+
+#define JRSTA_CCBERR_ERRID_NONE 0x00
+#define JRSTA_CCBERR_ERRID_MODE 0x01
+#define JRSTA_CCBERR_ERRID_DATASIZ 0x02
+#define JRSTA_CCBERR_ERRID_KEYSIZ 0x03
+#define JRSTA_CCBERR_ERRID_PKAMEMSZ 0x04
+#define JRSTA_CCBERR_ERRID_PKBMEMSZ 0x05
+#define JRSTA_CCBERR_ERRID_SEQUENCE 0x06
+#define JRSTA_CCBERR_ERRID_PKDIVZRO 0x07
+#define JRSTA_CCBERR_ERRID_PKMODEVN 0x08
+#define JRSTA_CCBERR_ERRID_KEYPARIT 0x09
+#define JRSTA_CCBERR_ERRID_ICVCHK 0x0a
+#define JRSTA_CCBERR_ERRID_HARDWARE 0x0b
+#define JRSTA_CCBERR_ERRID_CCMAAD 0x0c
+#define JRSTA_CCBERR_ERRID_INVCHA 0x0f
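As a hedged example (not part of the patch), a completion callback could use the masks above to spot an ICV comparison failure, i.e. an authentication error reported by a CCB:

static inline bool caam_example_is_icv_failure(u32 jrstatus)
{
	return (jrstatus & JRSTA_SSRC_MASK) == JRSTA_SSRC_CCB_ERROR &&
	       (jrstatus & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK;
}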
+
+#define JRINT_ERR_INDEX_MASK 0x3fff0000
+#define JRINT_ERR_INDEX_SHIFT 16
+#define JRINT_ERR_TYPE_MASK 0xf00
+#define JRINT_ERR_TYPE_SHIFT 8
+#define JRINT_ERR_HALT_MASK 0xc
+#define JRINT_ERR_HALT_SHIFT 2
+#define JRINT_ERR_HALT_INPROGRESS 0x4
+#define JRINT_ERR_HALT_COMPLETE 0x8
+#define JRINT_JR_ERROR 0x02
+#define JRINT_JR_INT 0x01
+
+#define JRINT_ERR_TYPE_WRITE 1
+#define JRINT_ERR_TYPE_BAD_INPADDR 3
+#define JRINT_ERR_TYPE_BAD_OUTADDR 4
+#define JRINT_ERR_TYPE_INV_INPWRT 5
+#define JRINT_ERR_TYPE_INV_OUTWRT 6
+#define JRINT_ERR_TYPE_RESET 7
+#define JRINT_ERR_TYPE_REMOVE_OFL 8
+#define JRINT_ERR_TYPE_ADD_OFL 9
+
+#define JRCFG_SOE 0x04
+#define JRCFG_ICEN 0x02
+#define JRCFG_IMSK 0x01
+#define JRCFG_ICDCT_SHIFT 8
+#define JRCFG_ICTT_SHIFT 16
+
+#define JRCR_RESET 0x01
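A hedged sketch of how the JRCFG bits above combine into the low ring-configuration word when interrupt coalescing is enabled; the count and time thresholds are hypothetical values here:

static inline u32 caam_example_rconfig_lo(u32 count_thld, u32 time_thld)
{
	/* enable coalescing and program both thresholds */
	return JRCFG_ICEN |
	       (count_thld << JRCFG_ICDCT_SHIFT) |
	       (time_thld << JRCFG_ICTT_SHIFT);
}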
+
+/*
+ * caam_assurance - Assurance Controller View
+ * base + 0x6000 padded out to 0x1000
+ */
+
+struct rtic_element {
+ u64 address;
+ u32 rsvd;
+ u32 length;
+};
+
+struct rtic_block {
+ struct rtic_element element[2];
+};
+
+struct rtic_memhash {
+ u32 memhash_be[32];
+ u32 memhash_le[32];
+};
+
+struct caam_assurance {
+ /* Status/Command/Watchdog */
+ u32 rsvd1;
+ u32 status; /* RSTA - Status */
+ u32 rsvd2;
+ u32 cmd; /* RCMD - Command */
+ u32 rsvd3;
+ u32 ctrl; /* RCTL - Control */
+ u32 rsvd4;
+ u32 throttle; /* RTHR - Throttle */
+ u32 rsvd5[2];
+ u64 watchdog; /* RWDOG - Watchdog Timer */
+ u32 rsvd6;
+ u32 rend; /* REND - Endian corrections */
+ u32 rsvd7[50];
+
+ /* Block access/configuration @ 100/110/120/130 */
+ struct rtic_block memblk[4]; /* Memory Blocks A-D */
+ u32 rsvd8[32];
+
+ /* Block hashes @ 200/300/400/500 */
+ struct rtic_memhash hash[4]; /* Block hash values A-D */
+ u32 rsvd_3[640];
+};
+
+/*
+ * caam_queue_if - QI configuration and control
+ * starts base + 0x7000, padded out to 0x1000 long
+ */
+
+struct caam_queue_if {
+ u32 qi_control_hi; /* QICTL - QI Control */
+ u32 qi_control_lo;
+ u32 rsvd1;
+ u32 qi_status; /* QISTA - QI Status */
+ u32 qi_deq_cfg_hi; /* QIDQC - QI Dequeue Configuration */
+ u32 qi_deq_cfg_lo;
+ u32 qi_enq_cfg_hi; /* QISEQC - QI Enqueue Command */
+ u32 qi_enq_cfg_lo;
+ u32 rsvd2[1016];
+};
+
+/* QI control bits - low word */
+#define QICTL_DQEN 0x01 /* Enable frame pop */
+#define QICTL_STOP 0x02 /* Stop dequeue/enqueue */
+#define QICTL_SOE 0x04 /* Stop on error */
+
+/* QI control bits - high word */
+#define QICTL_MBSI 0x01
+#define QICTL_MHWSI 0x02
+#define QICTL_MWSI 0x04
+#define QICTL_MDWSI 0x08
+#define QICTL_CBSI 0x10 /* CtrlDataByteSwapInput */
+#define QICTL_CHWSI 0x20 /* CtrlDataHalfSwapInput */
+#define QICTL_CWSI 0x40 /* CtrlDataWordSwapInput */
+#define QICTL_CDWSI 0x80 /* CtrlDataDWordSwapInput */
+#define QICTL_MBSO 0x0100
+#define QICTL_MHWSO 0x0200
+#define QICTL_MWSO 0x0400
+#define QICTL_MDWSO 0x0800
+#define QICTL_CBSO 0x1000 /* CtrlDataByteSwapOutput */
+#define QICTL_CHWSO 0x2000 /* CtrlDataHalfSwapOutput */
+#define QICTL_CWSO 0x4000 /* CtrlDataWordSwapOutput */
+#define QICTL_CDWSO 0x8000 /* CtrlDataDWordSwapOutput */
+#define QICTL_DMBS 0x010000
+#define QICTL_EPO 0x020000
+
+/* QI status bits */
+#define QISTA_PHRDERR 0x01 /* PreHeader Read Error */
+#define QISTA_CFRDERR 0x02 /* Compound Frame Read Error */
+#define QISTA_OFWRERR 0x04 /* Output Frame Write Error */
+#define QISTA_BPDERR 0x08 /* Buffer Pool Depleted */
+#define QISTA_BTSERR 0x10 /* Buffer Undersize */
+#define QISTA_CFWRERR 0x20 /* Compound Frame Write Err */
+#define QISTA_STOPD 0x80000000 /* QI Stopped (see QICTL) */
+
+/* deco_sg_table - DECO view of scatter/gather table */
+struct deco_sg_table {
+ u64 addr; /* Segment Address */
+ u32 elen; /* E, F bits + 30-bit length */
+ u32 bpid_offset; /* Buffer Pool ID + 16-bit length */
+};
+
+/*
+ * caam_deco - descriptor controller - CHA cluster block
+ *
+ * Only accessible when direct DECO access is turned on
+ * (done in DECORR, via MID programmed in DECOxMID)
+ *
+ * 5 typical, base + 0x8000/9000/a000/b000
+ * Padded out to 0x1000 long
+ */
+struct caam_deco {
+ u32 rsvd1;
+ u32 cls1_mode; /* CxC1MR - Class 1 Mode */
+ u32 rsvd2;
+ u32 cls1_keysize; /* CxC1KSR - Class 1 Key Size */
+ u32 cls1_datasize_hi; /* CxC1DSR - Class 1 Data Size */
+ u32 cls1_datasize_lo;
+ u32 rsvd3;
+ u32 cls1_icvsize; /* CxC1ICVSR - Class 1 ICV size */
+ u32 rsvd4[5];
+ u32 cha_ctrl; /* CCTLR - CHA control */
+ u32 rsvd5;
+ u32 irq_crtl; /* CxCIRQ - CCB interrupt done/error/clear */
+ u32 rsvd6;
+ u32 clr_written; /* CxCWR - Clear-Written */
+ u32 ccb_status_hi; /* CxCSTA - CCB Status/Error */
+ u32 ccb_status_lo;
+ u32 rsvd7[3];
+ u32 aad_size; /* CxAADSZR - Current AAD Size */
+ u32 rsvd8;
+ u32 cls1_iv_size; /* CxC1IVSZR - Current Class 1 IV Size */
+ u32 rsvd9[7];
+ u32 pkha_a_size; /* PKASZRx - Size of PKHA A */
+ u32 rsvd10;
+ u32 pkha_b_size; /* PKBSZRx - Size of PKHA B */
+ u32 rsvd11;
+ u32 pkha_n_size; /* PKNSZRx - Size of PKHA N */
+ u32 rsvd12;
+ u32 pkha_e_size; /* PKESZRx - Size of PKHA E */
+ u32 rsvd13[24];
+ u32 cls1_ctx[16]; /* CxC1CTXR - Class 1 Context @100 */
+ u32 rsvd14[48];
+ u32 cls1_key[8]; /* CxC1KEYR - Class 1 Key @200 */
+ u32 rsvd15[121];
+ u32 cls2_mode; /* CxC2MR - Class 2 Mode */
+ u32 rsvd16;
+ u32 cls2_keysize; /* CxX2KSR - Class 2 Key Size */
+ u32 cls2_datasize_hi; /* CxC2DSR - Class 2 Data Size */
+ u32 cls2_datasize_lo;
+ u32 rsvd17;
+ u32 cls2_icvsize; /* CxC2ICVSZR - Class 2 ICV Size */
+ u32 rsvd18[56];
+ u32 cls2_ctx[18]; /* CxC2CTXR - Class 2 Context @500 */
+ u32 rsvd19[46];
+ u32 cls2_key[32]; /* CxC2KEYR - Class2 Key @600 */
+ u32 rsvd20[84];
+ u32 inp_infofifo_hi; /* CxIFIFO - Input Info FIFO @7d0 */
+ u32 inp_infofifo_lo;
+ u32 rsvd21[2];
+ u64 inp_datafifo; /* CxDFIFO - Input Data FIFO */
+ u32 rsvd22[2];
+ u64 out_datafifo; /* CxOFIFO - Output Data FIFO */
+ u32 rsvd23[2];
+ u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */
+ u32 jr_ctl_lo;
+ u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */
+#define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF
+ u32 op_status_hi; /* DxOPSTA - DECO Operation Status */
+ u32 op_status_lo;
+ u32 rsvd24[2];
+ u32 liodn; /* DxLSR - DECO LIODN Status - non-seq */
+ u32 td_liodn; /* DxLSR - DECO LIODN Status - trustdesc */
+ u32 rsvd26[6];
+ u64 math[4]; /* DxMTH - Math register */
+ u32 rsvd27[8];
+ struct deco_sg_table gthr_tbl[4]; /* DxGTR - Gather Tables */
+ u32 rsvd28[16];
+ struct deco_sg_table sctr_tbl[4]; /* DxSTR - Scatter Tables */
+ u32 rsvd29[48];
+ u32 descbuf[64]; /* DxDESB - Descriptor buffer */
+ u32 rscvd30[193];
+#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000
+#define DESC_DBG_DECO_STAT_VALID 0x80000000
+#define DESC_DBG_DECO_STAT_MASK 0x00F00000
+ u32 desc_dbg; /* DxDDR - DECO Debug Register */
+ u32 rsvd31[126];
+};
+
+#define DECO_JQCR_WHL 0x20000000
+#define DECO_JQCR_FOUR 0x10000000
+
+#define JR_BLOCK_NUMBER 1
+#define ASSURE_BLOCK_NUMBER 6
+#define QI_BLOCK_NUMBER 7
+#define DECO_BLOCK_NUMBER 8
+#define PG_SIZE_4K 0x1000
+#define PG_SIZE_64K 0x10000
+#endif /* REGS_H */
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
new file mode 100644
index 000000000..3b918218a
--- /dev/null
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -0,0 +1,118 @@
+/*
+ * CAAM/SEC 4.x functions for using scatterlists in caam driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ */
+
+struct sec4_sg_entry;
+
+/*
+ * convert single dma address to h/w link table format
+ */
+static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
+ dma_addr_t dma, u32 len, u32 offset)
+{
+ sec4_sg_ptr->ptr = dma;
+ sec4_sg_ptr->len = len;
+ sec4_sg_ptr->reserved = 0;
+ sec4_sg_ptr->buf_pool_id = 0;
+ sec4_sg_ptr->offset = offset;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
+ sizeof(struct sec4_sg_entry), 1);
+#endif
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * does not set the final bit; instead, returns a pointer to the
+ * last entry so the caller can mark it
+ */
+static inline struct sec4_sg_entry *
+sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
+ struct sec4_sg_entry *sec4_sg_ptr, u32 offset)
+{
+ while (sg_count) {
+ dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
+ sg_dma_len(sg), offset);
+ sec4_sg_ptr++;
+ sg = sg_next(sg);
+ sg_count--;
+ }
+ return sec4_sg_ptr - 1;
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * scatterlist must have been previously dma mapped
+ */
+static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
+ struct sec4_sg_entry *sec4_sg_ptr,
+ u32 offset)
+{
+ sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
+ sec4_sg_ptr->len |= SEC4_SG_LEN_FIN;
+}
+
+/* count number of scatterlist elements spanning nbytes; flag chained lists */
+static inline int __sg_count(struct scatterlist *sg_list, int nbytes,
+ bool *chained)
+{
+ struct scatterlist *sg = sg_list;
+ int sg_nents = 0;
+
+ while (nbytes > 0) {
+ sg_nents++;
+ nbytes -= sg->length;
+ if (!sg_is_last(sg) && (sg + 1)->length == 0)
+ *chained = true;
+ sg = sg_next(sg);
+ }
+
+ return sg_nents;
+}
+
+/* derive number of elements in scatterlist, but return 0 for a single entry */
+static inline int sg_count(struct scatterlist *sg_list, int nbytes,
+ bool *chained)
+{
+ int sg_nents = __sg_count(sg_list, nbytes, chained);
+
+ if (likely(sg_nents == 1))
+ return 0;
+
+ return sg_nents;
+}
+
+static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
+ unsigned int nents, enum dma_data_direction dir,
+ bool chained)
+{
+ if (unlikely(chained)) {
+ int i;
+ for (i = 0; i < nents; i++) {
+ dma_map_sg(dev, sg, 1, dir);
+ sg = sg_next(sg);
+ }
+ } else {
+ dma_map_sg(dev, sg, nents, dir);
+ }
+ return nents;
+}
+
+static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
+ unsigned int nents, enum dma_data_direction dir,
+ bool chained)
+{
+ if (unlikely(chained)) {
+ int i;
+ for (i = 0; i < nents; i++) {
+ dma_unmap_sg(dev, sg, 1, dir);
+ sg = sg_next(sg);
+ }
+ } else {
+ dma_unmap_sg(dev, sg, nents, dir);
+ }
+ return nents;
+}
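Finally, a hedged usage sketch (not part of the patch) tying these helpers together the way the caamalg/caamhash callers typically do; dev, src, nbytes and the preallocated tbl are hypothetical, and the usual dma-mapping includes are assumed:

static inline int caam_example_map_src(struct device *dev,
				       struct scatterlist *src, int nbytes,
				       struct sec4_sg_entry *tbl)
{
	bool chained = false;
	int nents = sg_count(src, nbytes, &chained);

	if (nents) {
		/* multiple entries: map them and build a SEC4 link table */
		dma_map_sg_chained(dev, src, nents, DMA_TO_DEVICE, chained);
		sg_to_sec4_sg_last(src, nents, tbl, 0);
	} else {
		/* single contiguous buffer: no link table needed */
		dma_map_sg_chained(dev, src, 1, DMA_TO_DEVICE, chained);
	}
	return nents;
}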