author    André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-08-05 17:04:01 -0300
committer André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-08-05 17:04:01 -0300
commit    57f0f512b273f60d52568b8c6b77e17f5636edc0 (patch)
tree      5e910f0e82173f4ef4f51111366a3f1299037a7b /drivers/crypto
Initial import
Diffstat (limited to 'drivers/crypto')
164 files changed, 78826 insertions, 0 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig new file mode 100644 index 000000000..033c0c86f --- /dev/null +++ b/drivers/crypto/Kconfig @@ -0,0 +1,462 @@ + +menuconfig CRYPTO_HW + bool "Hardware crypto devices" + default y + ---help--- + Say Y here to get to see options for hardware crypto devices and + processors. This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and disabled. + +if CRYPTO_HW + +config CRYPTO_DEV_PADLOCK + tristate "Support for VIA PadLock ACE" + depends on X86 && !UML + help + Some VIA processors come with an integrated crypto engine + (so called VIA PadLock ACE, Advanced Cryptography Engine) + that provides instructions for very fast cryptographic + operations with supported algorithms. + + The instructions are used only when the CPU supports them. + Otherwise software encryption is used. + +config CRYPTO_DEV_PADLOCK_AES + tristate "PadLock driver for AES algorithm" + depends on CRYPTO_DEV_PADLOCK + select CRYPTO_BLKCIPHER + select CRYPTO_AES + help + Use VIA PadLock for AES algorithm. + + Available in VIA C3 and newer CPUs. + + If unsure say M. The compiled module will be + called padlock-aes. + +config CRYPTO_DEV_PADLOCK_SHA + tristate "PadLock driver for SHA1 and SHA256 algorithms" + depends on CRYPTO_DEV_PADLOCK + select CRYPTO_HASH + select CRYPTO_SHA1 + select CRYPTO_SHA256 + help + Use VIA PadLock for SHA1/SHA256 algorithms. + + Available in VIA C7 and newer processors. + + If unsure say M. The compiled module will be + called padlock-sha. + +config CRYPTO_DEV_GEODE + tristate "Support for the Geode LX AES engine" + depends on X86_32 && PCI + select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER + help + Say 'Y' here to use the AMD Geode LX processor on-board AES + engine for the CryptoAPI AES algorithm. + + To compile this driver as a module, choose M here: the module + will be called geode-aes. + +config ZCRYPT + tristate "Support for PCI-attached cryptographic adapters" + depends on S390 + select HW_RANDOM + help + Select this option if you want to use a PCI-attached cryptographic + adapter like: + + PCI Cryptographic Accelerator (PCICA) + + PCI Cryptographic Coprocessor (PCICC) + + PCI-X Cryptographic Coprocessor (PCIXCC) + + Crypto Express2 Coprocessor (CEX2C) + + Crypto Express2 Accelerator (CEX2A) + + Crypto Express3 Coprocessor (CEX3C) + + Crypto Express3 Accelerator (CEX3A) + +config CRYPTO_SHA1_S390 + tristate "SHA1 digest algorithm" + depends on S390 + select CRYPTO_HASH + help + This is the s390 hardware accelerated implementation of the + SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). + + It is available as of z990. + +config CRYPTO_SHA256_S390 + tristate "SHA256 digest algorithm" + depends on S390 + select CRYPTO_HASH + help + This is the s390 hardware accelerated implementation of the + SHA256 secure hash standard (DFIPS 180-2). + + It is available as of z9. + +config CRYPTO_SHA512_S390 + tristate "SHA384 and SHA512 digest algorithm" + depends on S390 + select CRYPTO_HASH + help + This is the s390 hardware accelerated implementation of the + SHA512 secure hash standard. + + It is available as of z10. + +config CRYPTO_DES_S390 + tristate "DES and Triple DES cipher algorithms" + depends on S390 + select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER + select CRYPTO_DES + help + This is the s390 hardware accelerated implementation of the + DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). + + As of z990 the ECB and CBC mode are hardware accelerated. 
+ As of z196 the CTR mode is hardware accelerated. + +config CRYPTO_AES_S390 + tristate "AES cipher algorithms" + depends on S390 + select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER + help + This is the s390 hardware accelerated implementation of the + AES cipher algorithms (FIPS-197). + + As of z9 the ECB and CBC modes are hardware accelerated + for 128 bit keys. + As of z10 the ECB and CBC modes are hardware accelerated + for all AES key sizes. + As of z196 the CTR mode is hardware accelerated for all AES + key sizes and XTS mode is hardware accelerated for 256 and + 512 bit keys. + +config S390_PRNG + tristate "Pseudo random number generator device driver" + depends on S390 + default "m" + help + Select this option if you want to use the s390 pseudo random number + generator. The PRNG is part of the cryptographic processor functions + and uses triple-DES to generate secure random numbers like the + ANSI X9.17 standard. User-space programs access the + pseudo-random-number device through the char device /dev/prandom. + + It is available as of z9. + +config CRYPTO_GHASH_S390 + tristate "GHASH digest algorithm" + depends on S390 + select CRYPTO_HASH + help + This is the s390 hardware accelerated implementation of the + GHASH message digest algorithm for GCM (Galois/Counter Mode). + + It is available as of z196. + +config CRYPTO_DEV_MV_CESA + tristate "Marvell's Cryptographic Engine" + depends on PLAT_ORION + select CRYPTO_ALGAPI + select CRYPTO_AES + select CRYPTO_BLKCIPHER2 + select CRYPTO_HASH + help + This driver allows you to utilize the Cryptographic Engines and + Security Accelerator (CESA) which can be found on the Marvell Orion + and Kirkwood SoCs, such as QNAP's TS-209. + + Currently the driver supports AES in ECB and CBC mode without DMA. + +config CRYPTO_DEV_NIAGARA2 + tristate "Niagara2 Stream Processing Unit driver" + select CRYPTO_DES + select CRYPTO_ALGAPI + depends on SPARC64 + help + Each core of a Niagara2 processor contains a Stream + Processing Unit, which itself contains several cryptographic + sub-units. One set provides the Modular Arithmetic Unit, + used for SSL offload. The other set provides the Cipher + Group, which can perform encryption, decryption, hashing, + checksumming, and raw copies. + +config CRYPTO_DEV_HIFN_795X + tristate "Driver HIFN 795x crypto accelerator chips" + select CRYPTO_DES + select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER + select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG + depends on PCI + depends on !ARCH_DMA_ADDR_T_64BIT + help + This option allows you to have support for HIFN 795x crypto adapters. + +config CRYPTO_DEV_HIFN_795X_RNG + bool "HIFN 795x random number generator" + depends on CRYPTO_DEV_HIFN_795X + help + Select this option if you want to enable the random number generator + on the HIFN 795x crypto adapters. + +source drivers/crypto/caam/Kconfig + +config CRYPTO_DEV_TALITOS + tristate "Talitos Freescale Security Engine (SEC)" + select CRYPTO_ALGAPI + select CRYPTO_AUTHENC + select HW_RANDOM + depends on FSL_SOC + help + Say 'Y' here to use the Freescale Security Engine (SEC) + to offload cryptographic algorithm computation. + + The Freescale SEC is present on PowerQUICC 'E' processors, such + as the MPC8349E and MPC8548E. + + To compile this driver as a module, choose M here: the module + will be called talitos. 
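[Editor's illustration, not part of the commit: the S390_PRNG entry above notes that user space reaches the generator through the /dev/prandom character device. A minimal sketch of such a reader is shown below; it assumes the prng module is loaded and that /dev/prandom exists on the target s390 system.]

/*
 * Illustrative sketch only -- not part of this commit. Reads a few bytes
 * from the s390 PRNG character device described in the S390_PRNG help
 * text. Assumes the module is loaded and /dev/prandom is present.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n, i;
	int fd = open("/dev/prandom", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/prandom");
		return 1;
	}

	n = read(fd, buf, sizeof(buf));	/* driver fills buf with PRNG output */
	if (n > 0) {
		for (i = 0; i < n; i++)
			printf("%02x", buf[i]);
		putchar('\n');
	}

	close(fd);
	return 0;
}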
+ +config CRYPTO_DEV_IXP4XX + tristate "Driver for IXP4xx crypto hardware acceleration" + depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE + select CRYPTO_DES + select CRYPTO_ALGAPI + select CRYPTO_AUTHENC + select CRYPTO_BLKCIPHER + help + Driver for the IXP4xx NPE crypto engine. + +config CRYPTO_DEV_PPC4XX + tristate "Driver AMCC PPC4xx crypto accelerator" + depends on PPC && 4xx + select CRYPTO_HASH + select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER + help + This option allows you to have support for AMCC crypto acceleration. + +config CRYPTO_DEV_OMAP_SHAM + tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator" + depends on ARCH_OMAP2PLUS + select CRYPTO_SHA1 + select CRYPTO_MD5 + select CRYPTO_SHA256 + select CRYPTO_SHA512 + select CRYPTO_HMAC + help + OMAP processors have MD5/SHA1/SHA2 hw accelerator. Select this if you + want to use the OMAP module for MD5/SHA1/SHA2 algorithms. + +config CRYPTO_DEV_OMAP_AES + tristate "Support for OMAP AES hw engine" + depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS + select CRYPTO_AES + select CRYPTO_BLKCIPHER2 + help + OMAP processors have AES module accelerator. Select this if you + want to use the OMAP module for AES algorithms. + +config CRYPTO_DEV_OMAP_DES + tristate "Support for OMAP DES3DES hw engine" + depends on ARCH_OMAP2PLUS + select CRYPTO_DES + select CRYPTO_BLKCIPHER2 + help + OMAP processors have DES/3DES module accelerator. Select this if you + want to use the OMAP module for DES and 3DES algorithms. Currently + the ECB and CBC modes of operation supported by the driver. Also + accesses made on unaligned boundaries are also supported. + +config CRYPTO_DEV_PICOXCELL + tristate "Support for picoXcell IPSEC and Layer2 crypto engines" + depends on ARCH_PICOXCELL && HAVE_CLK + select CRYPTO_AES + select CRYPTO_AUTHENC + select CRYPTO_ALGAPI + select CRYPTO_DES + select CRYPTO_CBC + select CRYPTO_ECB + select CRYPTO_SEQIV + help + This option enables support for the hardware offload engines in the + Picochip picoXcell SoC devices. Select this for IPSEC ESP offload + and for 3gpp Layer 2 ciphering support. + + Saying m here will build a module named pipcoxcell_crypto. + +config CRYPTO_DEV_SAHARA + tristate "Support for SAHARA crypto accelerator" + depends on ARCH_MXC && OF + select CRYPTO_BLKCIPHER + select CRYPTO_AES + select CRYPTO_ECB + help + This option enables support for the SAHARA HW crypto accelerator + found in some Freescale i.MX chips. + +config CRYPTO_DEV_S5P + tristate "Support for Samsung S5PV210/Exynos crypto accelerator" + depends on ARCH_S5PV210 || ARCH_EXYNOS + select CRYPTO_AES + select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER + help + This option allows you to have support for S5P crypto acceleration. + Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES + algorithms execution. + +config CRYPTO_DEV_NX + bool "Support for IBM Power7+ in-Nest cryptographic acceleration" + depends on PPC64 && IBMVIO && !CPU_LITTLE_ENDIAN + default n + help + Support for Power7+ in-Nest cryptographic acceleration. + +if CRYPTO_DEV_NX + source "drivers/crypto/nx/Kconfig" +endif + +config CRYPTO_DEV_UX500 + tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration" + depends on ARCH_U8500 + select CRYPTO_ALGAPI + help + Driver for ST-Ericsson UX500 crypto engine. + +if CRYPTO_DEV_UX500 + source "drivers/crypto/ux500/Kconfig" +endif # if CRYPTO_DEV_UX500 + +config CRYPTO_DEV_BFIN_CRC + tristate "Support for Blackfin CRC hardware" + depends on BF60x + help + Newer Blackfin processors have CRC hardware. 
Select this if you + want to use the Blackfin CRC module. + +config CRYPTO_DEV_ATMEL_AES + tristate "Support for Atmel AES hw accelerator" + depends on ARCH_AT91 + select CRYPTO_CBC + select CRYPTO_ECB + select CRYPTO_AES + select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER + select AT_HDMAC + help + Some Atmel processors have AES hw accelerator. + Select this if you want to use the Atmel module for + AES algorithms. + + To compile this driver as a module, choose M here: the module + will be called atmel-aes. + +config CRYPTO_DEV_ATMEL_TDES + tristate "Support for Atmel DES/TDES hw accelerator" + depends on ARCH_AT91 + select CRYPTO_DES + select CRYPTO_CBC + select CRYPTO_ECB + select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER + help + Some Atmel processors have DES/TDES hw accelerator. + Select this if you want to use the Atmel module for + DES/TDES algorithms. + + To compile this driver as a module, choose M here: the module + will be called atmel-tdes. + +config CRYPTO_DEV_ATMEL_SHA + tristate "Support for Atmel SHA hw accelerator" + depends on ARCH_AT91 + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA512 + select CRYPTO_ALGAPI + help + Some Atmel processors have SHA1/SHA224/SHA256/SHA384/SHA512 + hw accelerator. + Select this if you want to use the Atmel module for + SHA1/SHA224/SHA256/SHA384/SHA512 algorithms. + + To compile this driver as a module, choose M here: the module + will be called atmel-sha. + +config CRYPTO_DEV_CCP + bool "Support for AMD Cryptographic Coprocessor" + depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM + default n + help + The AMD Cryptographic Coprocessor provides hardware support + for encryption, hashing and related operations. + +if CRYPTO_DEV_CCP + source "drivers/crypto/ccp/Kconfig" +endif + +config CRYPTO_DEV_MXS_DCP + tristate "Support for Freescale MXS DCP" + depends on ARCH_MXS + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_CBC + select CRYPTO_ECB + select CRYPTO_AES + select CRYPTO_BLKCIPHER + select CRYPTO_ALGAPI + help + The Freescale i.MX23/i.MX28 has SHA1/SHA256 and AES128 CBC/ECB + co-processor on the die. + + To compile this driver as a module, choose M here: the module + will be called mxs-dcp. + +source "drivers/crypto/qat/Kconfig" + +config CRYPTO_DEV_QCE + tristate "Qualcomm crypto engine accelerator" + depends on (ARCH_QCOM || COMPILE_TEST) && HAS_DMA && HAS_IOMEM + select CRYPTO_AES + select CRYPTO_DES + select CRYPTO_ECB + select CRYPTO_CBC + select CRYPTO_XTS + select CRYPTO_CTR + select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER + help + This driver supports Qualcomm crypto engine accelerator + hardware. To compile this driver as a module, choose M here. The + module will be called qcrypto. + +config CRYPTO_DEV_VMX + bool "Support for VMX cryptographic acceleration instructions" + depends on PPC64 + default n + help + Support for VMX cryptographic acceleration instructions. + +source "drivers/crypto/vmx/Kconfig" + +config CRYPTO_DEV_IMGTEC_HASH + tristate "Imagination Technologies hardware hash accelerator" + depends on MIPS || COMPILE_TEST + depends on HAS_DMA + select CRYPTO_ALGAPI + select CRYPTO_MD5 + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_HASH + help + This driver interfaces with the Imagination Technologies + hardware hash accelerator. Supporting MD5/SHA1/SHA224/SHA256 + hashing algorithms. 
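[Editor's illustration, not part of the commit: each driver in this menu registers its implementations under generic CryptoAPI algorithm names (the crypto4xx driver added later in this commit registers "cbc(aes)" as cbc-aes-ppc4xx, for instance), so in-kernel users request an algorithm by name and the priority mechanism transparently selects a hardware provider when one of these options is enabled. The sketch below uses the 2015-era ablkcipher interface; error paths are abbreviated and a real asynchronous caller would also install a completion callback with ablkcipher_request_set_callback().]

/*
 * Illustrative sketch only -- not part of this commit. An in-kernel user
 * asks for "cbc(aes)" by name; the crypto core picks the highest-priority
 * registered provider (a hardware driver from this menu if enabled, the
 * software implementation otherwise).
 */
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

static int example_cbc_aes_encrypt(struct scatterlist *src,
				   struct scatterlist *dst,
				   unsigned int nbytes,
				   const u8 *key, unsigned int keylen,
				   void *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
	ret = crypto_ablkcipher_encrypt(req);	/* may return -EINPROGRESS */

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}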
+ +endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile new file mode 100644 index 000000000..fb84be7e6 --- /dev/null +++ b/drivers/crypto/Makefile @@ -0,0 +1,29 @@ +obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o +obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o +obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o +obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o +obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/ +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/ +obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o +obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o +obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o +obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o +obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o +obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o +obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o +n2_crypto-y := n2_core.o n2_asm.o +obj-$(CONFIG_CRYPTO_DEV_NX) += nx/ +obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o +obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o +obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o +obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o +obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o +obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o +obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ +obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o +obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o +obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o +obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ +obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/ +obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ +obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ diff --git a/drivers/crypto/amcc/Makefile b/drivers/crypto/amcc/Makefile new file mode 100644 index 000000000..5c0c62b65 --- /dev/null +++ b/drivers/crypto/amcc/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o +crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c new file mode 100644 index 000000000..4afca3968 --- /dev/null +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -0,0 +1,295 @@ +/** + * AMCC SoC PPC4xx Crypto Driver + * + * Copyright (c) 2008 Applied Micro Circuits Corporation. + * All rights reserved. James Hsiao <jhsiao@amcc.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * This file implements the Linux crypto algorithms. 
+ */ + +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/spinlock_types.h> +#include <linux/scatterlist.h> +#include <linux/crypto.h> +#include <linux/hash.h> +#include <crypto/internal/hash.h> +#include <linux/dma-mapping.h> +#include <crypto/algapi.h> +#include <crypto/aes.h> +#include <crypto/sha.h> +#include "crypto4xx_reg_def.h" +#include "crypto4xx_sa.h" +#include "crypto4xx_core.h" + +static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h, + u32 save_iv, u32 ld_h, u32 ld_iv, + u32 hdr_proc, u32 h, u32 c, u32 pad_type, + u32 op_grp, u32 op, u32 dir) +{ + sa->sa_command_0.w = 0; + sa->sa_command_0.bf.save_hash_state = save_h; + sa->sa_command_0.bf.save_iv = save_iv; + sa->sa_command_0.bf.load_hash_state = ld_h; + sa->sa_command_0.bf.load_iv = ld_iv; + sa->sa_command_0.bf.hdr_proc = hdr_proc; + sa->sa_command_0.bf.hash_alg = h; + sa->sa_command_0.bf.cipher_alg = c; + sa->sa_command_0.bf.pad_type = pad_type & 3; + sa->sa_command_0.bf.extend_pad = pad_type >> 2; + sa->sa_command_0.bf.op_group = op_grp; + sa->sa_command_0.bf.opcode = op; + sa->sa_command_0.bf.dir = dir; +} + +static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, + u32 hmac_mc, u32 cfb, u32 esn, + u32 sn_mask, u32 mute, u32 cp_pad, + u32 cp_pay, u32 cp_hdr) +{ + sa->sa_command_1.w = 0; + sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2; + sa->sa_command_1.bf.crypto_mode9_8 = cm & 3; + sa->sa_command_1.bf.feedback_mode = cfb, + sa->sa_command_1.bf.sa_rev = 1; + sa->sa_command_1.bf.extended_seq_num = esn; + sa->sa_command_1.bf.seq_num_mask = sn_mask; + sa->sa_command_1.bf.mutable_bit_proc = mute; + sa->sa_command_1.bf.copy_pad = cp_pad; + sa->sa_command_1.bf.copy_payload = cp_pay; + sa->sa_command_1.bf.copy_hdr = cp_hdr; +} + +int crypto4xx_encrypt(struct ablkcipher_request *req) +{ + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + + ctx->direction = DIR_OUTBOUND; + ctx->hash_final = 0; + ctx->is_hash = 0; + ctx->pd_ctl = 0x1; + + return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, + req->nbytes, req->info, + get_dynamic_sa_iv_size(ctx)); +} + +int crypto4xx_decrypt(struct ablkcipher_request *req) +{ + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + + ctx->direction = DIR_INBOUND; + ctx->hash_final = 0; + ctx->is_hash = 0; + ctx->pd_ctl = 1; + + return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, + req->nbytes, req->info, + get_dynamic_sa_iv_size(ctx)); +} + +/** + * AES Functions + */ +static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher, + const u8 *key, + unsigned int keylen, + unsigned char cm, + u8 fb) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); + struct dynamic_sa_ctl *sa; + int rc; + + if (keylen != AES_KEYSIZE_256 && + keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) { + crypto_ablkcipher_set_flags(cipher, + CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + /* Create SA */ + if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr) + crypto4xx_free_sa(ctx); + + rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4); + if (rc) + return rc; + + if (ctx->state_record_dma_addr == 0) { + rc = crypto4xx_alloc_state_record(ctx); + if (rc) { + crypto4xx_free_sa(ctx); + return rc; + } + } + /* Setup SA */ + sa = (struct dynamic_sa_ctl *) ctx->sa_in; + ctx->hash_final = 0; + + set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV, + SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, + SA_NO_HEADER_PROC, SA_HASH_ALG_NULL, + 
SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO, + SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT, + DIR_INBOUND); + + set_dynamic_sa_command_1(sa, cm, SA_HASH_MODE_HASH, + fb, SA_EXTENDED_SN_OFF, + SA_SEQ_MASK_OFF, SA_MC_ENABLE, + SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD, + SA_NOT_COPY_HDR); + crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx), + key, keylen); + sa->sa_contents = SA_AES_CONTENTS | (keylen << 2); + sa->sa_command_1.bf.key_len = keylen >> 3; + ctx->is_hash = 0; + ctx->direction = DIR_INBOUND; + memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx), + (void *)&ctx->state_record_dma_addr, 4); + ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx); + + memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4); + sa = (struct dynamic_sa_ctl *) ctx->sa_out; + sa->sa_command_0.bf.dir = DIR_OUTBOUND; + + return 0; +} + +int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CBC, + CRYPTO_FEEDBACK_MODE_NO_FB); +} + +/** + * HASH SHA1 Functions + */ +static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm, + unsigned int sa_len, + unsigned char ha, + unsigned char hm) +{ + struct crypto_alg *alg = tfm->__crt_alg; + struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg); + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); + struct dynamic_sa_ctl *sa; + struct dynamic_sa_hash160 *sa_in; + int rc; + + ctx->dev = my_alg->dev; + ctx->is_hash = 1; + ctx->hash_final = 0; + + /* Create SA */ + if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr) + crypto4xx_free_sa(ctx); + + rc = crypto4xx_alloc_sa(ctx, sa_len); + if (rc) + return rc; + + if (ctx->state_record_dma_addr == 0) { + crypto4xx_alloc_state_record(ctx); + if (!ctx->state_record_dma_addr) { + crypto4xx_free_sa(ctx); + return -ENOMEM; + } + } + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct crypto4xx_ctx)); + sa = (struct dynamic_sa_ctl *) ctx->sa_in; + set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV, + SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA, + SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL, + SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC, + SA_OPCODE_HASH, DIR_INBOUND); + set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH, + CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF, + SA_SEQ_MASK_OFF, SA_MC_ENABLE, + SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD, + SA_NOT_COPY_HDR); + ctx->direction = DIR_INBOUND; + sa->sa_contents = SA_HASH160_CONTENTS; + sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in; + /* Need to zero hash digest in SA */ + memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest)); + memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest)); + sa_in->state_ptr = ctx->state_record_dma_addr; + ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx); + + return 0; +} + +int crypto4xx_hash_init(struct ahash_request *req) +{ + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + int ds; + struct dynamic_sa_ctl *sa; + + sa = (struct dynamic_sa_ctl *) ctx->sa_in; + ds = crypto_ahash_digestsize( + __crypto_ahash_cast(req->base.tfm)); + sa->sa_command_0.bf.digest_len = ds >> 2; + sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA; + ctx->is_hash = 1; + ctx->direction = DIR_INBOUND; + + return 0; +} + +int crypto4xx_hash_update(struct ahash_request *req) +{ + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + + ctx->is_hash = 1; + ctx->hash_final = 0; + ctx->pd_ctl = 0x11; + ctx->direction = DIR_INBOUND; + + return crypto4xx_build_pd(&req->base, ctx, req->src, + 
(struct scatterlist *) req->result, + req->nbytes, NULL, 0); +} + +int crypto4xx_hash_final(struct ahash_request *req) +{ + return 0; +} + +int crypto4xx_hash_digest(struct ahash_request *req) +{ + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + + ctx->hash_final = 1; + ctx->pd_ctl = 0x11; + ctx->direction = DIR_INBOUND; + + return crypto4xx_build_pd(&req->base, ctx, req->src, + (struct scatterlist *) req->result, + req->nbytes, NULL, 0); +} + +/** + * SHA1 Algorithm + */ +int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm) +{ + return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1, + SA_HASH_MODE_HASH); +} + + diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c new file mode 100644 index 000000000..3b28e8c3d --- /dev/null +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -0,0 +1,1302 @@ +/** + * AMCC SoC PPC4xx Crypto Driver + * + * Copyright (c) 2008 Applied Micro Circuits Corporation. + * All rights reserved. James Hsiao <jhsiao@amcc.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * This file implements AMCC crypto offload Linux device driver for use with + * Linux CryptoAPI. + */ + +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/spinlock_types.h> +#include <linux/random.h> +#include <linux/scatterlist.h> +#include <linux/crypto.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/slab.h> +#include <asm/dcr.h> +#include <asm/dcr-regs.h> +#include <asm/cacheflush.h> +#include <crypto/aes.h> +#include <crypto/sha.h> +#include "crypto4xx_reg_def.h" +#include "crypto4xx_core.h" +#include "crypto4xx_sa.h" + +#define PPC4XX_SEC_VERSION_STR "0.5" + +/** + * PPC4xx Crypto Engine Initialization Routine + */ +static void crypto4xx_hw_init(struct crypto4xx_device *dev) +{ + union ce_ring_size ring_size; + union ce_ring_contol ring_ctrl; + union ce_part_ring_size part_ring_size; + union ce_io_threshold io_threshold; + u32 rand_num; + union ce_pe_dma_cfg pe_dma_cfg; + u32 device_ctrl; + + writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG); + /* setup pe dma, include reset sg, pdr and pe, then release reset */ + pe_dma_cfg.w = 0; + pe_dma_cfg.bf.bo_sgpd_en = 1; + pe_dma_cfg.bf.bo_data_en = 0; + pe_dma_cfg.bf.bo_sa_en = 1; + pe_dma_cfg.bf.bo_pd_en = 1; + pe_dma_cfg.bf.dynamic_sa_en = 1; + pe_dma_cfg.bf.reset_sg = 1; + pe_dma_cfg.bf.reset_pdr = 1; + pe_dma_cfg.bf.reset_pe = 1; + writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG); + /* un reset pe,sg and pdr */ + pe_dma_cfg.bf.pe_mode = 0; + pe_dma_cfg.bf.reset_sg = 0; + pe_dma_cfg.bf.reset_pdr = 0; + pe_dma_cfg.bf.reset_pe = 0; + pe_dma_cfg.bf.bo_td_en = 0; + writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG); + writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE); + writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE); + writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL); + 
get_random_bytes(&rand_num, sizeof(rand_num)); + writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L); + get_random_bytes(&rand_num, sizeof(rand_num)); + writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H); + ring_size.w = 0; + ring_size.bf.ring_offset = PPC4XX_PD_SIZE; + ring_size.bf.ring_size = PPC4XX_NUM_PD; + writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE); + ring_ctrl.w = 0; + writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL); + device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL); + device_ctrl |= PPC4XX_DC_3DES_EN; + writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL); + writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE); + writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE); + part_ring_size.w = 0; + part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE; + part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE; + writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE); + writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG); + io_threshold.w = 0; + io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD; + io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD; + writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD); + writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR); + writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR); + writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR); + writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR); + writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR); + writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR); + writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR); + /* un reset pe,sg and pdr */ + pe_dma_cfg.bf.pe_mode = 1; + pe_dma_cfg.bf.reset_sg = 0; + pe_dma_cfg.bf.reset_pdr = 0; + pe_dma_cfg.bf.reset_pe = 0; + pe_dma_cfg.bf.bo_td_en = 0; + writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG); + /*clear all pending interrupt*/ + writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR); + writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT); + writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT); + writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG); + writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN); +} + +int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size) +{ + ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4, + &ctx->sa_in_dma_addr, GFP_ATOMIC); + if (ctx->sa_in == NULL) + return -ENOMEM; + + ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4, + &ctx->sa_out_dma_addr, GFP_ATOMIC); + if (ctx->sa_out == NULL) { + dma_free_coherent(ctx->dev->core_dev->device, + ctx->sa_len * 4, + ctx->sa_in, ctx->sa_in_dma_addr); + return -ENOMEM; + } + + memset(ctx->sa_in, 0, size * 4); + memset(ctx->sa_out, 0, size * 4); + ctx->sa_len = size; + + return 0; +} + +void crypto4xx_free_sa(struct crypto4xx_ctx *ctx) +{ + if (ctx->sa_in != NULL) + dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4, + ctx->sa_in, ctx->sa_in_dma_addr); + if (ctx->sa_out != NULL) + dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4, + ctx->sa_out, ctx->sa_out_dma_addr); + + ctx->sa_in_dma_addr = 0; + ctx->sa_out_dma_addr = 0; + ctx->sa_len = 0; +} + +u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx) +{ + ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device, + sizeof(struct sa_state_record), + &ctx->state_record_dma_addr, GFP_ATOMIC); + if (!ctx->state_record_dma_addr) + return -ENOMEM; + memset(ctx->state_record, 0, sizeof(struct sa_state_record)); + + return 0; +} + +void 
crypto4xx_free_state_record(struct crypto4xx_ctx *ctx) +{ + if (ctx->state_record != NULL) + dma_free_coherent(ctx->dev->core_dev->device, + sizeof(struct sa_state_record), + ctx->state_record, + ctx->state_record_dma_addr); + ctx->state_record_dma_addr = 0; +} + +/** + * alloc memory for the gather ring + * no need to alloc buf for the ring + * gdr_tail, gdr_head and gdr_count are initialized by this function + */ +static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev) +{ + int i; + struct pd_uinfo *pd_uinfo; + dev->pdr = dma_alloc_coherent(dev->core_dev->device, + sizeof(struct ce_pd) * PPC4XX_NUM_PD, + &dev->pdr_pa, GFP_ATOMIC); + if (!dev->pdr) + return -ENOMEM; + + dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD, + GFP_KERNEL); + if (!dev->pdr_uinfo) { + dma_free_coherent(dev->core_dev->device, + sizeof(struct ce_pd) * PPC4XX_NUM_PD, + dev->pdr, + dev->pdr_pa); + return -ENOMEM; + } + memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD); + dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device, + 256 * PPC4XX_NUM_PD, + &dev->shadow_sa_pool_pa, + GFP_ATOMIC); + if (!dev->shadow_sa_pool) + return -ENOMEM; + + dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device, + sizeof(struct sa_state_record) * PPC4XX_NUM_PD, + &dev->shadow_sr_pool_pa, GFP_ATOMIC); + if (!dev->shadow_sr_pool) + return -ENOMEM; + for (i = 0; i < PPC4XX_NUM_PD; i++) { + pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo + + sizeof(struct pd_uinfo) * i); + + /* alloc 256 bytes which is enough for any kind of dynamic sa */ + pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i; + pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i; + + /* alloc state record */ + pd_uinfo->sr_va = dev->shadow_sr_pool + + sizeof(struct sa_state_record) * i; + pd_uinfo->sr_pa = dev->shadow_sr_pool_pa + + sizeof(struct sa_state_record) * i; + } + + return 0; +} + +static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev) +{ + if (dev->pdr != NULL) + dma_free_coherent(dev->core_dev->device, + sizeof(struct ce_pd) * PPC4XX_NUM_PD, + dev->pdr, dev->pdr_pa); + if (dev->shadow_sa_pool) + dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD, + dev->shadow_sa_pool, dev->shadow_sa_pool_pa); + if (dev->shadow_sr_pool) + dma_free_coherent(dev->core_dev->device, + sizeof(struct sa_state_record) * PPC4XX_NUM_PD, + dev->shadow_sr_pool, dev->shadow_sr_pool_pa); + + kfree(dev->pdr_uinfo); +} + +static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev) +{ + u32 retval; + u32 tmp; + + retval = dev->pdr_head; + tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD; + + if (tmp == dev->pdr_tail) + return ERING_WAS_FULL; + + dev->pdr_head = tmp; + + return retval; +} + +static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx) +{ + struct pd_uinfo *pd_uinfo; + unsigned long flags; + + pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo + + sizeof(struct pd_uinfo) * idx); + spin_lock_irqsave(&dev->core_dev->lock, flags); + if (dev->pdr_tail != PPC4XX_LAST_PD) + dev->pdr_tail++; + else + dev->pdr_tail = 0; + pd_uinfo->state = PD_ENTRY_FREE; + spin_unlock_irqrestore(&dev->core_dev->lock, flags); + + return 0; +} + +static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev, + dma_addr_t *pd_dma, u32 idx) +{ + *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx; + + return dev->pdr + sizeof(struct ce_pd) * idx; +} + +/** + * alloc memory for the gather ring + * no need to alloc buf for the ring + * gdr_tail, gdr_head and gdr_count are initialized by this function + */ +static u32 
crypto4xx_build_gdr(struct crypto4xx_device *dev) +{ + dev->gdr = dma_alloc_coherent(dev->core_dev->device, + sizeof(struct ce_gd) * PPC4XX_NUM_GD, + &dev->gdr_pa, GFP_ATOMIC); + if (!dev->gdr) + return -ENOMEM; + + memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD); + + return 0; +} + +static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev) +{ + dma_free_coherent(dev->core_dev->device, + sizeof(struct ce_gd) * PPC4XX_NUM_GD, + dev->gdr, dev->gdr_pa); +} + +/* + * when this function is called. + * preemption or interrupt must be disabled + */ +u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n) +{ + u32 retval; + u32 tmp; + if (n >= PPC4XX_NUM_GD) + return ERING_WAS_FULL; + + retval = dev->gdr_head; + tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD; + if (dev->gdr_head > dev->gdr_tail) { + if (tmp < dev->gdr_head && tmp >= dev->gdr_tail) + return ERING_WAS_FULL; + } else if (dev->gdr_head < dev->gdr_tail) { + if (tmp < dev->gdr_head || tmp >= dev->gdr_tail) + return ERING_WAS_FULL; + } + dev->gdr_head = tmp; + + return retval; +} + +static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->core_dev->lock, flags); + if (dev->gdr_tail == dev->gdr_head) { + spin_unlock_irqrestore(&dev->core_dev->lock, flags); + return 0; + } + + if (dev->gdr_tail != PPC4XX_LAST_GD) + dev->gdr_tail++; + else + dev->gdr_tail = 0; + + spin_unlock_irqrestore(&dev->core_dev->lock, flags); + + return 0; +} + +static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev, + dma_addr_t *gd_dma, u32 idx) +{ + *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx; + + return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx); +} + +/** + * alloc memory for the scatter ring + * need to alloc buf for the ring + * sdr_tail, sdr_head and sdr_count are initialized by this function + */ +static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev) +{ + int i; + struct ce_sd *sd_array; + + /* alloc memory for scatter descriptor ring */ + dev->sdr = dma_alloc_coherent(dev->core_dev->device, + sizeof(struct ce_sd) * PPC4XX_NUM_SD, + &dev->sdr_pa, GFP_ATOMIC); + if (!dev->sdr) + return -ENOMEM; + + dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE; + dev->scatter_buffer_va = + dma_alloc_coherent(dev->core_dev->device, + dev->scatter_buffer_size * PPC4XX_NUM_SD, + &dev->scatter_buffer_pa, GFP_ATOMIC); + if (!dev->scatter_buffer_va) { + dma_free_coherent(dev->core_dev->device, + sizeof(struct ce_sd) * PPC4XX_NUM_SD, + dev->sdr, dev->sdr_pa); + return -ENOMEM; + } + + sd_array = dev->sdr; + + for (i = 0; i < PPC4XX_NUM_SD; i++) { + sd_array[i].ptr = dev->scatter_buffer_pa + + dev->scatter_buffer_size * i; + } + + return 0; +} + +static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev) +{ + if (dev->sdr != NULL) + dma_free_coherent(dev->core_dev->device, + sizeof(struct ce_sd) * PPC4XX_NUM_SD, + dev->sdr, dev->sdr_pa); + + if (dev->scatter_buffer_va != NULL) + dma_free_coherent(dev->core_dev->device, + dev->scatter_buffer_size * PPC4XX_NUM_SD, + dev->scatter_buffer_va, + dev->scatter_buffer_pa); +} + +/* + * when this function is called. 
+ * preemption or interrupt must be disabled + */ +static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n) +{ + u32 retval; + u32 tmp; + + if (n >= PPC4XX_NUM_SD) + return ERING_WAS_FULL; + + retval = dev->sdr_head; + tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD; + if (dev->sdr_head > dev->gdr_tail) { + if (tmp < dev->sdr_head && tmp >= dev->sdr_tail) + return ERING_WAS_FULL; + } else if (dev->sdr_head < dev->sdr_tail) { + if (tmp < dev->sdr_head || tmp >= dev->sdr_tail) + return ERING_WAS_FULL; + } /* the head = tail, or empty case is already take cared */ + dev->sdr_head = tmp; + + return retval; +} + +static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->core_dev->lock, flags); + if (dev->sdr_tail == dev->sdr_head) { + spin_unlock_irqrestore(&dev->core_dev->lock, flags); + return 0; + } + if (dev->sdr_tail != PPC4XX_LAST_SD) + dev->sdr_tail++; + else + dev->sdr_tail = 0; + spin_unlock_irqrestore(&dev->core_dev->lock, flags); + + return 0; +} + +static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev, + dma_addr_t *sd_dma, u32 idx) +{ + *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx; + + return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx); +} + +static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev, + dma_addr_t *addr, u32 *length, + u32 *idx, u32 *offset, u32 *nbytes) +{ + u32 len; + + if (*length > dev->scatter_buffer_size) { + memcpy(phys_to_virt(*addr), + dev->scatter_buffer_va + + *idx * dev->scatter_buffer_size + *offset, + dev->scatter_buffer_size); + *offset = 0; + *length -= dev->scatter_buffer_size; + *nbytes -= dev->scatter_buffer_size; + if (*idx == PPC4XX_LAST_SD) + *idx = 0; + else + (*idx)++; + *addr = *addr + dev->scatter_buffer_size; + return 1; + } else if (*length < dev->scatter_buffer_size) { + memcpy(phys_to_virt(*addr), + dev->scatter_buffer_va + + *idx * dev->scatter_buffer_size + *offset, *length); + if ((*offset + *length) == dev->scatter_buffer_size) { + if (*idx == PPC4XX_LAST_SD) + *idx = 0; + else + (*idx)++; + *nbytes -= *length; + *offset = 0; + } else { + *nbytes -= *length; + *offset += *length; + } + + return 0; + } else { + len = (*nbytes <= dev->scatter_buffer_size) ? + (*nbytes) : dev->scatter_buffer_size; + memcpy(phys_to_virt(*addr), + dev->scatter_buffer_va + + *idx * dev->scatter_buffer_size + *offset, + len); + *offset = 0; + *nbytes -= len; + + if (*idx == PPC4XX_LAST_SD) + *idx = 0; + else + (*idx)++; + + return 0; + } +} + +static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev, + struct ce_pd *pd, + struct pd_uinfo *pd_uinfo, + u32 nbytes, + struct scatterlist *dst) +{ + dma_addr_t addr; + u32 this_sd; + u32 offset; + u32 len; + u32 i; + u32 sg_len; + struct scatterlist *sg; + + this_sd = pd_uinfo->first_sd; + offset = 0; + i = 0; + + while (nbytes) { + sg = &dst[i]; + sg_len = sg->length; + addr = dma_map_page(dev->core_dev->device, sg_page(sg), + sg->offset, sg->length, DMA_TO_DEVICE); + + if (offset == 0) { + len = (nbytes <= sg->length) ? nbytes : sg->length; + while (crypto4xx_fill_one_page(dev, &addr, &len, + &this_sd, &offset, &nbytes)) + ; + if (!nbytes) + return; + i++; + } else { + len = (nbytes <= (dev->scatter_buffer_size - offset)) ? + nbytes : (dev->scatter_buffer_size - offset); + len = (sg->length < len) ? 
sg->length : len; + while (crypto4xx_fill_one_page(dev, &addr, &len, + &this_sd, &offset, &nbytes)) + ; + if (!nbytes) + return; + sg_len -= len; + if (sg_len) { + addr += len; + while (crypto4xx_fill_one_page(dev, &addr, + &sg_len, &this_sd, &offset, &nbytes)) + ; + } + i++; + } + } +} + +static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo, + struct crypto4xx_ctx *ctx) +{ + struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in; + struct sa_state_record *state_record = + (struct sa_state_record *) pd_uinfo->sr_va; + + if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) { + memcpy((void *) pd_uinfo->dest_va, state_record->save_digest, + SA_HASH_ALG_SHA1_DIGEST_SIZE); + } + + return 0; +} + +static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev, + struct pd_uinfo *pd_uinfo) +{ + int i; + if (pd_uinfo->num_gd) { + for (i = 0; i < pd_uinfo->num_gd; i++) + crypto4xx_put_gd_to_gdr(dev); + pd_uinfo->first_gd = 0xffffffff; + pd_uinfo->num_gd = 0; + } + if (pd_uinfo->num_sd) { + for (i = 0; i < pd_uinfo->num_sd; i++) + crypto4xx_put_sd_to_sdr(dev); + + pd_uinfo->first_sd = 0xffffffff; + pd_uinfo->num_sd = 0; + } +} + +static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev, + struct pd_uinfo *pd_uinfo, + struct ce_pd *pd) +{ + struct crypto4xx_ctx *ctx; + struct ablkcipher_request *ablk_req; + struct scatterlist *dst; + dma_addr_t addr; + + ablk_req = ablkcipher_request_cast(pd_uinfo->async_req); + ctx = crypto_tfm_ctx(ablk_req->base.tfm); + + if (pd_uinfo->using_sd) { + crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes, + ablk_req->dst); + } else { + dst = pd_uinfo->dest_va; + addr = dma_map_page(dev->core_dev->device, sg_page(dst), + dst->offset, dst->length, DMA_FROM_DEVICE); + } + crypto4xx_ret_sg_desc(dev, pd_uinfo); + if (ablk_req->base.complete != NULL) + ablk_req->base.complete(&ablk_req->base, 0); + + return 0; +} + +static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev, + struct pd_uinfo *pd_uinfo) +{ + struct crypto4xx_ctx *ctx; + struct ahash_request *ahash_req; + + ahash_req = ahash_request_cast(pd_uinfo->async_req); + ctx = crypto_tfm_ctx(ahash_req->base.tfm); + + crypto4xx_copy_digest_to_dst(pd_uinfo, + crypto_tfm_ctx(ahash_req->base.tfm)); + crypto4xx_ret_sg_desc(dev, pd_uinfo); + /* call user provided callback function x */ + if (ahash_req->base.complete != NULL) + ahash_req->base.complete(&ahash_req->base, 0); + + return 0; +} + +static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx) +{ + struct ce_pd *pd; + struct pd_uinfo *pd_uinfo; + + pd = dev->pdr + sizeof(struct ce_pd)*idx; + pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx; + if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) == + CRYPTO_ALG_TYPE_ABLKCIPHER) + return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd); + else + return crypto4xx_ahash_done(dev, pd_uinfo); +} + +/** + * Note: Only use this function to copy items that is word aligned. 
+ */ +void crypto4xx_memcpy_le(unsigned int *dst, + const unsigned char *buf, + int len) +{ + u8 *tmp; + for (; len >= 4; buf += 4, len -= 4) + *dst++ = cpu_to_le32(*(unsigned int *) buf); + + tmp = (u8 *)dst; + switch (len) { + case 3: + *tmp++ = 0; + *tmp++ = *(buf+2); + *tmp++ = *(buf+1); + *tmp++ = *buf; + break; + case 2: + *tmp++ = 0; + *tmp++ = 0; + *tmp++ = *(buf+1); + *tmp++ = *buf; + break; + case 1: + *tmp++ = 0; + *tmp++ = 0; + *tmp++ = 0; + *tmp++ = *buf; + break; + default: + break; + } +} + +static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev) +{ + crypto4xx_destroy_pdr(core_dev->dev); + crypto4xx_destroy_gdr(core_dev->dev); + crypto4xx_destroy_sdr(core_dev->dev); + iounmap(core_dev->dev->ce_base); + kfree(core_dev->dev); + kfree(core_dev); +} + +void crypto4xx_return_pd(struct crypto4xx_device *dev, + u32 pd_entry, struct ce_pd *pd, + struct pd_uinfo *pd_uinfo) +{ + /* irq should be already disabled */ + dev->pdr_head = pd_entry; + pd->pd_ctl.w = 0; + pd->pd_ctl_len.w = 0; + pd_uinfo->state = PD_ENTRY_FREE; +} + +/* + * derive number of elements in scatterlist + * Shamlessly copy from talitos.c + */ +static int get_sg_count(struct scatterlist *sg_list, int nbytes) +{ + struct scatterlist *sg = sg_list; + int sg_nents = 0; + + while (nbytes) { + sg_nents++; + if (sg->length > nbytes) + break; + nbytes -= sg->length; + sg = sg_next(sg); + } + + return sg_nents; +} + +static u32 get_next_gd(u32 current) +{ + if (current != PPC4XX_LAST_GD) + return current + 1; + else + return 0; +} + +static u32 get_next_sd(u32 current) +{ + if (current != PPC4XX_LAST_SD) + return current + 1; + else + return 0; +} + +u32 crypto4xx_build_pd(struct crypto_async_request *req, + struct crypto4xx_ctx *ctx, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int datalen, + void *iv, u32 iv_len) +{ + struct crypto4xx_device *dev = ctx->dev; + dma_addr_t addr, pd_dma, sd_dma, gd_dma; + struct dynamic_sa_ctl *sa; + struct scatterlist *sg; + struct ce_gd *gd; + struct ce_pd *pd; + u32 num_gd, num_sd; + u32 fst_gd = 0xffffffff; + u32 fst_sd = 0xffffffff; + u32 pd_entry; + unsigned long flags; + struct pd_uinfo *pd_uinfo = NULL; + unsigned int nbytes = datalen, idx; + unsigned int ivlen = 0; + u32 gd_idx = 0; + + /* figure how many gd is needed */ + num_gd = get_sg_count(src, datalen); + if (num_gd == 1) + num_gd = 0; + + /* figure how many sd is needed */ + if (sg_is_last(dst) || ctx->is_hash) { + num_sd = 0; + } else { + if (datalen > PPC4XX_SD_BUFFER_SIZE) { + num_sd = datalen / PPC4XX_SD_BUFFER_SIZE; + if (datalen % PPC4XX_SD_BUFFER_SIZE) + num_sd++; + } else { + num_sd = 1; + } + } + + /* + * The follow section of code needs to be protected + * The gather ring and scatter ring needs to be consecutive + * In case of run out of any kind of descriptor, the descriptor + * already got must be return the original place. 
+ */ + spin_lock_irqsave(&dev->core_dev->lock, flags); + if (num_gd) { + fst_gd = crypto4xx_get_n_gd(dev, num_gd); + if (fst_gd == ERING_WAS_FULL) { + spin_unlock_irqrestore(&dev->core_dev->lock, flags); + return -EAGAIN; + } + } + if (num_sd) { + fst_sd = crypto4xx_get_n_sd(dev, num_sd); + if (fst_sd == ERING_WAS_FULL) { + if (num_gd) + dev->gdr_head = fst_gd; + spin_unlock_irqrestore(&dev->core_dev->lock, flags); + return -EAGAIN; + } + } + pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev); + if (pd_entry == ERING_WAS_FULL) { + if (num_gd) + dev->gdr_head = fst_gd; + if (num_sd) + dev->sdr_head = fst_sd; + spin_unlock_irqrestore(&dev->core_dev->lock, flags); + return -EAGAIN; + } + spin_unlock_irqrestore(&dev->core_dev->lock, flags); + + pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo + + sizeof(struct pd_uinfo) * pd_entry); + pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry); + pd_uinfo->async_req = req; + pd_uinfo->num_gd = num_gd; + pd_uinfo->num_sd = num_sd; + + if (iv_len || ctx->is_hash) { + ivlen = iv_len; + pd->sa = pd_uinfo->sa_pa; + sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va; + if (ctx->direction == DIR_INBOUND) + memcpy(sa, ctx->sa_in, ctx->sa_len * 4); + else + memcpy(sa, ctx->sa_out, ctx->sa_len * 4); + + memcpy((void *) sa + ctx->offset_to_sr_ptr, + &pd_uinfo->sr_pa, 4); + + if (iv_len) + crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len); + } else { + if (ctx->direction == DIR_INBOUND) { + pd->sa = ctx->sa_in_dma_addr; + sa = (struct dynamic_sa_ctl *) ctx->sa_in; + } else { + pd->sa = ctx->sa_out_dma_addr; + sa = (struct dynamic_sa_ctl *) ctx->sa_out; + } + } + pd->sa_len = ctx->sa_len; + if (num_gd) { + /* get first gd we are going to use */ + gd_idx = fst_gd; + pd_uinfo->first_gd = fst_gd; + pd_uinfo->num_gd = num_gd; + gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx); + pd->src = gd_dma; + /* enable gather */ + sa->sa_command_0.bf.gather = 1; + idx = 0; + src = &src[0]; + /* walk the sg, and setup gather array */ + while (nbytes) { + sg = &src[idx]; + addr = dma_map_page(dev->core_dev->device, sg_page(sg), + sg->offset, sg->length, DMA_TO_DEVICE); + gd->ptr = addr; + gd->ctl_len.len = sg->length; + gd->ctl_len.done = 0; + gd->ctl_len.ready = 1; + if (sg->length >= nbytes) + break; + nbytes -= sg->length; + gd_idx = get_next_gd(gd_idx); + gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx); + idx++; + } + } else { + pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src), + src->offset, src->length, DMA_TO_DEVICE); + /* + * Disable gather in sa command + */ + sa->sa_command_0.bf.gather = 0; + /* + * Indicate gather array is not used + */ + pd_uinfo->first_gd = 0xffffffff; + pd_uinfo->num_gd = 0; + } + if (ctx->is_hash || sg_is_last(dst)) { + /* + * we know application give us dst a whole piece of memory + * no need to use scatter ring. + * In case of is_hash, the icv is always at end of src data. 
+ */ + pd_uinfo->using_sd = 0; + pd_uinfo->first_sd = 0xffffffff; + pd_uinfo->num_sd = 0; + pd_uinfo->dest_va = dst; + sa->sa_command_0.bf.scatter = 0; + if (ctx->is_hash) + pd->dest = virt_to_phys((void *)dst); + else + pd->dest = (u32)dma_map_page(dev->core_dev->device, + sg_page(dst), dst->offset, + dst->length, DMA_TO_DEVICE); + } else { + struct ce_sd *sd = NULL; + u32 sd_idx = fst_sd; + nbytes = datalen; + sa->sa_command_0.bf.scatter = 1; + pd_uinfo->using_sd = 1; + pd_uinfo->dest_va = dst; + pd_uinfo->first_sd = fst_sd; + pd_uinfo->num_sd = num_sd; + sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx); + pd->dest = sd_dma; + /* setup scatter descriptor */ + sd->ctl.done = 0; + sd->ctl.rdy = 1; + /* sd->ptr should be setup by sd_init routine*/ + idx = 0; + if (nbytes >= PPC4XX_SD_BUFFER_SIZE) + nbytes -= PPC4XX_SD_BUFFER_SIZE; + else + nbytes = 0; + while (nbytes) { + sd_idx = get_next_sd(sd_idx); + sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx); + /* setup scatter descriptor */ + sd->ctl.done = 0; + sd->ctl.rdy = 1; + if (nbytes >= PPC4XX_SD_BUFFER_SIZE) + nbytes -= PPC4XX_SD_BUFFER_SIZE; + else + /* + * SD entry can hold PPC4XX_SD_BUFFER_SIZE, + * which is more than nbytes, so done. + */ + nbytes = 0; + } + } + + sa->sa_command_1.bf.hash_crypto_offset = 0; + pd->pd_ctl.w = ctx->pd_ctl; + pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen; + pd_uinfo->state = PD_ENTRY_INUSE; + wmb(); + /* write any value to push engine to read a pd */ + writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD); + return -EINPROGRESS; +} + +/** + * Algorithm Registration Functions + */ +static int crypto4xx_alg_init(struct crypto_tfm *tfm) +{ + struct crypto_alg *alg = tfm->__crt_alg; + struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg); + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->dev = amcc_alg->dev; + ctx->sa_in = NULL; + ctx->sa_out = NULL; + ctx->sa_in_dma_addr = 0; + ctx->sa_out_dma_addr = 0; + ctx->sa_len = 0; + + switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { + default: + tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx); + break; + case CRYPTO_ALG_TYPE_AHASH: + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct crypto4xx_ctx)); + break; + } + + return 0; +} + +static void crypto4xx_alg_exit(struct crypto_tfm *tfm) +{ + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto4xx_free_sa(ctx); + crypto4xx_free_state_record(ctx); +} + +int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, + struct crypto4xx_alg_common *crypto_alg, + int array_size) +{ + struct crypto4xx_alg *alg; + int i; + int rc = 0; + + for (i = 0; i < array_size; i++) { + alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL); + if (!alg) + return -ENOMEM; + + alg->alg = crypto_alg[i]; + alg->dev = sec_dev; + + switch (alg->alg.type) { + case CRYPTO_ALG_TYPE_AHASH: + rc = crypto_register_ahash(&alg->alg.u.hash); + break; + + default: + rc = crypto_register_alg(&alg->alg.u.cipher); + break; + } + + if (rc) { + list_del(&alg->entry); + kfree(alg); + } else { + list_add_tail(&alg->entry, &sec_dev->alg_list); + } + } + + return 0; +} + +static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev) +{ + struct crypto4xx_alg *alg, *tmp; + + list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) { + list_del(&alg->entry); + switch (alg->alg.type) { + case CRYPTO_ALG_TYPE_AHASH: + crypto_unregister_ahash(&alg->alg.u.hash); + break; + + default: + crypto_unregister_alg(&alg->alg.u.cipher); + } + kfree(alg); + } +} + +static void 
crypto4xx_bh_tasklet_cb(unsigned long data) +{ + struct device *dev = (struct device *)data; + struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev); + struct pd_uinfo *pd_uinfo; + struct ce_pd *pd; + u32 tail; + + while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) { + tail = core_dev->dev->pdr_tail; + pd_uinfo = core_dev->dev->pdr_uinfo + + sizeof(struct pd_uinfo)*tail; + pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail; + if ((pd_uinfo->state == PD_ENTRY_INUSE) && + pd->pd_ctl.bf.pe_done && + !pd->pd_ctl.bf.host_ready) { + pd->pd_ctl.bf.pe_done = 0; + crypto4xx_pd_done(core_dev->dev, tail); + crypto4xx_put_pd_to_pdr(core_dev->dev, tail); + pd_uinfo->state = PD_ENTRY_FREE; + } else { + /* if tail not done, break */ + break; + } + } +} + +/** + * Top Half of isr. + */ +static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data) +{ + struct device *dev = (struct device *)data; + struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev); + + if (core_dev->dev->ce_base == 0) + return 0; + + writel(PPC4XX_INTERRUPT_CLR, + core_dev->dev->ce_base + CRYPTO4XX_INT_CLR); + tasklet_schedule(&core_dev->tasklet); + + return IRQ_HANDLED; +} + +/** + * Supported Crypto Algorithms + */ +struct crypto4xx_alg_common crypto4xx_alg[] = { + /* Crypto AES modes */ + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-ppc4xx", + .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto4xx_ctx), + .cra_type = &crypto_ablkcipher_type, + .cra_init = crypto4xx_alg_init, + .cra_exit = crypto4xx_alg_exit, + .cra_module = THIS_MODULE, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_IV_SIZE, + .setkey = crypto4xx_setkey_aes_cbc, + .encrypt = crypto4xx_encrypt, + .decrypt = crypto4xx_decrypt, + } + } + }}, +}; + +/** + * Module Initialization Routine + */ +static int crypto4xx_probe(struct platform_device *ofdev) +{ + int rc; + struct resource res; + struct device *dev = &ofdev->dev; + struct crypto4xx_core_device *core_dev; + + rc = of_address_to_resource(ofdev->dev.of_node, 0, &res); + if (rc) + return -ENODEV; + + if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) { + mtdcri(SDR0, PPC460EX_SDR0_SRST, + mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET); + mtdcri(SDR0, PPC460EX_SDR0_SRST, + mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET); + } else if (of_find_compatible_node(NULL, NULL, + "amcc,ppc405ex-crypto")) { + mtdcri(SDR0, PPC405EX_SDR0_SRST, + mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET); + mtdcri(SDR0, PPC405EX_SDR0_SRST, + mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET); + } else if (of_find_compatible_node(NULL, NULL, + "amcc,ppc460sx-crypto")) { + mtdcri(SDR0, PPC460SX_SDR0_SRST, + mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET); + mtdcri(SDR0, PPC460SX_SDR0_SRST, + mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET); + } else { + printk(KERN_ERR "Crypto Function Not supported!\n"); + return -EINVAL; + } + + core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL); + if (!core_dev) + return -ENOMEM; + + dev_set_drvdata(dev, core_dev); + core_dev->ofdev = ofdev; + core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL); + if (!core_dev->dev) + goto err_alloc_dev; + + core_dev->dev->core_dev = core_dev; + core_dev->device = dev; + 
spin_lock_init(&core_dev->lock); + INIT_LIST_HEAD(&core_dev->dev->alg_list); + rc = crypto4xx_build_pdr(core_dev->dev); + if (rc) + goto err_build_pdr; + + rc = crypto4xx_build_gdr(core_dev->dev); + if (rc) + goto err_build_gdr; + + rc = crypto4xx_build_sdr(core_dev->dev); + if (rc) + goto err_build_sdr; + + /* Init tasklet for bottom half processing */ + tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb, + (unsigned long) dev); + + /* Register for Crypto isr, Crypto Engine IRQ */ + core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); + rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0, + core_dev->dev->name, dev); + if (rc) + goto err_request_irq; + + core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0); + if (!core_dev->dev->ce_base) { + dev_err(dev, "failed to of_iomap\n"); + rc = -ENOMEM; + goto err_iomap; + } + + /* need to setup pdr, rdr, gdr and sdr before this */ + crypto4xx_hw_init(core_dev->dev); + + /* Register security algorithms with Linux CryptoAPI */ + rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg, + ARRAY_SIZE(crypto4xx_alg)); + if (rc) + goto err_start_dev; + + return 0; + +err_start_dev: + iounmap(core_dev->dev->ce_base); +err_iomap: + free_irq(core_dev->irq, dev); +err_request_irq: + irq_dispose_mapping(core_dev->irq); + tasklet_kill(&core_dev->tasklet); + crypto4xx_destroy_sdr(core_dev->dev); +err_build_sdr: + crypto4xx_destroy_gdr(core_dev->dev); +err_build_gdr: + crypto4xx_destroy_pdr(core_dev->dev); +err_build_pdr: + kfree(core_dev->dev); +err_alloc_dev: + kfree(core_dev); + + return rc; +} + +static int crypto4xx_remove(struct platform_device *ofdev) +{ + struct device *dev = &ofdev->dev; + struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev); + + free_irq(core_dev->irq, dev); + irq_dispose_mapping(core_dev->irq); + + tasklet_kill(&core_dev->tasklet); + /* Un-register with Linux CryptoAPI */ + crypto4xx_unregister_alg(core_dev->dev); + /* Free all allocated memory */ + crypto4xx_stop_all(core_dev); + + return 0; +} + +static const struct of_device_id crypto4xx_match[] = { + { .compatible = "amcc,ppc4xx-crypto",}, + { }, +}; + +static struct platform_driver crypto4xx_driver = { + .driver = { + .name = "crypto4xx", + .of_match_table = crypto4xx_match, + }, + .probe = crypto4xx_probe, + .remove = crypto4xx_remove, +}; + +module_platform_driver(crypto4xx_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>"); +MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator"); + diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h new file mode 100644 index 000000000..bac0bdeb4 --- /dev/null +++ b/drivers/crypto/amcc/crypto4xx_core.h @@ -0,0 +1,196 @@ +/** + * AMCC SoC PPC4xx Crypto Driver + * + * Copyright (c) 2008 Applied Micro Circuits Corporation. + * All rights reserved. James Hsiao <jhsiao@amcc.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * This is the header file for AMCC Crypto offload Linux device driver for + * use with Linux CryptoAPI. 
+
+ */
+
+#ifndef __CRYPTO4XX_CORE_H__
+#define __CRYPTO4XX_CORE_H__
+
+#include <crypto/internal/hash.h>
+
+#define PPC460SX_SDR0_SRST 0x201
+#define PPC405EX_SDR0_SRST 0x200
+#define PPC460EX_SDR0_SRST 0x201
+#define PPC460EX_CE_RESET 0x08000000
+#define PPC460SX_CE_RESET 0x20000000
+#define PPC405EX_CE_RESET 0x00000008
+
+#define CRYPTO4XX_CRYPTO_PRIORITY 300
+#define PPC4XX_LAST_PD 63
+#define PPC4XX_NUM_PD 64
+#define PPC4XX_LAST_GD 1023
+#define PPC4XX_NUM_GD 1024
+#define PPC4XX_LAST_SD 63
+#define PPC4XX_NUM_SD 64
+#define PPC4XX_SD_BUFFER_SIZE 2048
+
+#define PD_ENTRY_INUSE 1
+#define PD_ENTRY_FREE 0
+#define ERING_WAS_FULL 0xffffffff
+
+struct crypto4xx_device;
+
+struct pd_uinfo {
+ struct crypto4xx_device *dev;
+ u32 state;
+ u32 using_sd;
+ u32 first_gd; /* first gather descriptor
+ used by this packet */
+ u32 num_gd; /* number of gather descriptors
+ used by this packet */
+ u32 first_sd; /* first scatter descriptor
+ used by this packet */
+ u32 num_sd; /* number of scatter descriptors
+ used by this packet */
+ void *sa_va; /* shadow sa, when copying from ctx->sa */
+ u32 sa_pa;
+ void *sr_va; /* state record for shadow sa */
+ u32 sr_pa;
+ struct scatterlist *dest_va;
+ struct crypto_async_request *async_req; /* base crypto request
+ for this packet */
+};
+
+struct crypto4xx_device {
+ struct crypto4xx_core_device *core_dev;
+ char *name;
+ u64 ce_phy_address;
+ void __iomem *ce_base;
+
+ void *pdr; /* base address of packet
+ descriptor ring */
+ dma_addr_t pdr_pa; /* physical address used to
+ program ce pdr_base_register */
+ void *gdr; /* gather descriptor ring */
+ dma_addr_t gdr_pa; /* physical address used to
+ program ce gdr_base_register */
+ void *sdr; /* scatter descriptor ring */
+ dma_addr_t sdr_pa; /* physical address used to
+ program ce sdr_base_register */
+ void *scatter_buffer_va;
+ dma_addr_t scatter_buffer_pa;
+ u32 scatter_buffer_size;
+
+ void *shadow_sa_pool; /* pool of memory for sa in pd_uinfo */
+ dma_addr_t shadow_sa_pool_pa;
+ void *shadow_sr_pool; /* pool of memory for sr in pd_uinfo */
+ dma_addr_t shadow_sr_pool_pa;
+ u32 pdr_tail;
+ u32 pdr_head;
+ u32 gdr_tail;
+ u32 gdr_head;
+ u32 sdr_tail;
+ u32 sdr_head;
+ void *pdr_uinfo;
+ struct list_head alg_list; /* List of algorithms supported
+ by this device */
+};
+
+struct crypto4xx_core_device {
+ struct device *device;
+ struct platform_device *ofdev;
+ struct crypto4xx_device *dev;
+ u32 int_status;
+ u32 irq;
+ struct tasklet_struct tasklet;
+ spinlock_t lock;
+};
+
+struct crypto4xx_ctx {
+ struct crypto4xx_device *dev;
+ void *sa_in;
+ dma_addr_t sa_in_dma_addr;
+ void *sa_out;
+ dma_addr_t sa_out_dma_addr;
+ void *state_record;
+ dma_addr_t state_record_dma_addr;
+ u32 sa_len;
+ u32 offset_to_sr_ptr; /* offset to state ptr, in dynamic sa */
+ u32 direction;
+ u32 next_hdr;
+ u32 save_iv;
+ u32 pd_ctl_len;
+ u32 pd_ctl;
+ u32 bypass;
+ u32 is_hash;
+ u32 hash_final;
+};
+
+struct crypto4xx_req_ctx {
+ struct crypto4xx_device *dev; /* Device to which
+ the operation is sent */
+ void *sa;
+ u32 sa_dma_addr;
+ u16 sa_len;
+};
+
+struct crypto4xx_alg_common {
+ u32 type;
+ union {
+ struct crypto_alg cipher;
+ struct ahash_alg hash;
+ } u;
+};
+
+struct crypto4xx_alg {
+ struct list_head entry;
+ struct crypto4xx_alg_common alg;
+ struct crypto4xx_device *dev;
+};
+
+static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
+ struct crypto_alg *x)
+{
+ switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ return container_of(__crypto_ahash_alg(x),
+ struct
crypto4xx_alg, alg.u.hash); + } + + return container_of(x, struct crypto4xx_alg, alg.u.cipher); +} + +extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size); +extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx); +extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx, + struct crypto4xx_ctx *rctx); +extern void crypto4xx_free_sa_rctx(struct crypto4xx_ctx *rctx); +extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx); +extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx); +extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx); +extern u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx); +extern u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx); +extern void crypto4xx_memcpy_le(unsigned int *dst, + const unsigned char *buf, int len); +extern u32 crypto4xx_build_pd(struct crypto_async_request *req, + struct crypto4xx_ctx *ctx, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int datalen, + void *iv, u32 iv_len); +extern int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen); +extern int crypto4xx_encrypt(struct ablkcipher_request *req); +extern int crypto4xx_decrypt(struct ablkcipher_request *req); +extern int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm); +extern int crypto4xx_hash_digest(struct ahash_request *req); +extern int crypto4xx_hash_final(struct ahash_request *req); +extern int crypto4xx_hash_update(struct ahash_request *req); +extern int crypto4xx_hash_init(struct ahash_request *req); +#endif diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h new file mode 100644 index 000000000..5f5fbc071 --- /dev/null +++ b/drivers/crypto/amcc/crypto4xx_reg_def.h @@ -0,0 +1,284 @@ +/** + * AMCC SoC PPC4xx Crypto Driver + * + * Copyright (c) 2008 Applied Micro Circuits Corporation. + * All rights reserved. James Hsiao <jhsiao@amcc.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * This filr defines the register set for Security Subsystem + */ + +#ifndef __CRYPTO4XX_REG_DEF_H__ +#define __CRYPTO4XX_REG_DEF_H__ + +/* CRYPTO4XX Register offset */ +#define CRYPTO4XX_DESCRIPTOR 0x00000000 +#define CRYPTO4XX_CTRL_STAT 0x00000000 +#define CRYPTO4XX_SOURCE 0x00000004 +#define CRYPTO4XX_DEST 0x00000008 +#define CRYPTO4XX_SA 0x0000000C +#define CRYPTO4XX_SA_LENGTH 0x00000010 +#define CRYPTO4XX_LENGTH 0x00000014 + +#define CRYPTO4XX_PE_DMA_CFG 0x00000040 +#define CRYPTO4XX_PE_DMA_STAT 0x00000044 +#define CRYPTO4XX_PDR_BASE 0x00000048 +#define CRYPTO4XX_RDR_BASE 0x0000004c +#define CRYPTO4XX_RING_SIZE 0x00000050 +#define CRYPTO4XX_RING_CTRL 0x00000054 +#define CRYPTO4XX_INT_RING_STAT 0x00000058 +#define CRYPTO4XX_EXT_RING_STAT 0x0000005c +#define CRYPTO4XX_IO_THRESHOLD 0x00000060 +#define CRYPTO4XX_GATH_RING_BASE 0x00000064 +#define CRYPTO4XX_SCAT_RING_BASE 0x00000068 +#define CRYPTO4XX_PART_RING_SIZE 0x0000006c +#define CRYPTO4XX_PART_RING_CFG 0x00000070 + +#define CRYPTO4XX_PDR_BASE_UADDR 0x00000080 +#define CRYPTO4XX_RDR_BASE_UADDR 0x00000084 +#define CRYPTO4XX_PKT_SRC_UADDR 0x00000088 +#define CRYPTO4XX_PKT_DEST_UADDR 0x0000008c +#define CRYPTO4XX_SA_UADDR 0x00000090 +#define CRYPTO4XX_GATH_RING_BASE_UADDR 0x000000A0 +#define CRYPTO4XX_SCAT_RING_BASE_UADDR 0x000000A4 + +#define CRYPTO4XX_SEQ_RD 0x00000408 +#define CRYPTO4XX_SEQ_MASK_RD 0x0000040C + +#define CRYPTO4XX_SA_CMD_0 0x00010600 +#define CRYPTO4XX_SA_CMD_1 0x00010604 + +#define CRYPTO4XX_STATE_PTR 0x000106dc +#define CRYPTO4XX_STATE_IV 0x00010700 +#define CRYPTO4XX_STATE_HASH_BYTE_CNT_0 0x00010710 +#define CRYPTO4XX_STATE_HASH_BYTE_CNT_1 0x00010714 + +#define CRYPTO4XX_STATE_IDIGEST_0 0x00010718 +#define CRYPTO4XX_STATE_IDIGEST_1 0x0001071c + +#define CRYPTO4XX_DATA_IN 0x00018000 +#define CRYPTO4XX_DATA_OUT 0x0001c000 + +#define CRYPTO4XX_INT_UNMASK_STAT 0x000500a0 +#define CRYPTO4XX_INT_MASK_STAT 0x000500a4 +#define CRYPTO4XX_INT_CLR 0x000500a4 +#define CRYPTO4XX_INT_EN 0x000500a8 + +#define CRYPTO4XX_INT_PKA 0x00000002 +#define CRYPTO4XX_INT_PDR_DONE 0x00008000 +#define CRYPTO4XX_INT_MA_WR_ERR 0x00020000 +#define CRYPTO4XX_INT_MA_RD_ERR 0x00010000 +#define CRYPTO4XX_INT_PE_ERR 0x00000200 +#define CRYPTO4XX_INT_USER_DMA_ERR 0x00000040 +#define CRYPTO4XX_INT_SLAVE_ERR 0x00000010 +#define CRYPTO4XX_INT_MASTER_ERR 0x00000008 +#define CRYPTO4XX_INT_ERROR 0x00030258 + +#define CRYPTO4XX_INT_CFG 0x000500ac +#define CRYPTO4XX_INT_DESCR_RD 0x000500b0 +#define CRYPTO4XX_INT_DESCR_CNT 0x000500b4 +#define CRYPTO4XX_INT_TIMEOUT_CNT 0x000500b8 + +#define CRYPTO4XX_DEVICE_CTRL 0x00060080 +#define CRYPTO4XX_DEVICE_ID 0x00060084 +#define CRYPTO4XX_DEVICE_INFO 0x00060088 +#define CRYPTO4XX_DMA_USER_SRC 0x00060094 +#define CRYPTO4XX_DMA_USER_DEST 0x00060098 +#define CRYPTO4XX_DMA_USER_CMD 0x0006009C + +#define CRYPTO4XX_DMA_CFG 0x000600d4 +#define CRYPTO4XX_BYTE_ORDER_CFG 0x000600d8 +#define CRYPTO4XX_ENDIAN_CFG 0x000600d8 + +#define CRYPTO4XX_PRNG_STAT 0x00070000 +#define CRYPTO4XX_PRNG_CTRL 0x00070004 +#define CRYPTO4XX_PRNG_SEED_L 0x00070008 +#define CRYPTO4XX_PRNG_SEED_H 0x0007000c + +#define CRYPTO4XX_PRNG_RES_0 0x00070020 +#define CRYPTO4XX_PRNG_RES_1 0x00070024 +#define CRYPTO4XX_PRNG_RES_2 0x00070028 +#define CRYPTO4XX_PRNG_RES_3 0x0007002C + +#define CRYPTO4XX_PRNG_LFSR_L 0x00070030 +#define CRYPTO4XX_PRNG_LFSR_H 0x00070034 + +/** + * Initialize CRYPTO ENGINE registers, and memory bases. 
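+ * The values below are the defaults written to those registers when the
+ * engine is initialized.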
+ */
+#define PPC4XX_PDR_POLL 0x3ff
+#define PPC4XX_OUTPUT_THRESHOLD 2
+#define PPC4XX_INPUT_THRESHOLD 2
+#define PPC4XX_PD_SIZE 6
+#define PPC4XX_CTX_DONE_INT 0x2000
+#define PPC4XX_PD_DONE_INT 0x8000
+#define PPC4XX_BYTE_ORDER 0x22222
+#define PPC4XX_INTERRUPT_CLR 0x3ffff
+#define PPC4XX_PRNG_CTRL_AUTO_EN 0x3
+#define PPC4XX_DC_3DES_EN 1
+#define PPC4XX_INT_DESCR_CNT 4
+#define PPC4XX_INT_TIMEOUT_CNT 0
+#define PPC4XX_INT_CFG 1
+/**
+ * all of the following defines are ad hoc
+ */
+#define PPC4XX_RING_RETRY 100
+#define PPC4XX_RING_POLL 100
+#define PPC4XX_SDR_SIZE PPC4XX_NUM_SD
+#define PPC4XX_GDR_SIZE PPC4XX_NUM_GD
+
+/**
+ * Generic Security Association (SA) with all possible fields. These are
+ * unlikely to be used except for reference purposes. The structure format
+ * cannot be changed, as the hardware expects the layout exactly as defined.
+ * Fields can be removed or reduced, but the ordering cannot be changed.
+ */
+#define CRYPTO4XX_DMA_CFG_OFFSET 0x40
+union ce_pe_dma_cfg {
+ struct {
+ u32 rsv:7;
+ u32 dir_host:1;
+ u32 rsv1:2;
+ u32 bo_td_en:1;
+ u32 dis_pdr_upd:1;
+ u32 bo_sgpd_en:1;
+ u32 bo_data_en:1;
+ u32 bo_sa_en:1;
+ u32 bo_pd_en:1;
+ u32 rsv2:4;
+ u32 dynamic_sa_en:1;
+ u32 pdr_mode:2;
+ u32 pe_mode:1;
+ u32 rsv3:5;
+ u32 reset_sg:1;
+ u32 reset_pdr:1;
+ u32 reset_pe:1;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_PDR_BASE_OFFSET 0x48
+#define CRYPTO4XX_RDR_BASE_OFFSET 0x4c
+#define CRYPTO4XX_RING_SIZE_OFFSET 0x50
+union ce_ring_size {
+ struct {
+ u32 ring_offset:16;
+ u32 rsv:6;
+ u32 ring_size:10;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_RING_CONTROL_OFFSET 0x54
+union ce_ring_contol {
+ struct {
+ u32 continuous:1;
+ u32 rsv:5;
+ u32 ring_retry_divisor:10;
+ u32 rsv1:4;
+ u32 ring_poll_divisor:10;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_IO_THRESHOLD_OFFSET 0x60
+union ce_io_threshold {
+ struct {
+ u32 rsv:6;
+ u32 output_threshold:10;
+ u32 rsv1:6;
+ u32 input_threshold:10;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_GATHER_RING_BASE_OFFSET 0x64
+#define CRYPTO4XX_SCATTER_RING_BASE_OFFSET 0x68
+
+union ce_part_ring_size {
+ struct {
+ u32 sdr_size:16;
+ u32 gdr_size:16;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define MAX_BURST_SIZE_32 0
+#define MAX_BURST_SIZE_64 1
+#define MAX_BURST_SIZE_128 2
+#define MAX_BURST_SIZE_256 3
+
+/* gather descriptor control length */
+struct gd_ctl_len {
+ u32 len:16;
+ u32 rsv:14;
+ u32 done:1;
+ u32 ready:1;
+} __attribute__((packed));
+
+struct ce_gd {
+ u32 ptr;
+ struct gd_ctl_len ctl_len;
+} __attribute__((packed));
+
+struct sd_ctl {
+ u32 ctl:30;
+ u32 done:1;
+ u32 rdy:1;
+} __attribute__((packed));
+
+struct ce_sd {
+ u32 ptr;
+ struct sd_ctl ctl;
+} __attribute__((packed));
+
+#define PD_PAD_CTL_32 0x10
+#define PD_PAD_CTL_64 0x20
+#define PD_PAD_CTL_128 0x40
+#define PD_PAD_CTL_256 0x80
+union ce_pd_ctl {
+ struct {
+ u32 pd_pad_ctl:8;
+ u32 status:8;
+ u32 next_hdr:8;
+ u32 rsv:2;
+ u32 cached_sa:1;
+ u32 hash_final:1;
+ u32 init_arc4:1;
+ u32 rsv1:1;
+ u32 pe_done:1;
+ u32 host_ready:1;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+union ce_pd_ctl_len {
+ struct {
+ u32 bypass:8;
+ u32 pe_done:1;
+ u32 host_ready:1;
+ u32 rsv:2;
+ u32 pkt_len:20;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+struct ce_pd {
+ union ce_pd_ctl pd_ctl;
+ u32 src;
+ u32 dest;
+ u32 sa; /* get from ctx->sa_dma_addr */
+ u32 sa_len; /* only if dynamic sa is used */
+ union ce_pd_ctl_len pd_ctl_len;
+
+} __attribute__((packed));
+#endif
diff --git 
a/drivers/crypto/amcc/crypto4xx_sa.c b/drivers/crypto/amcc/crypto4xx_sa.c new file mode 100644 index 000000000..69182e2cc --- /dev/null +++ b/drivers/crypto/amcc/crypto4xx_sa.c @@ -0,0 +1,85 @@ +/** + * AMCC SoC PPC4xx Crypto Driver + * + * Copyright (c) 2008 Applied Micro Circuits Corporation. + * All rights reserved. James Hsiao <jhsiao@amcc.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * @file crypto4xx_sa.c + * + * This file implements the security context + * associate format. + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/mod_devicetable.h> +#include <linux/interrupt.h> +#include <linux/spinlock_types.h> +#include <linux/highmem.h> +#include <linux/scatterlist.h> +#include <linux/crypto.h> +#include <crypto/algapi.h> +#include <crypto/des.h> +#include "crypto4xx_reg_def.h" +#include "crypto4xx_sa.h" +#include "crypto4xx_core.h" + +u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx) +{ + u32 offset; + union dynamic_sa_contents cts; + + if (ctx->direction == DIR_INBOUND) + cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents; + else + cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents; + offset = cts.bf.key_size + + cts.bf.inner_size + + cts.bf.outer_size + + cts.bf.spi + + cts.bf.seq_num0 + + cts.bf.seq_num1 + + cts.bf.seq_num_mask0 + + cts.bf.seq_num_mask1 + + cts.bf.seq_num_mask2 + + cts.bf.seq_num_mask3 + + cts.bf.iv0 + + cts.bf.iv1 + + cts.bf.iv2 + + cts.bf.iv3; + + return sizeof(struct dynamic_sa_ctl) + offset * 4; +} + +u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx) +{ + union dynamic_sa_contents cts; + + if (ctx->direction == DIR_INBOUND) + cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents; + else + cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents; + return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4; +} + +u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx) +{ + union dynamic_sa_contents cts; + + if (ctx->direction == DIR_INBOUND) + cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents; + else + cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents; + + return sizeof(struct dynamic_sa_ctl); +} diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h new file mode 100644 index 000000000..1352d58d4 --- /dev/null +++ b/drivers/crypto/amcc/crypto4xx_sa.h @@ -0,0 +1,243 @@ +/** + * AMCC SoC PPC4xx Crypto Driver + * + * Copyright (c) 2008 Applied Micro Circuits Corporation. + * All rights reserved. James Hsiao <jhsiao@amcc.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+ * GNU General Public License for more details.
+ *
+ * This file defines the security context
+ * association format.
+ */
+
+#ifndef __CRYPTO4XX_SA_H__
+#define __CRYPTO4XX_SA_H__
+
+#define AES_IV_SIZE 16
+
+/**
+ * Contents of Dynamic Security Association (SA) with all possible fields
+ */
+union dynamic_sa_contents {
+ struct {
+ u32 arc4_state_ptr:1;
+ u32 arc4_ij_ptr:1;
+ u32 state_ptr:1;
+ u32 iv3:1;
+ u32 iv2:1;
+ u32 iv1:1;
+ u32 iv0:1;
+ u32 seq_num_mask3:1;
+ u32 seq_num_mask2:1;
+ u32 seq_num_mask1:1;
+ u32 seq_num_mask0:1;
+ u32 seq_num1:1;
+ u32 seq_num0:1;
+ u32 spi:1;
+ u32 outer_size:5;
+ u32 inner_size:5;
+ u32 key_size:4;
+ u32 cmd_size:4;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define DIR_OUTBOUND 0
+#define DIR_INBOUND 1
+#define SA_OP_GROUP_BASIC 0
+#define SA_OPCODE_ENCRYPT 0
+#define SA_OPCODE_DECRYPT 0
+#define SA_OPCODE_HASH 3
+#define SA_CIPHER_ALG_DES 0
+#define SA_CIPHER_ALG_3DES 1
+#define SA_CIPHER_ALG_ARC4 2
+#define SA_CIPHER_ALG_AES 3
+#define SA_CIPHER_ALG_KASUMI 4
+#define SA_CIPHER_ALG_NULL 15
+
+#define SA_HASH_ALG_MD5 0
+#define SA_HASH_ALG_SHA1 1
+#define SA_HASH_ALG_NULL 15
+#define SA_HASH_ALG_SHA1_DIGEST_SIZE 20
+
+#define SA_LOAD_HASH_FROM_SA 0
+#define SA_LOAD_HASH_FROM_STATE 2
+#define SA_NOT_LOAD_HASH 3
+#define SA_LOAD_IV_FROM_SA 0
+#define SA_LOAD_IV_FROM_INPUT 1
+#define SA_LOAD_IV_FROM_STATE 2
+#define SA_LOAD_IV_GEN_IV 3
+
+#define SA_PAD_TYPE_CONSTANT 2
+#define SA_PAD_TYPE_ZERO 3
+#define SA_PAD_TYPE_TLS 5
+#define SA_PAD_TYPE_DTLS 5
+#define SA_NOT_SAVE_HASH 0
+#define SA_SAVE_HASH 1
+#define SA_NOT_SAVE_IV 0
+#define SA_SAVE_IV 1
+#define SA_HEADER_PROC 1
+#define SA_NO_HEADER_PROC 0
+
+union sa_command_0 {
+ struct {
+ u32 scatter:1;
+ u32 gather:1;
+ u32 save_hash_state:1;
+ u32 save_iv:1;
+ u32 load_hash_state:2;
+ u32 load_iv:2;
+ u32 digest_len:4;
+ u32 hdr_proc:1;
+ u32 extend_pad:1;
+ u32 stream_cipher_pad:1;
+ u32 rsv:1;
+ u32 hash_alg:4;
+ u32 cipher_alg:4;
+ u32 pad_type:2;
+ u32 op_group:2;
+ u32 dir:1;
+ u32 opcode:3;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define CRYPTO_MODE_ECB 0
+#define CRYPTO_MODE_CBC 1
+
+#define CRYPTO_FEEDBACK_MODE_NO_FB 0
+#define CRYPTO_FEEDBACK_MODE_64BIT_OFB 0
+#define CRYPTO_FEEDBACK_MODE_8BIT_CFB 1
+#define CRYPTO_FEEDBACK_MODE_1BIT_CFB 2
+#define CRYPTO_FEEDBACK_MODE_128BIT_CFB 3
+
+#define SA_AES_KEY_LEN_128 2
+#define SA_AES_KEY_LEN_192 3
+#define SA_AES_KEY_LEN_256 4
+
+#define SA_REV2 1
+/**
+ * The following defines the bits of sa_command_1.
+ * In basic hash mode this bit selects simple hash or HMAC.
+ * In IPsec mode, this bit controls muting.
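+ * (see the SA_HASH_MODE_* and SA_MC_* values directly below)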
+ */ +#define SA_HASH_MODE_HASH 0 +#define SA_HASH_MODE_HMAC 1 +#define SA_MC_ENABLE 0 +#define SA_MC_DISABLE 1 +#define SA_NOT_COPY_HDR 0 +#define SA_COPY_HDR 1 +#define SA_NOT_COPY_PAD 0 +#define SA_COPY_PAD 1 +#define SA_NOT_COPY_PAYLOAD 0 +#define SA_COPY_PAYLOAD 1 +#define SA_EXTENDED_SN_OFF 0 +#define SA_EXTENDED_SN_ON 1 +#define SA_SEQ_MASK_OFF 0 +#define SA_SEQ_MASK_ON 1 + +union sa_command_1 { + struct { + u32 crypto_mode31:1; + u32 save_arc4_state:1; + u32 arc4_stateful:1; + u32 key_len:5; + u32 hash_crypto_offset:8; + u32 sa_rev:2; + u32 byte_offset:1; + u32 hmac_muting:1; + u32 feedback_mode:2; + u32 crypto_mode9_8:2; + u32 extended_seq_num:1; + u32 seq_num_mask:1; + u32 mutable_bit_proc:1; + u32 ip_version:1; + u32 copy_pad:1; + u32 copy_payload:1; + u32 copy_hdr:1; + u32 rsv1:1; + } bf; + u32 w; +} __attribute__((packed)); + +struct dynamic_sa_ctl { + u32 sa_contents; + union sa_command_0 sa_command_0; + union sa_command_1 sa_command_1; +} __attribute__((packed)); + +/** + * State Record for Security Association (SA) + */ +struct sa_state_record { + u32 save_iv[4]; + u32 save_hash_byte_cnt[2]; + u32 save_digest[16]; +} __attribute__((packed)); + +/** + * Security Association (SA) for AES128 + * + */ +struct dynamic_sa_aes128 { + struct dynamic_sa_ctl ctrl; + u32 key[4]; + u32 iv[4]; /* for CBC, OFC, and CFB mode */ + u32 state_ptr; + u32 reserved; +} __attribute__((packed)); + +#define SA_AES128_LEN (sizeof(struct dynamic_sa_aes128)/4) +#define SA_AES128_CONTENTS 0x3e000042 + +/* + * Security Association (SA) for AES192 + */ +struct dynamic_sa_aes192 { + struct dynamic_sa_ctl ctrl; + u32 key[6]; + u32 iv[4]; /* for CBC, OFC, and CFB mode */ + u32 state_ptr; + u32 reserved; +} __attribute__((packed)); + +#define SA_AES192_LEN (sizeof(struct dynamic_sa_aes192)/4) +#define SA_AES192_CONTENTS 0x3e000062 + +/** + * Security Association (SA) for AES256 + */ +struct dynamic_sa_aes256 { + struct dynamic_sa_ctl ctrl; + u32 key[8]; + u32 iv[4]; /* for CBC, OFC, and CFB mode */ + u32 state_ptr; + u32 reserved; +} __attribute__((packed)); + +#define SA_AES256_LEN (sizeof(struct dynamic_sa_aes256)/4) +#define SA_AES256_CONTENTS 0x3e000082 +#define SA_AES_CONTENTS 0x3e000002 + +/** + * Security Association (SA) for HASH160: HMAC-SHA1 + */ +struct dynamic_sa_hash160 { + struct dynamic_sa_ctl ctrl; + u32 inner_digest[5]; + u32 outer_digest[5]; + u32 state_ptr; + u32 reserved; +} __attribute__((packed)); +#define SA_HASH160_LEN (sizeof(struct dynamic_sa_hash160)/4) +#define SA_HASH160_CONTENTS 0x2000a502 + +#endif diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h new file mode 100644 index 000000000..2786bb1a5 --- /dev/null +++ b/drivers/crypto/atmel-aes-regs.h @@ -0,0 +1,62 @@ +#ifndef __ATMEL_AES_REGS_H__ +#define __ATMEL_AES_REGS_H__ + +#define AES_CR 0x00 +#define AES_CR_START (1 << 0) +#define AES_CR_SWRST (1 << 8) +#define AES_CR_LOADSEED (1 << 16) + +#define AES_MR 0x04 +#define AES_MR_CYPHER_DEC (0 << 0) +#define AES_MR_CYPHER_ENC (1 << 0) +#define AES_MR_DUALBUFF (1 << 3) +#define AES_MR_PROCDLY_MASK (0xF << 4) +#define AES_MR_PROCDLY_OFFSET 4 +#define AES_MR_SMOD_MASK (0x3 << 8) +#define AES_MR_SMOD_MANUAL (0x0 << 8) +#define AES_MR_SMOD_AUTO (0x1 << 8) +#define AES_MR_SMOD_IDATAR0 (0x2 << 8) +#define AES_MR_KEYSIZE_MASK (0x3 << 10) +#define AES_MR_KEYSIZE_128 (0x0 << 10) +#define AES_MR_KEYSIZE_192 (0x1 << 10) +#define AES_MR_KEYSIZE_256 (0x2 << 10) +#define AES_MR_OPMOD_MASK (0x7 << 12) +#define AES_MR_OPMOD_ECB (0x0 << 12) +#define AES_MR_OPMOD_CBC 
(0x1 << 12) +#define AES_MR_OPMOD_OFB (0x2 << 12) +#define AES_MR_OPMOD_CFB (0x3 << 12) +#define AES_MR_OPMOD_CTR (0x4 << 12) +#define AES_MR_LOD (0x1 << 15) +#define AES_MR_CFBS_MASK (0x7 << 16) +#define AES_MR_CFBS_128b (0x0 << 16) +#define AES_MR_CFBS_64b (0x1 << 16) +#define AES_MR_CFBS_32b (0x2 << 16) +#define AES_MR_CFBS_16b (0x3 << 16) +#define AES_MR_CFBS_8b (0x4 << 16) +#define AES_MR_CKEY_MASK (0xF << 20) +#define AES_MR_CKEY_OFFSET 20 +#define AES_MR_CMTYP_MASK (0x1F << 24) +#define AES_MR_CMTYP_OFFSET 24 + +#define AES_IER 0x10 +#define AES_IDR 0x14 +#define AES_IMR 0x18 +#define AES_ISR 0x1C +#define AES_INT_DATARDY (1 << 0) +#define AES_INT_URAD (1 << 8) +#define AES_ISR_URAT_MASK (0xF << 12) +#define AES_ISR_URAT_IDR_WR_PROC (0x0 << 12) +#define AES_ISR_URAT_ODR_RD_PROC (0x1 << 12) +#define AES_ISR_URAT_MR_WR_PROC (0x2 << 12) +#define AES_ISR_URAT_ODR_RD_SUBK (0x3 << 12) +#define AES_ISR_URAT_MR_WR_SUBK (0x4 << 12) +#define AES_ISR_URAT_WOR_RD (0x5 << 12) + +#define AES_KEYWR(x) (0x20 + ((x) * 0x04)) +#define AES_IDATAR(x) (0x40 + ((x) * 0x04)) +#define AES_ODATAR(x) (0x50 + ((x) * 0x04)) +#define AES_IVR(x) (0x60 + ((x) * 0x04)) + +#define AES_HW_VERSION 0xFC + +#endif /* __ATMEL_AES_REGS_H__ */ diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c new file mode 100644 index 000000000..0f9a9dc06 --- /dev/null +++ b/drivers/crypto/atmel-aes.c @@ -0,0 +1,1498 @@ +/* + * Cryptographic API. + * + * Support for ATMEL AES HW acceleration. + * + * Copyright (c) 2012 Eukréa Electromatique - ATMEL + * Author: Nicolas Royer <nicolas@eukrea.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Some ideas are from omap-aes.c driver. 
+ */ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/hw_random.h> +#include <linux/platform_device.h> + +#include <linux/device.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/scatterlist.h> +#include <linux/dma-mapping.h> +#include <linux/of_device.h> +#include <linux/delay.h> +#include <linux/crypto.h> +#include <linux/cryptohash.h> +#include <crypto/scatterwalk.h> +#include <crypto/algapi.h> +#include <crypto/aes.h> +#include <crypto/hash.h> +#include <crypto/internal/hash.h> +#include <linux/platform_data/crypto-atmel.h> +#include <dt-bindings/dma/at91.h> +#include "atmel-aes-regs.h" + +#define CFB8_BLOCK_SIZE 1 +#define CFB16_BLOCK_SIZE 2 +#define CFB32_BLOCK_SIZE 4 +#define CFB64_BLOCK_SIZE 8 + +/* AES flags */ +#define AES_FLAGS_MODE_MASK 0x03ff +#define AES_FLAGS_ENCRYPT BIT(0) +#define AES_FLAGS_CBC BIT(1) +#define AES_FLAGS_CFB BIT(2) +#define AES_FLAGS_CFB8 BIT(3) +#define AES_FLAGS_CFB16 BIT(4) +#define AES_FLAGS_CFB32 BIT(5) +#define AES_FLAGS_CFB64 BIT(6) +#define AES_FLAGS_CFB128 BIT(7) +#define AES_FLAGS_OFB BIT(8) +#define AES_FLAGS_CTR BIT(9) + +#define AES_FLAGS_INIT BIT(16) +#define AES_FLAGS_DMA BIT(17) +#define AES_FLAGS_BUSY BIT(18) +#define AES_FLAGS_FAST BIT(19) + +#define ATMEL_AES_QUEUE_LENGTH 50 + +#define ATMEL_AES_DMA_THRESHOLD 16 + + +struct atmel_aes_caps { + bool has_dualbuff; + bool has_cfb64; + u32 max_burst_size; +}; + +struct atmel_aes_dev; + +struct atmel_aes_ctx { + struct atmel_aes_dev *dd; + + int keylen; + u32 key[AES_KEYSIZE_256 / sizeof(u32)]; + + u16 block_size; +}; + +struct atmel_aes_reqctx { + unsigned long mode; +}; + +struct atmel_aes_dma { + struct dma_chan *chan; + struct dma_slave_config dma_conf; +}; + +struct atmel_aes_dev { + struct list_head list; + unsigned long phys_base; + void __iomem *io_base; + + struct atmel_aes_ctx *ctx; + struct device *dev; + struct clk *iclk; + int irq; + + unsigned long flags; + int err; + + spinlock_t lock; + struct crypto_queue queue; + + struct tasklet_struct done_task; + struct tasklet_struct queue_task; + + struct ablkcipher_request *req; + size_t total; + + struct scatterlist *in_sg; + unsigned int nb_in_sg; + size_t in_offset; + struct scatterlist *out_sg; + unsigned int nb_out_sg; + size_t out_offset; + + size_t bufcnt; + size_t buflen; + size_t dma_size; + + void *buf_in; + int dma_in; + dma_addr_t dma_addr_in; + struct atmel_aes_dma dma_lch_in; + + void *buf_out; + int dma_out; + dma_addr_t dma_addr_out; + struct atmel_aes_dma dma_lch_out; + + struct atmel_aes_caps caps; + + u32 hw_version; +}; + +struct atmel_aes_drv { + struct list_head dev_list; + spinlock_t lock; +}; + +static struct atmel_aes_drv atmel_aes = { + .dev_list = LIST_HEAD_INIT(atmel_aes.dev_list), + .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock), +}; + +static int atmel_aes_sg_length(struct ablkcipher_request *req, + struct scatterlist *sg) +{ + unsigned int total = req->nbytes; + int sg_nb; + unsigned int len; + struct scatterlist *sg_list; + + sg_nb = 0; + sg_list = sg; + total = req->nbytes; + + while (total) { + len = min(sg_list->length, total); + + sg_nb++; + total -= len; + + sg_list = sg_next(sg_list); + if (!sg_list) + total = 0; + } + + return sg_nb; +} + +static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset, + void *buf, size_t buflen, size_t total, int out) +{ + unsigned int count, off = 0; + + while 
(buflen && total) { + count = min((*sg)->length - *offset, total); + count = min(count, buflen); + + if (!count) + return off; + + scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out); + + off += count; + buflen -= count; + *offset += count; + total -= count; + + if (*offset == (*sg)->length) { + *sg = sg_next(*sg); + if (*sg) + *offset = 0; + else + total = 0; + } + } + + return off; +} + +static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset) +{ + return readl_relaxed(dd->io_base + offset); +} + +static inline void atmel_aes_write(struct atmel_aes_dev *dd, + u32 offset, u32 value) +{ + writel_relaxed(value, dd->io_base + offset); +} + +static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset, + u32 *value, int count) +{ + for (; count--; value++, offset += 4) + *value = atmel_aes_read(dd, offset); +} + +static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset, + u32 *value, int count) +{ + for (; count--; value++, offset += 4) + atmel_aes_write(dd, offset, *value); +} + +static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx) +{ + struct atmel_aes_dev *aes_dd = NULL; + struct atmel_aes_dev *tmp; + + spin_lock_bh(&atmel_aes.lock); + if (!ctx->dd) { + list_for_each_entry(tmp, &atmel_aes.dev_list, list) { + aes_dd = tmp; + break; + } + ctx->dd = aes_dd; + } else { + aes_dd = ctx->dd; + } + + spin_unlock_bh(&atmel_aes.lock); + + return aes_dd; +} + +static int atmel_aes_hw_init(struct atmel_aes_dev *dd) +{ + clk_prepare_enable(dd->iclk); + + if (!(dd->flags & AES_FLAGS_INIT)) { + atmel_aes_write(dd, AES_CR, AES_CR_SWRST); + atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET); + dd->flags |= AES_FLAGS_INIT; + dd->err = 0; + } + + return 0; +} + +static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd) +{ + return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff; +} + +static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd) +{ + atmel_aes_hw_init(dd); + + dd->hw_version = atmel_aes_get_version(dd); + + dev_info(dd->dev, + "version: 0x%x\n", dd->hw_version); + + clk_disable_unprepare(dd->iclk); +} + +static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err) +{ + struct ablkcipher_request *req = dd->req; + + clk_disable_unprepare(dd->iclk); + dd->flags &= ~AES_FLAGS_BUSY; + + req->base.complete(&req->base, err); +} + +static void atmel_aes_dma_callback(void *data) +{ + struct atmel_aes_dev *dd = data; + + /* dma_lch_out - completed */ + tasklet_schedule(&dd->done_task); +} + +static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd, + dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length) +{ + struct scatterlist sg[2]; + struct dma_async_tx_descriptor *in_desc, *out_desc; + + dd->dma_size = length; + + dma_sync_single_for_device(dd->dev, dma_addr_in, length, + DMA_TO_DEVICE); + dma_sync_single_for_device(dd->dev, dma_addr_out, length, + DMA_FROM_DEVICE); + + if (dd->flags & AES_FLAGS_CFB8) { + dd->dma_lch_in.dma_conf.dst_addr_width = + DMA_SLAVE_BUSWIDTH_1_BYTE; + dd->dma_lch_out.dma_conf.src_addr_width = + DMA_SLAVE_BUSWIDTH_1_BYTE; + } else if (dd->flags & AES_FLAGS_CFB16) { + dd->dma_lch_in.dma_conf.dst_addr_width = + DMA_SLAVE_BUSWIDTH_2_BYTES; + dd->dma_lch_out.dma_conf.src_addr_width = + DMA_SLAVE_BUSWIDTH_2_BYTES; + } else { + dd->dma_lch_in.dma_conf.dst_addr_width = + DMA_SLAVE_BUSWIDTH_4_BYTES; + dd->dma_lch_out.dma_conf.src_addr_width = + DMA_SLAVE_BUSWIDTH_4_BYTES; + } + + if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 | + AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) { + 
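+ /*
+  * The narrow CFB block sizes are limited to single-beat DMA bursts;
+  * the full-block modes in the else branch use the controller's
+  * maximum supported burst size.
+  */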
dd->dma_lch_in.dma_conf.src_maxburst = 1; + dd->dma_lch_in.dma_conf.dst_maxburst = 1; + dd->dma_lch_out.dma_conf.src_maxburst = 1; + dd->dma_lch_out.dma_conf.dst_maxburst = 1; + } else { + dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size; + dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size; + dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size; + dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size; + } + + dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); + dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf); + + dd->flags |= AES_FLAGS_DMA; + + sg_init_table(&sg[0], 1); + sg_dma_address(&sg[0]) = dma_addr_in; + sg_dma_len(&sg[0]) = length; + + sg_init_table(&sg[1], 1); + sg_dma_address(&sg[1]) = dma_addr_out; + sg_dma_len(&sg[1]) = length; + + in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0], + 1, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!in_desc) + return -EINVAL; + + out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1], + 1, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!out_desc) + return -EINVAL; + + out_desc->callback = atmel_aes_dma_callback; + out_desc->callback_param = dd; + + dmaengine_submit(out_desc); + dma_async_issue_pending(dd->dma_lch_out.chan); + + dmaengine_submit(in_desc); + dma_async_issue_pending(dd->dma_lch_in.chan); + + return 0; +} + +static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd) +{ + dd->flags &= ~AES_FLAGS_DMA; + + dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in, + dd->dma_size, DMA_TO_DEVICE); + dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out, + dd->dma_size, DMA_FROM_DEVICE); + + /* use cache buffers */ + dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg); + if (!dd->nb_in_sg) + return -EINVAL; + + dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg); + if (!dd->nb_out_sg) + return -EINVAL; + + dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg, + dd->buf_in, dd->total); + + if (!dd->bufcnt) + return -EINVAL; + + dd->total -= dd->bufcnt; + + atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); + atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in, + dd->bufcnt >> 2); + + return 0; +} + +static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd) +{ + int err, fast = 0, in, out; + size_t count; + dma_addr_t addr_in, addr_out; + + if ((!dd->in_offset) && (!dd->out_offset)) { + /* check for alignment */ + in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) && + IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size); + out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) && + IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size); + fast = in && out; + + if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg)) + fast = 0; + } + + + if (fast) { + count = min(dd->total, sg_dma_len(dd->in_sg)); + count = min(count, sg_dma_len(dd->out_sg)); + + err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); + if (!err) { + dev_err(dd->dev, "dma_map_sg() error\n"); + return -EINVAL; + } + + err = dma_map_sg(dd->dev, dd->out_sg, 1, + DMA_FROM_DEVICE); + if (!err) { + dev_err(dd->dev, "dma_map_sg() error\n"); + dma_unmap_sg(dd->dev, dd->in_sg, 1, + DMA_TO_DEVICE); + return -EINVAL; + } + + addr_in = sg_dma_address(dd->in_sg); + addr_out = sg_dma_address(dd->out_sg); + + dd->flags |= AES_FLAGS_FAST; + + } else { + dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in, + dd->dma_size, DMA_TO_DEVICE); + + /* use cache buffers */ + count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset, + dd->buf_in, dd->buflen, 
dd->total, 0); + + addr_in = dd->dma_addr_in; + addr_out = dd->dma_addr_out; + + dd->flags &= ~AES_FLAGS_FAST; + } + + dd->total -= count; + + err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count); + + if (err && (dd->flags & AES_FLAGS_FAST)) { + dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); + dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); + } + + return err; +} + +static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd) +{ + int err; + u32 valcr = 0, valmr = 0; + + err = atmel_aes_hw_init(dd); + + if (err) + return err; + + /* MR register must be set before IV registers */ + if (dd->ctx->keylen == AES_KEYSIZE_128) + valmr |= AES_MR_KEYSIZE_128; + else if (dd->ctx->keylen == AES_KEYSIZE_192) + valmr |= AES_MR_KEYSIZE_192; + else + valmr |= AES_MR_KEYSIZE_256; + + if (dd->flags & AES_FLAGS_CBC) { + valmr |= AES_MR_OPMOD_CBC; + } else if (dd->flags & AES_FLAGS_CFB) { + valmr |= AES_MR_OPMOD_CFB; + if (dd->flags & AES_FLAGS_CFB8) + valmr |= AES_MR_CFBS_8b; + else if (dd->flags & AES_FLAGS_CFB16) + valmr |= AES_MR_CFBS_16b; + else if (dd->flags & AES_FLAGS_CFB32) + valmr |= AES_MR_CFBS_32b; + else if (dd->flags & AES_FLAGS_CFB64) + valmr |= AES_MR_CFBS_64b; + else if (dd->flags & AES_FLAGS_CFB128) + valmr |= AES_MR_CFBS_128b; + } else if (dd->flags & AES_FLAGS_OFB) { + valmr |= AES_MR_OPMOD_OFB; + } else if (dd->flags & AES_FLAGS_CTR) { + valmr |= AES_MR_OPMOD_CTR; + } else { + valmr |= AES_MR_OPMOD_ECB; + } + + if (dd->flags & AES_FLAGS_ENCRYPT) + valmr |= AES_MR_CYPHER_ENC; + + if (dd->total > ATMEL_AES_DMA_THRESHOLD) { + valmr |= AES_MR_SMOD_IDATAR0; + if (dd->caps.has_dualbuff) + valmr |= AES_MR_DUALBUFF; + } else { + valmr |= AES_MR_SMOD_AUTO; + } + + atmel_aes_write(dd, AES_CR, valcr); + atmel_aes_write(dd, AES_MR, valmr); + + atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key, + dd->ctx->keylen >> 2); + + if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) || + (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) && + dd->req->info) { + atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4); + } + + return 0; +} + +static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, + struct ablkcipher_request *req) +{ + struct crypto_async_request *async_req, *backlog; + struct atmel_aes_ctx *ctx; + struct atmel_aes_reqctx *rctx; + unsigned long flags; + int err, ret = 0; + + spin_lock_irqsave(&dd->lock, flags); + if (req) + ret = ablkcipher_enqueue_request(&dd->queue, req); + if (dd->flags & AES_FLAGS_BUSY) { + spin_unlock_irqrestore(&dd->lock, flags); + return ret; + } + backlog = crypto_get_backlog(&dd->queue); + async_req = crypto_dequeue_request(&dd->queue); + if (async_req) + dd->flags |= AES_FLAGS_BUSY; + spin_unlock_irqrestore(&dd->lock, flags); + + if (!async_req) + return ret; + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + + req = ablkcipher_request_cast(async_req); + + /* assign new request to device */ + dd->req = req; + dd->total = req->nbytes; + dd->in_offset = 0; + dd->in_sg = req->src; + dd->out_offset = 0; + dd->out_sg = req->dst; + + rctx = ablkcipher_request_ctx(req); + ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); + rctx->mode &= AES_FLAGS_MODE_MASK; + dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode; + dd->ctx = ctx; + ctx->dd = dd; + + err = atmel_aes_write_ctrl(dd); + if (!err) { + if (dd->total > ATMEL_AES_DMA_THRESHOLD) + err = atmel_aes_crypt_dma_start(dd); + else + err = atmel_aes_crypt_cpu_start(dd); + } + if (err) { + /* aes_task will not finish it, so do it here */ + atmel_aes_finish_req(dd, 
err); + tasklet_schedule(&dd->queue_task); + } + + return ret; +} + +static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd) +{ + int err = -EINVAL; + size_t count; + + if (dd->flags & AES_FLAGS_DMA) { + err = 0; + if (dd->flags & AES_FLAGS_FAST) { + dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); + dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); + } else { + dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out, + dd->dma_size, DMA_FROM_DEVICE); + + /* copy data */ + count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset, + dd->buf_out, dd->buflen, dd->dma_size, 1); + if (count != dd->dma_size) { + err = -EINVAL; + pr_err("not all data converted: %u\n", count); + } + } + } + + return err; +} + + +static int atmel_aes_buff_init(struct atmel_aes_dev *dd) +{ + int err = -ENOMEM; + + dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0); + dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0); + dd->buflen = PAGE_SIZE; + dd->buflen &= ~(AES_BLOCK_SIZE - 1); + + if (!dd->buf_in || !dd->buf_out) { + dev_err(dd->dev, "unable to alloc pages.\n"); + goto err_alloc; + } + + /* MAP here */ + dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, + dd->buflen, DMA_TO_DEVICE); + if (dma_mapping_error(dd->dev, dd->dma_addr_in)) { + dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); + err = -EINVAL; + goto err_map_in; + } + + dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, + dd->buflen, DMA_FROM_DEVICE); + if (dma_mapping_error(dd->dev, dd->dma_addr_out)) { + dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); + err = -EINVAL; + goto err_map_out; + } + + return 0; + +err_map_out: + dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, + DMA_TO_DEVICE); +err_map_in: +err_alloc: + free_page((unsigned long)dd->buf_out); + free_page((unsigned long)dd->buf_in); + if (err) + pr_err("error: %d\n", err); + return err; +} + +static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd) +{ + dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, + DMA_FROM_DEVICE); + dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, + DMA_TO_DEVICE); + free_page((unsigned long)dd->buf_out); + free_page((unsigned long)dd->buf_in); +} + +static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) +{ + struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx( + crypto_ablkcipher_reqtfm(req)); + struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); + struct atmel_aes_dev *dd; + + if (mode & AES_FLAGS_CFB8) { + if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) { + pr_err("request size is not exact amount of CFB8 blocks\n"); + return -EINVAL; + } + ctx->block_size = CFB8_BLOCK_SIZE; + } else if (mode & AES_FLAGS_CFB16) { + if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) { + pr_err("request size is not exact amount of CFB16 blocks\n"); + return -EINVAL; + } + ctx->block_size = CFB16_BLOCK_SIZE; + } else if (mode & AES_FLAGS_CFB32) { + if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) { + pr_err("request size is not exact amount of CFB32 blocks\n"); + return -EINVAL; + } + ctx->block_size = CFB32_BLOCK_SIZE; + } else if (mode & AES_FLAGS_CFB64) { + if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) { + pr_err("request size is not exact amount of CFB64 blocks\n"); + return -EINVAL; + } + ctx->block_size = CFB64_BLOCK_SIZE; + } else { + if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { + pr_err("request size is not exact amount of AES blocks\n"); + return -EINVAL; + } + ctx->block_size = AES_BLOCK_SIZE; + } + + dd = atmel_aes_find_dev(ctx); + if (!dd) + return -ENODEV; + + rctx->mode = mode; + + 
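+ /*
+  * Queue the request; atmel_aes_handle_queue() starts it immediately
+  * when the engine is idle, otherwise it is picked up later from the
+  * queue tasklet.
+  */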
return atmel_aes_handle_queue(dd, req); +} + +static bool atmel_aes_filter(struct dma_chan *chan, void *slave) +{ + struct at_dma_slave *sl = slave; + + if (sl && sl->dma_dev == chan->device->dev) { + chan->private = sl; + return true; + } else { + return false; + } +} + +static int atmel_aes_dma_init(struct atmel_aes_dev *dd, + struct crypto_platform_data *pdata) +{ + int err = -ENOMEM; + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + /* Try to grab 2 DMA channels */ + dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask, + atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx"); + if (!dd->dma_lch_in.chan) + goto err_dma_in; + + dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; + dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + + AES_IDATAR(0); + dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size; + dd->dma_lch_in.dma_conf.src_addr_width = + DMA_SLAVE_BUSWIDTH_4_BYTES; + dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size; + dd->dma_lch_in.dma_conf.dst_addr_width = + DMA_SLAVE_BUSWIDTH_4_BYTES; + dd->dma_lch_in.dma_conf.device_fc = false; + + dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask, + atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx"); + if (!dd->dma_lch_out.chan) + goto err_dma_out; + + dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM; + dd->dma_lch_out.dma_conf.src_addr = dd->phys_base + + AES_ODATAR(0); + dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size; + dd->dma_lch_out.dma_conf.src_addr_width = + DMA_SLAVE_BUSWIDTH_4_BYTES; + dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size; + dd->dma_lch_out.dma_conf.dst_addr_width = + DMA_SLAVE_BUSWIDTH_4_BYTES; + dd->dma_lch_out.dma_conf.device_fc = false; + + return 0; + +err_dma_out: + dma_release_channel(dd->dma_lch_in.chan); +err_dma_in: + dev_warn(dd->dev, "no DMA channel available\n"); + return err; +} + +static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd) +{ + dma_release_channel(dd->dma_lch_in.chan); + dma_release_channel(dd->dma_lch_out.chan); +} + +static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + + if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_256) { + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + return 0; +} + +static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + AES_FLAGS_ENCRYPT); +} + +static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + 0); +} + +static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + AES_FLAGS_ENCRYPT | AES_FLAGS_CBC); +} + +static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + AES_FLAGS_CBC); +} + +static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + AES_FLAGS_ENCRYPT | AES_FLAGS_OFB); +} + +static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + AES_FLAGS_OFB); +} + +static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128); +} + +static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + AES_FLAGS_CFB | 
AES_FLAGS_CFB128); +} + +static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64); +} + +static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + AES_FLAGS_CFB | AES_FLAGS_CFB64); +} + +static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32); +} + +static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + AES_FLAGS_CFB | AES_FLAGS_CFB32); +} + +static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16); +} + +static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + AES_FLAGS_CFB | AES_FLAGS_CFB16); +} + +static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8); +} + +static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + AES_FLAGS_CFB | AES_FLAGS_CFB8); +} + +static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + AES_FLAGS_ENCRYPT | AES_FLAGS_CTR); +} + +static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req) +{ + return atmel_aes_crypt(req, + AES_FLAGS_CTR); +} + +static int atmel_aes_cra_init(struct crypto_tfm *tfm) +{ + tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx); + + return 0; +} + +static void atmel_aes_cra_exit(struct crypto_tfm *tfm) +{ +} + +static struct crypto_alg aes_algs[] = { +{ + .cra_name = "ecb(aes)", + .cra_driver_name = "atmel-ecb-aes", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_aes_ctx), + .cra_alignmask = 0xf, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_aes_cra_init, + .cra_exit = atmel_aes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = atmel_aes_setkey, + .encrypt = atmel_aes_ecb_encrypt, + .decrypt = atmel_aes_ecb_decrypt, + } +}, +{ + .cra_name = "cbc(aes)", + .cra_driver_name = "atmel-cbc-aes", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_aes_ctx), + .cra_alignmask = 0xf, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_aes_cra_init, + .cra_exit = atmel_aes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = atmel_aes_setkey, + .encrypt = atmel_aes_cbc_encrypt, + .decrypt = atmel_aes_cbc_decrypt, + } +}, +{ + .cra_name = "ofb(aes)", + .cra_driver_name = "atmel-ofb-aes", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_aes_ctx), + .cra_alignmask = 0xf, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_aes_cra_init, + .cra_exit = atmel_aes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = atmel_aes_setkey, + .encrypt = atmel_aes_ofb_encrypt, + 
.decrypt = atmel_aes_ofb_decrypt, + } +}, +{ + .cra_name = "cfb(aes)", + .cra_driver_name = "atmel-cfb-aes", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_aes_ctx), + .cra_alignmask = 0xf, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_aes_cra_init, + .cra_exit = atmel_aes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = atmel_aes_setkey, + .encrypt = atmel_aes_cfb_encrypt, + .decrypt = atmel_aes_cfb_decrypt, + } +}, +{ + .cra_name = "cfb32(aes)", + .cra_driver_name = "atmel-cfb32-aes", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = CFB32_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_aes_ctx), + .cra_alignmask = 0x3, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_aes_cra_init, + .cra_exit = atmel_aes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = atmel_aes_setkey, + .encrypt = atmel_aes_cfb32_encrypt, + .decrypt = atmel_aes_cfb32_decrypt, + } +}, +{ + .cra_name = "cfb16(aes)", + .cra_driver_name = "atmel-cfb16-aes", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = CFB16_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_aes_ctx), + .cra_alignmask = 0x1, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_aes_cra_init, + .cra_exit = atmel_aes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = atmel_aes_setkey, + .encrypt = atmel_aes_cfb16_encrypt, + .decrypt = atmel_aes_cfb16_decrypt, + } +}, +{ + .cra_name = "cfb8(aes)", + .cra_driver_name = "atmel-cfb8-aes", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = CFB8_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_aes_ctx), + .cra_alignmask = 0x0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_aes_cra_init, + .cra_exit = atmel_aes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = atmel_aes_setkey, + .encrypt = atmel_aes_cfb8_encrypt, + .decrypt = atmel_aes_cfb8_decrypt, + } +}, +{ + .cra_name = "ctr(aes)", + .cra_driver_name = "atmel-ctr-aes", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_aes_ctx), + .cra_alignmask = 0xf, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_aes_cra_init, + .cra_exit = atmel_aes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = atmel_aes_setkey, + .encrypt = atmel_aes_ctr_encrypt, + .decrypt = atmel_aes_ctr_decrypt, + } +}, +}; + +static struct crypto_alg aes_cfb64_alg = { + .cra_name = "cfb64(aes)", + .cra_driver_name = "atmel-cfb64-aes", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = CFB64_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_aes_ctx), + .cra_alignmask = 0x7, + .cra_type = &crypto_ablkcipher_type, + 
.cra_module = THIS_MODULE, + .cra_init = atmel_aes_cra_init, + .cra_exit = atmel_aes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = atmel_aes_setkey, + .encrypt = atmel_aes_cfb64_encrypt, + .decrypt = atmel_aes_cfb64_decrypt, + } +}; + +static void atmel_aes_queue_task(unsigned long data) +{ + struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data; + + atmel_aes_handle_queue(dd, NULL); +} + +static void atmel_aes_done_task(unsigned long data) +{ + struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data; + int err; + + if (!(dd->flags & AES_FLAGS_DMA)) { + atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out, + dd->bufcnt >> 2); + + if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg, + dd->buf_out, dd->bufcnt)) + err = 0; + else + err = -EINVAL; + + goto cpu_end; + } + + err = atmel_aes_crypt_dma_stop(dd); + + err = dd->err ? : err; + + if (dd->total && !err) { + if (dd->flags & AES_FLAGS_FAST) { + dd->in_sg = sg_next(dd->in_sg); + dd->out_sg = sg_next(dd->out_sg); + if (!dd->in_sg || !dd->out_sg) + err = -EINVAL; + } + if (!err) + err = atmel_aes_crypt_dma_start(dd); + if (!err) + return; /* DMA started. Not fininishing. */ + } + +cpu_end: + atmel_aes_finish_req(dd, err); + atmel_aes_handle_queue(dd, NULL); +} + +static irqreturn_t atmel_aes_irq(int irq, void *dev_id) +{ + struct atmel_aes_dev *aes_dd = dev_id; + u32 reg; + + reg = atmel_aes_read(aes_dd, AES_ISR); + if (reg & atmel_aes_read(aes_dd, AES_IMR)) { + atmel_aes_write(aes_dd, AES_IDR, reg); + if (AES_FLAGS_BUSY & aes_dd->flags) + tasklet_schedule(&aes_dd->done_task); + else + dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n"); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(aes_algs); i++) + crypto_unregister_alg(&aes_algs[i]); + if (dd->caps.has_cfb64) + crypto_unregister_alg(&aes_cfb64_alg); +} + +static int atmel_aes_register_algs(struct atmel_aes_dev *dd) +{ + int err, i, j; + + for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { + err = crypto_register_alg(&aes_algs[i]); + if (err) + goto err_aes_algs; + } + + if (dd->caps.has_cfb64) { + err = crypto_register_alg(&aes_cfb64_alg); + if (err) + goto err_aes_cfb64_alg; + } + + return 0; + +err_aes_cfb64_alg: + i = ARRAY_SIZE(aes_algs); +err_aes_algs: + for (j = 0; j < i; j++) + crypto_unregister_alg(&aes_algs[j]); + + return err; +} + +static void atmel_aes_get_cap(struct atmel_aes_dev *dd) +{ + dd->caps.has_dualbuff = 0; + dd->caps.has_cfb64 = 0; + dd->caps.max_burst_size = 1; + + /* keep only major version number */ + switch (dd->hw_version & 0xff0) { + case 0x200: + dd->caps.has_dualbuff = 1; + dd->caps.has_cfb64 = 1; + dd->caps.max_burst_size = 4; + break; + case 0x130: + dd->caps.has_dualbuff = 1; + dd->caps.has_cfb64 = 1; + dd->caps.max_burst_size = 4; + break; + case 0x120: + break; + default: + dev_warn(dd->dev, + "Unmanaged aes version, set minimum capabilities\n"); + break; + } +} + +#if defined(CONFIG_OF) +static const struct of_device_id atmel_aes_dt_ids[] = { + { .compatible = "atmel,at91sam9g46-aes" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids); + +static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct crypto_platform_data *pdata; + + if (!np) { + dev_err(&pdev->dev, "device node not found\n"); + return ERR_PTR(-EINVAL); + } + + pdata = 
devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + dev_err(&pdev->dev, "could not allocate memory for pdata\n"); + return ERR_PTR(-ENOMEM); + } + + pdata->dma_slave = devm_kzalloc(&pdev->dev, + sizeof(*(pdata->dma_slave)), + GFP_KERNEL); + if (!pdata->dma_slave) { + dev_err(&pdev->dev, "could not allocate memory for dma_slave\n"); + devm_kfree(&pdev->dev, pdata); + return ERR_PTR(-ENOMEM); + } + + return pdata; +} +#else +static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev) +{ + return ERR_PTR(-EINVAL); +} +#endif + +static int atmel_aes_probe(struct platform_device *pdev) +{ + struct atmel_aes_dev *aes_dd; + struct crypto_platform_data *pdata; + struct device *dev = &pdev->dev; + struct resource *aes_res; + unsigned long aes_phys_size; + int err; + + pdata = pdev->dev.platform_data; + if (!pdata) { + pdata = atmel_aes_of_init(pdev); + if (IS_ERR(pdata)) { + err = PTR_ERR(pdata); + goto aes_dd_err; + } + } + + if (!pdata->dma_slave) { + err = -ENXIO; + goto aes_dd_err; + } + + aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL); + if (aes_dd == NULL) { + dev_err(dev, "unable to alloc data struct.\n"); + err = -ENOMEM; + goto aes_dd_err; + } + + aes_dd->dev = dev; + + platform_set_drvdata(pdev, aes_dd); + + INIT_LIST_HEAD(&aes_dd->list); + spin_lock_init(&aes_dd->lock); + + tasklet_init(&aes_dd->done_task, atmel_aes_done_task, + (unsigned long)aes_dd); + tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task, + (unsigned long)aes_dd); + + crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH); + + aes_dd->irq = -1; + + /* Get the base address */ + aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!aes_res) { + dev_err(dev, "no MEM resource info\n"); + err = -ENODEV; + goto res_err; + } + aes_dd->phys_base = aes_res->start; + aes_phys_size = resource_size(aes_res); + + /* Get the IRQ */ + aes_dd->irq = platform_get_irq(pdev, 0); + if (aes_dd->irq < 0) { + dev_err(dev, "no IRQ resource info\n"); + err = aes_dd->irq; + goto aes_irq_err; + } + + err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes", + aes_dd); + if (err) { + dev_err(dev, "unable to request aes irq.\n"); + goto aes_irq_err; + } + + /* Initializing the clock */ + aes_dd->iclk = clk_get(&pdev->dev, "aes_clk"); + if (IS_ERR(aes_dd->iclk)) { + dev_err(dev, "clock initialization failed.\n"); + err = PTR_ERR(aes_dd->iclk); + goto clk_err; + } + + aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size); + if (!aes_dd->io_base) { + dev_err(dev, "can't ioremap\n"); + err = -ENOMEM; + goto aes_io_err; + } + + atmel_aes_hw_version_init(aes_dd); + + atmel_aes_get_cap(aes_dd); + + err = atmel_aes_buff_init(aes_dd); + if (err) + goto err_aes_buff; + + err = atmel_aes_dma_init(aes_dd, pdata); + if (err) + goto err_aes_dma; + + spin_lock(&atmel_aes.lock); + list_add_tail(&aes_dd->list, &atmel_aes.dev_list); + spin_unlock(&atmel_aes.lock); + + err = atmel_aes_register_algs(aes_dd); + if (err) + goto err_algs; + + dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n", + dma_chan_name(aes_dd->dma_lch_in.chan), + dma_chan_name(aes_dd->dma_lch_out.chan)); + + return 0; + +err_algs: + spin_lock(&atmel_aes.lock); + list_del(&aes_dd->list); + spin_unlock(&atmel_aes.lock); + atmel_aes_dma_cleanup(aes_dd); +err_aes_dma: + atmel_aes_buff_cleanup(aes_dd); +err_aes_buff: + iounmap(aes_dd->io_base); +aes_io_err: + clk_put(aes_dd->iclk); +clk_err: + free_irq(aes_dd->irq, aes_dd); +aes_irq_err: +res_err: + tasklet_kill(&aes_dd->done_task); + 
tasklet_kill(&aes_dd->queue_task); + kfree(aes_dd); + aes_dd = NULL; +aes_dd_err: + dev_err(dev, "initialization failed.\n"); + + return err; +} + +static int atmel_aes_remove(struct platform_device *pdev) +{ + static struct atmel_aes_dev *aes_dd; + + aes_dd = platform_get_drvdata(pdev); + if (!aes_dd) + return -ENODEV; + spin_lock(&atmel_aes.lock); + list_del(&aes_dd->list); + spin_unlock(&atmel_aes.lock); + + atmel_aes_unregister_algs(aes_dd); + + tasklet_kill(&aes_dd->done_task); + tasklet_kill(&aes_dd->queue_task); + + atmel_aes_dma_cleanup(aes_dd); + + iounmap(aes_dd->io_base); + + clk_put(aes_dd->iclk); + + if (aes_dd->irq > 0) + free_irq(aes_dd->irq, aes_dd); + + kfree(aes_dd); + aes_dd = NULL; + + return 0; +} + +static struct platform_driver atmel_aes_driver = { + .probe = atmel_aes_probe, + .remove = atmel_aes_remove, + .driver = { + .name = "atmel_aes", + .of_match_table = of_match_ptr(atmel_aes_dt_ids), + }, +}; + +module_platform_driver(atmel_aes_driver); + +MODULE_DESCRIPTION("Atmel AES hw acceleration support."); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique"); diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h new file mode 100644 index 000000000..83b2d7425 --- /dev/null +++ b/drivers/crypto/atmel-sha-regs.h @@ -0,0 +1,51 @@ +#ifndef __ATMEL_SHA_REGS_H__ +#define __ATMEL_SHA_REGS_H__ + +#define SHA_REG_DIGEST(x) (0x80 + ((x) * 0x04)) +#define SHA_REG_DIN(x) (0x40 + ((x) * 0x04)) + +#define SHA_CR 0x00 +#define SHA_CR_START (1 << 0) +#define SHA_CR_FIRST (1 << 4) +#define SHA_CR_SWRST (1 << 8) + +#define SHA_MR 0x04 +#define SHA_MR_MODE_MASK (0x3 << 0) +#define SHA_MR_MODE_MANUAL 0x0 +#define SHA_MR_MODE_AUTO 0x1 +#define SHA_MR_MODE_PDC 0x2 +#define SHA_MR_PROCDLY (1 << 4) +#define SHA_MR_ALGO_SHA1 (0 << 8) +#define SHA_MR_ALGO_SHA256 (1 << 8) +#define SHA_MR_ALGO_SHA384 (2 << 8) +#define SHA_MR_ALGO_SHA512 (3 << 8) +#define SHA_MR_ALGO_SHA224 (4 << 8) +#define SHA_MR_DUALBUFF (1 << 16) + +#define SHA_IER 0x10 +#define SHA_IDR 0x14 +#define SHA_IMR 0x18 +#define SHA_ISR 0x1C +#define SHA_INT_DATARDY (1 << 0) +#define SHA_INT_ENDTX (1 << 1) +#define SHA_INT_TXBUFE (1 << 2) +#define SHA_INT_URAD (1 << 8) +#define SHA_ISR_URAT_MASK (0x7 << 12) +#define SHA_ISR_URAT_IDR (0x0 << 12) +#define SHA_ISR_URAT_ODR (0x1 << 12) +#define SHA_ISR_URAT_MR (0x2 << 12) +#define SHA_ISR_URAT_WO (0x5 << 12) + +#define SHA_HW_VERSION 0xFC + +#define SHA_TPR 0x108 +#define SHA_TCR 0x10C +#define SHA_TNPR 0x118 +#define SHA_TNCR 0x11C +#define SHA_PTCR 0x120 +#define SHA_PTCR_TXTEN (1 << 8) +#define SHA_PTCR_TXTDIS (1 << 9) +#define SHA_PTSR 0x124 +#define SHA_PTSR_TXTEN (1 << 8) + +#endif /* __ATMEL_SHA_REGS_H__ */ diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c new file mode 100644 index 000000000..5b35433c5 --- /dev/null +++ b/drivers/crypto/atmel-sha.c @@ -0,0 +1,1515 @@ +/* + * Cryptographic API. + * + * Support for ATMEL SHA1/SHA256 HW acceleration. + * + * Copyright (c) 2012 Eukréa Electromatique - ATMEL + * Author: Nicolas Royer <nicolas@eukrea.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Some ideas are from omap-sham.c drivers. 
+ */ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/hw_random.h> +#include <linux/platform_device.h> + +#include <linux/device.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/scatterlist.h> +#include <linux/dma-mapping.h> +#include <linux/of_device.h> +#include <linux/delay.h> +#include <linux/crypto.h> +#include <linux/cryptohash.h> +#include <crypto/scatterwalk.h> +#include <crypto/algapi.h> +#include <crypto/sha.h> +#include <crypto/hash.h> +#include <crypto/internal/hash.h> +#include <linux/platform_data/crypto-atmel.h> +#include "atmel-sha-regs.h" + +/* SHA flags */ +#define SHA_FLAGS_BUSY BIT(0) +#define SHA_FLAGS_FINAL BIT(1) +#define SHA_FLAGS_DMA_ACTIVE BIT(2) +#define SHA_FLAGS_OUTPUT_READY BIT(3) +#define SHA_FLAGS_INIT BIT(4) +#define SHA_FLAGS_CPU BIT(5) +#define SHA_FLAGS_DMA_READY BIT(6) + +#define SHA_FLAGS_FINUP BIT(16) +#define SHA_FLAGS_SG BIT(17) +#define SHA_FLAGS_SHA1 BIT(18) +#define SHA_FLAGS_SHA224 BIT(19) +#define SHA_FLAGS_SHA256 BIT(20) +#define SHA_FLAGS_SHA384 BIT(21) +#define SHA_FLAGS_SHA512 BIT(22) +#define SHA_FLAGS_ERROR BIT(23) +#define SHA_FLAGS_PAD BIT(24) + +#define SHA_OP_UPDATE 1 +#define SHA_OP_FINAL 2 + +#define SHA_BUFFER_LEN PAGE_SIZE + +#define ATMEL_SHA_DMA_THRESHOLD 56 + +struct atmel_sha_caps { + bool has_dma; + bool has_dualbuff; + bool has_sha224; + bool has_sha_384_512; +}; + +struct atmel_sha_dev; + +struct atmel_sha_reqctx { + struct atmel_sha_dev *dd; + unsigned long flags; + unsigned long op; + + u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32)); + u64 digcnt[2]; + size_t bufcnt; + size_t buflen; + dma_addr_t dma_addr; + + /* walk state */ + struct scatterlist *sg; + unsigned int offset; /* offset in current sg */ + unsigned int total; /* total request */ + + size_t block_size; + + u8 buffer[0] __aligned(sizeof(u32)); +}; + +struct atmel_sha_ctx { + struct atmel_sha_dev *dd; + + unsigned long flags; +}; + +#define ATMEL_SHA_QUEUE_LENGTH 50 + +struct atmel_sha_dma { + struct dma_chan *chan; + struct dma_slave_config dma_conf; +}; + +struct atmel_sha_dev { + struct list_head list; + unsigned long phys_base; + struct device *dev; + struct clk *iclk; + int irq; + void __iomem *io_base; + + spinlock_t lock; + int err; + struct tasklet_struct done_task; + + unsigned long flags; + struct crypto_queue queue; + struct ahash_request *req; + + struct atmel_sha_dma dma_lch_in; + + struct atmel_sha_caps caps; + + u32 hw_version; +}; + +struct atmel_sha_drv { + struct list_head dev_list; + spinlock_t lock; +}; + +static struct atmel_sha_drv atmel_sha = { + .dev_list = LIST_HEAD_INIT(atmel_sha.dev_list), + .lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock), +}; + +static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset) +{ + return readl_relaxed(dd->io_base + offset); +} + +static inline void atmel_sha_write(struct atmel_sha_dev *dd, + u32 offset, u32 value) +{ + writel_relaxed(value, dd->io_base + offset); +} + +static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx) +{ + size_t count; + + while ((ctx->bufcnt < ctx->buflen) && ctx->total) { + count = min(ctx->sg->length - ctx->offset, ctx->total); + count = min(count, ctx->buflen - ctx->bufcnt); + + if (count <= 0) { + /* + * Check if count <= 0 because the buffer is full or + * because the sg length is 0. 
In the latter case, + * check if there is another sg in the list, a 0 length + * sg doesn't necessarily mean the end of the sg list. + */ + if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) { + ctx->sg = sg_next(ctx->sg); + continue; + } else { + break; + } + } + + scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg, + ctx->offset, count, 0); + + ctx->bufcnt += count; + ctx->offset += count; + ctx->total -= count; + + if (ctx->offset == ctx->sg->length) { + ctx->sg = sg_next(ctx->sg); + if (ctx->sg) + ctx->offset = 0; + else + ctx->total = 0; + } + } + + return 0; +} + +/* + * The purpose of this padding is to ensure that the padded message is a + * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512). + * The bit "1" is appended at the end of the message followed by + * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or + * a 128-bit block (SHA384/SHA512) equal to the message length in bits + * is appended. + * + * For SHA1/SHA224/SHA256, padlen is calculated as follows: + * - if message length < 56 bytes then padlen = 56 - message length + * - else padlen = 64 + 56 - message length + * + * For SHA384/SHA512, padlen is calculated as follows: + * - if message length < 112 bytes then padlen = 112 - message length + * - else padlen = 128 + 112 - message length + */ +static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length) +{ + unsigned int index, padlen; + u64 bits[2]; + u64 size[2]; + + size[0] = ctx->digcnt[0]; + size[1] = ctx->digcnt[1]; + + size[0] += ctx->bufcnt; + if (size[0] < ctx->bufcnt) + size[1]++; + + size[0] += length; + if (size[0] < length) + size[1]++; + + bits[1] = cpu_to_be64(size[0] << 3); + bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61); + + if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) { + index = ctx->bufcnt & 0x7f; + padlen = (index < 112) ? (112 - index) : ((128+112) - index); + *(ctx->buffer + ctx->bufcnt) = 0x80; + memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); + memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16); + ctx->bufcnt += padlen + 16; + ctx->flags |= SHA_FLAGS_PAD; + } else { + index = ctx->bufcnt & 0x3f; + padlen = (index < 56) ? 
(56 - index) : ((64+56) - index); + *(ctx->buffer + ctx->bufcnt) = 0x80; + memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); + memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8); + ctx->bufcnt += padlen + 8; + ctx->flags |= SHA_FLAGS_PAD; + } +} + +static int atmel_sha_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm); + struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); + struct atmel_sha_dev *dd = NULL; + struct atmel_sha_dev *tmp; + + spin_lock_bh(&atmel_sha.lock); + if (!tctx->dd) { + list_for_each_entry(tmp, &atmel_sha.dev_list, list) { + dd = tmp; + break; + } + tctx->dd = dd; + } else { + dd = tctx->dd; + } + + spin_unlock_bh(&atmel_sha.lock); + + ctx->dd = dd; + + ctx->flags = 0; + + dev_dbg(dd->dev, "init: digest size: %d\n", + crypto_ahash_digestsize(tfm)); + + switch (crypto_ahash_digestsize(tfm)) { + case SHA1_DIGEST_SIZE: + ctx->flags |= SHA_FLAGS_SHA1; + ctx->block_size = SHA1_BLOCK_SIZE; + break; + case SHA224_DIGEST_SIZE: + ctx->flags |= SHA_FLAGS_SHA224; + ctx->block_size = SHA224_BLOCK_SIZE; + break; + case SHA256_DIGEST_SIZE: + ctx->flags |= SHA_FLAGS_SHA256; + ctx->block_size = SHA256_BLOCK_SIZE; + break; + case SHA384_DIGEST_SIZE: + ctx->flags |= SHA_FLAGS_SHA384; + ctx->block_size = SHA384_BLOCK_SIZE; + break; + case SHA512_DIGEST_SIZE: + ctx->flags |= SHA_FLAGS_SHA512; + ctx->block_size = SHA512_BLOCK_SIZE; + break; + default: + return -EINVAL; + break; + } + + ctx->bufcnt = 0; + ctx->digcnt[0] = 0; + ctx->digcnt[1] = 0; + ctx->buflen = SHA_BUFFER_LEN; + + return 0; +} + +static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma) +{ + struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); + u32 valcr = 0, valmr = SHA_MR_MODE_AUTO; + + if (likely(dma)) { + if (!dd->caps.has_dma) + atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE); + valmr = SHA_MR_MODE_PDC; + if (dd->caps.has_dualbuff) + valmr |= SHA_MR_DUALBUFF; + } else { + atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); + } + + if (ctx->flags & SHA_FLAGS_SHA1) + valmr |= SHA_MR_ALGO_SHA1; + else if (ctx->flags & SHA_FLAGS_SHA224) + valmr |= SHA_MR_ALGO_SHA224; + else if (ctx->flags & SHA_FLAGS_SHA256) + valmr |= SHA_MR_ALGO_SHA256; + else if (ctx->flags & SHA_FLAGS_SHA384) + valmr |= SHA_MR_ALGO_SHA384; + else if (ctx->flags & SHA_FLAGS_SHA512) + valmr |= SHA_MR_ALGO_SHA512; + + /* Setting CR_FIRST only for the first iteration */ + if (!(ctx->digcnt[0] || ctx->digcnt[1])) + valcr = SHA_CR_FIRST; + + atmel_sha_write(dd, SHA_CR, valcr); + atmel_sha_write(dd, SHA_MR, valmr); +} + +static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf, + size_t length, int final) +{ + struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); + int count, len32; + const u32 *buffer = (const u32 *)buf; + + dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n", + ctx->digcnt[1], ctx->digcnt[0], length, final); + + atmel_sha_write_ctrl(dd, 0); + + /* should be non-zero before next lines to disable clocks later */ + ctx->digcnt[0] += length; + if (ctx->digcnt[0] < length) + ctx->digcnt[1]++; + + if (final) + dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ + + len32 = DIV_ROUND_UP(length, sizeof(u32)); + + dd->flags |= SHA_FLAGS_CPU; + + for (count = 0; count < len32; count++) + atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]); + + return -EINPROGRESS; +} + +static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, + size_t length1, dma_addr_t dma_addr2, 
size_t length2, int final) +{ + struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); + int len32; + + dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n", + ctx->digcnt[1], ctx->digcnt[0], length1, final); + + len32 = DIV_ROUND_UP(length1, sizeof(u32)); + atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS); + atmel_sha_write(dd, SHA_TPR, dma_addr1); + atmel_sha_write(dd, SHA_TCR, len32); + + len32 = DIV_ROUND_UP(length2, sizeof(u32)); + atmel_sha_write(dd, SHA_TNPR, dma_addr2); + atmel_sha_write(dd, SHA_TNCR, len32); + + atmel_sha_write_ctrl(dd, 1); + + /* should be non-zero before next lines to disable clocks later */ + ctx->digcnt[0] += length1; + if (ctx->digcnt[0] < length1) + ctx->digcnt[1]++; + + if (final) + dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ + + dd->flags |= SHA_FLAGS_DMA_ACTIVE; + + /* Start DMA transfer */ + atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN); + + return -EINPROGRESS; +} + +static void atmel_sha_dma_callback(void *data) +{ + struct atmel_sha_dev *dd = data; + + /* dma_lch_in - completed - wait DATRDY */ + atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); +} + +static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, + size_t length1, dma_addr_t dma_addr2, size_t length2, int final) +{ + struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); + struct dma_async_tx_descriptor *in_desc; + struct scatterlist sg[2]; + + dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n", + ctx->digcnt[1], ctx->digcnt[0], length1, final); + + dd->dma_lch_in.dma_conf.src_maxburst = 16; + dd->dma_lch_in.dma_conf.dst_maxburst = 16; + + dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); + + if (length2) { + sg_init_table(sg, 2); + sg_dma_address(&sg[0]) = dma_addr1; + sg_dma_len(&sg[0]) = length1; + sg_dma_address(&sg[1]) = dma_addr2; + sg_dma_len(&sg[1]) = length2; + in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + } else { + sg_init_table(sg, 1); + sg_dma_address(&sg[0]) = dma_addr1; + sg_dma_len(&sg[0]) = length1; + in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + } + if (!in_desc) + return -EINVAL; + + in_desc->callback = atmel_sha_dma_callback; + in_desc->callback_param = dd; + + atmel_sha_write_ctrl(dd, 1); + + /* should be non-zero before next lines to disable clocks later */ + ctx->digcnt[0] += length1; + if (ctx->digcnt[0] < length1) + ctx->digcnt[1]++; + + if (final) + dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ + + dd->flags |= SHA_FLAGS_DMA_ACTIVE; + + /* Start DMA transfer */ + dmaengine_submit(in_desc); + dma_async_issue_pending(dd->dma_lch_in.chan); + + return -EINPROGRESS; +} + +static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, + size_t length1, dma_addr_t dma_addr2, size_t length2, int final) +{ + if (dd->caps.has_dma) + return atmel_sha_xmit_dma(dd, dma_addr1, length1, + dma_addr2, length2, final); + else + return atmel_sha_xmit_pdc(dd, dma_addr1, length1, + dma_addr2, length2, final); +} + +static int atmel_sha_update_cpu(struct atmel_sha_dev *dd) +{ + struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); + int bufcnt; + + atmel_sha_append_sg(ctx); + atmel_sha_fill_padding(ctx, 0); + bufcnt = ctx->bufcnt; + ctx->bufcnt = 0; + + return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1); +} + +static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd, + struct atmel_sha_reqctx *ctx, + 
size_t length, int final) +{ + ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, + ctx->buflen + ctx->block_size, DMA_TO_DEVICE); + if (dma_mapping_error(dd->dev, ctx->dma_addr)) { + dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen + + ctx->block_size); + return -EINVAL; + } + + ctx->flags &= ~SHA_FLAGS_SG; + + /* next call does not fail... so no unmap in the case of error */ + return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final); +} + +static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd) +{ + struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); + unsigned int final; + size_t count; + + atmel_sha_append_sg(ctx); + + final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; + + dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n", + ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final); + + if (final) + atmel_sha_fill_padding(ctx, 0); + + if (final || (ctx->bufcnt == ctx->buflen)) { + count = ctx->bufcnt; + ctx->bufcnt = 0; + return atmel_sha_xmit_dma_map(dd, ctx, count, final); + } + + return 0; +} + +static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd) +{ + struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); + unsigned int length, final, tail; + struct scatterlist *sg; + unsigned int count; + + if (!ctx->total) + return 0; + + if (ctx->bufcnt || ctx->offset) + return atmel_sha_update_dma_slow(dd); + + dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n", + ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total); + + sg = ctx->sg; + + if (!IS_ALIGNED(sg->offset, sizeof(u32))) + return atmel_sha_update_dma_slow(dd); + + if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size)) + /* size is not ctx->block_size aligned */ + return atmel_sha_update_dma_slow(dd); + + length = min(ctx->total, sg->length); + + if (sg_is_last(sg)) { + if (!(ctx->flags & SHA_FLAGS_FINUP)) { + /* not last sg must be ctx->block_size aligned */ + tail = length & (ctx->block_size - 1); + length -= tail; + } + } + + ctx->total -= length; + ctx->offset = length; /* offset where to start slow */ + + final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; + + /* Add padding */ + if (final) { + tail = length & (ctx->block_size - 1); + length -= tail; + ctx->total += tail; + ctx->offset = length; /* offset where to start slow */ + + sg = ctx->sg; + atmel_sha_append_sg(ctx); + + atmel_sha_fill_padding(ctx, length); + + ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, + ctx->buflen + ctx->block_size, DMA_TO_DEVICE); + if (dma_mapping_error(dd->dev, ctx->dma_addr)) { + dev_err(dd->dev, "dma %u bytes error\n", + ctx->buflen + ctx->block_size); + return -EINVAL; + } + + if (length == 0) { + ctx->flags &= ~SHA_FLAGS_SG; + count = ctx->bufcnt; + ctx->bufcnt = 0; + return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0, + 0, final); + } else { + ctx->sg = sg; + if (!dma_map_sg(dd->dev, ctx->sg, 1, + DMA_TO_DEVICE)) { + dev_err(dd->dev, "dma_map_sg error\n"); + return -EINVAL; + } + + ctx->flags |= SHA_FLAGS_SG; + + count = ctx->bufcnt; + ctx->bufcnt = 0; + return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), + length, ctx->dma_addr, count, final); + } + } + + if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { + dev_err(dd->dev, "dma_map_sg error\n"); + return -EINVAL; + } + + ctx->flags |= SHA_FLAGS_SG; + + /* next call does not fail... 
so no unmap in the case of error */ + return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0, + 0, final); +} + +static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd) +{ + struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); + + if (ctx->flags & SHA_FLAGS_SG) { + dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); + if (ctx->sg->length == ctx->offset) { + ctx->sg = sg_next(ctx->sg); + if (ctx->sg) + ctx->offset = 0; + } + if (ctx->flags & SHA_FLAGS_PAD) { + dma_unmap_single(dd->dev, ctx->dma_addr, + ctx->buflen + ctx->block_size, DMA_TO_DEVICE); + } + } else { + dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen + + ctx->block_size, DMA_TO_DEVICE); + } + + return 0; +} + +static int atmel_sha_update_req(struct atmel_sha_dev *dd) +{ + struct ahash_request *req = dd->req; + struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); + int err; + + dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n", + ctx->total, ctx->digcnt[1], ctx->digcnt[0]); + + if (ctx->flags & SHA_FLAGS_CPU) + err = atmel_sha_update_cpu(dd); + else + err = atmel_sha_update_dma_start(dd); + + /* wait for dma completion before we can take more data */ + dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n", + err, ctx->digcnt[1], ctx->digcnt[0]); + + return err; +} + +static int atmel_sha_final_req(struct atmel_sha_dev *dd) +{ + struct ahash_request *req = dd->req; + struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); + int err = 0; + int count; + + if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) { + atmel_sha_fill_padding(ctx, 0); + count = ctx->bufcnt; + ctx->bufcnt = 0; + err = atmel_sha_xmit_dma_map(dd, ctx, count, 1); + } + /* faster to handle last block with cpu */ + else { + atmel_sha_fill_padding(ctx, 0); + count = ctx->bufcnt; + ctx->bufcnt = 0; + err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1); + } + + dev_dbg(dd->dev, "final_req: err: %d\n", err); + + return err; +} + +static void atmel_sha_copy_hash(struct ahash_request *req) +{ + struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); + u32 *hash = (u32 *)ctx->digest; + int i; + + if (ctx->flags & SHA_FLAGS_SHA1) + for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) + hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); + else if (ctx->flags & SHA_FLAGS_SHA224) + for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(u32); i++) + hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); + else if (ctx->flags & SHA_FLAGS_SHA256) + for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) + hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); + else if (ctx->flags & SHA_FLAGS_SHA384) + for (i = 0; i < SHA384_DIGEST_SIZE / sizeof(u32); i++) + hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); + else + for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(u32); i++) + hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); +} + +static void atmel_sha_copy_ready_hash(struct ahash_request *req) +{ + struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); + + if (!req->result) + return; + + if (ctx->flags & SHA_FLAGS_SHA1) + memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE); + else if (ctx->flags & SHA_FLAGS_SHA224) + memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE); + else if (ctx->flags & SHA_FLAGS_SHA256) + memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE); + else if (ctx->flags & SHA_FLAGS_SHA384) + memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE); + else + memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE); +} + +static int atmel_sha_finish(struct ahash_request *req) +{ + struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); + 
struct atmel_sha_dev *dd = ctx->dd; + int err = 0; + + if (ctx->digcnt[0] || ctx->digcnt[1]) + atmel_sha_copy_ready_hash(req); + + dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1], + ctx->digcnt[0], ctx->bufcnt); + + return err; +} + +static void atmel_sha_finish_req(struct ahash_request *req, int err) +{ + struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); + struct atmel_sha_dev *dd = ctx->dd; + + if (!err) { + atmel_sha_copy_hash(req); + if (SHA_FLAGS_FINAL & dd->flags) + err = atmel_sha_finish(req); + } else { + ctx->flags |= SHA_FLAGS_ERROR; + } + + /* atomic operation is not needed here */ + dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU | + SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY); + + clk_disable_unprepare(dd->iclk); + + if (req->base.complete) + req->base.complete(&req->base, err); + + /* handle new request */ + tasklet_schedule(&dd->done_task); +} + +static int atmel_sha_hw_init(struct atmel_sha_dev *dd) +{ + clk_prepare_enable(dd->iclk); + + if (!(SHA_FLAGS_INIT & dd->flags)) { + atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST); + dd->flags |= SHA_FLAGS_INIT; + dd->err = 0; + } + + return 0; +} + +static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd) +{ + return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff; +} + +static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd) +{ + atmel_sha_hw_init(dd); + + dd->hw_version = atmel_sha_get_version(dd); + + dev_info(dd->dev, + "version: 0x%x\n", dd->hw_version); + + clk_disable_unprepare(dd->iclk); +} + +static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, + struct ahash_request *req) +{ + struct crypto_async_request *async_req, *backlog; + struct atmel_sha_reqctx *ctx; + unsigned long flags; + int err = 0, ret = 0; + + spin_lock_irqsave(&dd->lock, flags); + if (req) + ret = ahash_enqueue_request(&dd->queue, req); + + if (SHA_FLAGS_BUSY & dd->flags) { + spin_unlock_irqrestore(&dd->lock, flags); + return ret; + } + + backlog = crypto_get_backlog(&dd->queue); + async_req = crypto_dequeue_request(&dd->queue); + if (async_req) + dd->flags |= SHA_FLAGS_BUSY; + + spin_unlock_irqrestore(&dd->lock, flags); + + if (!async_req) + return ret; + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + + req = ahash_request_cast(async_req); + dd->req = req; + ctx = ahash_request_ctx(req); + + dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", + ctx->op, req->nbytes); + + err = atmel_sha_hw_init(dd); + + if (err) + goto err1; + + if (ctx->op == SHA_OP_UPDATE) { + err = atmel_sha_update_req(dd); + if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) + /* no final() after finup() */ + err = atmel_sha_final_req(dd); + } else if (ctx->op == SHA_OP_FINAL) { + err = atmel_sha_final_req(dd); + } + +err1: + if (err != -EINPROGRESS) + /* done_task will not finish it, so do it here */ + atmel_sha_finish_req(req, err); + + dev_dbg(dd->dev, "exit, err: %d\n", err); + + return ret; +} + +static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op) +{ + struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); + struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); + struct atmel_sha_dev *dd = tctx->dd; + + ctx->op = op; + + return atmel_sha_handle_queue(dd, req); +} + +static int atmel_sha_update(struct ahash_request *req) +{ + struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); + + if (!req->nbytes) + return 0; + + ctx->total = req->nbytes; + ctx->sg = req->src; + ctx->offset = 0; + + if (ctx->flags & SHA_FLAGS_FINUP) { + if (ctx->bufcnt + ctx->total 
< ATMEL_SHA_DMA_THRESHOLD) + /* faster to use CPU for short transfers */ + ctx->flags |= SHA_FLAGS_CPU; + } else if (ctx->bufcnt + ctx->total < ctx->buflen) { + atmel_sha_append_sg(ctx); + return 0; + } + return atmel_sha_enqueue(req, SHA_OP_UPDATE); +} + +static int atmel_sha_final(struct ahash_request *req) +{ + struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); + struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); + struct atmel_sha_dev *dd = tctx->dd; + + int err = 0; + + ctx->flags |= SHA_FLAGS_FINUP; + + if (ctx->flags & SHA_FLAGS_ERROR) + return 0; /* uncompleted hash is not needed */ + + if (ctx->bufcnt) { + return atmel_sha_enqueue(req, SHA_OP_FINAL); + } else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */ + err = atmel_sha_hw_init(dd); + if (err) + goto err1; + + dd->flags |= SHA_FLAGS_BUSY; + err = atmel_sha_final_req(dd); + } else { + /* copy ready hash (+ finalize hmac) */ + return atmel_sha_finish(req); + } + +err1: + if (err != -EINPROGRESS) + /* done_task will not finish it, so do it here */ + atmel_sha_finish_req(req, err); + + return err; +} + +static int atmel_sha_finup(struct ahash_request *req) +{ + struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); + int err1, err2; + + ctx->flags |= SHA_FLAGS_FINUP; + + err1 = atmel_sha_update(req); + if (err1 == -EINPROGRESS || err1 == -EBUSY) + return err1; + + /* + * final() always has to be called to clean up resources, + * even if update() failed, except for -EINPROGRESS + */ + err2 = atmel_sha_final(req); + + return err1 ?: err2; +} + +static int atmel_sha_digest(struct ahash_request *req) +{ + return atmel_sha_init(req) ?: atmel_sha_finup(req); +} + +static int atmel_sha_cra_init(struct crypto_tfm *tfm) +{ + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct atmel_sha_reqctx) + + SHA_BUFFER_LEN + SHA512_BLOCK_SIZE); + + return 0; +} + +static struct ahash_alg sha_1_256_algs[] = { +{ + .init = atmel_sha_init, + .update = atmel_sha_update, + .final = atmel_sha_final, + .finup = atmel_sha_finup, + .digest = atmel_sha_digest, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "sha1", + .cra_driver_name = "atmel-sha1", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = atmel_sha_cra_init, + } + } +}, +{ + .init = atmel_sha_init, + .update = atmel_sha_update, + .final = atmel_sha_final, + .finup = atmel_sha_finup, + .digest = atmel_sha_digest, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "sha256", + .cra_driver_name = "atmel-sha256", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = atmel_sha_cra_init, + } + } +}, +}; + +static struct ahash_alg sha_224_alg = { + .init = atmel_sha_init, + .update = atmel_sha_update, + .final = atmel_sha_final, + .finup = atmel_sha_finup, + .digest = atmel_sha_digest, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "sha224", + .cra_driver_name = "atmel-sha224", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = atmel_sha_cra_init, + } + } +}; + +static struct ahash_alg sha_384_512_algs[] = { +{ + .init = atmel_sha_init, + .update = 
atmel_sha_update, + .final = atmel_sha_final, + .finup = atmel_sha_finup, + .digest = atmel_sha_digest, + .halg = { + .digestsize = SHA384_DIGEST_SIZE, + .base = { + .cra_name = "sha384", + .cra_driver_name = "atmel-sha384", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_sha_ctx), + .cra_alignmask = 0x3, + .cra_module = THIS_MODULE, + .cra_init = atmel_sha_cra_init, + } + } +}, +{ + .init = atmel_sha_init, + .update = atmel_sha_update, + .final = atmel_sha_final, + .finup = atmel_sha_finup, + .digest = atmel_sha_digest, + .halg = { + .digestsize = SHA512_DIGEST_SIZE, + .base = { + .cra_name = "sha512", + .cra_driver_name = "atmel-sha512", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_sha_ctx), + .cra_alignmask = 0x3, + .cra_module = THIS_MODULE, + .cra_init = atmel_sha_cra_init, + } + } +}, +}; + +static void atmel_sha_done_task(unsigned long data) +{ + struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data; + int err = 0; + + if (!(SHA_FLAGS_BUSY & dd->flags)) { + atmel_sha_handle_queue(dd, NULL); + return; + } + + if (SHA_FLAGS_CPU & dd->flags) { + if (SHA_FLAGS_OUTPUT_READY & dd->flags) { + dd->flags &= ~SHA_FLAGS_OUTPUT_READY; + goto finish; + } + } else if (SHA_FLAGS_DMA_READY & dd->flags) { + if (SHA_FLAGS_DMA_ACTIVE & dd->flags) { + dd->flags &= ~SHA_FLAGS_DMA_ACTIVE; + atmel_sha_update_dma_stop(dd); + if (dd->err) { + err = dd->err; + goto finish; + } + } + if (SHA_FLAGS_OUTPUT_READY & dd->flags) { + /* hash or semi-hash ready */ + dd->flags &= ~(SHA_FLAGS_DMA_READY | + SHA_FLAGS_OUTPUT_READY); + err = atmel_sha_update_dma_start(dd); + if (err != -EINPROGRESS) + goto finish; + } + } + return; + +finish: + /* finish current request */ + atmel_sha_finish_req(dd->req, err); +} + +static irqreturn_t atmel_sha_irq(int irq, void *dev_id) +{ + struct atmel_sha_dev *sha_dd = dev_id; + u32 reg; + + reg = atmel_sha_read(sha_dd, SHA_ISR); + if (reg & atmel_sha_read(sha_dd, SHA_IMR)) { + atmel_sha_write(sha_dd, SHA_IDR, reg); + if (SHA_FLAGS_BUSY & sha_dd->flags) { + sha_dd->flags |= SHA_FLAGS_OUTPUT_READY; + if (!(SHA_FLAGS_CPU & sha_dd->flags)) + sha_dd->flags |= SHA_FLAGS_DMA_READY; + tasklet_schedule(&sha_dd->done_task); + } else { + dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n"); + } + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) + crypto_unregister_ahash(&sha_1_256_algs[i]); + + if (dd->caps.has_sha224) + crypto_unregister_ahash(&sha_224_alg); + + if (dd->caps.has_sha_384_512) { + for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) + crypto_unregister_ahash(&sha_384_512_algs[i]); + } +} + +static int atmel_sha_register_algs(struct atmel_sha_dev *dd) +{ + int err, i, j; + + for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) { + err = crypto_register_ahash(&sha_1_256_algs[i]); + if (err) + goto err_sha_1_256_algs; + } + + if (dd->caps.has_sha224) { + err = crypto_register_ahash(&sha_224_alg); + if (err) + goto err_sha_224_algs; + } + + if (dd->caps.has_sha_384_512) { + for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) { + err = crypto_register_ahash(&sha_384_512_algs[i]); + if (err) + goto err_sha_384_512_algs; + } + } + + return 0; + +err_sha_384_512_algs: + for (j = 0; j < i; j++) + crypto_unregister_ahash(&sha_384_512_algs[j]); + crypto_unregister_ahash(&sha_224_alg); 
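+ /* all SHA1/SHA256 algs were registered by now: reset 'i' so the loop below unregisters them all */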
+err_sha_224_algs: + i = ARRAY_SIZE(sha_1_256_algs); +err_sha_1_256_algs: + for (j = 0; j < i; j++) + crypto_unregister_ahash(&sha_1_256_algs[j]); + + return err; +} + +static bool atmel_sha_filter(struct dma_chan *chan, void *slave) +{ + struct at_dma_slave *sl = slave; + + if (sl && sl->dma_dev == chan->device->dev) { + chan->private = sl; + return true; + } else { + return false; + } +} + +static int atmel_sha_dma_init(struct atmel_sha_dev *dd, + struct crypto_platform_data *pdata) +{ + int err = -ENOMEM; + dma_cap_mask_t mask_in; + + /* Try to grab DMA channel */ + dma_cap_zero(mask_in); + dma_cap_set(DMA_SLAVE, mask_in); + + dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in, + atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx"); + if (!dd->dma_lch_in.chan) { + dev_warn(dd->dev, "no DMA channel available\n"); + return err; + } + + dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; + dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + + SHA_REG_DIN(0); + dd->dma_lch_in.dma_conf.src_maxburst = 1; + dd->dma_lch_in.dma_conf.src_addr_width = + DMA_SLAVE_BUSWIDTH_4_BYTES; + dd->dma_lch_in.dma_conf.dst_maxburst = 1; + dd->dma_lch_in.dma_conf.dst_addr_width = + DMA_SLAVE_BUSWIDTH_4_BYTES; + dd->dma_lch_in.dma_conf.device_fc = false; + + return 0; +} + +static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd) +{ + dma_release_channel(dd->dma_lch_in.chan); +} + +static void atmel_sha_get_cap(struct atmel_sha_dev *dd) +{ + + dd->caps.has_dma = 0; + dd->caps.has_dualbuff = 0; + dd->caps.has_sha224 = 0; + dd->caps.has_sha_384_512 = 0; + + /* keep only major version number */ + switch (dd->hw_version & 0xff0) { + case 0x420: + dd->caps.has_dma = 1; + dd->caps.has_dualbuff = 1; + dd->caps.has_sha224 = 1; + dd->caps.has_sha_384_512 = 1; + break; + case 0x410: + dd->caps.has_dma = 1; + dd->caps.has_dualbuff = 1; + dd->caps.has_sha224 = 1; + dd->caps.has_sha_384_512 = 1; + break; + case 0x400: + dd->caps.has_dma = 1; + dd->caps.has_dualbuff = 1; + dd->caps.has_sha224 = 1; + break; + case 0x320: + break; + default: + dev_warn(dd->dev, + "Unmanaged sha version, set minimum capabilities\n"); + break; + } +} + +#if defined(CONFIG_OF) +static const struct of_device_id atmel_sha_dt_ids[] = { + { .compatible = "atmel,at91sam9g46-sha" }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids); + +static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct crypto_platform_data *pdata; + + if (!np) { + dev_err(&pdev->dev, "device node not found\n"); + return ERR_PTR(-EINVAL); + } + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + dev_err(&pdev->dev, "could not allocate memory for pdata\n"); + return ERR_PTR(-ENOMEM); + } + + pdata->dma_slave = devm_kzalloc(&pdev->dev, + sizeof(*(pdata->dma_slave)), + GFP_KERNEL); + if (!pdata->dma_slave) { + dev_err(&pdev->dev, "could not allocate memory for dma_slave\n"); + return ERR_PTR(-ENOMEM); + } + + return pdata; +} +#else /* CONFIG_OF */ +static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev) +{ + return ERR_PTR(-EINVAL); +} +#endif + +static int atmel_sha_probe(struct platform_device *pdev) +{ + struct atmel_sha_dev *sha_dd; + struct crypto_platform_data *pdata; + struct device *dev = &pdev->dev; + struct resource *sha_res; + unsigned long sha_phys_size; + int err; + + sha_dd = devm_kzalloc(&pdev->dev, sizeof(struct atmel_sha_dev), + GFP_KERNEL); + if (sha_dd == NULL) { + dev_err(dev, 
"unable to alloc data struct.\n"); + err = -ENOMEM; + goto sha_dd_err; + } + + sha_dd->dev = dev; + + platform_set_drvdata(pdev, sha_dd); + + INIT_LIST_HEAD(&sha_dd->list); + spin_lock_init(&sha_dd->lock); + + tasklet_init(&sha_dd->done_task, atmel_sha_done_task, + (unsigned long)sha_dd); + + crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH); + + sha_dd->irq = -1; + + /* Get the base address */ + sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!sha_res) { + dev_err(dev, "no MEM resource info\n"); + err = -ENODEV; + goto res_err; + } + sha_dd->phys_base = sha_res->start; + sha_phys_size = resource_size(sha_res); + + /* Get the IRQ */ + sha_dd->irq = platform_get_irq(pdev, 0); + if (sha_dd->irq < 0) { + dev_err(dev, "no IRQ resource info\n"); + err = sha_dd->irq; + goto res_err; + } + + err = request_irq(sha_dd->irq, atmel_sha_irq, IRQF_SHARED, "atmel-sha", + sha_dd); + if (err) { + dev_err(dev, "unable to request sha irq.\n"); + goto res_err; + } + + /* Initializing the clock */ + sha_dd->iclk = clk_get(&pdev->dev, "sha_clk"); + if (IS_ERR(sha_dd->iclk)) { + dev_err(dev, "clock initialization failed.\n"); + err = PTR_ERR(sha_dd->iclk); + goto clk_err; + } + + sha_dd->io_base = ioremap(sha_dd->phys_base, sha_phys_size); + if (!sha_dd->io_base) { + dev_err(dev, "can't ioremap\n"); + err = -ENOMEM; + goto sha_io_err; + } + + atmel_sha_hw_version_init(sha_dd); + + atmel_sha_get_cap(sha_dd); + + if (sha_dd->caps.has_dma) { + pdata = pdev->dev.platform_data; + if (!pdata) { + pdata = atmel_sha_of_init(pdev); + if (IS_ERR(pdata)) { + dev_err(&pdev->dev, "platform data not available\n"); + err = PTR_ERR(pdata); + goto err_pdata; + } + } + if (!pdata->dma_slave) { + err = -ENXIO; + goto err_pdata; + } + err = atmel_sha_dma_init(sha_dd, pdata); + if (err) + goto err_sha_dma; + + dev_info(dev, "using %s for DMA transfers\n", + dma_chan_name(sha_dd->dma_lch_in.chan)); + } + + spin_lock(&atmel_sha.lock); + list_add_tail(&sha_dd->list, &atmel_sha.dev_list); + spin_unlock(&atmel_sha.lock); + + err = atmel_sha_register_algs(sha_dd); + if (err) + goto err_algs; + + dev_info(dev, "Atmel SHA1/SHA256%s%s\n", + sha_dd->caps.has_sha224 ? "/SHA224" : "", + sha_dd->caps.has_sha_384_512 ? 
"/SHA384/SHA512" : ""); + + return 0; + +err_algs: + spin_lock(&atmel_sha.lock); + list_del(&sha_dd->list); + spin_unlock(&atmel_sha.lock); + if (sha_dd->caps.has_dma) + atmel_sha_dma_cleanup(sha_dd); +err_sha_dma: +err_pdata: + iounmap(sha_dd->io_base); +sha_io_err: + clk_put(sha_dd->iclk); +clk_err: + free_irq(sha_dd->irq, sha_dd); +res_err: + tasklet_kill(&sha_dd->done_task); +sha_dd_err: + dev_err(dev, "initialization failed.\n"); + + return err; +} + +static int atmel_sha_remove(struct platform_device *pdev) +{ + static struct atmel_sha_dev *sha_dd; + + sha_dd = platform_get_drvdata(pdev); + if (!sha_dd) + return -ENODEV; + spin_lock(&atmel_sha.lock); + list_del(&sha_dd->list); + spin_unlock(&atmel_sha.lock); + + atmel_sha_unregister_algs(sha_dd); + + tasklet_kill(&sha_dd->done_task); + + if (sha_dd->caps.has_dma) + atmel_sha_dma_cleanup(sha_dd); + + iounmap(sha_dd->io_base); + + clk_put(sha_dd->iclk); + + if (sha_dd->irq >= 0) + free_irq(sha_dd->irq, sha_dd); + + return 0; +} + +static struct platform_driver atmel_sha_driver = { + .probe = atmel_sha_probe, + .remove = atmel_sha_remove, + .driver = { + .name = "atmel_sha", + .of_match_table = of_match_ptr(atmel_sha_dt_ids), + }, +}; + +module_platform_driver(atmel_sha_driver); + +MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support."); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique"); diff --git a/drivers/crypto/atmel-tdes-regs.h b/drivers/crypto/atmel-tdes-regs.h new file mode 100644 index 000000000..f86734d0f --- /dev/null +++ b/drivers/crypto/atmel-tdes-regs.h @@ -0,0 +1,91 @@ +#ifndef __ATMEL_TDES_REGS_H__ +#define __ATMEL_TDES_REGS_H__ + +#define TDES_CR 0x00 +#define TDES_CR_START (1 << 0) +#define TDES_CR_SWRST (1 << 8) +#define TDES_CR_LOADSEED (1 << 16) + +#define TDES_MR 0x04 +#define TDES_MR_CYPHER_DEC (0 << 0) +#define TDES_MR_CYPHER_ENC (1 << 0) +#define TDES_MR_TDESMOD_MASK (0x3 << 1) +#define TDES_MR_TDESMOD_DES (0x0 << 1) +#define TDES_MR_TDESMOD_TDES (0x1 << 1) +#define TDES_MR_TDESMOD_XTEA (0x2 << 1) +#define TDES_MR_KEYMOD_3KEY (0 << 4) +#define TDES_MR_KEYMOD_2KEY (1 << 4) +#define TDES_MR_SMOD_MASK (0x3 << 8) +#define TDES_MR_SMOD_MANUAL (0x0 << 8) +#define TDES_MR_SMOD_AUTO (0x1 << 8) +#define TDES_MR_SMOD_PDC (0x2 << 8) +#define TDES_MR_OPMOD_MASK (0x3 << 12) +#define TDES_MR_OPMOD_ECB (0x0 << 12) +#define TDES_MR_OPMOD_CBC (0x1 << 12) +#define TDES_MR_OPMOD_OFB (0x2 << 12) +#define TDES_MR_OPMOD_CFB (0x3 << 12) +#define TDES_MR_LOD (0x1 << 15) +#define TDES_MR_CFBS_MASK (0x3 << 16) +#define TDES_MR_CFBS_64b (0x0 << 16) +#define TDES_MR_CFBS_32b (0x1 << 16) +#define TDES_MR_CFBS_16b (0x2 << 16) +#define TDES_MR_CFBS_8b (0x3 << 16) +#define TDES_MR_CKEY_MASK (0xF << 20) +#define TDES_MR_CKEY_OFFSET 20 +#define TDES_MR_CTYPE_MASK (0x3F << 24) +#define TDES_MR_CTYPE_OFFSET 24 + +#define TDES_IER 0x10 +#define TDES_IDR 0x14 +#define TDES_IMR 0x18 +#define TDES_ISR 0x1C +#define TDES_INT_DATARDY (1 << 0) +#define TDES_INT_ENDRX (1 << 1) +#define TDES_INT_ENDTX (1 << 2) +#define TDES_INT_RXBUFF (1 << 3) +#define TDES_INT_TXBUFE (1 << 4) +#define TDES_INT_URAD (1 << 8) +#define TDES_ISR_URAT_MASK (0x3 << 12) +#define TDES_ISR_URAT_IDR (0x0 << 12) +#define TDES_ISR_URAT_ODR (0x1 << 12) +#define TDES_ISR_URAT_MR (0x2 << 12) +#define TDES_ISR_URAT_WO (0x3 << 12) + + +#define TDES_KEY1W1R 0x20 +#define TDES_KEY1W2R 0x24 +#define TDES_KEY2W1R 0x28 +#define TDES_KEY2W2R 0x2C +#define TDES_KEY3W1R 0x30 +#define TDES_KEY3W2R 0x34 +#define TDES_IDATA1R 0x40 +#define 
TDES_IDATA2R 0x44 +#define TDES_ODATA1R 0x50 +#define TDES_ODATA2R 0x54 +#define TDES_IV1R 0x60 +#define TDES_IV2R 0x64 + +#define TDES_XTEARNDR 0x70 +#define TDES_XTEARNDR_XTEA_RNDS_MASK (0x3F << 0) +#define TDES_XTEARNDR_XTEA_RNDS_OFFSET 0 + +#define TDES_HW_VERSION 0xFC + +#define TDES_RPR 0x100 +#define TDES_RCR 0x104 +#define TDES_TPR 0x108 +#define TDES_TCR 0x10C +#define TDES_RNPR 0x118 +#define TDES_RNCR 0x11C +#define TDES_TNPR 0x118 +#define TDES_TNCR 0x11C +#define TDES_PTCR 0x120 +#define TDES_PTCR_RXTEN (1 << 0) +#define TDES_PTCR_RXTDIS (1 << 1) +#define TDES_PTCR_TXTEN (1 << 8) +#define TDES_PTCR_TXTDIS (1 << 9) +#define TDES_PTSR 0x124 +#define TDES_PTSR_RXTEN (1 << 0) +#define TDES_PTSR_TXTEN (1 << 8) + +#endif /* __ATMEL_TDES_REGS_H__ */ diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c new file mode 100644 index 000000000..ca2999709 --- /dev/null +++ b/drivers/crypto/atmel-tdes.c @@ -0,0 +1,1536 @@ +/* + * Cryptographic API. + * + * Support for ATMEL DES/TDES HW acceleration. + * + * Copyright (c) 2012 Eukréa Electromatique - ATMEL + * Author: Nicolas Royer <nicolas@eukrea.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Some ideas are from omap-aes.c drivers. + */ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/hw_random.h> +#include <linux/platform_device.h> + +#include <linux/device.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/scatterlist.h> +#include <linux/dma-mapping.h> +#include <linux/of_device.h> +#include <linux/delay.h> +#include <linux/crypto.h> +#include <linux/cryptohash.h> +#include <crypto/scatterwalk.h> +#include <crypto/algapi.h> +#include <crypto/des.h> +#include <crypto/hash.h> +#include <crypto/internal/hash.h> +#include <linux/platform_data/crypto-atmel.h> +#include "atmel-tdes-regs.h" + +/* TDES flags */ +#define TDES_FLAGS_MODE_MASK 0x00ff +#define TDES_FLAGS_ENCRYPT BIT(0) +#define TDES_FLAGS_CBC BIT(1) +#define TDES_FLAGS_CFB BIT(2) +#define TDES_FLAGS_CFB8 BIT(3) +#define TDES_FLAGS_CFB16 BIT(4) +#define TDES_FLAGS_CFB32 BIT(5) +#define TDES_FLAGS_CFB64 BIT(6) +#define TDES_FLAGS_OFB BIT(7) + +#define TDES_FLAGS_INIT BIT(16) +#define TDES_FLAGS_FAST BIT(17) +#define TDES_FLAGS_BUSY BIT(18) +#define TDES_FLAGS_DMA BIT(19) + +#define ATMEL_TDES_QUEUE_LENGTH 50 + +#define CFB8_BLOCK_SIZE 1 +#define CFB16_BLOCK_SIZE 2 +#define CFB32_BLOCK_SIZE 4 + +struct atmel_tdes_caps { + bool has_dma; + u32 has_cfb_3keys; +}; + +struct atmel_tdes_dev; + +struct atmel_tdes_ctx { + struct atmel_tdes_dev *dd; + + int keylen; + u32 key[3*DES_KEY_SIZE / sizeof(u32)]; + unsigned long flags; + + u16 block_size; +}; + +struct atmel_tdes_reqctx { + unsigned long mode; +}; + +struct atmel_tdes_dma { + struct dma_chan *chan; + struct dma_slave_config dma_conf; +}; + +struct atmel_tdes_dev { + struct list_head list; + unsigned long phys_base; + void __iomem *io_base; + + struct atmel_tdes_ctx *ctx; + struct device *dev; + struct clk *iclk; + int irq; + + unsigned long flags; + int err; + + spinlock_t lock; + struct crypto_queue queue; + + struct tasklet_struct done_task; + struct tasklet_struct queue_task; + + struct ablkcipher_request *req; + size_t total; + + struct scatterlist *in_sg; + unsigned 
int nb_in_sg; + size_t in_offset; + struct scatterlist *out_sg; + unsigned int nb_out_sg; + size_t out_offset; + + size_t buflen; + size_t dma_size; + + void *buf_in; + int dma_in; + dma_addr_t dma_addr_in; + struct atmel_tdes_dma dma_lch_in; + + void *buf_out; + int dma_out; + dma_addr_t dma_addr_out; + struct atmel_tdes_dma dma_lch_out; + + struct atmel_tdes_caps caps; + + u32 hw_version; +}; + +struct atmel_tdes_drv { + struct list_head dev_list; + spinlock_t lock; +}; + +static struct atmel_tdes_drv atmel_tdes = { + .dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list), + .lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock), +}; + +static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset, + void *buf, size_t buflen, size_t total, int out) +{ + unsigned int count, off = 0; + + while (buflen && total) { + count = min((*sg)->length - *offset, total); + count = min(count, buflen); + + if (!count) + return off; + + scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out); + + off += count; + buflen -= count; + *offset += count; + total -= count; + + if (*offset == (*sg)->length) { + *sg = sg_next(*sg); + if (*sg) + *offset = 0; + else + total = 0; + } + } + + return off; +} + +static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset) +{ + return readl_relaxed(dd->io_base + offset); +} + +static inline void atmel_tdes_write(struct atmel_tdes_dev *dd, + u32 offset, u32 value) +{ + writel_relaxed(value, dd->io_base + offset); +} + +static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset, + u32 *value, int count) +{ + for (; count--; value++, offset += 4) + atmel_tdes_write(dd, offset, *value); +} + +static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx) +{ + struct atmel_tdes_dev *tdes_dd = NULL; + struct atmel_tdes_dev *tmp; + + spin_lock_bh(&atmel_tdes.lock); + if (!ctx->dd) { + list_for_each_entry(tmp, &atmel_tdes.dev_list, list) { + tdes_dd = tmp; + break; + } + ctx->dd = tdes_dd; + } else { + tdes_dd = ctx->dd; + } + spin_unlock_bh(&atmel_tdes.lock); + + return tdes_dd; +} + +static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd) +{ + clk_prepare_enable(dd->iclk); + + if (!(dd->flags & TDES_FLAGS_INIT)) { + atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST); + dd->flags |= TDES_FLAGS_INIT; + dd->err = 0; + } + + return 0; +} + +static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd) +{ + return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff; +} + +static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd) +{ + atmel_tdes_hw_init(dd); + + dd->hw_version = atmel_tdes_get_version(dd); + + dev_info(dd->dev, + "version: 0x%x\n", dd->hw_version); + + clk_disable_unprepare(dd->iclk); +} + +static void atmel_tdes_dma_callback(void *data) +{ + struct atmel_tdes_dev *dd = data; + + /* dma_lch_out - completed */ + tasklet_schedule(&dd->done_task); +} + +static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd) +{ + int err; + u32 valcr = 0, valmr = TDES_MR_SMOD_PDC; + + err = atmel_tdes_hw_init(dd); + + if (err) + return err; + + if (!dd->caps.has_dma) + atmel_tdes_write(dd, TDES_PTCR, + TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS); + + /* MR register must be set before IV registers */ + if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) { + valmr |= TDES_MR_KEYMOD_3KEY; + valmr |= TDES_MR_TDESMOD_TDES; + } else if (dd->ctx->keylen > DES_KEY_SIZE) { + valmr |= TDES_MR_KEYMOD_2KEY; + valmr |= TDES_MR_TDESMOD_TDES; + } else { + valmr |= TDES_MR_TDESMOD_DES; + } + + if (dd->flags & TDES_FLAGS_CBC) { + valmr |= TDES_MR_OPMOD_CBC; + } 
else if (dd->flags & TDES_FLAGS_CFB) { + valmr |= TDES_MR_OPMOD_CFB; + + if (dd->flags & TDES_FLAGS_CFB8) + valmr |= TDES_MR_CFBS_8b; + else if (dd->flags & TDES_FLAGS_CFB16) + valmr |= TDES_MR_CFBS_16b; + else if (dd->flags & TDES_FLAGS_CFB32) + valmr |= TDES_MR_CFBS_32b; + else if (dd->flags & TDES_FLAGS_CFB64) + valmr |= TDES_MR_CFBS_64b; + } else if (dd->flags & TDES_FLAGS_OFB) { + valmr |= TDES_MR_OPMOD_OFB; + } + + if ((dd->flags & TDES_FLAGS_ENCRYPT) || (dd->flags & TDES_FLAGS_OFB)) + valmr |= TDES_MR_CYPHER_ENC; + + atmel_tdes_write(dd, TDES_CR, valcr); + atmel_tdes_write(dd, TDES_MR, valmr); + + atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key, + dd->ctx->keylen >> 2); + + if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) || + (dd->flags & TDES_FLAGS_OFB)) && dd->req->info) { + atmel_tdes_write_n(dd, TDES_IV1R, dd->req->info, 2); + } + + return 0; +} + +static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd) +{ + int err = 0; + size_t count; + + atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS); + + if (dd->flags & TDES_FLAGS_FAST) { + dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); + dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); + } else { + dma_sync_single_for_device(dd->dev, dd->dma_addr_out, + dd->dma_size, DMA_FROM_DEVICE); + + /* copy data */ + count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset, + dd->buf_out, dd->buflen, dd->dma_size, 1); + if (count != dd->dma_size) { + err = -EINVAL; + pr_err("not all data converted: %u\n", count); + } + } + + return err; +} + +static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd) +{ + int err = -ENOMEM; + + dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0); + dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0); + dd->buflen = PAGE_SIZE; + dd->buflen &= ~(DES_BLOCK_SIZE - 1); + + if (!dd->buf_in || !dd->buf_out) { + dev_err(dd->dev, "unable to alloc pages.\n"); + goto err_alloc; + } + + /* MAP here */ + dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, + dd->buflen, DMA_TO_DEVICE); + if (dma_mapping_error(dd->dev, dd->dma_addr_in)) { + dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); + err = -EINVAL; + goto err_map_in; + } + + dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, + dd->buflen, DMA_FROM_DEVICE); + if (dma_mapping_error(dd->dev, dd->dma_addr_out)) { + dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); + err = -EINVAL; + goto err_map_out; + } + + return 0; + +err_map_out: + dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, + DMA_TO_DEVICE); +err_map_in: +err_alloc: + free_page((unsigned long)dd->buf_out); + free_page((unsigned long)dd->buf_in); + if (err) + pr_err("error: %d\n", err); + return err; +} + +static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd) +{ + dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, + DMA_FROM_DEVICE); + dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, + DMA_TO_DEVICE); + free_page((unsigned long)dd->buf_out); + free_page((unsigned long)dd->buf_in); +} + +static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, + dma_addr_t dma_addr_out, int length) +{ + struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm); + struct atmel_tdes_dev *dd = ctx->dd; + int len32; + + dd->dma_size = length; + + if (!(dd->flags & TDES_FLAGS_FAST)) { + dma_sync_single_for_device(dd->dev, dma_addr_in, length, + DMA_TO_DEVICE); + } + + if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB8)) + len32 = DIV_ROUND_UP(length, sizeof(u8)); + else if ((dd->flags & TDES_FLAGS_CFB) && 
(dd->flags & TDES_FLAGS_CFB16)) + len32 = DIV_ROUND_UP(length, sizeof(u16)); + else + len32 = DIV_ROUND_UP(length, sizeof(u32)); + + atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS); + atmel_tdes_write(dd, TDES_TPR, dma_addr_in); + atmel_tdes_write(dd, TDES_TCR, len32); + atmel_tdes_write(dd, TDES_RPR, dma_addr_out); + atmel_tdes_write(dd, TDES_RCR, len32); + + /* Enable Interrupt */ + atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX); + + /* Start DMA transfer */ + atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN); + + return 0; +} + +static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, + dma_addr_t dma_addr_out, int length) +{ + struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm); + struct atmel_tdes_dev *dd = ctx->dd; + struct scatterlist sg[2]; + struct dma_async_tx_descriptor *in_desc, *out_desc; + + dd->dma_size = length; + + if (!(dd->flags & TDES_FLAGS_FAST)) { + dma_sync_single_for_device(dd->dev, dma_addr_in, length, + DMA_TO_DEVICE); + } + + if (dd->flags & TDES_FLAGS_CFB8) { + dd->dma_lch_in.dma_conf.dst_addr_width = + DMA_SLAVE_BUSWIDTH_1_BYTE; + dd->dma_lch_out.dma_conf.src_addr_width = + DMA_SLAVE_BUSWIDTH_1_BYTE; + } else if (dd->flags & TDES_FLAGS_CFB16) { + dd->dma_lch_in.dma_conf.dst_addr_width = + DMA_SLAVE_BUSWIDTH_2_BYTES; + dd->dma_lch_out.dma_conf.src_addr_width = + DMA_SLAVE_BUSWIDTH_2_BYTES; + } else { + dd->dma_lch_in.dma_conf.dst_addr_width = + DMA_SLAVE_BUSWIDTH_4_BYTES; + dd->dma_lch_out.dma_conf.src_addr_width = + DMA_SLAVE_BUSWIDTH_4_BYTES; + } + + dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); + dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf); + + dd->flags |= TDES_FLAGS_DMA; + + sg_init_table(&sg[0], 1); + sg_dma_address(&sg[0]) = dma_addr_in; + sg_dma_len(&sg[0]) = length; + + sg_init_table(&sg[1], 1); + sg_dma_address(&sg[1]) = dma_addr_out; + sg_dma_len(&sg[1]) = length; + + in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0], + 1, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!in_desc) + return -EINVAL; + + out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1], + 1, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!out_desc) + return -EINVAL; + + out_desc->callback = atmel_tdes_dma_callback; + out_desc->callback_param = dd; + + dmaengine_submit(out_desc); + dma_async_issue_pending(dd->dma_lch_out.chan); + + dmaengine_submit(in_desc); + dma_async_issue_pending(dd->dma_lch_in.chan); + + return 0; +} + +static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm( + crypto_ablkcipher_reqtfm(dd->req)); + int err, fast = 0, in, out; + size_t count; + dma_addr_t addr_in, addr_out; + + if ((!dd->in_offset) && (!dd->out_offset)) { + /* check for alignment */ + in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) && + IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size); + out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) && + IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size); + fast = in && out; + + if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg)) + fast = 0; + } + + + if (fast) { + count = min(dd->total, sg_dma_len(dd->in_sg)); + count = min(count, sg_dma_len(dd->out_sg)); + + err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); + if (!err) { + dev_err(dd->dev, "dma_map_sg() error\n"); + return -EINVAL; + } + + err = dma_map_sg(dd->dev, dd->out_sg, 1, + DMA_FROM_DEVICE); + if (!err) { + dev_err(dd->dev, "dma_map_sg() error\n"); + 
dma_unmap_sg(dd->dev, dd->in_sg, 1, + DMA_TO_DEVICE); + return -EINVAL; + } + + addr_in = sg_dma_address(dd->in_sg); + addr_out = sg_dma_address(dd->out_sg); + + dd->flags |= TDES_FLAGS_FAST; + + } else { + /* use cache buffers */ + count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset, + dd->buf_in, dd->buflen, dd->total, 0); + + addr_in = dd->dma_addr_in; + addr_out = dd->dma_addr_out; + + dd->flags &= ~TDES_FLAGS_FAST; + } + + dd->total -= count; + + if (dd->caps.has_dma) + err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count); + else + err = atmel_tdes_crypt_pdc(tfm, addr_in, addr_out, count); + + if (err && (dd->flags & TDES_FLAGS_FAST)) { + dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); + dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); + } + + return err; +} + +static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err) +{ + struct ablkcipher_request *req = dd->req; + + clk_disable_unprepare(dd->iclk); + + dd->flags &= ~TDES_FLAGS_BUSY; + + req->base.complete(&req->base, err); +} + +static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd, + struct ablkcipher_request *req) +{ + struct crypto_async_request *async_req, *backlog; + struct atmel_tdes_ctx *ctx; + struct atmel_tdes_reqctx *rctx; + unsigned long flags; + int err, ret = 0; + + spin_lock_irqsave(&dd->lock, flags); + if (req) + ret = ablkcipher_enqueue_request(&dd->queue, req); + if (dd->flags & TDES_FLAGS_BUSY) { + spin_unlock_irqrestore(&dd->lock, flags); + return ret; + } + backlog = crypto_get_backlog(&dd->queue); + async_req = crypto_dequeue_request(&dd->queue); + if (async_req) + dd->flags |= TDES_FLAGS_BUSY; + spin_unlock_irqrestore(&dd->lock, flags); + + if (!async_req) + return ret; + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + + req = ablkcipher_request_cast(async_req); + + /* assign new request to device */ + dd->req = req; + dd->total = req->nbytes; + dd->in_offset = 0; + dd->in_sg = req->src; + dd->out_offset = 0; + dd->out_sg = req->dst; + + rctx = ablkcipher_request_ctx(req); + ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); + rctx->mode &= TDES_FLAGS_MODE_MASK; + dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode; + dd->ctx = ctx; + ctx->dd = dd; + + err = atmel_tdes_write_ctrl(dd); + if (!err) + err = atmel_tdes_crypt_start(dd); + if (err) { + /* des_task will not finish it, so do it here */ + atmel_tdes_finish_req(dd, err); + tasklet_schedule(&dd->queue_task); + } + + return ret; +} + +static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd) +{ + int err = -EINVAL; + size_t count; + + if (dd->flags & TDES_FLAGS_DMA) { + err = 0; + if (dd->flags & TDES_FLAGS_FAST) { + dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); + dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); + } else { + dma_sync_single_for_device(dd->dev, dd->dma_addr_out, + dd->dma_size, DMA_FROM_DEVICE); + + /* copy data */ + count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset, + dd->buf_out, dd->buflen, dd->dma_size, 1); + if (count != dd->dma_size) { + err = -EINVAL; + pr_err("not all data converted: %u\n", count); + } + } + } + return err; +} + +static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode) +{ + struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx( + crypto_ablkcipher_reqtfm(req)); + struct atmel_tdes_reqctx *rctx = ablkcipher_request_ctx(req); + + if (mode & TDES_FLAGS_CFB8) { + if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) { + pr_err("request size is not exact amount of CFB8 blocks\n"); + return -EINVAL; + } + ctx->block_size 
= CFB8_BLOCK_SIZE; + } else if (mode & TDES_FLAGS_CFB16) { + if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) { + pr_err("request size is not exact amount of CFB16 blocks\n"); + return -EINVAL; + } + ctx->block_size = CFB16_BLOCK_SIZE; + } else if (mode & TDES_FLAGS_CFB32) { + if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) { + pr_err("request size is not exact amount of CFB32 blocks\n"); + return -EINVAL; + } + ctx->block_size = CFB32_BLOCK_SIZE; + } else { + if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) { + pr_err("request size is not exact amount of DES blocks\n"); + return -EINVAL; + } + ctx->block_size = DES_BLOCK_SIZE; + } + + rctx->mode = mode; + + return atmel_tdes_handle_queue(ctx->dd, req); +} + +static bool atmel_tdes_filter(struct dma_chan *chan, void *slave) +{ + struct at_dma_slave *sl = slave; + + if (sl && sl->dma_dev == chan->device->dev) { + chan->private = sl; + return true; + } else { + return false; + } +} + +static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd, + struct crypto_platform_data *pdata) +{ + int err = -ENOMEM; + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + /* Try to grab 2 DMA channels */ + dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask, + atmel_tdes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx"); + if (!dd->dma_lch_in.chan) + goto err_dma_in; + + dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; + dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + + TDES_IDATA1R; + dd->dma_lch_in.dma_conf.src_maxburst = 1; + dd->dma_lch_in.dma_conf.src_addr_width = + DMA_SLAVE_BUSWIDTH_4_BYTES; + dd->dma_lch_in.dma_conf.dst_maxburst = 1; + dd->dma_lch_in.dma_conf.dst_addr_width = + DMA_SLAVE_BUSWIDTH_4_BYTES; + dd->dma_lch_in.dma_conf.device_fc = false; + + dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask, + atmel_tdes_filter, &pdata->dma_slave->txdata, dd->dev, "rx"); + if (!dd->dma_lch_out.chan) + goto err_dma_out; + + dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM; + dd->dma_lch_out.dma_conf.src_addr = dd->phys_base + + TDES_ODATA1R; + dd->dma_lch_out.dma_conf.src_maxburst = 1; + dd->dma_lch_out.dma_conf.src_addr_width = + DMA_SLAVE_BUSWIDTH_4_BYTES; + dd->dma_lch_out.dma_conf.dst_maxburst = 1; + dd->dma_lch_out.dma_conf.dst_addr_width = + DMA_SLAVE_BUSWIDTH_4_BYTES; + dd->dma_lch_out.dma_conf.device_fc = false; + + return 0; + +err_dma_out: + dma_release_channel(dd->dma_lch_in.chan); +err_dma_in: + dev_warn(dd->dev, "no DMA channel available\n"); + return err; +} + +static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd) +{ + dma_release_channel(dd->dma_lch_in.chan); + dma_release_channel(dd->dma_lch_out.chan); +} + +static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ + u32 tmp[DES_EXPKEY_WORDS]; + int err; + struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm); + + struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + + if (keylen != DES_KEY_SIZE) { + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + err = des_ekey(tmp, key); + if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + ctfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; + return -EINVAL; + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + return 0; +} + +static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + const char *alg_name; + + alg_name = crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm)); + + /* + * HW bug in 
cfb 3-keys mode. + */ + if (!ctx->dd->caps.has_cfb_3keys && strstr(alg_name, "cfb") + && (keylen != 2*DES_KEY_SIZE)) { + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) { + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + return 0; +} + +static int atmel_tdes_ecb_encrypt(struct ablkcipher_request *req) +{ + return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT); +} + +static int atmel_tdes_ecb_decrypt(struct ablkcipher_request *req) +{ + return atmel_tdes_crypt(req, 0); +} + +static int atmel_tdes_cbc_encrypt(struct ablkcipher_request *req) +{ + return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC); +} + +static int atmel_tdes_cbc_decrypt(struct ablkcipher_request *req) +{ + return atmel_tdes_crypt(req, TDES_FLAGS_CBC); +} +static int atmel_tdes_cfb_encrypt(struct ablkcipher_request *req) +{ + return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB); +} + +static int atmel_tdes_cfb_decrypt(struct ablkcipher_request *req) +{ + return atmel_tdes_crypt(req, TDES_FLAGS_CFB); +} + +static int atmel_tdes_cfb8_encrypt(struct ablkcipher_request *req) +{ + return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB | + TDES_FLAGS_CFB8); +} + +static int atmel_tdes_cfb8_decrypt(struct ablkcipher_request *req) +{ + return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8); +} + +static int atmel_tdes_cfb16_encrypt(struct ablkcipher_request *req) +{ + return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB | + TDES_FLAGS_CFB16); +} + +static int atmel_tdes_cfb16_decrypt(struct ablkcipher_request *req) +{ + return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16); +} + +static int atmel_tdes_cfb32_encrypt(struct ablkcipher_request *req) +{ + return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB | + TDES_FLAGS_CFB32); +} + +static int atmel_tdes_cfb32_decrypt(struct ablkcipher_request *req) +{ + return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32); +} + +static int atmel_tdes_ofb_encrypt(struct ablkcipher_request *req) +{ + return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB); +} + +static int atmel_tdes_ofb_decrypt(struct ablkcipher_request *req) +{ + return atmel_tdes_crypt(req, TDES_FLAGS_OFB); +} + +static int atmel_tdes_cra_init(struct crypto_tfm *tfm) +{ + struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm); + struct atmel_tdes_dev *dd; + + tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_tdes_reqctx); + + dd = atmel_tdes_find_dev(ctx); + if (!dd) + return -ENODEV; + + return 0; +} + +static void atmel_tdes_cra_exit(struct crypto_tfm *tfm) +{ +} + +static struct crypto_alg tdes_algs[] = { +{ + .cra_name = "ecb(des)", + .cra_driver_name = "atmel-ecb-des", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_tdes_ctx), + .cra_alignmask = 0x7, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_tdes_cra_init, + .cra_exit = atmel_tdes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = atmel_des_setkey, + .encrypt = atmel_tdes_ecb_encrypt, + .decrypt = atmel_tdes_ecb_decrypt, + } +}, +{ + .cra_name = "cbc(des)", + .cra_driver_name = "atmel-cbc-des", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + 
.cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_tdes_ctx), + .cra_alignmask = 0x7, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_tdes_cra_init, + .cra_exit = atmel_tdes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = atmel_des_setkey, + .encrypt = atmel_tdes_cbc_encrypt, + .decrypt = atmel_tdes_cbc_decrypt, + } +}, +{ + .cra_name = "cfb(des)", + .cra_driver_name = "atmel-cfb-des", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_tdes_ctx), + .cra_alignmask = 0x7, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_tdes_cra_init, + .cra_exit = atmel_tdes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = atmel_des_setkey, + .encrypt = atmel_tdes_cfb_encrypt, + .decrypt = atmel_tdes_cfb_decrypt, + } +}, +{ + .cra_name = "cfb8(des)", + .cra_driver_name = "atmel-cfb8-des", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = CFB8_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_tdes_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_tdes_cra_init, + .cra_exit = atmel_tdes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = atmel_des_setkey, + .encrypt = atmel_tdes_cfb8_encrypt, + .decrypt = atmel_tdes_cfb8_decrypt, + } +}, +{ + .cra_name = "cfb16(des)", + .cra_driver_name = "atmel-cfb16-des", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = CFB16_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_tdes_ctx), + .cra_alignmask = 0x1, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_tdes_cra_init, + .cra_exit = atmel_tdes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = atmel_des_setkey, + .encrypt = atmel_tdes_cfb16_encrypt, + .decrypt = atmel_tdes_cfb16_decrypt, + } +}, +{ + .cra_name = "cfb32(des)", + .cra_driver_name = "atmel-cfb32-des", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = CFB32_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_tdes_ctx), + .cra_alignmask = 0x3, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_tdes_cra_init, + .cra_exit = atmel_tdes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = atmel_des_setkey, + .encrypt = atmel_tdes_cfb32_encrypt, + .decrypt = atmel_tdes_cfb32_decrypt, + } +}, +{ + .cra_name = "ofb(des)", + .cra_driver_name = "atmel-ofb-des", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_tdes_ctx), + .cra_alignmask = 0x7, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_tdes_cra_init, + .cra_exit = atmel_tdes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = atmel_des_setkey, + .encrypt = 
atmel_tdes_ofb_encrypt, + .decrypt = atmel_tdes_ofb_decrypt, + } +}, +{ + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "atmel-ecb-tdes", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_tdes_ctx), + .cra_alignmask = 0x7, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_tdes_cra_init, + .cra_exit = atmel_tdes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = 2 * DES_KEY_SIZE, + .max_keysize = 3 * DES_KEY_SIZE, + .setkey = atmel_tdes_setkey, + .encrypt = atmel_tdes_ecb_encrypt, + .decrypt = atmel_tdes_ecb_decrypt, + } +}, +{ + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "atmel-cbc-tdes", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_tdes_ctx), + .cra_alignmask = 0x7, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_tdes_cra_init, + .cra_exit = atmel_tdes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = 2*DES_KEY_SIZE, + .max_keysize = 3*DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = atmel_tdes_setkey, + .encrypt = atmel_tdes_cbc_encrypt, + .decrypt = atmel_tdes_cbc_decrypt, + } +}, +{ + .cra_name = "cfb(des3_ede)", + .cra_driver_name = "atmel-cfb-tdes", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_tdes_ctx), + .cra_alignmask = 0x7, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_tdes_cra_init, + .cra_exit = atmel_tdes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = 2*DES_KEY_SIZE, + .max_keysize = 2*DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = atmel_tdes_setkey, + .encrypt = atmel_tdes_cfb_encrypt, + .decrypt = atmel_tdes_cfb_decrypt, + } +}, +{ + .cra_name = "cfb8(des3_ede)", + .cra_driver_name = "atmel-cfb8-tdes", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = CFB8_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_tdes_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_tdes_cra_init, + .cra_exit = atmel_tdes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = 2*DES_KEY_SIZE, + .max_keysize = 2*DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = atmel_tdes_setkey, + .encrypt = atmel_tdes_cfb8_encrypt, + .decrypt = atmel_tdes_cfb8_decrypt, + } +}, +{ + .cra_name = "cfb16(des3_ede)", + .cra_driver_name = "atmel-cfb16-tdes", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = CFB16_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_tdes_ctx), + .cra_alignmask = 0x1, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_tdes_cra_init, + .cra_exit = atmel_tdes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = 2*DES_KEY_SIZE, + .max_keysize = 2*DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = atmel_tdes_setkey, + .encrypt = atmel_tdes_cfb16_encrypt, + .decrypt = atmel_tdes_cfb16_decrypt, + } +}, +{ + .cra_name = "cfb32(des3_ede)", + .cra_driver_name = "atmel-cfb32-tdes", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = CFB32_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_tdes_ctx), + .cra_alignmask = 0x3, + .cra_type = &crypto_ablkcipher_type, + .cra_module = 
THIS_MODULE, + .cra_init = atmel_tdes_cra_init, + .cra_exit = atmel_tdes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = 2*DES_KEY_SIZE, + .max_keysize = 2*DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = atmel_tdes_setkey, + .encrypt = atmel_tdes_cfb32_encrypt, + .decrypt = atmel_tdes_cfb32_decrypt, + } +}, +{ + .cra_name = "ofb(des3_ede)", + .cra_driver_name = "atmel-ofb-tdes", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_tdes_ctx), + .cra_alignmask = 0x7, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = atmel_tdes_cra_init, + .cra_exit = atmel_tdes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = 2*DES_KEY_SIZE, + .max_keysize = 3*DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = atmel_tdes_setkey, + .encrypt = atmel_tdes_ofb_encrypt, + .decrypt = atmel_tdes_ofb_decrypt, + } +}, +}; + +static void atmel_tdes_queue_task(unsigned long data) +{ + struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data; + + atmel_tdes_handle_queue(dd, NULL); +} + +static void atmel_tdes_done_task(unsigned long data) +{ + struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data; + int err; + + if (!(dd->flags & TDES_FLAGS_DMA)) + err = atmel_tdes_crypt_pdc_stop(dd); + else + err = atmel_tdes_crypt_dma_stop(dd); + + err = dd->err ? : err; + + if (dd->total && !err) { + if (dd->flags & TDES_FLAGS_FAST) { + dd->in_sg = sg_next(dd->in_sg); + dd->out_sg = sg_next(dd->out_sg); + if (!dd->in_sg || !dd->out_sg) + err = -EINVAL; + } + if (!err) + err = atmel_tdes_crypt_start(dd); + if (!err) + return; /* DMA started. Not fininishing. */ + } + + atmel_tdes_finish_req(dd, err); + atmel_tdes_handle_queue(dd, NULL); +} + +static irqreturn_t atmel_tdes_irq(int irq, void *dev_id) +{ + struct atmel_tdes_dev *tdes_dd = dev_id; + u32 reg; + + reg = atmel_tdes_read(tdes_dd, TDES_ISR); + if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) { + atmel_tdes_write(tdes_dd, TDES_IDR, reg); + if (TDES_FLAGS_BUSY & tdes_dd->flags) + tasklet_schedule(&tdes_dd->done_task); + else + dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n"); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) + crypto_unregister_alg(&tdes_algs[i]); +} + +static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd) +{ + int err, i, j; + + for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) { + err = crypto_register_alg(&tdes_algs[i]); + if (err) + goto err_tdes_algs; + } + + return 0; + +err_tdes_algs: + for (j = 0; j < i; j++) + crypto_unregister_alg(&tdes_algs[j]); + + return err; +} + +static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd) +{ + + dd->caps.has_dma = 0; + dd->caps.has_cfb_3keys = 0; + + /* keep only major version number */ + switch (dd->hw_version & 0xf00) { + case 0x700: + dd->caps.has_dma = 1; + dd->caps.has_cfb_3keys = 1; + break; + case 0x600: + break; + default: + dev_warn(dd->dev, + "Unmanaged tdes version, set minimum capabilities\n"); + break; + } +} + +#if defined(CONFIG_OF) +static const struct of_device_id atmel_tdes_dt_ids[] = { + { .compatible = "atmel,at91sam9g46-tdes" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids); + +static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct crypto_platform_data *pdata; + + if (!np) { + 
dev_err(&pdev->dev, "device node not found\n"); + return ERR_PTR(-EINVAL); + } + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + dev_err(&pdev->dev, "could not allocate memory for pdata\n"); + return ERR_PTR(-ENOMEM); + } + + pdata->dma_slave = devm_kzalloc(&pdev->dev, + sizeof(*(pdata->dma_slave)), + GFP_KERNEL); + if (!pdata->dma_slave) { + dev_err(&pdev->dev, "could not allocate memory for dma_slave\n"); + return ERR_PTR(-ENOMEM); + } + + return pdata; +} +#else /* CONFIG_OF */ +static inline struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev) +{ + return ERR_PTR(-EINVAL); +} +#endif + +static int atmel_tdes_probe(struct platform_device *pdev) +{ + struct atmel_tdes_dev *tdes_dd; + struct crypto_platform_data *pdata; + struct device *dev = &pdev->dev; + struct resource *tdes_res; + unsigned long tdes_phys_size; + int err; + + tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL); + if (tdes_dd == NULL) { + dev_err(dev, "unable to alloc data struct.\n"); + err = -ENOMEM; + goto tdes_dd_err; + } + + tdes_dd->dev = dev; + + platform_set_drvdata(pdev, tdes_dd); + + INIT_LIST_HEAD(&tdes_dd->list); + spin_lock_init(&tdes_dd->lock); + + tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task, + (unsigned long)tdes_dd); + tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task, + (unsigned long)tdes_dd); + + crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH); + + tdes_dd->irq = -1; + + /* Get the base address */ + tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!tdes_res) { + dev_err(dev, "no MEM resource info\n"); + err = -ENODEV; + goto res_err; + } + tdes_dd->phys_base = tdes_res->start; + tdes_phys_size = resource_size(tdes_res); + + /* Get the IRQ */ + tdes_dd->irq = platform_get_irq(pdev, 0); + if (tdes_dd->irq < 0) { + dev_err(dev, "no IRQ resource info\n"); + err = tdes_dd->irq; + goto res_err; + } + + err = request_irq(tdes_dd->irq, atmel_tdes_irq, IRQF_SHARED, + "atmel-tdes", tdes_dd); + if (err) { + dev_err(dev, "unable to request tdes irq.\n"); + goto tdes_irq_err; + } + + /* Initializing the clock */ + tdes_dd->iclk = clk_get(&pdev->dev, "tdes_clk"); + if (IS_ERR(tdes_dd->iclk)) { + dev_err(dev, "clock initialization failed.\n"); + err = PTR_ERR(tdes_dd->iclk); + goto clk_err; + } + + tdes_dd->io_base = ioremap(tdes_dd->phys_base, tdes_phys_size); + if (!tdes_dd->io_base) { + dev_err(dev, "can't ioremap\n"); + err = -ENOMEM; + goto tdes_io_err; + } + + atmel_tdes_hw_version_init(tdes_dd); + + atmel_tdes_get_cap(tdes_dd); + + err = atmel_tdes_buff_init(tdes_dd); + if (err) + goto err_tdes_buff; + + if (tdes_dd->caps.has_dma) { + pdata = pdev->dev.platform_data; + if (!pdata) { + pdata = atmel_tdes_of_init(pdev); + if (IS_ERR(pdata)) { + dev_err(&pdev->dev, "platform data not available\n"); + err = PTR_ERR(pdata); + goto err_pdata; + } + } + if (!pdata->dma_slave) { + err = -ENXIO; + goto err_pdata; + } + err = atmel_tdes_dma_init(tdes_dd, pdata); + if (err) + goto err_tdes_dma; + + dev_info(dev, "using %s, %s for DMA transfers\n", + dma_chan_name(tdes_dd->dma_lch_in.chan), + dma_chan_name(tdes_dd->dma_lch_out.chan)); + } + + spin_lock(&atmel_tdes.lock); + list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list); + spin_unlock(&atmel_tdes.lock); + + err = atmel_tdes_register_algs(tdes_dd); + if (err) + goto err_algs; + + dev_info(dev, "Atmel DES/TDES\n"); + + return 0; + +err_algs: + spin_lock(&atmel_tdes.lock); + list_del(&tdes_dd->list); + spin_unlock(&atmel_tdes.lock); + if 
(tdes_dd->caps.has_dma) + atmel_tdes_dma_cleanup(tdes_dd); +err_tdes_dma: +err_pdata: + atmel_tdes_buff_cleanup(tdes_dd); +err_tdes_buff: + iounmap(tdes_dd->io_base); +tdes_io_err: + clk_put(tdes_dd->iclk); +clk_err: + free_irq(tdes_dd->irq, tdes_dd); +tdes_irq_err: +res_err: + tasklet_kill(&tdes_dd->done_task); + tasklet_kill(&tdes_dd->queue_task); +tdes_dd_err: + dev_err(dev, "initialization failed.\n"); + + return err; +} + +static int atmel_tdes_remove(struct platform_device *pdev) +{ + static struct atmel_tdes_dev *tdes_dd; + + tdes_dd = platform_get_drvdata(pdev); + if (!tdes_dd) + return -ENODEV; + spin_lock(&atmel_tdes.lock); + list_del(&tdes_dd->list); + spin_unlock(&atmel_tdes.lock); + + atmel_tdes_unregister_algs(tdes_dd); + + tasklet_kill(&tdes_dd->done_task); + tasklet_kill(&tdes_dd->queue_task); + + if (tdes_dd->caps.has_dma) + atmel_tdes_dma_cleanup(tdes_dd); + + atmel_tdes_buff_cleanup(tdes_dd); + + iounmap(tdes_dd->io_base); + + clk_put(tdes_dd->iclk); + + if (tdes_dd->irq >= 0) + free_irq(tdes_dd->irq, tdes_dd); + + return 0; +} + +static struct platform_driver atmel_tdes_driver = { + .probe = atmel_tdes_probe, + .remove = atmel_tdes_remove, + .driver = { + .name = "atmel_tdes", + .of_match_table = of_match_ptr(atmel_tdes_dt_ids), + }, +}; + +module_platform_driver(atmel_tdes_driver); + +MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support."); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique"); diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c new file mode 100644 index 000000000..d9af9403a --- /dev/null +++ b/drivers/crypto/bfin_crc.c @@ -0,0 +1,767 @@ +/* + * Cryptographic API. + * + * Support Blackfin CRC HW acceleration. + * + * Copyright 2012 Analog Devices Inc. + * + * Licensed under the GPL-2. 
+ */ + +#include <linux/err.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/crypto.h> +#include <linux/cryptohash.h> +#include <crypto/scatterwalk.h> +#include <crypto/algapi.h> +#include <crypto/hash.h> +#include <crypto/internal/hash.h> +#include <asm/unaligned.h> + +#include <asm/dma.h> +#include <asm/portmux.h> +#include <asm/io.h> + +#include "bfin_crc.h" + +#define CRC_CCRYPTO_QUEUE_LENGTH 5 + +#define DRIVER_NAME "bfin-hmac-crc" +#define CHKSUM_DIGEST_SIZE 4 +#define CHKSUM_BLOCK_SIZE 1 + +#define CRC_MAX_DMA_DESC 100 + +#define CRC_CRYPTO_STATE_UPDATE 1 +#define CRC_CRYPTO_STATE_FINALUPDATE 2 +#define CRC_CRYPTO_STATE_FINISH 3 + +struct bfin_crypto_crc { + struct list_head list; + struct device *dev; + spinlock_t lock; + + int irq; + int dma_ch; + u32 poly; + struct crc_register *regs; + + struct ahash_request *req; /* current request in operation */ + struct dma_desc_array *sg_cpu; /* virt addr of sg dma descriptors */ + dma_addr_t sg_dma; /* phy addr of sg dma descriptors */ + u8 *sg_mid_buf; + dma_addr_t sg_mid_dma; /* phy addr of sg mid buffer */ + + struct tasklet_struct done_task; + struct crypto_queue queue; /* waiting requests */ + + u8 busy:1; /* crc device in operation flag */ +}; + +static struct bfin_crypto_crc_list { + struct list_head dev_list; + spinlock_t lock; +} crc_list; + +struct bfin_crypto_crc_reqctx { + struct bfin_crypto_crc *crc; + + unsigned int total; /* total request bytes */ + size_t sg_buflen; /* bytes for this update */ + unsigned int sg_nents; + struct scatterlist *sg; /* sg list head for this update*/ + struct scatterlist bufsl[2]; /* chained sg list */ + + size_t bufnext_len; + size_t buflast_len; + u8 bufnext[CHKSUM_DIGEST_SIZE]; /* extra bytes for next udpate */ + u8 buflast[CHKSUM_DIGEST_SIZE]; /* extra bytes from last udpate */ + + u8 flag; +}; + +struct bfin_crypto_crc_ctx { + struct bfin_crypto_crc *crc; + u32 key; +}; + + +/* + * derive number of elements in scatterlist + */ +static int sg_count(struct scatterlist *sg_list) +{ + struct scatterlist *sg = sg_list; + int sg_nents = 1; + + if (sg_list == NULL) + return 0; + + while (!sg_is_last(sg)) { + sg_nents++; + sg = sg_next(sg); + } + + return sg_nents; +} + +/* + * get element in scatter list by given index + */ +static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nents, + unsigned int index) +{ + struct scatterlist *sg = NULL; + int i; + + for_each_sg(sg_list, sg, nents, i) + if (i == index) + break; + + return sg; +} + +static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key) +{ + writel(0, &crc->regs->datacntrld); + writel(MODE_CALC_CRC << OPMODE_OFFSET, &crc->regs->control); + writel(key, &crc->regs->curresult); + + /* setup CRC interrupts */ + writel(CMPERRI | DCNTEXPI, &crc->regs->status); + writel(CMPERRI | DCNTEXPI, &crc->regs->intrenset); + + return 0; +} + +static int bfin_crypto_crc_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm); + struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); + struct bfin_crypto_crc *crc; + + dev_dbg(ctx->crc->dev, "crc_init\n"); + spin_lock_bh(&crc_list.lock); + list_for_each_entry(crc, 
&crc_list.dev_list, list) { + crc_ctx->crc = crc; + break; + } + spin_unlock_bh(&crc_list.lock); + + if (sg_count(req->src) > CRC_MAX_DMA_DESC) { + dev_dbg(ctx->crc->dev, "init: requested sg list is too big > %d\n", + CRC_MAX_DMA_DESC); + return -EINVAL; + } + + ctx->crc = crc; + ctx->bufnext_len = 0; + ctx->buflast_len = 0; + ctx->sg_buflen = 0; + ctx->total = 0; + ctx->flag = 0; + + /* init crc results */ + put_unaligned_le32(crc_ctx->key, req->result); + + dev_dbg(ctx->crc->dev, "init: digest size: %d\n", + crypto_ahash_digestsize(tfm)); + + return bfin_crypto_crc_init_hw(crc, crc_ctx->key); +} + +static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc) +{ + struct scatterlist *sg; + struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(crc->req); + int i = 0, j = 0; + unsigned long dma_config; + unsigned int dma_count; + unsigned int dma_addr; + unsigned int mid_dma_count = 0; + int dma_mod; + + dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE); + + for_each_sg(ctx->sg, sg, ctx->sg_nents, j) { + dma_addr = sg_dma_address(sg); + /* deduce extra bytes in last sg */ + if (sg_is_last(sg)) + dma_count = sg_dma_len(sg) - ctx->bufnext_len; + else + dma_count = sg_dma_len(sg); + + if (mid_dma_count) { + /* Append last middle dma buffer to 4 bytes with first + bytes in current sg buffer. Move addr of current + sg and deduce the length of current sg. + */ + memcpy(crc->sg_mid_buf +(i << 2) + mid_dma_count, + sg_virt(sg), + CHKSUM_DIGEST_SIZE - mid_dma_count); + dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count; + dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count; + + dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | + DMAEN | PSIZE_32 | WDSIZE_32; + + /* setup new dma descriptor for next middle dma */ + crc->sg_cpu[i].start_addr = crc->sg_mid_dma + (i << 2); + crc->sg_cpu[i].cfg = dma_config; + crc->sg_cpu[i].x_count = 1; + crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE; + dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, " + "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n", + i, crc->sg_cpu[i].start_addr, + crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count, + crc->sg_cpu[i].x_modify); + i++; + } + + dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32; + /* chop current sg dma len to multiple of 32 bits */ + mid_dma_count = dma_count % 4; + dma_count &= ~0x3; + + if (dma_addr % 4 == 0) { + dma_config |= WDSIZE_32; + dma_count >>= 2; + dma_mod = 4; + } else if (dma_addr % 2 == 0) { + dma_config |= WDSIZE_16; + dma_count >>= 1; + dma_mod = 2; + } else { + dma_config |= WDSIZE_8; + dma_mod = 1; + } + + crc->sg_cpu[i].start_addr = dma_addr; + crc->sg_cpu[i].cfg = dma_config; + crc->sg_cpu[i].x_count = dma_count; + crc->sg_cpu[i].x_modify = dma_mod; + dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, " + "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n", + i, crc->sg_cpu[i].start_addr, + crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count, + crc->sg_cpu[i].x_modify); + i++; + + if (mid_dma_count) { + /* copy extra bytes to next middle dma buffer */ + memcpy(crc->sg_mid_buf + (i << 2), + (u8*)sg_virt(sg) + (dma_count << 2), + mid_dma_count); + } + } + + dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32 | WDSIZE_32; + /* For final update req, append the buffer for next update as well*/ + if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE || + ctx->flag == CRC_CRYPTO_STATE_FINISH)) { + crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, ctx->bufnext, + CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE); + crc->sg_cpu[i].cfg = dma_config; + crc->sg_cpu[i].x_count = 1; + 
crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE; + dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, " + "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n", + i, crc->sg_cpu[i].start_addr, + crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count, + crc->sg_cpu[i].x_modify); + i++; + } + + if (i == 0) + return; + + /* Set the last descriptor to stop mode */ + crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE); + crc->sg_cpu[i - 1].cfg |= DI_EN; + set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma); + set_dma_x_count(crc->dma_ch, 0); + set_dma_x_modify(crc->dma_ch, 0); + set_dma_config(crc->dma_ch, dma_config); +} + +static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc, + struct ahash_request *req) +{ + struct crypto_async_request *async_req, *backlog; + struct bfin_crypto_crc_reqctx *ctx; + struct scatterlist *sg; + int ret = 0; + int nsg, i, j; + unsigned int nextlen; + unsigned long flags; + u32 reg; + + spin_lock_irqsave(&crc->lock, flags); + if (req) + ret = ahash_enqueue_request(&crc->queue, req); + if (crc->busy) { + spin_unlock_irqrestore(&crc->lock, flags); + return ret; + } + backlog = crypto_get_backlog(&crc->queue); + async_req = crypto_dequeue_request(&crc->queue); + if (async_req) + crc->busy = 1; + spin_unlock_irqrestore(&crc->lock, flags); + + if (!async_req) + return ret; + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + + req = ahash_request_cast(async_req); + crc->req = req; + ctx = ahash_request_ctx(req); + ctx->sg = NULL; + ctx->sg_buflen = 0; + ctx->sg_nents = 0; + + dev_dbg(crc->dev, "handling new req, flag=%u, nbytes: %d\n", + ctx->flag, req->nbytes); + + if (ctx->flag == CRC_CRYPTO_STATE_FINISH) { + if (ctx->bufnext_len == 0) { + crc->busy = 0; + return 0; + } + + /* Pack last crc update buffer to 32bit */ + memset(ctx->bufnext + ctx->bufnext_len, 0, + CHKSUM_DIGEST_SIZE - ctx->bufnext_len); + } else { + /* Pack small data which is less than 32bit to buffer for next update. */ + if (ctx->bufnext_len + req->nbytes < CHKSUM_DIGEST_SIZE) { + memcpy(ctx->bufnext + ctx->bufnext_len, + sg_virt(req->src), req->nbytes); + ctx->bufnext_len += req->nbytes; + if (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE && + ctx->bufnext_len) { + goto finish_update; + } else { + crc->busy = 0; + return 0; + } + } + + if (ctx->bufnext_len) { + /* Chain in extra bytes of last update */ + ctx->buflast_len = ctx->bufnext_len; + memcpy(ctx->buflast, ctx->bufnext, ctx->buflast_len); + + nsg = ctx->sg_buflen ? 
2 : 1; + sg_init_table(ctx->bufsl, nsg); + sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len); + if (nsg > 1) + scatterwalk_sg_chain(ctx->bufsl, nsg, + req->src); + ctx->sg = ctx->bufsl; + } else + ctx->sg = req->src; + + /* Chop crc buffer size to multiple of 32 bit */ + nsg = ctx->sg_nents = sg_count(ctx->sg); + ctx->sg_buflen = ctx->buflast_len + req->nbytes; + ctx->bufnext_len = ctx->sg_buflen % 4; + ctx->sg_buflen &= ~0x3; + + if (ctx->bufnext_len) { + /* copy extra bytes to buffer for next update */ + memset(ctx->bufnext, 0, CHKSUM_DIGEST_SIZE); + nextlen = ctx->bufnext_len; + for (i = nsg - 1; i >= 0; i--) { + sg = sg_get(ctx->sg, nsg, i); + j = min(nextlen, sg_dma_len(sg)); + memcpy(ctx->bufnext + nextlen - j, + sg_virt(sg) + sg_dma_len(sg) - j, j); + if (j == sg_dma_len(sg)) + ctx->sg_nents--; + nextlen -= j; + if (nextlen == 0) + break; + } + } + } + +finish_update: + if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE || + ctx->flag == CRC_CRYPTO_STATE_FINISH)) + ctx->sg_buflen += CHKSUM_DIGEST_SIZE; + + /* set CRC data count before start DMA */ + writel(ctx->sg_buflen >> 2, &crc->regs->datacnt); + + /* setup and enable CRC DMA */ + bfin_crypto_crc_config_dma(crc); + + /* finally kick off CRC operation */ + reg = readl(&crc->regs->control); + writel(reg | BLKEN, &crc->regs->control); + + return -EINPROGRESS; +} + +static int bfin_crypto_crc_update(struct ahash_request *req) +{ + struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); + + if (!req->nbytes) + return 0; + + dev_dbg(ctx->crc->dev, "crc_update\n"); + ctx->total += req->nbytes; + ctx->flag = CRC_CRYPTO_STATE_UPDATE; + + return bfin_crypto_crc_handle_queue(ctx->crc, req); +} + +static int bfin_crypto_crc_final(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm); + struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); + + dev_dbg(ctx->crc->dev, "crc_final\n"); + ctx->flag = CRC_CRYPTO_STATE_FINISH; + crc_ctx->key = 0; + + return bfin_crypto_crc_handle_queue(ctx->crc, req); +} + +static int bfin_crypto_crc_finup(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm); + struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); + + dev_dbg(ctx->crc->dev, "crc_finishupdate\n"); + ctx->total += req->nbytes; + ctx->flag = CRC_CRYPTO_STATE_FINALUPDATE; + crc_ctx->key = 0; + + return bfin_crypto_crc_handle_queue(ctx->crc, req); +} + +static int bfin_crypto_crc_digest(struct ahash_request *req) +{ + int ret; + + ret = bfin_crypto_crc_init(req); + if (ret) + return ret; + + return bfin_crypto_crc_finup(req); +} + +static int bfin_crypto_crc_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm); + + dev_dbg(crc_ctx->crc->dev, "crc_setkey\n"); + if (keylen != CHKSUM_DIGEST_SIZE) { + crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + crc_ctx->key = get_unaligned_le32(key); + + return 0; +} + +static int bfin_crypto_crc_cra_init(struct crypto_tfm *tfm) +{ + struct bfin_crypto_crc_ctx *crc_ctx = crypto_tfm_ctx(tfm); + + crc_ctx->key = 0; + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct bfin_crypto_crc_reqctx)); + + return 0; +} + +static void bfin_crypto_crc_cra_exit(struct crypto_tfm *tfm) +{ +} + +static struct ahash_alg algs = { + .init = bfin_crypto_crc_init, + .update = 
bfin_crypto_crc_update, + .final = bfin_crypto_crc_final, + .finup = bfin_crypto_crc_finup, + .digest = bfin_crypto_crc_digest, + .setkey = bfin_crypto_crc_setkey, + .halg.digestsize = CHKSUM_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(crc32)", + .cra_driver_name = DRIVER_NAME, + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC, + .cra_blocksize = CHKSUM_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_init = bfin_crypto_crc_cra_init, + .cra_exit = bfin_crypto_crc_cra_exit, + } +}; + +static void bfin_crypto_crc_done_task(unsigned long data) +{ + struct bfin_crypto_crc *crc = (struct bfin_crypto_crc *)data; + + bfin_crypto_crc_handle_queue(crc, NULL); +} + +static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id) +{ + struct bfin_crypto_crc *crc = dev_id; + u32 reg; + + if (readl(&crc->regs->status) & DCNTEXP) { + writel(DCNTEXP, &crc->regs->status); + + /* prepare results */ + put_unaligned_le32(readl(&crc->regs->result), + crc->req->result); + + reg = readl(&crc->regs->control); + writel(reg & ~BLKEN, &crc->regs->control); + crc->busy = 0; + + if (crc->req->base.complete) + crc->req->base.complete(&crc->req->base, 0); + + tasklet_schedule(&crc->done_task); + + return IRQ_HANDLED; + } else + return IRQ_NONE; +} + +#ifdef CONFIG_PM +/** + * bfin_crypto_crc_suspend - suspend crc device + * @pdev: device being suspended + * @state: requested suspend state + */ +static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct bfin_crypto_crc *crc = platform_get_drvdata(pdev); + int i = 100000; + + while ((readl(&crc->regs->control) & BLKEN) && --i) + cpu_relax(); + + if (i == 0) + return -EBUSY; + + return 0; +} +#else +# define bfin_crypto_crc_suspend NULL +#endif + +#define bfin_crypto_crc_resume NULL + +/** + * bfin_crypto_crc_probe - Initialize module + * + */ +static int bfin_crypto_crc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct bfin_crypto_crc *crc; + unsigned int timeout = 100000; + int ret; + + crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL); + if (!crc) { + dev_err(&pdev->dev, "fail to malloc bfin_crypto_crc\n"); + return -ENOMEM; + } + + crc->dev = dev; + + INIT_LIST_HEAD(&crc->list); + spin_lock_init(&crc->lock); + tasklet_init(&crc->done_task, bfin_crypto_crc_done_task, (unsigned long)crc); + crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); + return -ENOENT; + } + + crc->regs = devm_ioremap_resource(dev, res); + if (IS_ERR((void *)crc->regs)) { + dev_err(&pdev->dev, "Cannot map CRC IO\n"); + return PTR_ERR((void *)crc->regs); + } + + crc->irq = platform_get_irq(pdev, 0); + if (crc->irq < 0) { + dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n"); + return -ENOENT; + } + + ret = devm_request_irq(dev, crc->irq, bfin_crypto_crc_handler, + IRQF_SHARED, dev_name(dev), crc); + if (ret) { + dev_err(&pdev->dev, "Unable to request blackfin crc irq\n"); + return ret; + } + + res = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (res == NULL) { + dev_err(&pdev->dev, "No CRC DMA channel specified\n"); + return -ENOENT; + } + crc->dma_ch = res->start; + + ret = request_dma(crc->dma_ch, dev_name(dev)); + if (ret) { + dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n"); + return ret; + } + + crc->sg_cpu = 
dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL); + if (crc->sg_cpu == NULL) { + ret = -ENOMEM; + goto out_error_dma; + } + /* + * need at most CRC_MAX_DMA_DESC sg + CRC_MAX_DMA_DESC middle + + * 1 last + 1 next dma descriptors + */ + crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1)); + crc->sg_mid_dma = crc->sg_dma + sizeof(struct dma_desc_array) + * ((CRC_MAX_DMA_DESC + 1) << 1); + + writel(0, &crc->regs->control); + crc->poly = (u32)pdev->dev.platform_data; + writel(crc->poly, &crc->regs->poly); + + while (!(readl(&crc->regs->status) & LUTDONE) && (--timeout) > 0) + cpu_relax(); + + if (timeout == 0) + dev_info(&pdev->dev, "init crc poly timeout\n"); + + platform_set_drvdata(pdev, crc); + + spin_lock(&crc_list.lock); + list_add(&crc->list, &crc_list.dev_list); + spin_unlock(&crc_list.lock); + + if (list_is_singular(&crc_list.dev_list)) { + ret = crypto_register_ahash(&algs); + if (ret) { + dev_err(&pdev->dev, + "Can't register crypto ahash device\n"); + goto out_error_dma; + } + } + + dev_info(&pdev->dev, "initialized\n"); + + return 0; + +out_error_dma: + if (crc->sg_cpu) + dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma); + free_dma(crc->dma_ch); + + return ret; +} + +/** + * bfin_crypto_crc_remove - Initialize module + * + */ +static int bfin_crypto_crc_remove(struct platform_device *pdev) +{ + struct bfin_crypto_crc *crc = platform_get_drvdata(pdev); + + if (!crc) + return -ENODEV; + + spin_lock(&crc_list.lock); + list_del(&crc->list); + spin_unlock(&crc_list.lock); + + crypto_unregister_ahash(&algs); + tasklet_kill(&crc->done_task); + free_dma(crc->dma_ch); + + return 0; +} + +static struct platform_driver bfin_crypto_crc_driver = { + .probe = bfin_crypto_crc_probe, + .remove = bfin_crypto_crc_remove, + .suspend = bfin_crypto_crc_suspend, + .resume = bfin_crypto_crc_resume, + .driver = { + .name = DRIVER_NAME, + }, +}; + +/** + * bfin_crypto_crc_mod_init - Initialize module + * + * Checks the module params and registers the platform driver. + * Real work is in the platform probe function. + */ +static int __init bfin_crypto_crc_mod_init(void) +{ + int ret; + + pr_info("Blackfin hardware CRC crypto driver\n"); + + INIT_LIST_HEAD(&crc_list.dev_list); + spin_lock_init(&crc_list.lock); + + ret = platform_driver_register(&bfin_crypto_crc_driver); + if (ret) { + pr_err("unable to register driver\n"); + return ret; + } + + return 0; +} + +/** + * bfin_crypto_crc_mod_exit - Deinitialize module + */ +static void __exit bfin_crypto_crc_mod_exit(void) +{ + platform_driver_unregister(&bfin_crypto_crc_driver); +} + +module_init(bfin_crypto_crc_mod_init); +module_exit(bfin_crypto_crc_mod_exit); + +MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>"); +MODULE_DESCRIPTION("Blackfin CRC hardware crypto driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/bfin_crc.h b/drivers/crypto/bfin_crc.h new file mode 100644 index 000000000..75cef4dc8 --- /dev/null +++ b/drivers/crypto/bfin_crc.h @@ -0,0 +1,125 @@ +/* + * bfin_crc.h - interface to Blackfin CRC controllers + * + * Copyright 2012 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. 
+ */ + +#ifndef __BFIN_CRC_H__ +#define __BFIN_CRC_H__ + +/* Function driver which use hardware crc must initialize the structure */ +struct crc_info { + /* Input data address */ + unsigned char *in_addr; + /* Output data address */ + unsigned char *out_addr; + /* Input or output bytes */ + unsigned long datasize; + union { + /* CRC to compare with that of input buffer */ + unsigned long crc_compare; + /* Value to compare with input data */ + unsigned long val_verify; + /* Value to fill */ + unsigned long val_fill; + }; + /* Value to program the 32b CRC Polynomial */ + unsigned long crc_poly; + union { + /* CRC calculated from the input data */ + unsigned long crc_result; + /* First failed position to verify input data */ + unsigned long pos_verify; + }; + /* CRC mirror flags */ + unsigned int bitmirr:1; + unsigned int bytmirr:1; + unsigned int w16swp:1; + unsigned int fdsel:1; + unsigned int rsltmirr:1; + unsigned int polymirr:1; + unsigned int cmpmirr:1; +}; + +/* Userspace interface */ +#define CRC_IOC_MAGIC 'C' +#define CRC_IOC_CALC_CRC _IOWR('C', 0x01, unsigned int) +#define CRC_IOC_MEMCPY_CRC _IOWR('C', 0x02, unsigned int) +#define CRC_IOC_VERIFY_VAL _IOWR('C', 0x03, unsigned int) +#define CRC_IOC_FILL_VAL _IOWR('C', 0x04, unsigned int) + + +#ifdef __KERNEL__ + +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/miscdevice.h> + +struct crc_register { + u32 control; + u32 datacnt; + u32 datacntrld; + u32 __pad_1[2]; + u32 compare; + u32 fillval; + u32 datafifo; + u32 intren; + u32 intrenset; + u32 intrenclr; + u32 poly; + u32 __pad_2[4]; + u32 status; + u32 datacntcap; + u32 __pad_3; + u32 result; + u32 curresult; + u32 __pad_4[3]; + u32 revid; +}; + +/* CRC_STATUS Masks */ +#define CMPERR 0x00000002 /* Compare error */ +#define DCNTEXP 0x00000010 /* datacnt register expired */ +#define IBR 0x00010000 /* Input buffer ready */ +#define OBR 0x00020000 /* Output buffer ready */ +#define IRR 0x00040000 /* Immediate result readt */ +#define LUTDONE 0x00080000 /* Look-up table generation done */ +#define FSTAT 0x00700000 /* FIFO status */ +#define MAX_FIFO 4 /* Max fifo size */ + +/* CRC_CONTROL Masks */ +#define BLKEN 0x00000001 /* Block enable */ +#define OPMODE 0x000000F0 /* Operation mode */ +#define OPMODE_OFFSET 4 /* Operation mode mask offset*/ +#define MODE_DMACPY_CRC 1 /* MTM CRC compute and compare */ +#define MODE_DATA_FILL 2 /* MTM data fill */ +#define MODE_CALC_CRC 3 /* MSM CRC compute and compare */ +#define MODE_DATA_VERIFY 4 /* MSM data verify */ +#define AUTOCLRZ 0x00000100 /* Auto clear to zero */ +#define AUTOCLRF 0x00000200 /* Auto clear to one */ +#define OBRSTALL 0x00001000 /* Stall on output buffer ready */ +#define IRRSTALL 0x00002000 /* Stall on immediate result ready */ +#define BITMIRR 0x00010000 /* Mirror bits within each byte of 32-bit input data */ +#define BITMIRR_OFFSET 16 /* Mirror bits offset */ +#define BYTMIRR 0x00020000 /* Mirror bytes of 32-bit input data */ +#define BYTMIRR_OFFSET 17 /* Mirror bytes offset */ +#define W16SWP 0x00040000 /* Mirror uppper and lower 16-bit word of 32-bit input data */ +#define W16SWP_OFFSET 18 /* Mirror 16-bit word offset */ +#define FDSEL 0x00080000 /* FIFO is written after input data is mirrored */ +#define FDSEL_OFFSET 19 /* Mirror FIFO offset */ +#define RSLTMIRR 0x00100000 /* CRC result registers are mirrored. */ +#define RSLTMIRR_OFFSET 20 /* Mirror CRC result offset. */ +#define POLYMIRR 0x00200000 /* CRC poly register is mirrored. */ +#define POLYMIRR_OFFSET 21 /* Mirror CRC poly offset. 
*/ +#define CMPMIRR 0x00400000 /* CRC compare register is mirrored. */ +#define CMPMIRR_OFFSET 22 /* Mirror CRC compare offset. */ + +/* CRC_INTREN Masks */ +#define CMPERRI 0x02 /* CRC_ERROR_INTR */ +#define DCNTEXPI 0x10 /* CRC_STATUS_INTR */ + +#endif + +#endif diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig new file mode 100644 index 000000000..e7555ff4c --- /dev/null +++ b/drivers/crypto/caam/Kconfig @@ -0,0 +1,121 @@ +config CRYPTO_DEV_FSL_CAAM + tristate "Freescale CAAM-Multicore driver backend" + depends on FSL_SOC + help + Enables the driver module for Freescale's Cryptographic Accelerator + and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). + This module creates job ring devices, and configures h/w + to operate as a DPAA component automatically, depending + on h/w feature availability. + + To compile this driver as a module, choose M here: the module + will be called caam. + +config CRYPTO_DEV_FSL_CAAM_JR + tristate "Freescale CAAM Job Ring driver backend" + depends on CRYPTO_DEV_FSL_CAAM + default y + help + Enables the driver module for Job Rings which are part of + Freescale's Cryptographic Accelerator + and Assurance Module (CAAM). This module adds a job ring operation + interface. + + To compile this driver as a module, choose M here: the module + will be called caam_jr. + +config CRYPTO_DEV_FSL_CAAM_RINGSIZE + int "Job Ring size" + depends on CRYPTO_DEV_FSL_CAAM_JR + range 2 9 + default "9" + help + Select size of Job Rings as a power of 2, within the + range 2-9 (ring size 4-512). + Examples: + 2 => 4 + 3 => 8 + 4 => 16 + 5 => 32 + 6 => 64 + 7 => 128 + 8 => 256 + 9 => 512 + +config CRYPTO_DEV_FSL_CAAM_INTC + bool "Job Ring interrupt coalescing" + depends on CRYPTO_DEV_FSL_CAAM_JR + default n + help + Enable the Job Ring's interrupt coalescing feature. + + Note: the driver already provides adequate + interrupt coalescing in software. + +config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD + int "Job Ring interrupt coalescing count threshold" + depends on CRYPTO_DEV_FSL_CAAM_INTC + range 1 255 + default 255 + help + Select number of descriptor completions to queue before + raising an interrupt, in the range 1-255. Note that a selection + of 1 functionally defeats the coalescing feature, and a selection + equal or greater than the job ring size will force timeouts. + +config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD + int "Job Ring interrupt coalescing timer threshold" + depends on CRYPTO_DEV_FSL_CAAM_INTC + range 1 65535 + default 2048 + help + Select number of bus clocks/64 to timeout in the case that one or + more descriptor completions are queued without reaching the count + threshold. Range is 1-65535. + +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API + tristate "Register algorithm implementations with the Crypto API" + depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR + default y + select CRYPTO_ALGAPI + select CRYPTO_AUTHENC + help + Selecting this will offload crypto for users of the + scatterlist crypto API (such as the linux native IPSec + stack) to the SEC4 via job ring. + + To compile this as a module, choose M here: the module + will be called caamalg. + +config CRYPTO_DEV_FSL_CAAM_AHASH_API + tristate "Register hash algorithm implementations with Crypto API" + depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR + default y + select CRYPTO_HASH + help + Selecting this will offload ahash for users of the + scatterlist crypto API to the SEC4 via job ring. 
+ + To compile this as a module, choose M here: the module + will be called caamhash. + +config CRYPTO_DEV_FSL_CAAM_RNG_API + tristate "Register caam device for hwrng API" + depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR + default y + select CRYPTO_RNG + select HW_RANDOM + help + Selecting this will register the SEC4 hardware rng to + the hw_random API for suppying the kernel entropy pool. + + To compile this as a module, choose M here: the module + will be called caamrng. + +config CRYPTO_DEV_FSL_CAAM_DEBUG + bool "Enable debug output in CAAM driver" + depends on CRYPTO_DEV_FSL_CAAM + default n + help + Selecting this will enable printing of various debug + information in the CAAM driver. diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile new file mode 100644 index 000000000..550758a33 --- /dev/null +++ b/drivers/crypto/caam/Makefile @@ -0,0 +1,15 @@ +# +# Makefile for the CAAM backend and dependent components +# +ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y) + EXTRA_CFLAGS := -DDEBUG +endif + +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o + +caam-objs := ctrl.o +caam_jr-objs := jr.o key_gen.o error.o diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c new file mode 100644 index 000000000..29071a156 --- /dev/null +++ b/drivers/crypto/caam/caamalg.c @@ -0,0 +1,4312 @@ +/* + * caam - Freescale FSL CAAM support for crypto API + * + * Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Based on talitos crypto API driver. + * + * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008): + * + * --------------- --------------- + * | JobDesc #1 |-------------------->| ShareDesc | + * | *(packet 1) | | (PDB) | + * --------------- |------------->| (hashKey) | + * . | | (cipherKey) | + * . | |-------->| (operation) | + * --------------- | | --------------- + * | JobDesc #2 |------| | + * | *(packet 2) | | + * --------------- | + * . | + * . | + * --------------- | + * | JobDesc #3 |------------ + * | *(packet 3) | + * --------------- + * + * The SharedDesc never changes for a connection unless rekeyed, but + * each packet will likely be in a different place. So all we need + * to know to process the packet is where the input is, where the + * output goes, and what context we want to process with. Context is + * in the SharedDesc, packet references in the JobDesc. 
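+ * The shared descriptors built by this driver (sh_desc_enc, sh_desc_dec and sh_desc_givenc in struct caam_ctx below) play the ShareDesc role in this diagram.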
+ * + * So, a job desc looks like: + * + * --------------------- + * | Header | + * | ShareDesc Pointer | + * | SEQ_OUT_PTR | + * | (output buffer) | + * | (output length) | + * | SEQ_IN_PTR | + * | (input buffer) | + * | (input length) | + * --------------------- + */ + +#include "compat.h" + +#include "regs.h" +#include "intern.h" +#include "desc_constr.h" +#include "jr.h" +#include "error.h" +#include "sg_sw_sec4.h" +#include "key_gen.h" + +/* + * crypto alg + */ +#define CAAM_CRA_PRIORITY 3000 +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */ +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ + CTR_RFC3686_NONCE_SIZE + \ + SHA512_DIGEST_SIZE * 2) +/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ +#define CAAM_MAX_IV_LENGTH 16 + +/* length of descriptors text */ +#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) +#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) +#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ) +#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ) + +/* Note: Nonce is counted in enckeylen */ +#define DESC_AEAD_CTR_RFC3686_LEN (6 * CAAM_CMD_SZ) + +#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ) +#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ) +#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ) + +#define DESC_GCM_BASE (3 * CAAM_CMD_SZ) +#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 23 * CAAM_CMD_SZ) +#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 19 * CAAM_CMD_SZ) + +#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) +#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ) +#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ) +#define DESC_RFC4106_GIVENC_LEN (DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ) + +#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ) +#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 25 * CAAM_CMD_SZ) +#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 27 * CAAM_CMD_SZ) +#define DESC_RFC4543_GIVENC_LEN (DESC_RFC4543_BASE + 30 * CAAM_CMD_SZ) + +#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) +#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \ + 20 * CAAM_CMD_SZ) +#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \ + 15 * CAAM_CMD_SZ) + +#define DESC_MAX_USED_BYTES (DESC_RFC4543_GIVENC_LEN + \ + CAAM_MAX_KEY_SIZE) +#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) + +#ifdef DEBUG +/* for print_hex_dumps with line references */ +#define debug(format, arg...) printk(format, arg) +#else +#define debug(format, arg...) 
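+/* When DEBUG is not defined, debug() expands to nothing and its arguments are discarded at compile time. */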
+#endif +static struct list_head alg_list; + +/* Set DK bit in class 1 operation if shared */ +static inline void append_dec_op1(u32 *desc, u32 type) +{ + u32 *jump_cmd, *uncond_jump_cmd; + + /* DK bit is valid only for AES */ + if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) { + append_operation(desc, type | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT); + return; + } + + jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); + append_operation(desc, type | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT); + uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL); + set_jump_tgt_here(desc, jump_cmd); + append_operation(desc, type | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT | OP_ALG_AAI_DK); + set_jump_tgt_here(desc, uncond_jump_cmd); +} + +/* + * For aead functions, read payload and write payload, + * both of which are specified in req->src and req->dst + */ +static inline void aead_append_src_dst(u32 *desc, u32 msg_type) +{ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | + KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH); +} + +/* + * For aead encrypt and decrypt, read iv for both classes + */ +static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset) +{ + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + (ivoffset << LDST_OFFSET_SHIFT)); + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | + (ivoffset << MOVE_OFFSET_SHIFT) | ivsize); +} + +/* + * For ablkcipher encrypt and decrypt, read from req->src and + * write to req->dst + */ +static inline void ablkcipher_append_src_dst(u32 *desc) +{ + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | + KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); +} + +/* + * If all data, including src (with assoc and iv) or dst (with iv only) are + * contiguous + */ +#define GIV_SRC_CONTIG 1 +#define GIV_DST_CONTIG (1 << 1) + +/* + * per-session context + */ +struct caam_ctx { + struct device *jrdev; + u32 sh_desc_enc[DESC_MAX_USED_LEN]; + u32 sh_desc_dec[DESC_MAX_USED_LEN]; + u32 sh_desc_givenc[DESC_MAX_USED_LEN]; + dma_addr_t sh_desc_enc_dma; + dma_addr_t sh_desc_dec_dma; + dma_addr_t sh_desc_givenc_dma; + u32 class1_alg_type; + u32 class2_alg_type; + u32 alg_op; + u8 key[CAAM_MAX_KEY_SIZE]; + dma_addr_t key_dma; + unsigned int enckeylen; + unsigned int split_key_len; + unsigned int split_key_pad_len; + unsigned int authsize; +}; + +static void append_key_aead(u32 *desc, struct caam_ctx *ctx, + int keys_fit_inline, bool is_rfc3686) +{ + u32 *nonce; + unsigned int enckeylen = ctx->enckeylen; + + /* + * RFC3686 specific: + * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE} + * | enckeylen = encryption key size + nonce size + */ + if (is_rfc3686) + enckeylen -= CTR_RFC3686_NONCE_SIZE; + + if (keys_fit_inline) { + append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, + ctx->split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); + append_key_as_imm(desc, (void *)ctx->key + + ctx->split_key_pad_len, enckeylen, + enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); + } else { + append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); + append_key(desc, ctx->key_dma + ctx->split_key_pad_len, + enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); + } + + /* Load Counter into CONTEXT1 reg */ + if (is_rfc3686) { + nonce = (u32 *)((void 
*)ctx->key + ctx->split_key_pad_len + + enckeylen); + append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); + append_move(desc, + MOVE_SRC_OUTFIFO | + MOVE_DEST_CLASS1CTX | + (16 << MOVE_OFFSET_SHIFT) | + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); + } +} + +static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, + int keys_fit_inline, bool is_rfc3686) +{ + u32 *key_jump_cmd; + + /* Note: Context registers are saved. */ + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); + + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + + append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); + + set_jump_tgt_here(desc, key_jump_cmd); +} + +static int aead_null_set_sh_desc(struct crypto_aead *aead) +{ + struct aead_tfm *tfm = &aead->base.crt_aead; + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool keys_fit_inline = false; + u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd; + u32 *desc; + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN + + ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + /* aead_encrypt shared descriptor */ + desc = ctx->sh_desc_enc; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + if (keys_fit_inline) + append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, + ctx->split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); + else + append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); + set_jump_tgt_here(desc, key_jump_cmd); + + /* cryptlen = seqoutlen - authsize */ + append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); + + /* + * NULL encryption; IV is zero + * assoclen = (assoclen + cryptlen) - cryptlen + */ + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); + + /* read assoc before reading payload */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | + KEY_VLF); + + /* Prepare to read and write cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* + * MOVE_LEN opcode is not available in all SEC HW revisions, + * thus need to do some magic, i.e. self-patch the descriptor + * buffer. 
+ */ + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | + MOVE_DEST_MATH3 | + (0x6 << MOVE_LEN_SHIFT)); + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | + MOVE_DEST_DESCBUF | + MOVE_WAITCOMP | + (0x8 << MOVE_LEN_SHIFT)); + + /* Class 2 operation */ + append_operation(desc, ctx->class2_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* Read and write cryptlen bytes */ + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); + + set_move_tgt_here(desc, read_move_cmd); + set_move_tgt_here(desc, write_move_cmd); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | + MOVE_AUX_LS); + + /* Write ICV */ + append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, + "aead null enc shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + keys_fit_inline = false; + if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN + + ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + desc = ctx->sh_desc_dec; + + /* aead_decrypt shared descriptor */ + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + if (keys_fit_inline) + append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, + ctx->split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); + else + append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); + set_jump_tgt_here(desc, key_jump_cmd); + + /* Class 2 operation */ + append_operation(desc, ctx->class2_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); + + /* assoclen + cryptlen = seqinlen - ivsize - authsize */ + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, + ctx->authsize + tfm->ivsize); + /* assoclen = (assoclen + cryptlen) - cryptlen */ + append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); + append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); + + /* read assoc before reading payload */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | + KEY_VLF); + + /* Prepare to read and write cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); + + /* + * MOVE_LEN opcode is not available in all SEC HW revisions, + * thus need to do some magic, i.e. self-patch the descriptor + * buffer. + */ + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | + MOVE_DEST_MATH2 | + (0x6 << MOVE_LEN_SHIFT)); + write_move_cmd = append_move(desc, MOVE_SRC_MATH2 | + MOVE_DEST_DESCBUF | + MOVE_WAITCOMP | + (0x8 << MOVE_LEN_SHIFT)); + + /* Read and write cryptlen bytes */ + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); + + /* + * Insert a NOP here, since we need at least 4 instructions between + * code patching the descriptor buffer and the location being patched. 
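+ * The jump command appended right below, whose target is set to the very next command, acts as that NOP.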
+ */ + jump_cmd = append_jump(desc, JUMP_TEST_ALL); + set_jump_tgt_here(desc, jump_cmd); + + set_move_tgt_here(desc, read_move_cmd); + set_move_tgt_here(desc, write_move_cmd); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | + MOVE_AUX_LS); + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); + + /* Load ICV */ + append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 | + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); + + ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, + "aead null dec shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + return 0; +} + +static int aead_set_sh_desc(struct crypto_aead *aead) +{ + struct aead_tfm *tfm = &aead->base.crt_aead; + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct crypto_tfm *ctfm = crypto_aead_tfm(aead); + const char *alg_name = crypto_tfm_alg_name(ctfm); + struct device *jrdev = ctx->jrdev; + bool keys_fit_inline; + u32 geniv, moveiv; + u32 ctx1_iv_off = 0; + u32 *desc; + const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == + OP_ALG_AAI_CTR_MOD128); + const bool is_rfc3686 = (ctr_mode && + (strstr(alg_name, "rfc3686") != NULL)); + + if (!ctx->authsize) + return 0; + + /* NULL encryption / decryption */ + if (!ctx->enckeylen) + return aead_null_set_sh_desc(aead); + + /* + * AES-CTR needs to load IV in CONTEXT1 reg + * at an offset of 128bits (16bytes) + * CONTEXT1[255:128] = IV + */ + if (ctr_mode) + ctx1_iv_off = 16; + + /* + * RFC3686 specific: + * CONTEXT1[255:128] = {NONCE, IV, COUNTER} + */ + if (is_rfc3686) + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + keys_fit_inline = false; + if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN + + ctx->split_key_pad_len + ctx->enckeylen + + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= + CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + /* aead_encrypt shared descriptor */ + desc = ctx->sh_desc_enc; + + /* Note: Context registers are saved. 
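init_sh_desc_key_aead() requests this by setting HDR_SAVECTX in the shared descriptor header.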
*/ + init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); + + /* Class 2 operation */ + append_operation(desc, ctx->class2_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* cryptlen = seqoutlen - authsize */ + append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); + + /* assoclen + cryptlen = seqinlen - ivsize */ + append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); + + /* assoclen = (assoclen + cryptlen) - cryptlen */ + append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); + + /* read assoc before reading payload */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | + KEY_VLF); + aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off); + + /* Load Counter into CONTEXT1 reg */ + if (is_rfc3686) + append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | + LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << + LDST_OFFSET_SHIFT)); + + /* Class 1 operation */ + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* Read and write cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); + + /* Write ICV */ + append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + keys_fit_inline = false; + if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN + + ctx->split_key_pad_len + ctx->enckeylen + + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= + CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + /* aead_decrypt shared descriptor */ + desc = ctx->sh_desc_dec; + + /* Note: Context registers are saved. 
*/ + init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); + + /* Class 2 operation */ + append_operation(desc, ctx->class2_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); + + /* assoclen + cryptlen = seqinlen - ivsize - authsize */ + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, + ctx->authsize + tfm->ivsize); + /* assoclen = (assoclen + cryptlen) - cryptlen */ + append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); + append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); + + /* read assoc before reading payload */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | + KEY_VLF); + + aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off); + + /* Load Counter into CONTEXT1 reg */ + if (is_rfc3686) + append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | + LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << + LDST_OFFSET_SHIFT)); + + /* Choose operation */ + if (ctr_mode) + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT); + else + append_dec_op1(desc, ctx->class1_alg_type); + + /* Read and write cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); + aead_append_src_dst(desc, FIFOLD_TYPE_MSG); + + /* Load ICV */ + append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 | + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); + + ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + keys_fit_inline = false; + if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN + + ctx->split_key_pad_len + ctx->enckeylen + + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= + CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + /* aead_givencrypt shared descriptor */ + desc = ctx->sh_desc_givenc; + + /* Note: Context registers are saved. 
*/ + init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); + + /* Generate IV */ + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | + NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + append_move(desc, MOVE_WAITCOMP | + MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | + (ctx1_iv_off << MOVE_OFFSET_SHIFT) | + (tfm->ivsize << MOVE_LEN_SHIFT)); + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); + + /* Copy IV to class 1 context */ + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | + (ctx1_iv_off << MOVE_OFFSET_SHIFT) | + (tfm->ivsize << MOVE_LEN_SHIFT)); + + /* Return to encryption */ + append_operation(desc, ctx->class2_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* ivsize + cryptlen = seqoutlen - authsize */ + append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); + + /* assoclen = seqinlen - (ivsize + cryptlen) */ + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); + + /* read assoc before reading payload */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | + KEY_VLF); + + /* Copy iv from outfifo to class 2 fifo */ + moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 | + NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); + append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); + append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB | + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); + + /* Load Counter into CONTEXT1 reg */ + if (is_rfc3686) + append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | + LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << + LDST_OFFSET_SHIFT)); + + /* Class 1 operation */ + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* Will write ivsize + cryptlen */ + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* Not need to reload iv */ + append_seq_fifo_load(desc, tfm->ivsize, + FIFOLD_CLASS_SKIP); + + /* Will read cryptlen */ + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); + + /* Write ICV */ + append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + return 0; +} + +static int aead_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); + + ctx->authsize = authsize; + aead_set_sh_desc(authenc); + + return 0; +} + +static int gcm_set_sh_desc(struct crypto_aead *aead) +{ + struct aead_tfm *tfm = &aead->base.crt_aead; + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool keys_fit_inline = false; + u32 *key_jump_cmd, *zero_payload_jump_cmd, + *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2; + u32 *desc; + + if (!ctx->enckeylen || !ctx->authsize) + return 0; + + /* + * AES GCM encrypt shared descriptor + * Job Descriptor and Shared Descriptor + * must 
fit into the 64-word Descriptor h/w Buffer + */ + if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN + + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + desc = ctx->sh_desc_enc; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* skip key loading if they are loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD | JUMP_COND_SELF); + if (keys_fit_inline) + append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, + ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); + else + append_key(desc, ctx->key_dma, ctx->enckeylen, + CLASS_1 | KEY_DEST_CLASS_REG); + set_jump_tgt_here(desc, key_jump_cmd); + + /* class 1 operation */ + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* cryptlen = seqoutlen - authsize */ + append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); + + /* assoclen + cryptlen = seqinlen - ivsize */ + append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); + + /* assoclen = (assoclen + cryptlen) - cryptlen */ + append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ); + + /* if cryptlen is ZERO jump to zero-payload commands */ + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + /* read IV */ + append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + + /* if assoclen is ZERO, skip reading the assoc data */ + append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + + /* read assoc data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + set_jump_tgt_here(desc, zero_assoc_jump_cmd1); + + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* write encrypted data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); + + /* read payload data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); + + /* jump the zero-payload commands */ + append_jump(desc, JUMP_TEST_ALL | 7); + + /* zero-payload commands */ + set_jump_tgt_here(desc, zero_payload_jump_cmd); + + /* if assoclen is ZERO, jump to IV reading - is the only input data */ + append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); + zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + /* read IV */ + append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + + /* read assoc data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1); + + /* jump to ICV writing */ + append_jump(desc, JUMP_TEST_ALL | 2); + + /* read IV - is the only input data */ + set_jump_tgt_here(desc, zero_assoc_jump_cmd2); + append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | + FIFOLD_TYPE_LAST1); + + /* write ICV */ + append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + /* + * Job 
Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + keys_fit_inline = false; + if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN + + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + desc = ctx->sh_desc_dec; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* skip key loading if they are loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | + JUMP_TEST_ALL | JUMP_COND_SHRD | + JUMP_COND_SELF); + if (keys_fit_inline) + append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, + ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); + else + append_key(desc, ctx->key_dma, ctx->enckeylen, + CLASS_1 | KEY_DEST_CLASS_REG); + set_jump_tgt_here(desc, key_jump_cmd); + + /* class 1 operation */ + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); + + /* assoclen + cryptlen = seqinlen - ivsize - icvsize */ + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, + ctx->authsize + tfm->ivsize); + + /* assoclen = (assoclen + cryptlen) - cryptlen */ + append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); + append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ); + + /* read IV */ + append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + + /* jump to zero-payload command if cryptlen is zero */ + append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + + append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); + /* if asoclen is ZERO, skip reading assoc data */ + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + /* read assoc data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + set_jump_tgt_here(desc, zero_assoc_jump_cmd1); + + append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); + + /* store encrypted data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); + + /* read payload data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); + + /* jump the zero-payload commands */ + append_jump(desc, JUMP_TEST_ALL | 4); + + /* zero-payload command */ + set_jump_tgt_here(desc, zero_payload_jump_cmd); + + /* if assoclen is ZERO, jump to ICV reading */ + append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); + zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + /* read assoc data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + set_jump_tgt_here(desc, zero_assoc_jump_cmd2); + + /* read ICV */ + append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); + + ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + return 0; +} + +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); + + ctx->authsize = authsize; + gcm_set_sh_desc(authenc); + + return 0; +} + +static int rfc4106_set_sh_desc(struct crypto_aead *aead) +{ + struct aead_tfm *tfm = 
&aead->base.crt_aead; + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool keys_fit_inline = false; + u32 *key_jump_cmd, *move_cmd, *write_iv_cmd; + u32 *desc; + u32 geniv; + + if (!ctx->enckeylen || !ctx->authsize) + return 0; + + /* + * RFC4106 encrypt shared descriptor + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ + if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN + + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + desc = ctx->sh_desc_enc; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Skip key loading if it is loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + if (keys_fit_inline) + append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, + ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); + else + append_key(desc, ctx->key_dma, ctx->enckeylen, + CLASS_1 | KEY_DEST_CLASS_REG); + set_jump_tgt_here(desc, key_jump_cmd); + + /* Class 1 operation */ + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* cryptlen = seqoutlen - authsize */ + append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* assoclen + cryptlen = seqinlen - ivsize */ + append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); + + /* assoclen = (assoclen + cryptlen) - cryptlen */ + append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); + + /* Read Salt */ + append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen), + 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV); + /* Read AES-GCM-ESP IV */ + append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + + /* Read assoc data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + + /* Will read cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* Write encrypted data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); + + /* Read payload data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); + + /* Write ICV */ + append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + keys_fit_inline = false; + if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN + + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + desc = ctx->sh_desc_dec; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Skip key loading if it is loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | + JUMP_TEST_ALL | JUMP_COND_SHRD); + if (keys_fit_inline) + append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, + ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); + else + append_key(desc, ctx->key_dma, ctx->enckeylen, + CLASS_1 | KEY_DEST_CLASS_REG); + set_jump_tgt_here(desc, key_jump_cmd); + + /* Class 1 operation */ + append_operation(desc, ctx->class1_alg_type | 
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); + + /* assoclen + cryptlen = seqinlen - ivsize - icvsize */ + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, + ctx->authsize + tfm->ivsize); + + /* assoclen = (assoclen + cryptlen) - cryptlen */ + append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); + append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); + + /* Will write cryptlen bytes */ + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); + + /* Read Salt */ + append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen), + 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV); + /* Read AES-GCM-ESP IV */ + append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + + /* Read assoc data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + + /* Will read cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); + + /* Store payload data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); + + /* Read encrypted data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); + + /* Read ICV */ + append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); + + ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + keys_fit_inline = false; + if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN + + ctx->split_key_pad_len + ctx->enckeylen <= + CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + /* rfc4106_givencrypt shared descriptor */ + desc = ctx->sh_desc_givenc; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Skip key loading if it is loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + if (keys_fit_inline) + append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, + ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); + else + append_key(desc, ctx->key_dma, ctx->enckeylen, + CLASS_1 | KEY_DEST_CLASS_REG); + set_jump_tgt_here(desc, key_jump_cmd); + + /* Generate IV */ + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | + NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF | + (tfm->ivsize << MOVE_LEN_SHIFT)); + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); + + /* Copy generated IV to OFIFO */ + write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO | + (tfm->ivsize << MOVE_LEN_SHIFT)); + + /* Class 1 operation */ + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* ivsize + cryptlen = seqoutlen - authsize */ + append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); + + /* assoclen = seqinlen - (ivsize + cryptlen) */ + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); + + /* Will 
write ivsize + cryptlen */ + append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ); + + /* Read Salt and generated IV */ + append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | + FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12); + /* Append Salt */ + append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); + set_move_tgt_here(desc, move_cmd); + set_move_tgt_here(desc, write_iv_cmd); + /* Blank commands. Will be overwritten by generated IV. */ + append_cmd(desc, 0x00000000); + append_cmd(desc, 0x00000000); + /* End of blank commands */ + + /* No need to reload iv */ + append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP); + + /* Read assoc data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + + /* Will read cryptlen */ + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* Store generated IV and encrypted data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); + + /* Read payload data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); + + /* Write ICV */ + append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, + "rfc4106 givenc shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + return 0; +} + +static int rfc4106_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); + + ctx->authsize = authsize; + rfc4106_set_sh_desc(authenc); + + return 0; +} + +static int rfc4543_set_sh_desc(struct crypto_aead *aead) +{ + struct aead_tfm *tfm = &aead->base.crt_aead; + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool keys_fit_inline = false; + u32 *key_jump_cmd, *write_iv_cmd, *write_aad_cmd; + u32 *read_move_cmd, *write_move_cmd; + u32 *desc; + u32 geniv; + + if (!ctx->enckeylen || !ctx->authsize) + return 0; + + /* + * RFC4543 encrypt shared descriptor + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ + if (DESC_RFC4543_ENC_LEN + DESC_JOB_IO_LEN + + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + desc = ctx->sh_desc_enc; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Skip key loading if it is loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + if (keys_fit_inline) + append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, + ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); + else + append_key(desc, ctx->key_dma, ctx->enckeylen, + CLASS_1 | KEY_DEST_CLASS_REG); + set_jump_tgt_here(desc, key_jump_cmd); + + /* Class 1 operation */ + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* Load AES-GMAC ESP IV into Math1 register */ + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 | + LDST_CLASS_DECO | tfm->ivsize); + + /* Wait the DMA transaction to finish */ + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | + (1 << JUMP_OFFSET_SHIFT)); + + /* Overwrite blank immediate AES-GMAC ESP IV data */ + write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | + (tfm->ivsize << 
MOVE_LEN_SHIFT)); + + /* Overwrite blank immediate AAD data */ + write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | + (tfm->ivsize << MOVE_LEN_SHIFT)); + + /* cryptlen = seqoutlen - authsize */ + append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); + + /* assoclen = (seqinlen - ivsize) - cryptlen */ + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); + + /* Read Salt and AES-GMAC ESP IV */ + append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize)); + /* Append Salt */ + append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); + set_move_tgt_here(desc, write_iv_cmd); + /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */ + append_cmd(desc, 0x00000000); + append_cmd(desc, 0x00000000); + /* End of blank commands */ + + /* Read assoc data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD); + + /* Will read cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* Will write cryptlen bytes */ + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* + * MOVE_LEN opcode is not available in all SEC HW revisions, + * thus need to do some magic, i.e. self-patch the descriptor + * buffer. + */ + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | + (0x6 << MOVE_LEN_SHIFT)); + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | + (0x8 << MOVE_LEN_SHIFT)); + + /* Authenticate AES-GMAC ESP IV */ + append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | + FIFOLD_TYPE_AAD | tfm->ivsize); + set_move_tgt_here(desc, write_aad_cmd); + /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */ + append_cmd(desc, 0x00000000); + append_cmd(desc, 0x00000000); + /* End of blank commands */ + + /* Read and write cryptlen bytes */ + aead_append_src_dst(desc, FIFOLD_TYPE_AAD); + + set_move_tgt_here(desc, read_move_cmd); + set_move_tgt_here(desc, write_move_cmd); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + /* Move payload data to OFIFO */ + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); + + /* Write ICV */ + append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + keys_fit_inline = false; + if (DESC_RFC4543_DEC_LEN + DESC_JOB_IO_LEN + + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + desc = ctx->sh_desc_dec; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Skip key loading if it is loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | + JUMP_TEST_ALL | JUMP_COND_SHRD); + if (keys_fit_inline) + append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, + ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); + else + append_key(desc, ctx->key_dma, ctx->enckeylen, + CLASS_1 | KEY_DEST_CLASS_REG); + set_jump_tgt_here(desc, key_jump_cmd); + + /* Class 1 operation */ + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); + + /* Load AES-GMAC ESP 
IV into Math1 register */ + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 | + LDST_CLASS_DECO | tfm->ivsize); + + /* Wait the DMA transaction to finish */ + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | + (1 << JUMP_OFFSET_SHIFT)); + + /* assoclen + cryptlen = (seqinlen - ivsize) - icvsize */ + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize); + + /* Overwrite blank immediate AES-GMAC ESP IV data */ + write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | + (tfm->ivsize << MOVE_LEN_SHIFT)); + + /* Overwrite blank immediate AAD data */ + write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | + (tfm->ivsize << MOVE_LEN_SHIFT)); + + /* assoclen = (assoclen + cryptlen) - cryptlen */ + append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); + append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); + + /* + * MOVE_LEN opcode is not available in all SEC HW revisions, + * thus need to do some magic, i.e. self-patch the descriptor + * buffer. + */ + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | + (0x6 << MOVE_LEN_SHIFT)); + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | + (0x8 << MOVE_LEN_SHIFT)); + + /* Read Salt and AES-GMAC ESP IV */ + append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize)); + /* Append Salt */ + append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); + set_move_tgt_here(desc, write_iv_cmd); + /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */ + append_cmd(desc, 0x00000000); + append_cmd(desc, 0x00000000); + /* End of blank commands */ + + /* Read assoc data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD); + + /* Will read cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); + + /* Will write cryptlen bytes */ + append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); + + /* Authenticate AES-GMAC ESP IV */ + append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | + FIFOLD_TYPE_AAD | tfm->ivsize); + set_move_tgt_here(desc, write_aad_cmd); + /* Blank commands. Will be overwritten by AES-GMAC ESP IV. 
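The MOVE saved in write_aad_cmd above copies the IV from MATH1 over these two words at execution time.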
*/ + append_cmd(desc, 0x00000000); + append_cmd(desc, 0x00000000); + /* End of blank commands */ + + /* Store payload data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); + + /* In-snoop cryptlen data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1); + + set_move_tgt_here(desc, read_move_cmd); + set_move_tgt_here(desc, write_move_cmd); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + /* Move payload data to OFIFO */ + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); + + /* Read ICV */ + append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); + + ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + keys_fit_inline = false; + if (DESC_RFC4543_GIVENC_LEN + DESC_JOB_IO_LEN + + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + /* rfc4543_givencrypt shared descriptor */ + desc = ctx->sh_desc_givenc; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Skip key loading if it is loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + if (keys_fit_inline) + append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, + ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); + else + append_key(desc, ctx->key_dma, ctx->enckeylen, + CLASS_1 | KEY_DEST_CLASS_REG); + set_jump_tgt_here(desc, key_jump_cmd); + + /* Generate IV */ + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | + NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + /* Move generated IV to Math1 register */ + append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 | + (tfm->ivsize << MOVE_LEN_SHIFT)); + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); + + /* Overwrite blank immediate AES-GMAC IV data */ + write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | + (tfm->ivsize << MOVE_LEN_SHIFT)); + + /* Overwrite blank immediate AAD data */ + write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | + (tfm->ivsize << MOVE_LEN_SHIFT)); + + /* Copy generated IV to OFIFO */ + append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO | + (tfm->ivsize << MOVE_LEN_SHIFT)); + + /* Class 1 operation */ + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* ivsize + cryptlen = seqoutlen - authsize */ + append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); + + /* assoclen = seqinlen - (ivsize + cryptlen) */ + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); + + /* Will write ivsize + cryptlen */ + append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ); + + /* + * MOVE_LEN opcode is not available in all SEC HW revisions, + * thus need to do some magic, i.e. self-patch the descriptor + * buffer. 
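+ * This is the same self-patching workaround used in the rfc4543 encrypt and decrypt descriptors above.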
+ */ + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | + (0x6 << MOVE_LEN_SHIFT)); + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | + (0x8 << MOVE_LEN_SHIFT)); + + /* Read Salt and AES-GMAC generated IV */ + append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize)); + /* Append Salt */ + append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); + set_move_tgt_here(desc, write_iv_cmd); + /* Blank commands. Will be overwritten by AES-GMAC generated IV. */ + append_cmd(desc, 0x00000000); + append_cmd(desc, 0x00000000); + /* End of blank commands */ + + /* No need to reload iv */ + append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP); + + /* Read assoc data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD); + + /* Will read cryptlen */ + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* Authenticate AES-GMAC IV */ + append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | + FIFOLD_TYPE_AAD | tfm->ivsize); + set_move_tgt_here(desc, write_aad_cmd); + /* Blank commands. Will be overwritten by AES-GMAC IV. */ + append_cmd(desc, 0x00000000); + append_cmd(desc, 0x00000000); + /* End of blank commands */ + + /* Read and write cryptlen bytes */ + aead_append_src_dst(desc, FIFOLD_TYPE_AAD); + + set_move_tgt_here(desc, read_move_cmd); + set_move_tgt_here(desc, write_move_cmd); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + /* Move payload data to OFIFO */ + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); + + /* Write ICV */ + append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, + "rfc4543 givenc shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + return 0; +} + +static int rfc4543_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); + + ctx->authsize = authsize; + rfc4543_set_sh_desc(authenc); + + return 0; +} + +static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in, + u32 authkeylen) +{ + return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, + ctx->split_key_pad_len, key_in, authkeylen, + ctx->alg_op); +} + +static int aead_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + struct crypto_authenc_keys keys; + int ret = 0; + + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) + goto badkey; + + /* Pick class 2 key length from algorithm submask */ + ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> + OP_ALG_ALGSEL_SHIFT] * 2; + ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); + + if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE) + goto badkey; + +#ifdef DEBUG + printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n", + keys.authkeylen + keys.enckeylen, keys.enckeylen, + keys.authkeylen); + printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", + 
ctx->split_key_len, ctx->split_key_pad_len); + print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); +#endif + + ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen); + if (ret) { + goto badkey; + } + + /* postpend encryption key to auth split key */ + memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen); + + ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len + + keys.enckeylen, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->key_dma)) { + dev_err(jrdev, "unable to map key i/o memory\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, + ctx->split_key_pad_len + keys.enckeylen, 1); +#endif + + ctx->enckeylen = keys.enckeylen; + + ret = aead_set_sh_desc(aead); + if (ret) { + dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len + + keys.enckeylen, DMA_TO_DEVICE); + } + + return ret; +badkey: + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; +} + +static int gcm_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + int ret = 0; + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); +#endif + + memcpy(ctx->key, key, keylen); + ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->key_dma)) { + dev_err(jrdev, "unable to map key i/o memory\n"); + return -ENOMEM; + } + ctx->enckeylen = keylen; + + ret = gcm_set_sh_desc(aead); + if (ret) { + dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen, + DMA_TO_DEVICE); + } + + return ret; +} + +static int rfc4106_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + int ret = 0; + + if (keylen < 4) + return -EINVAL; + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); +#endif + + memcpy(ctx->key, key, keylen); + + /* + * The last four bytes of the key material are used as the salt value + * in the nonce. Update the AES key length. + */ + ctx->enckeylen = keylen - 4; + + ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->key_dma)) { + dev_err(jrdev, "unable to map key i/o memory\n"); + return -ENOMEM; + } + + ret = rfc4106_set_sh_desc(aead); + if (ret) { + dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen, + DMA_TO_DEVICE); + } + + return ret; +} + +static int rfc4543_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + int ret = 0; + + if (keylen < 4) + return -EINVAL; + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); +#endif + + memcpy(ctx->key, key, keylen); + + /* + * The last four bytes of the key material are used as the salt value + * in the nonce. Update the AES key length. 
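+ * (As with rfc4106, the key supplied through the crypto API is the AES key followed by a 4-byte salt, which is why keylen must be at least 4.)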
+ */ + ctx->enckeylen = keylen - 4; + + ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->key_dma)) { + dev_err(jrdev, "unable to map key i/o memory\n"); + return -ENOMEM; + } + + ret = rfc4543_set_sh_desc(aead); + if (ret) { + dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen, + DMA_TO_DEVICE); + } + + return ret; +} + +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, + const u8 *key, unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher; + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher); + const char *alg_name = crypto_tfm_alg_name(tfm); + struct device *jrdev = ctx->jrdev; + int ret = 0; + u32 *key_jump_cmd; + u32 *desc; + u32 *nonce; + u32 geniv; + u32 ctx1_iv_off = 0; + const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == + OP_ALG_AAI_CTR_MOD128); + const bool is_rfc3686 = (ctr_mode && + (strstr(alg_name, "rfc3686") != NULL)); + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); +#endif + /* + * AES-CTR needs to load IV in CONTEXT1 reg + * at an offset of 128bits (16bytes) + * CONTEXT1[255:128] = IV + */ + if (ctr_mode) + ctx1_iv_off = 16; + + /* + * RFC3686 specific: + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} + * | *key = {KEY, NONCE} + */ + if (is_rfc3686) { + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; + keylen -= CTR_RFC3686_NONCE_SIZE; + } + + memcpy(ctx->key, key, keylen); + ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->key_dma)) { + dev_err(jrdev, "unable to map key i/o memory\n"); + return -ENOMEM; + } + ctx->enckeylen = keylen; + + /* ablkcipher_encrypt shared descriptor */ + desc = ctx->sh_desc_enc; + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + + /* Load class1 key only */ + append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, + ctx->enckeylen, CLASS_1 | + KEY_DEST_CLASS_REG); + + /* Load nonce into CONTEXT1 reg */ + if (is_rfc3686) { + nonce = (u32 *)(key + keylen); + append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); + append_move(desc, MOVE_WAITCOMP | + MOVE_SRC_OUTFIFO | + MOVE_DEST_CLASS1CTX | + (16 << MOVE_OFFSET_SHIFT) | + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); + } + + set_jump_tgt_here(desc, key_jump_cmd); + + /* Load iv */ + append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); + + /* Load counter into CONTEXT1 reg */ + if (is_rfc3686) + append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | + LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << + LDST_OFFSET_SHIFT)); + + /* Load operation */ + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* Perform operation */ + ablkcipher_append_src_dst(desc); + + ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, + "ablkcipher enc shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + /* ablkcipher_decrypt shared 
descriptor */ + desc = ctx->sh_desc_dec; + + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + + /* Load class1 key only */ + append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, + ctx->enckeylen, CLASS_1 | + KEY_DEST_CLASS_REG); + + /* Load nonce into CONTEXT1 reg */ + if (is_rfc3686) { + nonce = (u32 *)(key + keylen); + append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); + append_move(desc, MOVE_WAITCOMP | + MOVE_SRC_OUTFIFO | + MOVE_DEST_CLASS1CTX | + (16 << MOVE_OFFSET_SHIFT) | + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); + } + + set_jump_tgt_here(desc, key_jump_cmd); + + /* load IV */ + append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); + + /* Load counter into CONTEXT1 reg */ + if (is_rfc3686) + append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | + LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << + LDST_OFFSET_SHIFT)); + + /* Choose operation */ + if (ctr_mode) + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT); + else + append_dec_op1(desc, ctx->class1_alg_type); + + /* Perform operation */ + ablkcipher_append_src_dst(desc); + + ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } + +#ifdef DEBUG + print_hex_dump(KERN_ERR, + "ablkcipher dec shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + /* ablkcipher_givencrypt shared descriptor */ + desc = ctx->sh_desc_givenc; + + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + + /* Load class1 key only */ + append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, + ctx->enckeylen, CLASS_1 | + KEY_DEST_CLASS_REG); + + /* Load Nonce into CONTEXT1 reg */ + if (is_rfc3686) { + nonce = (u32 *)(key + keylen); + append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); + append_move(desc, MOVE_WAITCOMP | + MOVE_SRC_OUTFIFO | + MOVE_DEST_CLASS1CTX | + (16 << MOVE_OFFSET_SHIFT) | + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); + } + set_jump_tgt_here(desc, key_jump_cmd); + + /* Generate IV */ + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | + NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT); + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + append_move(desc, MOVE_WAITCOMP | + MOVE_SRC_INFIFO | + MOVE_DEST_CLASS1CTX | + (crt->ivsize << MOVE_LEN_SHIFT) | + (ctx1_iv_off << MOVE_OFFSET_SHIFT)); + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); + + /* Copy generated IV to memory */ + append_seq_store(desc, crt->ivsize, + LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | + (ctx1_iv_off << LDST_OFFSET_SHIFT)); + + /* Load Counter into CONTEXT1 reg */ + if (is_rfc3686) + append_load_imm_u32(desc, (u32)1, LDST_IMM | + LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << + LDST_OFFSET_SHIFT)); + + if (ctx1_iv_off) + append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP | + (1 << 
JUMP_OFFSET_SHIFT)); + + /* Load operation */ + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* Perform operation */ + ablkcipher_append_src_dst(desc); + + ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, + "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + return ret; +} + +/* + * aead_edesc - s/w-extended aead descriptor + * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist + * @assoc_chained: if source is chained + * @src_nents: number of segments in input scatterlist + * @src_chained: if source is chained + * @dst_nents: number of segments in output scatterlist + * @dst_chained: if destination is chained + * @iv_dma: dma address of iv for checking continuity and link table + * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) + * @sec4_sg_bytes: length of dma mapped sec4_sg space + * @sec4_sg_dma: bus physical mapped address of h/w link table + * @hw_desc: the h/w job descriptor followed by any referenced link tables + */ +struct aead_edesc { + int assoc_nents; + bool assoc_chained; + int src_nents; + bool src_chained; + int dst_nents; + bool dst_chained; + dma_addr_t iv_dma; + int sec4_sg_bytes; + dma_addr_t sec4_sg_dma; + struct sec4_sg_entry *sec4_sg; + u32 hw_desc[0]; +}; + +/* + * ablkcipher_edesc - s/w-extended ablkcipher descriptor + * @src_nents: number of segments in input scatterlist + * @src_chained: if source is chained + * @dst_nents: number of segments in output scatterlist + * @dst_chained: if destination is chained + * @iv_dma: dma address of iv for checking continuity and link table + * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) + * @sec4_sg_bytes: length of dma mapped sec4_sg space + * @sec4_sg_dma: bus physical mapped address of h/w link table + * @hw_desc: the h/w job descriptor followed by any referenced link tables + */ +struct ablkcipher_edesc { + int src_nents; + bool src_chained; + int dst_nents; + bool dst_chained; + dma_addr_t iv_dma; + int sec4_sg_bytes; + dma_addr_t sec4_sg_dma; + struct sec4_sg_entry *sec4_sg; + u32 hw_desc[0]; +}; + +static void caam_unmap(struct device *dev, struct scatterlist *src, + struct scatterlist *dst, int src_nents, + bool src_chained, int dst_nents, bool dst_chained, + dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma, + int sec4_sg_bytes) +{ + if (dst != src) { + dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE, + src_chained); + dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE, + dst_chained); + } else { + dma_unmap_sg_chained(dev, src, src_nents ? 
: 1, + DMA_BIDIRECTIONAL, src_chained); + } + + if (iv_dma) + dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); + if (sec4_sg_bytes) + dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, + DMA_TO_DEVICE); +} + +static void aead_unmap(struct device *dev, + struct aead_edesc *edesc, + struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + int ivsize = crypto_aead_ivsize(aead); + + dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents, + DMA_TO_DEVICE, edesc->assoc_chained); + + caam_unmap(dev, req->src, req->dst, + edesc->src_nents, edesc->src_chained, edesc->dst_nents, + edesc->dst_chained, edesc->iv_dma, ivsize, + edesc->sec4_sg_dma, edesc->sec4_sg_bytes); +} + +static void ablkcipher_unmap(struct device *dev, + struct ablkcipher_edesc *edesc, + struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + + caam_unmap(dev, req->src, req->dst, + edesc->src_nents, edesc->src_chained, edesc->dst_nents, + edesc->dst_chained, edesc->iv_dma, ivsize, + edesc->sec4_sg_dma, edesc->sec4_sg_bytes); +} + +static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct aead_request *req = context; + struct aead_edesc *edesc; +#ifdef DEBUG + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + int ivsize = crypto_aead_ivsize(aead); + + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); +#endif + + edesc = (struct aead_edesc *)((char *)desc - + offsetof(struct aead_edesc, hw_desc)); + + if (err) + caam_jr_strstatus(jrdev, err); + + aead_unmap(jrdev, edesc, req); + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), + req->assoclen , 1); + print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize, + edesc->src_nents ? 100 : ivsize, 1); + print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + edesc->src_nents ? 100 : req->cryptlen + + ctx->authsize + 4, 1); +#endif + + kfree(edesc); + + aead_request_complete(req, err); +} + +static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct aead_request *req = context; + struct aead_edesc *edesc; +#ifdef DEBUG + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + int ivsize = crypto_aead_ivsize(aead); + + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); +#endif + + edesc = (struct aead_edesc *)((char *)desc - + offsetof(struct aead_edesc, hw_desc)); + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, + ivsize, 1); + print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), + req->cryptlen - ctx->authsize, 1); +#endif + + if (err) + caam_jr_strstatus(jrdev, err); + + aead_unmap(jrdev, edesc, req); + + /* + * verify hw auth check passed else return -EBADMSG + */ + if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) + err = -EBADMSG; + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, + ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)), + sizeof(struct iphdr) + req->assoclen + + ((req->cryptlen > 1500) ? 
1500 : req->cryptlen) + + ctx->authsize + 36, 1); + if (!err && edesc->sec4_sg_bytes) { + struct scatterlist *sg = sg_last(req->src, edesc->src_nents); + print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), + sg->length + ctx->authsize + 16, 1); + } +#endif + + kfree(edesc); + + aead_request_complete(req, err); +} + +static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct ablkcipher_request *req = context; + struct ablkcipher_edesc *edesc; +#ifdef DEBUG + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); +#endif + + edesc = (struct ablkcipher_edesc *)((char *)desc - + offsetof(struct ablkcipher_edesc, hw_desc)); + + if (err) + caam_jr_strstatus(jrdev, err); + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->info, + edesc->src_nents > 1 ? 100 : ivsize, 1); + print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + edesc->dst_nents > 1 ? 100 : req->nbytes, 1); +#endif + + ablkcipher_unmap(jrdev, edesc, req); + kfree(edesc); + + ablkcipher_request_complete(req, err); +} + +static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct ablkcipher_request *req = context; + struct ablkcipher_edesc *edesc; +#ifdef DEBUG + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); +#endif + + edesc = (struct ablkcipher_edesc *)((char *)desc - + offsetof(struct ablkcipher_edesc, hw_desc)); + if (err) + caam_jr_strstatus(jrdev, err); + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->info, + ivsize, 1); + print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + edesc->dst_nents > 1 ? 100 : req->nbytes, 1); +#endif + + ablkcipher_unmap(jrdev, edesc, req); + kfree(edesc); + + ablkcipher_request_complete(req, err); +} + +/* + * Fill in aead job descriptor + */ +static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, + struct aead_edesc *edesc, + struct aead_request *req, + bool all_contig, bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + int ivsize = crypto_aead_ivsize(aead); + int authsize = ctx->authsize; + u32 *desc = edesc->hw_desc; + u32 out_options = 0, in_options; + dma_addr_t dst_dma, src_dma; + int len, sec4_sg_index = 0; + bool is_gcm = false; + +#ifdef DEBUG + debug("assoclen %d cryptlen %d authsize %d\n", + req->assoclen, req->cryptlen, authsize); + print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), + req->assoclen , 1); + print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, + edesc->src_nents ? 100 : ivsize, 1); + print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + edesc->src_nents ? 
100 : req->cryptlen, 1); + print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, + desc_bytes(sh_desc), 1); +#endif + + if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) == + OP_ALG_ALGSEL_AES) && + ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM)) + is_gcm = true; + + len = desc_len(sh_desc); + init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); + + if (all_contig) { + if (is_gcm) + src_dma = edesc->iv_dma; + else + src_dma = sg_dma_address(req->assoc); + in_options = 0; + } else { + src_dma = edesc->sec4_sg_dma; + sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 + + (edesc->src_nents ? : 1); + in_options = LDST_SGF; + } + + append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen, + in_options); + + if (likely(req->src == req->dst)) { + if (all_contig) { + dst_dma = sg_dma_address(req->src); + } else { + dst_dma = src_dma + sizeof(struct sec4_sg_entry) * + ((edesc->assoc_nents ? : 1) + 1); + out_options = LDST_SGF; + } + } else { + if (!edesc->dst_nents) { + dst_dma = sg_dma_address(req->dst); + } else { + dst_dma = edesc->sec4_sg_dma + + sec4_sg_index * + sizeof(struct sec4_sg_entry); + out_options = LDST_SGF; + } + } + if (encrypt) + append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize, + out_options); + else + append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize, + out_options); +} + +/* + * Fill in aead givencrypt job descriptor + */ +static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, + struct aead_edesc *edesc, + struct aead_request *req, + int contig) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + int ivsize = crypto_aead_ivsize(aead); + int authsize = ctx->authsize; + u32 *desc = edesc->hw_desc; + u32 out_options = 0, in_options; + dma_addr_t dst_dma, src_dma; + int len, sec4_sg_index = 0; + bool is_gcm = false; + +#ifdef DEBUG + debug("assoclen %d cryptlen %d authsize %d\n", + req->assoclen, req->cryptlen, authsize); + print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), + req->assoclen , 1); + print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); + print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + edesc->src_nents > 1 ? 100 : req->cryptlen, 1); + print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, + desc_bytes(sh_desc), 1); +#endif + + if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) == + OP_ALG_ALGSEL_AES) && + ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM)) + is_gcm = true; + + len = desc_len(sh_desc); + init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); + + if (contig & GIV_SRC_CONTIG) { + if (is_gcm) + src_dma = edesc->iv_dma; + else + src_dma = sg_dma_address(req->assoc); + in_options = 0; + } else { + src_dma = edesc->sec4_sg_dma; + sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents; + in_options = LDST_SGF; + } + append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen, + in_options); + + if (contig & GIV_DST_CONTIG) { + dst_dma = edesc->iv_dma; + } else { + if (likely(req->src == req->dst)) { + dst_dma = src_dma + sizeof(struct sec4_sg_entry) * + (edesc->assoc_nents + + (is_gcm ? 
1 + edesc->src_nents : 0)); + out_options = LDST_SGF; + } else { + dst_dma = edesc->sec4_sg_dma + + sec4_sg_index * + sizeof(struct sec4_sg_entry); + out_options = LDST_SGF; + } + } + + append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize, + out_options); +} + +/* + * Fill in ablkcipher job descriptor + */ +static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, + struct ablkcipher_edesc *edesc, + struct ablkcipher_request *req, + bool iv_contig) +{ + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + u32 *desc = edesc->hw_desc; + u32 out_options = 0, in_options; + dma_addr_t dst_dma, src_dma; + int len, sec4_sg_index = 0; + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->info, + ivsize, 1); + print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + edesc->src_nents ? 100 : req->nbytes, 1); +#endif + + len = desc_len(sh_desc); + init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); + + if (iv_contig) { + src_dma = edesc->iv_dma; + in_options = 0; + } else { + src_dma = edesc->sec4_sg_dma; + sec4_sg_index += edesc->src_nents + 1; + in_options = LDST_SGF; + } + append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); + + if (likely(req->src == req->dst)) { + if (!edesc->src_nents && iv_contig) { + dst_dma = sg_dma_address(req->src); + } else { + dst_dma = edesc->sec4_sg_dma + + sizeof(struct sec4_sg_entry); + out_options = LDST_SGF; + } + } else { + if (!edesc->dst_nents) { + dst_dma = sg_dma_address(req->dst); + } else { + dst_dma = edesc->sec4_sg_dma + + sec4_sg_index * sizeof(struct sec4_sg_entry); + out_options = LDST_SGF; + } + } + append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options); +} + +/* + * Fill in ablkcipher givencrypt job descriptor + */ +static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, + struct ablkcipher_edesc *edesc, + struct ablkcipher_request *req, + bool iv_contig) +{ + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + u32 *desc = edesc->hw_desc; + u32 out_options, in_options; + dma_addr_t dst_dma, src_dma; + int len, sec4_sg_index = 0; + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->info, + ivsize, 1); + print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + edesc->src_nents ? 
100 : req->nbytes, 1); +#endif + + len = desc_len(sh_desc); + init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); + + if (!edesc->src_nents) { + src_dma = sg_dma_address(req->src); + in_options = 0; + } else { + src_dma = edesc->sec4_sg_dma; + sec4_sg_index += edesc->src_nents; + in_options = LDST_SGF; + } + append_seq_in_ptr(desc, src_dma, req->nbytes, in_options); + + if (iv_contig) { + dst_dma = edesc->iv_dma; + out_options = 0; + } else { + dst_dma = edesc->sec4_sg_dma + + sec4_sg_index * sizeof(struct sec4_sg_entry); + out_options = LDST_SGF; + } + append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options); +} + +/* + * allocate and map the aead extended descriptor + */ +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, + int desc_bytes, bool *all_contig_ptr, + bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; + int assoc_nents, src_nents, dst_nents = 0; + struct aead_edesc *edesc; + dma_addr_t iv_dma = 0; + int sgc; + bool all_contig = true; + bool assoc_chained = false, src_chained = false, dst_chained = false; + int ivsize = crypto_aead_ivsize(aead); + int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; + unsigned int authsize = ctx->authsize; + bool is_gcm = false; + + assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); + + if (unlikely(req->dst != req->src)) { + src_nents = sg_count(req->src, req->cryptlen, &src_chained); + dst_nents = sg_count(req->dst, + req->cryptlen + + (encrypt ? authsize : (-authsize)), + &dst_chained); + } else { + src_nents = sg_count(req->src, + req->cryptlen + + (encrypt ? authsize : 0), + &src_chained); + } + + sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, + DMA_TO_DEVICE, assoc_chained); + if (likely(req->src == req->dst)) { + sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, + DMA_BIDIRECTIONAL, src_chained); + } else { + sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, + DMA_TO_DEVICE, src_chained); + sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, + DMA_FROM_DEVICE, dst_chained); + } + + iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); + return ERR_PTR(-ENOMEM); + } + + if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) == + OP_ALG_ALGSEL_AES) && + ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM)) + is_gcm = true; + + /* + * Check if data are contiguous. + * GCM expected input sequence: IV, AAD, text + * All other - expected input sequence: AAD, IV, text + */ + if (is_gcm) + all_contig = (!assoc_nents && + iv_dma + ivsize == sg_dma_address(req->assoc) && + !src_nents && sg_dma_address(req->assoc) + + req->assoclen == sg_dma_address(req->src)); + else + all_contig = (!assoc_nents && sg_dma_address(req->assoc) + + req->assoclen == iv_dma && !src_nents && + iv_dma + ivsize == sg_dma_address(req->src)); + if (!all_contig) { + assoc_nents = assoc_nents ? : 1; + src_nents = src_nents ? 
: 1; + sec4_sg_len = assoc_nents + 1 + src_nents; + } + + sec4_sg_len += dst_nents; + + sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + + sec4_sg_bytes, GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, "could not allocate extended descriptor\n"); + return ERR_PTR(-ENOMEM); + } + + edesc->assoc_nents = assoc_nents; + edesc->assoc_chained = assoc_chained; + edesc->src_nents = src_nents; + edesc->src_chained = src_chained; + edesc->dst_nents = dst_nents; + edesc->dst_chained = dst_chained; + edesc->iv_dma = iv_dma; + edesc->sec4_sg_bytes = sec4_sg_bytes; + edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + + desc_bytes; + *all_contig_ptr = all_contig; + + sec4_sg_index = 0; + if (!all_contig) { + if (!is_gcm) { + sg_to_sec4_sg(req->assoc, + assoc_nents, + edesc->sec4_sg + + sec4_sg_index, 0); + sec4_sg_index += assoc_nents; + } + + dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, + iv_dma, ivsize, 0); + sec4_sg_index += 1; + + if (is_gcm) { + sg_to_sec4_sg(req->assoc, + assoc_nents, + edesc->sec4_sg + + sec4_sg_index, 0); + sec4_sg_index += assoc_nents; + } + + sg_to_sec4_sg_last(req->src, + src_nents, + edesc->sec4_sg + + sec4_sg_index, 0); + sec4_sg_index += src_nents; + } + if (dst_nents) { + sg_to_sec4_sg_last(req->dst, dst_nents, + edesc->sec4_sg + sec4_sg_index, 0); + } + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return ERR_PTR(-ENOMEM); + } + + return edesc; +} + +static int aead_encrypt(struct aead_request *req) +{ + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool all_contig; + u32 *desc; + int ret = 0; + + /* allocate extended descriptor */ + edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * + CAAM_CMD_SZ, &all_contig, true); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Create and submit job descriptor */ + init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, + all_contig, true); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); +#endif + + desc = edesc->hw_desc; + ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + aead_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + +static int aead_decrypt(struct aead_request *req) +{ + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool all_contig; + u32 *desc; + int ret = 0; + + /* allocate extended descriptor */ + edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * + CAAM_CMD_SZ, &all_contig, false); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + req->cryptlen, 1); +#endif + + /* Create and submit job descriptor*/ + init_aead_job(ctx->sh_desc_dec, + ctx->sh_desc_dec_dma, edesc, req, all_contig, false); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); +#endif + 
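/*
 * Illustrative note (an editorial sketch, not text from the imported file):
 * aead_edesc_alloc() above only skips building a sec4 S/G table when the
 * DMA-mapped associated data, IV and payload are single segments laid out
 * back to back.  Assuming one-segment scatterlists with bus addresses
 * aad, iv and txt, the accepted layouts reduce to a plain address check,
 * sketched here with a hypothetical helper:
 *
 *	GCM:        iv  + ivsize   == aad  and  aad + assoclen == txt
 *	all others: aad + assoclen == iv   and  iv  + ivsize   == txt
 *
 *	static bool caam_layout_is_contig(bool is_gcm, dma_addr_t aad,
 *					  unsigned int assoclen, dma_addr_t iv,
 *					  unsigned int ivsize, dma_addr_t txt)
 *	{
 *		if (is_gcm)
 *			return iv + ivsize == aad && aad + assoclen == txt;
 *		return aad + assoclen == iv && iv + ivsize == txt;
 *	}
 */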
+ desc = edesc->hw_desc; + ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + aead_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + +/* + * allocate and map the aead extended descriptor for aead givencrypt + */ +static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request + *greq, int desc_bytes, + u32 *contig_ptr) +{ + struct aead_request *req = &greq->areq; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; + int assoc_nents, src_nents, dst_nents = 0; + struct aead_edesc *edesc; + dma_addr_t iv_dma = 0; + int sgc; + u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG; + int ivsize = crypto_aead_ivsize(aead); + bool assoc_chained = false, src_chained = false, dst_chained = false; + int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; + bool is_gcm = false; + + assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); + src_nents = sg_count(req->src, req->cryptlen, &src_chained); + + if (unlikely(req->dst != req->src)) + dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize, + &dst_chained); + + sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, + DMA_TO_DEVICE, assoc_chained); + if (likely(req->src == req->dst)) { + sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, + DMA_BIDIRECTIONAL, src_chained); + } else { + sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, + DMA_TO_DEVICE, src_chained); + sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, + DMA_FROM_DEVICE, dst_chained); + } + + iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); + return ERR_PTR(-ENOMEM); + } + + if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) == + OP_ALG_ALGSEL_AES) && + ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM)) + is_gcm = true; + + /* + * Check if data are contiguous. + * GCM expected input sequence: IV, AAD, text + * All other - expected input sequence: AAD, IV, text + */ + + if (is_gcm) { + if (assoc_nents || iv_dma + ivsize != + sg_dma_address(req->assoc) || src_nents || + sg_dma_address(req->assoc) + req->assoclen != + sg_dma_address(req->src)) + contig &= ~GIV_SRC_CONTIG; + } else { + if (assoc_nents || + sg_dma_address(req->assoc) + req->assoclen != iv_dma || + src_nents || iv_dma + ivsize != sg_dma_address(req->src)) + contig &= ~GIV_SRC_CONTIG; + } + + if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst)) + contig &= ~GIV_DST_CONTIG; + + if (!(contig & GIV_SRC_CONTIG)) { + assoc_nents = assoc_nents ? : 1; + src_nents = src_nents ? : 1; + sec4_sg_len += assoc_nents + 1 + src_nents; + if (req->src == req->dst && + (src_nents || iv_dma + ivsize != sg_dma_address(req->src))) + contig &= ~GIV_DST_CONTIG; + } + + /* + * Add new sg entries for GCM output sequence. + * Expected output sequence: IV, encrypted text. + */ + if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) + sec4_sg_len += 1 + src_nents; + + if (unlikely(req->src != req->dst)) { + dst_nents = dst_nents ? 
: 1; + sec4_sg_len += 1 + dst_nents; + } + + sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + + sec4_sg_bytes, GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, "could not allocate extended descriptor\n"); + return ERR_PTR(-ENOMEM); + } + + edesc->assoc_nents = assoc_nents; + edesc->assoc_chained = assoc_chained; + edesc->src_nents = src_nents; + edesc->src_chained = src_chained; + edesc->dst_nents = dst_nents; + edesc->dst_chained = dst_chained; + edesc->iv_dma = iv_dma; + edesc->sec4_sg_bytes = sec4_sg_bytes; + edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + + desc_bytes; + *contig_ptr = contig; + + sec4_sg_index = 0; + if (!(contig & GIV_SRC_CONTIG)) { + if (!is_gcm) { + sg_to_sec4_sg(req->assoc, assoc_nents, + edesc->sec4_sg + sec4_sg_index, 0); + sec4_sg_index += assoc_nents; + } + + dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, + iv_dma, ivsize, 0); + sec4_sg_index += 1; + + if (is_gcm) { + sg_to_sec4_sg(req->assoc, assoc_nents, + edesc->sec4_sg + sec4_sg_index, 0); + sec4_sg_index += assoc_nents; + } + + sg_to_sec4_sg_last(req->src, src_nents, + edesc->sec4_sg + + sec4_sg_index, 0); + sec4_sg_index += src_nents; + } + + if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) { + dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, + iv_dma, ivsize, 0); + sec4_sg_index += 1; + sg_to_sec4_sg_last(req->src, src_nents, + edesc->sec4_sg + sec4_sg_index, 0); + } + + if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { + dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, + iv_dma, ivsize, 0); + sec4_sg_index += 1; + sg_to_sec4_sg_last(req->dst, dst_nents, + edesc->sec4_sg + sec4_sg_index, 0); + } + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return ERR_PTR(-ENOMEM); + } + + return edesc; +} + +static int aead_givencrypt(struct aead_givcrypt_request *areq) +{ + struct aead_request *req = &areq->areq; + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + u32 contig; + u32 *desc; + int ret = 0; + + /* allocate extended descriptor */ + edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * + CAAM_CMD_SZ, &contig); + + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + req->cryptlen, 1); +#endif + + /* Create and submit job descriptor*/ + init_aead_giv_job(ctx->sh_desc_givenc, + ctx->sh_desc_givenc_dma, edesc, req, contig); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); +#endif + + desc = edesc->hw_desc; + ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + aead_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + +static int aead_null_givencrypt(struct aead_givcrypt_request *areq) +{ + return aead_encrypt(&areq->areq); +} + +/* + * allocate and map the ablkcipher extended descriptor for ablkcipher + */ +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request + *req, int desc_bytes, + bool *iv_contig_out) +{ + struct 
crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? + GFP_KERNEL : GFP_ATOMIC; + int src_nents, dst_nents = 0, sec4_sg_bytes; + struct ablkcipher_edesc *edesc; + dma_addr_t iv_dma = 0; + bool iv_contig = false; + int sgc; + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + bool src_chained = false, dst_chained = false; + int sec4_sg_index; + + src_nents = sg_count(req->src, req->nbytes, &src_chained); + + if (req->dst != req->src) + dst_nents = sg_count(req->dst, req->nbytes, &dst_chained); + + if (likely(req->src == req->dst)) { + sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, + DMA_BIDIRECTIONAL, src_chained); + } else { + sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, + DMA_TO_DEVICE, src_chained); + sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, + DMA_FROM_DEVICE, dst_chained); + } + + iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); + return ERR_PTR(-ENOMEM); + } + + /* + * Check if iv can be contiguous with source and destination. + * If so, include it. If not, create scatterlist. + */ + if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src)) + iv_contig = true; + else + src_nents = src_nents ? : 1; + sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) * + sizeof(struct sec4_sg_entry); + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + + sec4_sg_bytes, GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, "could not allocate extended descriptor\n"); + return ERR_PTR(-ENOMEM); + } + + edesc->src_nents = src_nents; + edesc->src_chained = src_chained; + edesc->dst_nents = dst_nents; + edesc->dst_chained = dst_chained; + edesc->sec4_sg_bytes = sec4_sg_bytes; + edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + + desc_bytes; + + sec4_sg_index = 0; + if (!iv_contig) { + dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); + sg_to_sec4_sg_last(req->src, src_nents, + edesc->sec4_sg + 1, 0); + sec4_sg_index += 1 + src_nents; + } + + if (dst_nents) { + sg_to_sec4_sg_last(req->dst, dst_nents, + edesc->sec4_sg + sec4_sg_index, 0); + } + + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return ERR_PTR(-ENOMEM); + } + + edesc->iv_dma = iv_dma; + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, + sec4_sg_bytes, 1); +#endif + + *iv_contig_out = iv_contig; + return edesc; +} + +static int ablkcipher_encrypt(struct ablkcipher_request *req) +{ + struct ablkcipher_edesc *edesc; + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + struct device *jrdev = ctx->jrdev; + bool iv_contig; + u32 *desc; + int ret = 0; + + /* allocate extended descriptor */ + edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * + CAAM_CMD_SZ, &iv_contig); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Create and submit job descriptor*/ + init_ablkcipher_job(ctx->sh_desc_enc, + ctx->sh_desc_enc_dma, edesc, req, iv_contig); +#ifdef DEBUG + print_hex_dump(KERN_ERR, 
"ablkcipher jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); +#endif + desc = edesc->hw_desc; + ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); + + if (!ret) { + ret = -EINPROGRESS; + } else { + ablkcipher_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + +static int ablkcipher_decrypt(struct ablkcipher_request *req) +{ + struct ablkcipher_edesc *edesc; + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + struct device *jrdev = ctx->jrdev; + bool iv_contig; + u32 *desc; + int ret = 0; + + /* allocate extended descriptor */ + edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * + CAAM_CMD_SZ, &iv_contig); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Create and submit job descriptor*/ + init_ablkcipher_job(ctx->sh_desc_dec, + ctx->sh_desc_dec_dma, edesc, req, iv_contig); + desc = edesc->hw_desc; +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); +#endif + + ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + ablkcipher_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + +/* + * allocate and map the ablkcipher extended descriptor + * for ablkcipher givencrypt + */ +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( + struct skcipher_givcrypt_request *greq, + int desc_bytes, + bool *iv_contig_out) +{ + struct ablkcipher_request *req = &greq->creq; + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? + GFP_KERNEL : GFP_ATOMIC; + int src_nents, dst_nents = 0, sec4_sg_bytes; + struct ablkcipher_edesc *edesc; + dma_addr_t iv_dma = 0; + bool iv_contig = false; + int sgc; + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + bool src_chained = false, dst_chained = false; + int sec4_sg_index; + + src_nents = sg_count(req->src, req->nbytes, &src_chained); + + if (unlikely(req->dst != req->src)) + dst_nents = sg_count(req->dst, req->nbytes, &dst_chained); + + if (likely(req->src == req->dst)) { + sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, + DMA_BIDIRECTIONAL, src_chained); + } else { + sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, + DMA_TO_DEVICE, src_chained); + sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, + DMA_FROM_DEVICE, dst_chained); + } + + /* + * Check if iv can be contiguous with source and destination. + * If so, include it. If not, create scatterlist. + */ + iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); + return ERR_PTR(-ENOMEM); + } + + if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst)) + iv_contig = true; + else + dst_nents = dst_nents ? : 1; + sec4_sg_bytes = ((iv_contig ? 
0 : 1) + src_nents + dst_nents) * + sizeof(struct sec4_sg_entry); + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = kmalloc(sizeof(*edesc) + desc_bytes + + sec4_sg_bytes, GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, "could not allocate extended descriptor\n"); + return ERR_PTR(-ENOMEM); + } + + edesc->src_nents = src_nents; + edesc->src_chained = src_chained; + edesc->dst_nents = dst_nents; + edesc->dst_chained = dst_chained; + edesc->sec4_sg_bytes = sec4_sg_bytes; + edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + + desc_bytes; + + sec4_sg_index = 0; + if (src_nents) { + sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); + sec4_sg_index += src_nents; + } + + if (!iv_contig) { + dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, + iv_dma, ivsize, 0); + sec4_sg_index += 1; + sg_to_sec4_sg_last(req->dst, dst_nents, + edesc->sec4_sg + sec4_sg_index, 0); + } + + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return ERR_PTR(-ENOMEM); + } + edesc->iv_dma = iv_dma; + +#ifdef DEBUG + print_hex_dump(KERN_ERR, + "ablkcipher sec4_sg@" __stringify(__LINE__) ": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, + sec4_sg_bytes, 1); +#endif + + *iv_contig_out = iv_contig; + return edesc; +} + +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq) +{ + struct ablkcipher_request *req = &creq->creq; + struct ablkcipher_edesc *edesc; + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + struct device *jrdev = ctx->jrdev; + bool iv_contig; + u32 *desc; + int ret = 0; + + /* allocate extended descriptor */ + edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * + CAAM_CMD_SZ, &iv_contig); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Create and submit job descriptor*/ + init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma, + edesc, req, iv_contig); +#ifdef DEBUG + print_hex_dump(KERN_ERR, + "ablkcipher jobdesc@" __stringify(__LINE__) ": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); +#endif + desc = edesc->hw_desc; + ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); + + if (!ret) { + ret = -EINPROGRESS; + } else { + ablkcipher_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + +#define template_aead template_u.aead +#define template_ablkcipher template_u.ablkcipher +struct caam_alg_template { + char name[CRYPTO_MAX_ALG_NAME]; + char driver_name[CRYPTO_MAX_ALG_NAME]; + unsigned int blocksize; + u32 type; + union { + struct ablkcipher_alg ablkcipher; + struct aead_alg aead; + struct blkcipher_alg blkcipher; + struct cipher_alg cipher; + struct compress_alg compress; + struct rng_alg rng; + } template_u; + u32 class1_alg_type; + u32 class2_alg_type; + u32 alg_op; +}; + +static struct caam_alg_template driver_algs[] = { + /* single-pass ipsec_esp descriptor */ + { + .name = "authenc(hmac(md5),ecb(cipher_null))", + .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam", + .blocksize = NULL_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_null_givencrypt, + .geniv = "<built-in>", + .ivsize = NULL_IV_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .class1_alg_type = 0, 
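/*
 * Editorial summary (not text from the imported file) of the selector
 * fields used throughout this table: class1_alg_type selects the class 1
 * (cipher) algorithm and mode programmed into the shared descriptors,
 * class2_alg_type selects the class 2 (MDHA hash) algorithm used for the
 * HMAC, and alg_op is the MDHA operation handed to gen_split_key() when
 * the HMAC key is converted into a split key.  A class1_alg_type of 0,
 * as in these cipher_null entries, means no class 1 operation is used.
 */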
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha1),ecb(cipher_null))", + .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam", + .blocksize = NULL_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_null_givencrypt, + .geniv = "<built-in>", + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .class1_alg_type = 0, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha224),ecb(cipher_null))", + .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam", + .blocksize = NULL_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_null_givencrypt, + .geniv = "<built-in>", + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .class1_alg_type = 0, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha256),ecb(cipher_null))", + .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam", + .blocksize = NULL_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_null_givencrypt, + .geniv = "<built-in>", + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .class1_alg_type = 0, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha384),ecb(cipher_null))", + .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam", + .blocksize = NULL_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_null_givencrypt, + .geniv = "<built-in>", + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .class1_alg_type = 0, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha512),ecb(cipher_null))", + .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam", + .blocksize = NULL_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_null_givencrypt, + .geniv = "<built-in>", + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .class1_alg_type = 0, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(md5),cbc(aes))", + .driver_name = "authenc-hmac-md5-cbc-aes-caam", + .blocksize = AES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .class1_alg_type = 
OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha1),cbc(aes))", + .driver_name = "authenc-hmac-sha1-cbc-aes-caam", + .blocksize = AES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha224),cbc(aes))", + .driver_name = "authenc-hmac-sha224-cbc-aes-caam", + .blocksize = AES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha256),cbc(aes))", + .driver_name = "authenc-hmac-sha256-cbc-aes-caam", + .blocksize = AES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha384),cbc(aes))", + .driver_name = "authenc-hmac-sha384-cbc-aes-caam", + .blocksize = AES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + }, + + { + .name = "authenc(hmac(sha512),cbc(aes))", + .driver_name = "authenc-hmac-sha512-cbc-aes-caam", + .blocksize = AES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(md5),cbc(des3_ede))", + .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam", + .blocksize = DES3_EDE_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + 
.geniv = "<built-in>", + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha1),cbc(des3_ede))", + .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam", + .blocksize = DES3_EDE_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha224),cbc(des3_ede))", + .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam", + .blocksize = DES3_EDE_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha256),cbc(des3_ede))", + .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam", + .blocksize = DES3_EDE_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha384),cbc(des3_ede))", + .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam", + .blocksize = DES3_EDE_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha512),cbc(des3_ede))", + .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam", + .blocksize = DES3_EDE_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(md5),cbc(des))", + .driver_name = "authenc-hmac-md5-cbc-des-caam", + .blocksize = DES_BLOCK_SIZE, + 
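/*
 * Illustrative sketch (editorial, not text from the imported file): the
 * templates in this table are reached through the generic kernel AEAD
 * API rather than called directly.  Assuming the CAAM driver registered
 * successfully, a caller would bind to one of the names above roughly as
 * follows; tfm, err, keyblob and keybloblen are placeholders, and the key
 * blob must be formatted as expected by crypto_authenc_extractkeys(),
 * which is what aead_setkey() above unpacks.
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_aead_setkey(tfm, keyblob, keybloblen);
 *	if (!err)
 *		err = crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 */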
.type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha1),cbc(des))", + .driver_name = "authenc-hmac-sha1-cbc-des-caam", + .blocksize = DES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha224),cbc(des))", + .driver_name = "authenc-hmac-sha224-cbc-des-caam", + .blocksize = DES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha256),cbc(des))", + .driver_name = "authenc-hmac-sha256-cbc-des-caam", + .blocksize = DES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha384),cbc(des))", + .driver_name = "authenc-hmac-sha384-cbc-des-caam", + .blocksize = DES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha512),cbc(des))", + .driver_name = "authenc-hmac-sha512-cbc-des-caam", + .blocksize = DES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + }, + { + .name = 
"authenc(hmac(md5),rfc3686(ctr(aes)))", + .driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam", + .blocksize = 1, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", + .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam", + .blocksize = 1, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha224),rfc3686(ctr(aes)))", + .driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam", + .blocksize = 1, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", + .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam", + .blocksize = 1, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha384),rfc3686(ctr(aes)))", + .driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam", + .blocksize = 1, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha512),rfc3686(ctr(aes)))", + .driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam", + .blocksize = 1, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .class1_alg_type = 
OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + }, + { + .name = "rfc4106(gcm(aes))", + .driver_name = "rfc4106-gcm-aes-caam", + .blocksize = 1, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = rfc4106_setkey, + .setauthsize = rfc4106_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + }, + { + .name = "rfc4543(gcm(aes))", + .driver_name = "rfc4543-gcm-aes-caam", + .blocksize = 1, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = rfc4543_setkey, + .setauthsize = rfc4543_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, + .geniv = "<built-in>", + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + }, + /* Galois Counter Mode */ + { + .name = "gcm(aes)", + .driver_name = "gcm-aes-caam", + .blocksize = 1, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = gcm_setkey, + .setauthsize = gcm_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = NULL, + .geniv = "<built-in>", + .ivsize = 12, + .maxauthsize = AES_BLOCK_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + }, + /* ablkcipher descriptor */ + { + .name = "cbc(aes)", + .driver_name = "cbc-aes-caam", + .blocksize = AES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_GIVCIPHER, + .template_ablkcipher = { + .setkey = ablkcipher_setkey, + .encrypt = ablkcipher_encrypt, + .decrypt = ablkcipher_decrypt, + .givencrypt = ablkcipher_givencrypt, + .geniv = "<built-in>", + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + }, + { + .name = "cbc(des3_ede)", + .driver_name = "cbc-3des-caam", + .blocksize = DES3_EDE_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_GIVCIPHER, + .template_ablkcipher = { + .setkey = ablkcipher_setkey, + .encrypt = ablkcipher_encrypt, + .decrypt = ablkcipher_decrypt, + .givencrypt = ablkcipher_givencrypt, + .geniv = "<built-in>", + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + }, + { + .name = "cbc(des)", + .driver_name = "cbc-des-caam", + .blocksize = DES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_GIVCIPHER, + .template_ablkcipher = { + .setkey = ablkcipher_setkey, + .encrypt = ablkcipher_encrypt, + .decrypt = ablkcipher_decrypt, + .givencrypt = ablkcipher_givencrypt, + .geniv = "<built-in>", + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + }, + { + .name = "ctr(aes)", + .driver_name = "ctr-aes-caam", + .blocksize = 1, + .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .template_ablkcipher = { + .setkey = ablkcipher_setkey, + .encrypt = ablkcipher_encrypt, + .decrypt = ablkcipher_decrypt, + .geniv = "chainiv", + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, + }, + { + .name = "rfc3686(ctr(aes))", + .driver_name = "rfc3686-ctr-aes-caam", + .blocksize = 1, + .type = CRYPTO_ALG_TYPE_GIVCIPHER, + .template_ablkcipher = 
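/*
 * Illustrative sketch, not part of the commit: the rfc4106/rfc4543 templates
 * above report ivsize 8 while plain gcm(aes) reports 12.  In the ESP
 * variants the 96-bit GCM nonce is the concatenation of a 4-byte salt
 * provisioned with the key and the 8-byte explicit IV carried per packet,
 * so only 8 bytes travel with each request:
 */
#include <stdint.h>
#include <string.h>

static void ex_rfc4106_nonce(uint8_t nonce[12],
			     const uint8_t salt[4], const uint8_t explicit_iv[8])
{
	memcpy(nonce, salt, 4);			/* fixed per security association */
	memcpy(nonce + 4, explicit_iv, 8);	/* varies per packet */
}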
{ + .setkey = ablkcipher_setkey, + .encrypt = ablkcipher_encrypt, + .decrypt = ablkcipher_decrypt, + .givencrypt = ablkcipher_givencrypt, + .geniv = "<built-in>", + .min_keysize = AES_MIN_KEY_SIZE + + CTR_RFC3686_NONCE_SIZE, + .max_keysize = AES_MAX_KEY_SIZE + + CTR_RFC3686_NONCE_SIZE, + .ivsize = CTR_RFC3686_IV_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, + } +}; + +struct caam_crypto_alg { + struct list_head entry; + int class1_alg_type; + int class2_alg_type; + int alg_op; + struct crypto_alg crypto_alg; +}; + +static int caam_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_alg *alg = tfm->__crt_alg; + struct caam_crypto_alg *caam_alg = + container_of(alg, struct caam_crypto_alg, crypto_alg); + struct caam_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->jrdev = caam_jr_alloc(); + if (IS_ERR(ctx->jrdev)) { + pr_err("Job Ring Device allocation for transform failed\n"); + return PTR_ERR(ctx->jrdev); + } + + /* copy descriptor header template value */ + ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; + ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type; + ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op; + + return 0; +} + +static void caam_cra_exit(struct crypto_tfm *tfm) +{ + struct caam_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->sh_desc_enc_dma && + !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma)) + dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma, + desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE); + if (ctx->sh_desc_dec_dma && + !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma)) + dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma, + desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE); + if (ctx->sh_desc_givenc_dma && + !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma)) + dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, + desc_bytes(ctx->sh_desc_givenc), + DMA_TO_DEVICE); + if (ctx->key_dma && + !dma_mapping_error(ctx->jrdev, ctx->key_dma)) + dma_unmap_single(ctx->jrdev, ctx->key_dma, + ctx->enckeylen + ctx->split_key_pad_len, + DMA_TO_DEVICE); + + caam_jr_free(ctx->jrdev); +} + +static void __exit caam_algapi_exit(void) +{ + + struct caam_crypto_alg *t_alg, *n; + + if (!alg_list.next) + return; + + list_for_each_entry_safe(t_alg, n, &alg_list, entry) { + crypto_unregister_alg(&t_alg->crypto_alg); + list_del(&t_alg->entry); + kfree(t_alg); + } +} + +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template + *template) +{ + struct caam_crypto_alg *t_alg; + struct crypto_alg *alg; + + t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL); + if (!t_alg) { + pr_err("failed to allocate t_alg\n"); + return ERR_PTR(-ENOMEM); + } + + alg = &t_alg->crypto_alg; + + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + template->driver_name); + alg->cra_module = THIS_MODULE; + alg->cra_init = caam_cra_init; + alg->cra_exit = caam_cra_exit; + alg->cra_priority = CAAM_CRA_PRIORITY; + alg->cra_blocksize = template->blocksize; + alg->cra_alignmask = 0; + alg->cra_ctxsize = sizeof(struct caam_ctx); + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | + template->type; + switch (template->type) { + case CRYPTO_ALG_TYPE_GIVCIPHER: + alg->cra_type = &crypto_givcipher_type; + alg->cra_ablkcipher = template->template_ablkcipher; + break; + case CRYPTO_ALG_TYPE_ABLKCIPHER: + alg->cra_type = &crypto_ablkcipher_type; + alg->cra_ablkcipher = template->template_ablkcipher; + break; + case CRYPTO_ALG_TYPE_AEAD: + 
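/*
 * Illustrative sketch, not part of the commit: a simplified, userspace-only
 * model of what caam_alg_alloc() does with a template entry: copy the names
 * and sizes into a freshly allocated registration record and pick the
 * flavour-specific callbacks from the template type.  All types and names
 * here are stand-ins for the crypto API structures, not the real ones.
 */
#include <stdio.h>
#include <stdlib.h>

enum ex_alg_type { EX_ABLKCIPHER, EX_GIVCIPHER, EX_AEAD };

struct ex_template {
	const char *name;
	enum ex_alg_type type;
	unsigned int blocksize;
};

struct ex_alg {
	char name[64];
	const char *ops;
	unsigned int blocksize;
};

static struct ex_alg *ex_alg_alloc(const struct ex_template *t)
{
	struct ex_alg *a = calloc(1, sizeof(*a));

	if (!a)
		return NULL;
	snprintf(a->name, sizeof(a->name), "%s", t->name);
	a->blocksize = t->blocksize;
	switch (t->type) {	/* mirrors the cra_type selection */
	case EX_GIVCIPHER:
		a->ops = "ablkcipher ops + givencrypt";
		break;
	case EX_ABLKCIPHER:
		a->ops = "ablkcipher ops";
		break;
	case EX_AEAD:
		a->ops = "aead ops";
		break;
	}
	return a;
}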
alg->cra_type = &crypto_aead_type; + alg->cra_aead = template->template_aead; + break; + } + + t_alg->class1_alg_type = template->class1_alg_type; + t_alg->class2_alg_type = template->class2_alg_type; + t_alg->alg_op = template->alg_op; + + return t_alg; +} + +static int __init caam_algapi_init(void) +{ + struct device_node *dev_node; + struct platform_device *pdev; + struct device *ctrldev; + void *priv; + int i = 0, err = 0; + + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); + if (!dev_node) { + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); + if (!dev_node) + return -ENODEV; + } + + pdev = of_find_device_by_node(dev_node); + if (!pdev) { + of_node_put(dev_node); + return -ENODEV; + } + + ctrldev = &pdev->dev; + priv = dev_get_drvdata(ctrldev); + of_node_put(dev_node); + + /* + * If priv is NULL, it's probably because the caam driver wasn't + * properly initialized (e.g. RNG4 init failed). Thus, bail out here. + */ + if (!priv) + return -ENODEV; + + + INIT_LIST_HEAD(&alg_list); + + /* register crypto algorithms the device supports */ + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { + /* TODO: check if h/w supports alg */ + struct caam_crypto_alg *t_alg; + + t_alg = caam_alg_alloc(&driver_algs[i]); + if (IS_ERR(t_alg)) { + err = PTR_ERR(t_alg); + pr_warn("%s alg allocation failed\n", + driver_algs[i].driver_name); + continue; + } + + err = crypto_register_alg(&t_alg->crypto_alg); + if (err) { + pr_warn("%s alg registration failed\n", + t_alg->crypto_alg.cra_driver_name); + kfree(t_alg); + } else + list_add_tail(&t_alg->entry, &alg_list); + } + if (!list_empty(&alg_list)) + pr_info("caam algorithms registered in /proc/crypto\n"); + + return err; +} + +module_init(caam_algapi_init); +module_exit(caam_algapi_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("FSL CAAM support for crypto API"); +MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c new file mode 100644 index 000000000..332c8ef8d --- /dev/null +++ b/drivers/crypto/caam/caamhash.c @@ -0,0 +1,1963 @@ +/* + * caam - Freescale FSL CAAM support for ahash functions of crypto API + * + * Copyright 2011 Freescale Semiconductor, Inc. + * + * Based on caamalg.c crypto API driver. + * + * relationship of digest job descriptor or first job descriptor after init to + * shared descriptors: + * + * --------------- --------------- + * | JobDesc #1 |-------------------->| ShareDesc | + * | *(packet 1) | | (hashKey) | + * --------------- | (operation) | + * --------------- + * + * relationship of subsequent job descriptors to shared descriptors: + * + * --------------- --------------- + * | JobDesc #2 |-------------------->| ShareDesc | + * | *(packet 2) | |------------->| (hashKey) | + * --------------- | |-------->| (operation) | + * . | | | (load ctx2) | + * . | | --------------- + * --------------- | | + * | JobDesc #3 |------| | + * | *(packet 3) | | + * --------------- | + * . | + * . | + * --------------- | + * | JobDesc #4 |------------ + * | *(packet 4) | + * --------------- + * + * The SharedDesc never changes for a connection unless rekeyed, but + * each packet will likely be in a different place. So all we need + * to know to process the packet is where the input is, where the + * output goes, and what context we want to process with. Context is + * in the SharedDesc, packet references in the JobDesc. 
+ * + * So, a job desc looks like: + * + * --------------------- + * | Header | + * | ShareDesc Pointer | + * | SEQ_OUT_PTR | + * | (output buffer) | + * | (output length) | + * | SEQ_IN_PTR | + * | (input buffer) | + * | (input length) | + * --------------------- + */ + +#include "compat.h" + +#include "regs.h" +#include "intern.h" +#include "desc_constr.h" +#include "jr.h" +#include "error.h" +#include "sg_sw_sec4.h" +#include "key_gen.h" + +#define CAAM_CRA_PRIORITY 3000 + +/* max hash key is max split key size */ +#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) + +#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE +#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE + +/* length of descriptors text */ +#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ) +#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) +#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) +#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) +#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) +#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) + +#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ + CAAM_MAX_HASH_KEY_SIZE) +#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) + +/* caam context sizes for hashes: running digest + 8 */ +#define HASH_MSG_LEN 8 +#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) + +#ifdef DEBUG +/* for print_hex_dumps with line references */ +#define debug(format, arg...) printk(format, arg) +#else +#define debug(format, arg...) +#endif + + +static struct list_head hash_list; + +/* ahash per-session context */ +struct caam_hash_ctx { + struct device *jrdev; + u32 sh_desc_update[DESC_HASH_MAX_USED_LEN]; + u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN]; + u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN]; + u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN]; + u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN]; + dma_addr_t sh_desc_update_dma; + dma_addr_t sh_desc_update_first_dma; + dma_addr_t sh_desc_fin_dma; + dma_addr_t sh_desc_digest_dma; + dma_addr_t sh_desc_finup_dma; + u32 alg_type; + u32 alg_op; + u8 key[CAAM_MAX_HASH_KEY_SIZE]; + dma_addr_t key_dma; + int ctx_len; + unsigned int split_key_len; + unsigned int split_key_pad_len; +}; + +/* ahash state */ +struct caam_hash_state { + dma_addr_t buf_dma; + dma_addr_t ctx_dma; + u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; + int buflen_0; + u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; + int buflen_1; + u8 caam_ctx[MAX_CTX_LEN]; + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*finup)(struct ahash_request *req); + int current_buf; +}; + +/* Common job descriptor seq in/out ptr routines */ + +/* Map state->caam_ctx, and append seq_out_ptr command that points to it */ +static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, + struct caam_hash_state *state, + int ctx_len) +{ + state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, + ctx_len, DMA_FROM_DEVICE); + if (dma_mapping_error(jrdev, state->ctx_dma)) { + dev_err(jrdev, "unable to map ctx\n"); + return -ENOMEM; + } + + append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); + + return 0; +} + +/* Map req->result, and append seq_out_ptr command that points to it */ +static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev, + u8 *result, int digestsize) +{ + dma_addr_t dst_dma; + + dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE); + append_seq_out_ptr(desc, dst_dma, digestsize, 0); + + return dst_dma; 
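/*
 * Illustrative sketch, not part of the commit: the header comment above
 * boils down to one split: the shared descriptor holds what is constant for
 * a session (key, operation), each job descriptor holds only what changes
 * per request (where the input and output live).  Plain structs stand in
 * for the real command words:
 */
#include <stdint.h>

struct ex_shared_desc {
	const uint8_t *key;
	uint32_t keylen;
	uint32_t operation;		/* e.g. "hmac-sha256, update" */
};

struct ex_job_desc {
	const struct ex_shared_desc *shared;	/* ShareDesc pointer */
	uint64_t seq_out_ptr;			/* output buffer */
	uint32_t out_len;
	uint64_t seq_in_ptr;			/* input buffer */
	uint32_t in_len;
};

static void ex_two_packets_one_session(const struct ex_shared_desc *sh)
{
	/* Both jobs reuse the same shared descriptor, as in the diagrams. */
	struct ex_job_desc pkt1 = { sh, 0x2000, 1500, 0x1000, 1500 };
	struct ex_job_desc pkt2 = { sh, 0x4000, 512, 0x3000, 512 };

	(void)pkt1;
	(void)pkt2;
}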
+} + +/* Map current buffer in state and put it in link table */ +static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev, + struct sec4_sg_entry *sec4_sg, + u8 *buf, int buflen) +{ + dma_addr_t buf_dma; + + buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); + dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0); + + return buf_dma; +} + +/* Map req->src and put it in link table */ +static inline void src_map_to_sec4_sg(struct device *jrdev, + struct scatterlist *src, int src_nents, + struct sec4_sg_entry *sec4_sg, + bool chained) +{ + dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained); + sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0); +} + +/* + * Only put buffer in link table if it contains data, which is possible, + * since a buffer has previously been used, and needs to be unmapped, + */ +static inline dma_addr_t +try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg, + u8 *buf, dma_addr_t buf_dma, int buflen, + int last_buflen) +{ + if (buf_dma && !dma_mapping_error(jrdev, buf_dma)) + dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE); + if (buflen) + buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen); + else + buf_dma = 0; + + return buf_dma; +} + +/* Map state->caam_ctx, and add it to link table */ +static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, + struct caam_hash_state *state, int ctx_len, + struct sec4_sg_entry *sec4_sg, u32 flag) +{ + state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); + if (dma_mapping_error(jrdev, state->ctx_dma)) { + dev_err(jrdev, "unable to map ctx\n"); + return -ENOMEM; + } + + dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); + + return 0; +} + +/* Common shared descriptor commands */ +static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) +{ + append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, + ctx->split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); +} + +/* Append key if it has been set */ +static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) +{ + u32 *key_jump_cmd; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + if (ctx->split_key_len) { + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + + append_key_ahash(desc, ctx); + + set_jump_tgt_here(desc, key_jump_cmd); + } + + /* Propagate errors from shared to job descriptor */ + append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); +} + +/* + * For ahash read data from seqin following state->caam_ctx, + * and write resulting class2 context to seqout, which may be state->caam_ctx + * or req->result + */ +static inline void ahash_append_load_str(u32 *desc, int digestsize) +{ + /* Calculate remaining bytes to read */ + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* Read remaining bytes */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | + FIFOLD_TYPE_MSG | KEY_VLF); + + /* Store class2 context bytes */ + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | + LDST_SRCDST_BYTE_CONTEXT); +} + +/* + * For ahash update, final and finup, import context, read and write to seqout + */ +static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state, + int digestsize, + struct caam_hash_ctx *ctx) +{ + init_sh_desc_key_ahash(desc, ctx); + + /* Import context from software */ + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_2_CCB | ctx->ctx_len); + + /* Class 2 operation */ + append_operation(desc, op | state 
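/*
 * Illustrative sketch, not part of the commit: the helpers above fill a
 * SEC4 "link table", an array of (address, length) entries describing a
 * scattered buffer to the DMA engine, with the last entry of a sequence
 * marked in its length word (the driver's SEC4_SG_LEN_FIN).  The flag value
 * below is invented; only the build-then-mark-last pattern is the point.
 */
#include <stdint.h>
#include <stddef.h>

#define EX_SG_LEN_FIN	0x40000000u	/* made-up "final entry" flag */

struct ex_sg_entry {
	uint64_t addr;
	uint32_t len;
};

static size_t ex_sg_build(struct ex_sg_entry *tbl, const uint64_t *addrs,
			  const uint32_t *lens, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		tbl[i].addr = addrs[i];
		tbl[i].len = lens[i];
	}
	if (n)
		tbl[n - 1].len |= EX_SG_LEN_FIN;
	return n;
}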
| OP_ALG_ENCRYPT); + + /* + * Load from buf and/or src and write to req->result or state->context + */ + ahash_append_load_str(desc, digestsize); +} + +/* For ahash firsts and digest, read and write to seqout */ +static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state, + int digestsize, struct caam_hash_ctx *ctx) +{ + init_sh_desc_key_ahash(desc, ctx); + + /* Class 2 operation */ + append_operation(desc, op | state | OP_ALG_ENCRYPT); + + /* + * Load from buf and/or src and write to req->result or state->context + */ + ahash_append_load_str(desc, digestsize); +} + +static int ahash_set_sh_desc(struct crypto_ahash *ahash) +{ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int digestsize = crypto_ahash_digestsize(ahash); + struct device *jrdev = ctx->jrdev; + u32 have_key = 0; + u32 *desc; + + if (ctx->split_key_len) + have_key = OP_ALG_AAI_HMAC_PRECOMP; + + /* ahash_update shared descriptor */ + desc = ctx->sh_desc_update; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Import context from software */ + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_2_CCB | ctx->ctx_len); + + /* Class 2 operation */ + append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE | + OP_ALG_ENCRYPT); + + /* Load data and write to result or context */ + ahash_append_load_str(desc, ctx->ctx_len); + + ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, + "ahash update shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); +#endif + + /* ahash_update_first shared descriptor */ + desc = ctx->sh_desc_update_first; + + ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT, + ctx->ctx_len, ctx); + + ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, + "ahash update first shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); +#endif + + /* ahash_final shared descriptor */ + desc = ctx->sh_desc_fin; + + ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, + OP_ALG_AS_FINALIZE, digestsize, ctx); + + ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + /* ahash_finup shared descriptor */ + desc = ctx->sh_desc_finup; + + ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, + OP_ALG_AS_FINALIZE, digestsize, ctx); + + ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + /* ahash_digest shared descriptor */ + desc = ctx->sh_desc_digest; + + ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL, + 
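/*
 * Illustrative sketch, not part of the commit: ahash_set_sh_desc() above
 * builds five shared descriptors that differ mainly in the operation's
 * algorithm state and in whether the running context is imported and
 * whether a digest or an updated context is written back.  Summarised as a
 * lookup:
 */
enum ex_phase { EX_UPDATE, EX_UPDATE_FIRST, EX_FINAL, EX_FINUP, EX_DIGEST };

static const char *ex_desc_shape(enum ex_phase p)
{
	switch (p) {
	case EX_UPDATE:		return "AS_UPDATE:    ctx in,  ctx out";
	case EX_UPDATE_FIRST:	return "AS_INIT:      no ctx,  ctx out";
	case EX_FINAL:		return "AS_FINALIZE:  ctx in,  digest out";
	case EX_FINUP:		return "AS_FINALIZE:  ctx in,  digest out";
	case EX_DIGEST:		return "AS_INITFINAL: no ctx,  digest out";
	}
	return "?";
}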
digestsize, ctx); + + ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, + "ahash digest shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + return 0; +} + +static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in, + u32 keylen) +{ + return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, + ctx->split_key_pad_len, key_in, keylen, + ctx->alg_op); +} + +/* Digest hash size if it is too large */ +static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, + u32 *keylen, u8 *key_out, u32 digestsize) +{ + struct device *jrdev = ctx->jrdev; + u32 *desc; + struct split_key_result result; + dma_addr_t src_dma, dst_dma; + int ret = 0; + + desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); + if (!desc) { + dev_err(jrdev, "unable to allocate key input memory\n"); + return -ENOMEM; + } + + init_job_desc(desc, 0); + + src_dma = dma_map_single(jrdev, (void *)key_in, *keylen, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, src_dma)) { + dev_err(jrdev, "unable to map key input memory\n"); + kfree(desc); + return -ENOMEM; + } + dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize, + DMA_FROM_DEVICE); + if (dma_mapping_error(jrdev, dst_dma)) { + dev_err(jrdev, "unable to map key output memory\n"); + dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); + kfree(desc); + return -ENOMEM; + } + + /* Job descriptor to perform unkeyed hash on key_in */ + append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT | + OP_ALG_AS_INITFINAL); + append_seq_in_ptr(desc, src_dma, *keylen, 0); + append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); + append_seq_out_ptr(desc, dst_dma, digestsize, 0); + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | + LDST_SRCDST_BYTE_CONTEXT); + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); +#endif + + result.err = 0; + init_completion(&result.completion); + + ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); + if (!ret) { + /* in progress */ + wait_for_completion_interruptible(&result.completion); + ret = result.err; +#ifdef DEBUG + print_hex_dump(KERN_ERR, + "digested key@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key_in, + digestsize, 1); +#endif + } + dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); + dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE); + + *keylen = digestsize; + + kfree(desc); + + return ret; +} + +static int ahash_setkey(struct crypto_ahash *ahash, + const u8 *key, unsigned int keylen) +{ + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct device *jrdev = ctx->jrdev; + int blocksize = crypto_tfm_alg_blocksize(&ahash->base); + int digestsize = crypto_ahash_digestsize(ahash); + int ret = 0; + u8 *hashed_key = NULL; + +#ifdef DEBUG + printk(KERN_ERR "keylen %d\n", keylen); +#endif + + if (keylen > blocksize) { + hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL | + GFP_DMA); + if (!hashed_key) + 
return -ENOMEM; + ret = hash_digest_key(ctx, key, &keylen, hashed_key, + digestsize); + if (ret) + goto badkey; + key = hashed_key; + } + + /* Pick class 2 key length from algorithm submask */ + ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> + OP_ALG_ALGSEL_SHIFT] * 2; + ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); + +#ifdef DEBUG + printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", + ctx->split_key_len, ctx->split_key_pad_len); + print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); +#endif + + ret = gen_split_hash_key(ctx, key, keylen); + if (ret) + goto badkey; + + ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->key_dma)) { + dev_err(jrdev, "unable to map key i/o memory\n"); + ret = -ENOMEM; + goto map_err; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, + ctx->split_key_pad_len, 1); +#endif + + ret = ahash_set_sh_desc(ahash); + if (ret) { + dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len, + DMA_TO_DEVICE); + } + +map_err: + kfree(hashed_key); + return ret; +badkey: + kfree(hashed_key); + crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; +} + +/* + * ahash_edesc - s/w-extended ahash descriptor + * @dst_dma: physical mapped address of req->result + * @sec4_sg_dma: physical mapped address of h/w link table + * @chained: if source is chained + * @src_nents: number of segments in input scatterlist + * @sec4_sg_bytes: length of dma mapped sec4_sg space + * @sec4_sg: pointer to h/w link table + * @hw_desc: the h/w job descriptor followed by any referenced link tables + */ +struct ahash_edesc { + dma_addr_t dst_dma; + dma_addr_t sec4_sg_dma; + bool chained; + int src_nents; + int sec4_sg_bytes; + struct sec4_sg_entry *sec4_sg; + u32 hw_desc[0]; +}; + +static inline void ahash_unmap(struct device *dev, + struct ahash_edesc *edesc, + struct ahash_request *req, int dst_len) +{ + if (edesc->src_nents) + dma_unmap_sg_chained(dev, req->src, edesc->src_nents, + DMA_TO_DEVICE, edesc->chained); + if (edesc->dst_dma) + dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE); + + if (edesc->sec4_sg_bytes) + dma_unmap_single(dev, edesc->sec4_sg_dma, + edesc->sec4_sg_bytes, DMA_TO_DEVICE); +} + +static inline void ahash_unmap_ctx(struct device *dev, + struct ahash_edesc *edesc, + struct ahash_request *req, int dst_len, u32 flag) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + + if (state->ctx_dma) + dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag); + ahash_unmap(dev, edesc, req, dst_len); +} + +static void ahash_done(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct ahash_request *req = context; + struct ahash_edesc *edesc; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + int digestsize = crypto_ahash_digestsize(ahash); +#ifdef DEBUG + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); +#endif + + edesc = (struct ahash_edesc *)((char *)desc - + offsetof(struct ahash_edesc, hw_desc)); + if (err) + caam_jr_strstatus(jrdev, err); + + ahash_unmap(jrdev, edesc, req, digestsize); + kfree(edesc); + +#ifdef DEBUG + 
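/*
 * Illustrative sketch, not part of the commit: the split-key sizing in
 * ahash_setkey() above, in numbers.  The MDHA pad length for the selected
 * hash (the mdpadlen[] table: MD5, SHA-1, SHA-224, SHA-256, SHA-384,
 * SHA-512) is doubled, since the split key holds an inner and an outer
 * precomputed half, and is then padded up to a 16-byte boundary:
 */
#include <stdio.h>

#define EX_ALIGN16(x)	(((x) + 15u) & ~15u)

int main(void)
{
	static const unsigned int mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	static const char * const names[] = {
		"md5", "sha1", "sha224", "sha256", "sha384", "sha512"
	};
	unsigned int i;

	for (i = 0; i < 6; i++) {
		unsigned int split = mdpadlen[i] * 2;

		printf("%-6s split_key_len=%3u split_key_pad_len=%3u\n",
		       names[i], split, EX_ALIGN16(split));
	}
	return 0;
}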
print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, + ctx->ctx_len, 1); + if (req->result) + print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->result, + digestsize, 1); +#endif + + req->base.complete(&req->base, err); +} + +static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct ahash_request *req = context; + struct ahash_edesc *edesc; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); +#ifdef DEBUG + struct caam_hash_state *state = ahash_request_ctx(req); + int digestsize = crypto_ahash_digestsize(ahash); + + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); +#endif + + edesc = (struct ahash_edesc *)((char *)desc - + offsetof(struct ahash_edesc, hw_desc)); + if (err) + caam_jr_strstatus(jrdev, err); + + ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); + kfree(edesc); + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, + ctx->ctx_len, 1); + if (req->result) + print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->result, + digestsize, 1); +#endif + + req->base.complete(&req->base, err); +} + +static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct ahash_request *req = context; + struct ahash_edesc *edesc; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + int digestsize = crypto_ahash_digestsize(ahash); +#ifdef DEBUG + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); +#endif + + edesc = (struct ahash_edesc *)((char *)desc - + offsetof(struct ahash_edesc, hw_desc)); + if (err) + caam_jr_strstatus(jrdev, err); + + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE); + kfree(edesc); + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, + ctx->ctx_len, 1); + if (req->result) + print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->result, + digestsize, 1); +#endif + + req->base.complete(&req->base, err); +} + +static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct ahash_request *req = context; + struct ahash_edesc *edesc; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); +#ifdef DEBUG + struct caam_hash_state *state = ahash_request_ctx(req); + int digestsize = crypto_ahash_digestsize(ahash); + + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); +#endif + + edesc = (struct ahash_edesc *)((char *)desc - + offsetof(struct ahash_edesc, hw_desc)); + if (err) + caam_jr_strstatus(jrdev, err); + + ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); + kfree(edesc); + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, + ctx->ctx_len, 1); + if (req->result) + print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->result, + digestsize, 1); +#endif + + req->base.complete(&req->base, err); +} + +/* submit update job descriptor */ +static int ahash_update_ctx(struct ahash_request *req) +{ + struct crypto_ahash *ahash = 
crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; + u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; + int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0; + u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1; + int *next_buflen = state->current_buf ? &state->buflen_0 : + &state->buflen_1, last_buflen; + int in_len = *buflen + req->nbytes, to_hash; + u32 *sh_desc = ctx->sh_desc_update, *desc; + dma_addr_t ptr = ctx->sh_desc_update_dma; + int src_nents, sec4_sg_bytes, sec4_sg_src_index; + struct ahash_edesc *edesc; + bool chained = false; + int ret = 0; + int sh_len; + + last_buflen = *next_buflen; + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); + to_hash = in_len - *next_buflen; + + if (to_hash) { + src_nents = __sg_count(req->src, req->nbytes - (*next_buflen), + &chained); + sec4_sg_src_index = 1 + (*buflen ? 1 : 0); + sec4_sg_bytes = (sec4_sg_src_index + src_nents) * + sizeof(struct sec4_sg_entry); + + /* + * allocate space for base edesc and hw desc commands, + * link tables + */ + edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + + sec4_sg_bytes, GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, + "could not allocate extended descriptor\n"); + return -ENOMEM; + } + + edesc->src_nents = src_nents; + edesc->chained = chained; + edesc->sec4_sg_bytes = sec4_sg_bytes; + edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + + DESC_JOB_IO_LEN; + + ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, + edesc->sec4_sg, DMA_BIDIRECTIONAL); + if (ret) + return ret; + + state->buf_dma = try_buf_map_to_sec4_sg(jrdev, + edesc->sec4_sg + 1, + buf, state->buf_dma, + *buflen, last_buflen); + + if (src_nents) { + src_map_to_sec4_sg(jrdev, req->src, src_nents, + edesc->sec4_sg + sec4_sg_src_index, + chained); + if (*next_buflen) { + scatterwalk_map_and_copy(next_buf, req->src, + to_hash - *buflen, + *next_buflen, 0); + state->current_buf = !state->current_buf; + } + } else { + (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= + SEC4_SG_LEN_FIN; + } + + sh_len = desc_len(sh_desc); + desc = edesc->hw_desc; + init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | + HDR_REVERSE); + + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return -ENOMEM; + } + + append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + + to_hash, LDST_SGF); + + append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, + DMA_BIDIRECTIONAL); + kfree(edesc); + } + } else if (*next_buflen) { + scatterwalk_map_and_copy(buf + *buflen, req->src, 0, + req->nbytes, 0); + *buflen = *next_buflen; + *next_buflen = last_buflen; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); + print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, next_buf, + 
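/*
 * Illustrative sketch, not part of the commit: the bookkeeping at the top
 * of ahash_update_ctx() above in isolation.  Only a whole number of hash
 * blocks is sent to the engine; the remainder is parked in the inactive
 * buffer and current_buf flips so the two buffers ping-pong between calls.
 * The masking works because the block size is a power of two.
 */
#include <stdio.h>

int main(void)
{
	unsigned int blocksize = 64;	/* e.g. SHA-256 */
	unsigned int buflen = 10;	/* carried over from the last update */
	unsigned int nbytes = 200;	/* new bytes in this request */

	unsigned int in_len = buflen + nbytes;			/* 210 */
	unsigned int next_buflen = in_len & (blocksize - 1);	/* 210 mod 64 = 18 */
	unsigned int to_hash = in_len - next_buflen;		/* 192, 3 blocks */

	printf("to_hash=%u next_buflen=%u\n", to_hash, next_buflen);
	return 0;
}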
*next_buflen, 1); +#endif + + return ret; +} + +static int ahash_final_ctx(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; + u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; + int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; + int last_buflen = state->current_buf ? state->buflen_0 : + state->buflen_1; + u32 *sh_desc = ctx->sh_desc_fin, *desc; + dma_addr_t ptr = ctx->sh_desc_fin_dma; + int sec4_sg_bytes; + int digestsize = crypto_ahash_digestsize(ahash); + struct ahash_edesc *edesc; + int ret = 0; + int sh_len; + + sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry); + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + + sec4_sg_bytes, GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, "could not allocate extended descriptor\n"); + return -ENOMEM; + } + + sh_len = desc_len(sh_desc); + desc = edesc->hw_desc; + init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); + + edesc->sec4_sg_bytes = sec4_sg_bytes; + edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + + DESC_JOB_IO_LEN; + edesc->src_nents = 0; + + ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, + edesc->sec4_sg, DMA_TO_DEVICE); + if (ret) + return ret; + + state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, + buf, state->buf_dma, buflen, + last_buflen); + (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN; + + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return -ENOMEM; + } + + append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, + LDST_SGF); + + edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, + digestsize); + if (dma_mapping_error(jrdev, edesc->dst_dma)) { + dev_err(jrdev, "unable to map dst\n"); + return -ENOMEM; + } + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); +#endif + + ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); + kfree(edesc); + } + + return ret; +} + +static int ahash_finup_ctx(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; + u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; + int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; + int last_buflen = state->current_buf ? 
state->buflen_0 : + state->buflen_1; + u32 *sh_desc = ctx->sh_desc_finup, *desc; + dma_addr_t ptr = ctx->sh_desc_finup_dma; + int sec4_sg_bytes, sec4_sg_src_index; + int src_nents; + int digestsize = crypto_ahash_digestsize(ahash); + struct ahash_edesc *edesc; + bool chained = false; + int ret = 0; + int sh_len; + + src_nents = __sg_count(req->src, req->nbytes, &chained); + sec4_sg_src_index = 1 + (buflen ? 1 : 0); + sec4_sg_bytes = (sec4_sg_src_index + src_nents) * + sizeof(struct sec4_sg_entry); + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + + sec4_sg_bytes, GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, "could not allocate extended descriptor\n"); + return -ENOMEM; + } + + sh_len = desc_len(sh_desc); + desc = edesc->hw_desc; + init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); + + edesc->src_nents = src_nents; + edesc->chained = chained; + edesc->sec4_sg_bytes = sec4_sg_bytes; + edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + + DESC_JOB_IO_LEN; + + ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, + edesc->sec4_sg, DMA_TO_DEVICE); + if (ret) + return ret; + + state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, + buf, state->buf_dma, buflen, + last_buflen); + + src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + + sec4_sg_src_index, chained); + + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return -ENOMEM; + } + + append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + + buflen + req->nbytes, LDST_SGF); + + edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, + digestsize); + if (dma_mapping_error(jrdev, edesc->dst_dma)) { + dev_err(jrdev, "unable to map dst\n"); + return -ENOMEM; + } + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); +#endif + + ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); + kfree(edesc); + } + + return ret; +} + +static int ahash_digest(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; + u32 *sh_desc = ctx->sh_desc_digest, *desc; + dma_addr_t ptr = ctx->sh_desc_digest_dma; + int digestsize = crypto_ahash_digestsize(ahash); + int src_nents, sec4_sg_bytes; + dma_addr_t src_dma; + struct ahash_edesc *edesc; + bool chained = false; + int ret = 0; + u32 options; + int sh_len; + + src_nents = sg_count(req->src, req->nbytes, &chained); + dma_map_sg_chained(jrdev, req->src, src_nents ? 
: 1, DMA_TO_DEVICE, + chained); + sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes + + DESC_JOB_IO_LEN, GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, "could not allocate extended descriptor\n"); + return -ENOMEM; + } + edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + + DESC_JOB_IO_LEN; + edesc->sec4_sg_bytes = sec4_sg_bytes; + edesc->src_nents = src_nents; + edesc->chained = chained; + + sh_len = desc_len(sh_desc); + desc = edesc->hw_desc; + init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); + + if (src_nents) { + sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return -ENOMEM; + } + src_dma = edesc->sec4_sg_dma; + options = LDST_SGF; + } else { + src_dma = sg_dma_address(req->src); + options = 0; + } + append_seq_in_ptr(desc, src_dma, req->nbytes, options); + + edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, + digestsize); + if (dma_mapping_error(jrdev, edesc->dst_dma)) { + dev_err(jrdev, "unable to map dst\n"); + return -ENOMEM; + } + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); +#endif + + ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + ahash_unmap(jrdev, edesc, req, digestsize); + kfree(edesc); + } + + return ret; +} + +/* submit ahash final if it the first job descriptor */ +static int ahash_final_no_ctx(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; + u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; + int buflen = state->current_buf ? 
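/*
 * Illustrative sketch, not part of the commit: the source-pointer decision
 * made in ahash_digest() above, in isolation.  A single contiguous segment
 * is referenced directly by the job descriptor; anything scattered goes
 * through the link table and the SEQ IN pointer is flagged as a table (the
 * driver's LDST_SGF).  The flag value and struct below are illustrative.
 */
#include <stdint.h>
#include <stdbool.h>

#define EX_SGF	0x01000000u	/* made-up "pointer is a table" flag */

struct ex_seq_in {
	uint64_t ptr;
	uint32_t len;
	uint32_t options;
};

static struct ex_seq_in ex_pick_src(bool multi_segment, uint64_t table_dma,
				    uint64_t seg_dma, uint32_t nbytes)
{
	struct ex_seq_in in = { 0, nbytes, 0 };

	if (multi_segment) {
		in.ptr = table_dma;
		in.options = EX_SGF;
	} else {
		in.ptr = seg_dma;
	}
	return in;
}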
state->buflen_1 : state->buflen_0; + u32 *sh_desc = ctx->sh_desc_digest, *desc; + dma_addr_t ptr = ctx->sh_desc_digest_dma; + int digestsize = crypto_ahash_digestsize(ahash); + struct ahash_edesc *edesc; + int ret = 0; + int sh_len; + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN, + GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, "could not allocate extended descriptor\n"); + return -ENOMEM; + } + + edesc->sec4_sg_bytes = 0; + sh_len = desc_len(sh_desc); + desc = edesc->hw_desc; + init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); + + state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, state->buf_dma)) { + dev_err(jrdev, "unable to map src\n"); + return -ENOMEM; + } + + append_seq_in_ptr(desc, state->buf_dma, buflen, 0); + + edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, + digestsize); + if (dma_mapping_error(jrdev, edesc->dst_dma)) { + dev_err(jrdev, "unable to map dst\n"); + return -ENOMEM; + } + edesc->src_nents = 0; + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); +#endif + + ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + ahash_unmap(jrdev, edesc, req, digestsize); + kfree(edesc); + } + + return ret; +} + +/* submit ahash update if it the first job descriptor after update */ +static int ahash_update_no_ctx(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; + u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; + int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0; + u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1; + int *next_buflen = state->current_buf ? 
&state->buflen_0 : + &state->buflen_1; + int in_len = *buflen + req->nbytes, to_hash; + int sec4_sg_bytes, src_nents; + struct ahash_edesc *edesc; + u32 *desc, *sh_desc = ctx->sh_desc_update_first; + dma_addr_t ptr = ctx->sh_desc_update_first_dma; + bool chained = false; + int ret = 0; + int sh_len; + + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); + to_hash = in_len - *next_buflen; + + if (to_hash) { + src_nents = __sg_count(req->src, req->nbytes - (*next_buflen), + &chained); + sec4_sg_bytes = (1 + src_nents) * + sizeof(struct sec4_sg_entry); + + /* + * allocate space for base edesc and hw desc commands, + * link tables + */ + edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + + sec4_sg_bytes, GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, + "could not allocate extended descriptor\n"); + return -ENOMEM; + } + + edesc->src_nents = src_nents; + edesc->chained = chained; + edesc->sec4_sg_bytes = sec4_sg_bytes; + edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + + DESC_JOB_IO_LEN; + edesc->dst_dma = 0; + + state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, + buf, *buflen); + src_map_to_sec4_sg(jrdev, req->src, src_nents, + edesc->sec4_sg + 1, chained); + if (*next_buflen) { + scatterwalk_map_and_copy(next_buf, req->src, + to_hash - *buflen, + *next_buflen, 0); + state->current_buf = !state->current_buf; + } + + sh_len = desc_len(sh_desc); + desc = edesc->hw_desc; + init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | + HDR_REVERSE); + + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return -ENOMEM; + } + + append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); + + ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); + if (ret) + return ret; + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); + if (!ret) { + ret = -EINPROGRESS; + state->update = ahash_update_ctx; + state->finup = ahash_finup_ctx; + state->final = ahash_final_ctx; + } else { + ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, + DMA_TO_DEVICE); + kfree(edesc); + } + } else if (*next_buflen) { + scatterwalk_map_and_copy(buf + *buflen, req->src, 0, + req->nbytes, 0); + *buflen = *next_buflen; + *next_buflen = 0; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); + print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, next_buf, + *next_buflen, 1); +#endif + + return ret; +} + +/* submit ahash finup if it the first job descriptor after update */ +static int ahash_finup_no_ctx(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; + u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; + int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; + int last_buflen = state->current_buf ? 
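/*
 * Illustrative sketch, not part of the commit: the driver steers each
 * request through function pointers kept in the per-request state.
 * ahash_init() installs handlers for the "no context yet" case, and once a
 * first chunk has been hashed (ahash_update_first() or ahash_update_no_ctx()
 * above) the handlers are swapped for the context-carrying variants.
 * A compact model:
 */
struct ex_hash_state;
typedef int (*ex_step)(struct ex_hash_state *s);

struct ex_hash_state {
	ex_step update;
	ex_step final;
	ex_step finup;
};

static int ex_update_ctx(struct ex_hash_state *s) { (void)s; return 0; }
static int ex_final_ctx(struct ex_hash_state *s) { (void)s; return 0; }
static int ex_finup_ctx(struct ex_hash_state *s) { (void)s; return 0; }

static int ex_update_first(struct ex_hash_state *s)
{
	/* first job submitted: a running context now exists, so later
	 * calls must import it rather than start from scratch */
	s->update = ex_update_ctx;
	s->final = ex_final_ctx;
	s->finup = ex_finup_ctx;
	return 0;
}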
state->buflen_0 : + state->buflen_1; + u32 *sh_desc = ctx->sh_desc_digest, *desc; + dma_addr_t ptr = ctx->sh_desc_digest_dma; + int sec4_sg_bytes, sec4_sg_src_index, src_nents; + int digestsize = crypto_ahash_digestsize(ahash); + struct ahash_edesc *edesc; + bool chained = false; + int sh_len; + int ret = 0; + + src_nents = __sg_count(req->src, req->nbytes, &chained); + sec4_sg_src_index = 2; + sec4_sg_bytes = (sec4_sg_src_index + src_nents) * + sizeof(struct sec4_sg_entry); + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + + sec4_sg_bytes, GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, "could not allocate extended descriptor\n"); + return -ENOMEM; + } + + sh_len = desc_len(sh_desc); + desc = edesc->hw_desc; + init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); + + edesc->src_nents = src_nents; + edesc->chained = chained; + edesc->sec4_sg_bytes = sec4_sg_bytes; + edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + + DESC_JOB_IO_LEN; + + state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, + state->buf_dma, buflen, + last_buflen); + + src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1, + chained); + + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return -ENOMEM; + } + + append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen + + req->nbytes, LDST_SGF); + + edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, + digestsize); + if (dma_mapping_error(jrdev, edesc->dst_dma)) { + dev_err(jrdev, "unable to map dst\n"); + return -ENOMEM; + } + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); +#endif + + ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + ahash_unmap(jrdev, edesc, req, digestsize); + kfree(edesc); + } + + return ret; +} + +/* submit first update job descriptor after init */ +static int ahash_update_first(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; + u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0; + int *next_buflen = state->current_buf ? + &state->buflen_1 : &state->buflen_0; + int to_hash; + u32 *sh_desc = ctx->sh_desc_update_first, *desc; + dma_addr_t ptr = ctx->sh_desc_update_first_dma; + int sec4_sg_bytes, src_nents; + dma_addr_t src_dma; + u32 options; + struct ahash_edesc *edesc; + bool chained = false; + int ret = 0; + int sh_len; + + *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - + 1); + to_hash = req->nbytes - *next_buflen; + + if (to_hash) { + src_nents = sg_count(req->src, req->nbytes - (*next_buflen), + &chained); + dma_map_sg_chained(jrdev, req->src, src_nents ? 
: 1, + DMA_TO_DEVICE, chained); + sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); + + /* + * allocate space for base edesc and hw desc commands, + * link tables + */ + edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + + sec4_sg_bytes, GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, + "could not allocate extended descriptor\n"); + return -ENOMEM; + } + + edesc->src_nents = src_nents; + edesc->chained = chained; + edesc->sec4_sg_bytes = sec4_sg_bytes; + edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + + DESC_JOB_IO_LEN; + edesc->dst_dma = 0; + + if (src_nents) { + sg_to_sec4_sg_last(req->src, src_nents, + edesc->sec4_sg, 0); + edesc->sec4_sg_dma = dma_map_single(jrdev, + edesc->sec4_sg, + sec4_sg_bytes, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return -ENOMEM; + } + src_dma = edesc->sec4_sg_dma; + options = LDST_SGF; + } else { + src_dma = sg_dma_address(req->src); + options = 0; + } + + if (*next_buflen) + scatterwalk_map_and_copy(next_buf, req->src, to_hash, + *next_buflen, 0); + + sh_len = desc_len(sh_desc); + desc = edesc->hw_desc; + init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | + HDR_REVERSE); + + append_seq_in_ptr(desc, src_dma, to_hash, options); + + ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); + if (ret) + return ret; + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, + req); + if (!ret) { + ret = -EINPROGRESS; + state->update = ahash_update_ctx; + state->finup = ahash_finup_ctx; + state->final = ahash_final_ctx; + } else { + ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, + DMA_TO_DEVICE); + kfree(edesc); + } + } else if (*next_buflen) { + state->update = ahash_update_no_ctx; + state->finup = ahash_finup_no_ctx; + state->final = ahash_final_no_ctx; + scatterwalk_map_and_copy(next_buf, req->src, 0, + req->nbytes, 0); + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, next_buf, + *next_buflen, 1); +#endif + + return ret; +} + +static int ahash_finup_first(struct ahash_request *req) +{ + return ahash_digest(req); +} + +static int ahash_init(struct ahash_request *req) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + + state->update = ahash_update_first; + state->finup = ahash_finup_first; + state->final = ahash_final_no_ctx; + + state->current_buf = 0; + state->buf_dma = 0; + state->buflen_0 = 0; + state->buflen_1 = 0; + + return 0; +} + +static int ahash_update(struct ahash_request *req) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + + return state->update(req); +} + +static int ahash_finup(struct ahash_request *req) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + + return state->finup(req); +} + +static int ahash_final(struct ahash_request *req) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + + return state->final(req); +} + +static int ahash_export(struct ahash_request *req, void *out) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + + memcpy(out, ctx, sizeof(struct caam_hash_ctx)); + memcpy(out + sizeof(struct caam_hash_ctx), state, + sizeof(struct caam_hash_state)); + return 0; +} + +static int ahash_import(struct ahash_request *req, const 
void *in) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + + memcpy(ctx, in, sizeof(struct caam_hash_ctx)); + memcpy(state, in + sizeof(struct caam_hash_ctx), + sizeof(struct caam_hash_state)); + return 0; +} + +struct caam_hash_template { + char name[CRYPTO_MAX_ALG_NAME]; + char driver_name[CRYPTO_MAX_ALG_NAME]; + char hmac_name[CRYPTO_MAX_ALG_NAME]; + char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; + unsigned int blocksize; + struct ahash_alg template_ahash; + u32 alg_type; + u32 alg_op; +}; + +/* ahash descriptors */ +static struct caam_hash_template driver_hash[] = { + { + .name = "sha1", + .driver_name = "sha1-caam", + .hmac_name = "hmac(sha1)", + .hmac_driver_name = "hmac-sha1-caam", + .blocksize = SHA1_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA1, + .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + }, { + .name = "sha224", + .driver_name = "sha224-caam", + .hmac_name = "hmac(sha224)", + .hmac_driver_name = "hmac-sha224-caam", + .blocksize = SHA224_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA224, + .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + }, { + .name = "sha256", + .driver_name = "sha256-caam", + .hmac_name = "hmac(sha256)", + .hmac_driver_name = "hmac-sha256-caam", + .blocksize = SHA256_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA256, + .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + }, { + .name = "sha384", + .driver_name = "sha384-caam", + .hmac_name = "hmac(sha384)", + .hmac_driver_name = "hmac-sha384-caam", + .blocksize = SHA384_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = SHA384_DIGEST_SIZE, + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA384, + .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + }, { + .name = "sha512", + .driver_name = "sha512-caam", + .hmac_name = "hmac(sha512)", + .hmac_driver_name = "hmac-sha512-caam", + .blocksize = SHA512_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = SHA512_DIGEST_SIZE, + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA512, + .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + }, { + .name = "md5", + .driver_name = "md5-caam", + .hmac_name = "hmac(md5)", + .hmac_driver_name = "hmac-md5-caam", + .blocksize = MD5_BLOCK_WORDS * 4, + .template_ahash = { + .init = ahash_init, + 
.update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + }, + }, + .alg_type = OP_ALG_ALGSEL_MD5, + .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + }, +}; + +struct caam_hash_alg { + struct list_head entry; + int alg_type; + int alg_op; + struct ahash_alg ahash_alg; +}; + +static int caam_hash_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct crypto_alg *base = tfm->__crt_alg; + struct hash_alg_common *halg = + container_of(base, struct hash_alg_common, base); + struct ahash_alg *alg = + container_of(halg, struct ahash_alg, halg); + struct caam_hash_alg *caam_hash = + container_of(alg, struct caam_hash_alg, ahash_alg); + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); + /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ + static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, + HASH_MSG_LEN + SHA1_DIGEST_SIZE, + HASH_MSG_LEN + 32, + HASH_MSG_LEN + SHA256_DIGEST_SIZE, + HASH_MSG_LEN + 64, + HASH_MSG_LEN + SHA512_DIGEST_SIZE }; + int ret = 0; + + /* + * Get a Job ring from Job Ring driver to ensure in-order + * crypto request processing per tfm + */ + ctx->jrdev = caam_jr_alloc(); + if (IS_ERR(ctx->jrdev)) { + pr_err("Job Ring Device allocation for transform failed\n"); + return PTR_ERR(ctx->jrdev); + } + /* copy descriptor header template value */ + ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; + ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; + + ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> + OP_ALG_ALGSEL_SHIFT]; + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct caam_hash_state)); + + ret = ahash_set_sh_desc(ahash); + + return ret; +} + +static void caam_hash_cra_exit(struct crypto_tfm *tfm) +{ + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->sh_desc_update_dma && + !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma)) + dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma, + desc_bytes(ctx->sh_desc_update), + DMA_TO_DEVICE); + if (ctx->sh_desc_update_first_dma && + !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma)) + dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma, + desc_bytes(ctx->sh_desc_update_first), + DMA_TO_DEVICE); + if (ctx->sh_desc_fin_dma && + !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma)) + dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma, + desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE); + if (ctx->sh_desc_digest_dma && + !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma)) + dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma, + desc_bytes(ctx->sh_desc_digest), + DMA_TO_DEVICE); + if (ctx->sh_desc_finup_dma && + !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) + dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, + desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE); + + caam_jr_free(ctx->jrdev); +} + +static void __exit caam_algapi_hash_exit(void) +{ + struct caam_hash_alg *t_alg, *n; + + if (!hash_list.next) + return; + + list_for_each_entry_safe(t_alg, n, &hash_list, entry) { + crypto_unregister_ahash(&t_alg->ahash_alg); + list_del(&t_alg->entry); + kfree(t_alg); + } +} + +static struct caam_hash_alg * +caam_hash_alloc(struct caam_hash_template *template, + bool keyed) +{ + struct caam_hash_alg *t_alg; + struct ahash_alg *halg; + struct crypto_alg *alg; + + t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL); 
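+ /*
+  * The zeroed allocation is filled in below from the supplied
+  * caam_hash_template: the ahash_alg template is copied and the
+  * cra_* fields are set up for either the keyed (hmac) or the
+  * unkeyed variant of the algorithm.
+  */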
+ if (!t_alg) { + pr_err("failed to allocate t_alg\n"); + return ERR_PTR(-ENOMEM); + } + + t_alg->ahash_alg = template->template_ahash; + halg = &t_alg->ahash_alg; + alg = &halg->halg.base; + + if (keyed) { + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", + template->hmac_name); + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + template->hmac_driver_name); + } else { + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", + template->name); + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + template->driver_name); + } + alg->cra_module = THIS_MODULE; + alg->cra_init = caam_hash_cra_init; + alg->cra_exit = caam_hash_cra_exit; + alg->cra_ctxsize = sizeof(struct caam_hash_ctx); + alg->cra_priority = CAAM_CRA_PRIORITY; + alg->cra_blocksize = template->blocksize; + alg->cra_alignmask = 0; + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH; + alg->cra_type = &crypto_ahash_type; + + t_alg->alg_type = template->alg_type; + t_alg->alg_op = template->alg_op; + + return t_alg; +} + +static int __init caam_algapi_hash_init(void) +{ + struct device_node *dev_node; + struct platform_device *pdev; + struct device *ctrldev; + void *priv; + int i = 0, err = 0; + + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); + if (!dev_node) { + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); + if (!dev_node) + return -ENODEV; + } + + pdev = of_find_device_by_node(dev_node); + if (!pdev) { + of_node_put(dev_node); + return -ENODEV; + } + + ctrldev = &pdev->dev; + priv = dev_get_drvdata(ctrldev); + of_node_put(dev_node); + + /* + * If priv is NULL, it's probably because the caam driver wasn't + * properly initialized (e.g. RNG4 init failed). Thus, bail out here. + */ + if (!priv) + return -ENODEV; + + INIT_LIST_HEAD(&hash_list); + + /* register crypto algorithms the device supports */ + for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { + /* TODO: check if h/w supports alg */ + struct caam_hash_alg *t_alg; + + /* register hmac version */ + t_alg = caam_hash_alloc(&driver_hash[i], true); + if (IS_ERR(t_alg)) { + err = PTR_ERR(t_alg); + pr_warn("%s alg allocation failed\n", + driver_hash[i].driver_name); + continue; + } + + err = crypto_register_ahash(&t_alg->ahash_alg); + if (err) { + pr_warn("%s alg registration failed\n", + t_alg->ahash_alg.halg.base.cra_driver_name); + kfree(t_alg); + } else + list_add_tail(&t_alg->entry, &hash_list); + + /* register unkeyed version */ + t_alg = caam_hash_alloc(&driver_hash[i], false); + if (IS_ERR(t_alg)) { + err = PTR_ERR(t_alg); + pr_warn("%s alg allocation failed\n", + driver_hash[i].driver_name); + continue; + } + + err = crypto_register_ahash(&t_alg->ahash_alg); + if (err) { + pr_warn("%s alg registration failed\n", + t_alg->ahash_alg.halg.base.cra_driver_name); + kfree(t_alg); + } else + list_add_tail(&t_alg->entry, &hash_list); + } + + return err; +} + +module_init(caam_algapi_hash_init); +module_exit(caam_algapi_hash_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API"); +MODULE_AUTHOR("Freescale Semiconductor - NMG"); diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c new file mode 100644 index 000000000..509533720 --- /dev/null +++ b/drivers/crypto/caam/caamrng.c @@ -0,0 +1,362 @@ +/* + * caam - Freescale FSL CAAM support for hw_random + * + * Copyright 2011 Freescale Semiconductor, Inc. + * + * Based on caamalg.c crypto API driver. 
+ * + * relationship between job descriptors to shared descriptors: + * + * --------------- -------------- + * | JobDesc #0 |-------------------->| ShareDesc | + * | *(buffer 0) | |------------->| (generate) | + * --------------- | | (move) | + * | | (store) | + * --------------- | -------------- + * | JobDesc #1 |------| + * | *(buffer 1) | + * --------------- + * + * A job desc looks like this: + * + * --------------------- + * | Header | + * | ShareDesc Pointer | + * | SEQ_OUT_PTR | + * | (output buffer) | + * --------------------- + * + * The SharedDesc never changes, and each job descriptor points to one of two + * buffers for each device, from which the data will be copied into the + * requested destination + */ + +#include <linux/hw_random.h> +#include <linux/completion.h> +#include <linux/atomic.h> + +#include "compat.h" + +#include "regs.h" +#include "intern.h" +#include "desc_constr.h" +#include "jr.h" +#include "error.h" + +/* + * Maximum buffer size: maximum number of random, cache-aligned bytes that + * will be generated and moved to seq out ptr (extlen not allowed) + */ +#define RN_BUF_SIZE (0xffff / L1_CACHE_BYTES * \ + L1_CACHE_BYTES) + +/* length of descriptors */ +#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2) +#define DESC_RNG_LEN (4 * CAAM_CMD_SZ) + +/* Buffer, its dma address and lock */ +struct buf_data { + u8 buf[RN_BUF_SIZE] ____cacheline_aligned; + dma_addr_t addr; + struct completion filled; + u32 hw_desc[DESC_JOB_O_LEN]; +#define BUF_NOT_EMPTY 0 +#define BUF_EMPTY 1 +#define BUF_PENDING 2 /* Empty, but with job pending --don't submit another */ + atomic_t empty; +}; + +/* rng per-device context */ +struct caam_rng_ctx { + struct device *jrdev; + dma_addr_t sh_desc_dma; + u32 sh_desc[DESC_RNG_LEN]; + unsigned int cur_buf_idx; + int current_buf; + struct buf_data bufs[2]; +}; + +static struct caam_rng_ctx *rng_ctx; + +static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd) +{ + if (bd->addr) + dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE, + DMA_FROM_DEVICE); +} + +static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx) +{ + struct device *jrdev = ctx->jrdev; + + if (ctx->sh_desc_dma) + dma_unmap_single(jrdev, ctx->sh_desc_dma, + desc_bytes(ctx->sh_desc), DMA_TO_DEVICE); + rng_unmap_buf(jrdev, &ctx->bufs[0]); + rng_unmap_buf(jrdev, &ctx->bufs[1]); +} + +static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context) +{ + struct buf_data *bd; + + bd = (struct buf_data *)((char *)desc - + offsetof(struct buf_data, hw_desc)); + + if (err) + caam_jr_strstatus(jrdev, err); + + atomic_set(&bd->empty, BUF_NOT_EMPTY); + complete(&bd->filled); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "rng refreshed buf@: ", + DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1); +#endif +} + +static inline int submit_job(struct caam_rng_ctx *ctx, int to_current) +{ + struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)]; + struct device *jrdev = ctx->jrdev; + u32 *desc = bd->hw_desc; + int err; + + dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf)); + init_completion(&bd->filled); + err = caam_jr_enqueue(jrdev, desc, rng_done, ctx); + if (err) + complete(&bd->filled); /* don't wait on failed job*/ + else + atomic_inc(&bd->empty); /* note if pending */ + + return err; +} + +static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + struct caam_rng_ctx *ctx = rng_ctx; + struct buf_data *bd = &ctx->bufs[ctx->current_buf]; + int next_buf_idx, copied_idx; + int err; + + if 
(atomic_read(&bd->empty)) { + /* try to submit job if there wasn't one */ + if (atomic_read(&bd->empty) == BUF_EMPTY) { + err = submit_job(ctx, 1); + /* if can't submit job, can't even wait */ + if (err) + return 0; + } + /* no immediate data, so exit if not waiting */ + if (!wait) + return 0; + + /* waiting for pending job */ + if (atomic_read(&bd->empty)) + wait_for_completion(&bd->filled); + } + + next_buf_idx = ctx->cur_buf_idx + max; + dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n", + __func__, ctx->current_buf, ctx->cur_buf_idx); + + /* if enough data in current buffer */ + if (next_buf_idx < RN_BUF_SIZE) { + memcpy(data, bd->buf + ctx->cur_buf_idx, max); + ctx->cur_buf_idx = next_buf_idx; + return max; + } + + /* else, copy what's left... */ + copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx; + memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx); + ctx->cur_buf_idx = 0; + atomic_set(&bd->empty, BUF_EMPTY); + + /* ...refill... */ + submit_job(ctx, 1); + + /* and use next buffer */ + ctx->current_buf = !ctx->current_buf; + dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf); + + /* since there already is some data read, don't wait */ + return copied_idx + caam_read(rng, data + copied_idx, + max - copied_idx, false); +} + +static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx) +{ + struct device *jrdev = ctx->jrdev; + u32 *desc = ctx->sh_desc; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Propagate errors from shared to job descriptor */ + append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); + + /* Generate random bytes */ + append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG); + + /* Store bytes */ + append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE); + + ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4, + desc, desc_bytes(desc), 1); +#endif + return 0; +} + +static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id) +{ + struct device *jrdev = ctx->jrdev; + struct buf_data *bd = &ctx->bufs[buf_id]; + u32 *desc = bd->hw_desc; + int sh_len = desc_len(ctx->sh_desc); + + init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER | + HDR_REVERSE); + + bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(jrdev, bd->addr)) { + dev_err(jrdev, "unable to map dst\n"); + return -ENOMEM; + } + + append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, + desc, desc_bytes(desc), 1); +#endif + return 0; +} + +static void caam_cleanup(struct hwrng *rng) +{ + int i; + struct buf_data *bd; + + for (i = 0; i < 2; i++) { + bd = &rng_ctx->bufs[i]; + if (atomic_read(&bd->empty) == BUF_PENDING) + wait_for_completion(&bd->filled); + } + + rng_unmap_ctx(rng_ctx); +} + +static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id) +{ + struct buf_data *bd = &ctx->bufs[buf_id]; + int err; + + err = rng_create_job_desc(ctx, buf_id); + if (err) + return err; + + atomic_set(&bd->empty, BUF_EMPTY); + submit_job(ctx, buf_id == ctx->current_buf); + wait_for_completion(&bd->filled); + + return 0; +} + +static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev) +{ + int err; + + ctx->jrdev = jrdev; + + err = rng_create_sh_desc(ctx); + if (err) + 
return err; + + ctx->current_buf = 0; + ctx->cur_buf_idx = 0; + + err = caam_init_buf(ctx, 0); + if (err) + return err; + + err = caam_init_buf(ctx, 1); + if (err) + return err; + + return 0; +} + +static struct hwrng caam_rng = { + .name = "rng-caam", + .cleanup = caam_cleanup, + .read = caam_read, +}; + +static void __exit caam_rng_exit(void) +{ + caam_jr_free(rng_ctx->jrdev); + hwrng_unregister(&caam_rng); + kfree(rng_ctx); +} + +static int __init caam_rng_init(void) +{ + struct device *dev; + struct device_node *dev_node; + struct platform_device *pdev; + struct device *ctrldev; + void *priv; + int err; + + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); + if (!dev_node) { + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); + if (!dev_node) + return -ENODEV; + } + + pdev = of_find_device_by_node(dev_node); + if (!pdev) { + of_node_put(dev_node); + return -ENODEV; + } + + ctrldev = &pdev->dev; + priv = dev_get_drvdata(ctrldev); + of_node_put(dev_node); + + /* + * If priv is NULL, it's probably because the caam driver wasn't + * properly initialized (e.g. RNG4 init failed). Thus, bail out here. + */ + if (!priv) + return -ENODEV; + + dev = caam_jr_alloc(); + if (IS_ERR(dev)) { + pr_err("Job Ring Device allocation for transform failed\n"); + return PTR_ERR(dev); + } + rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA); + if (!rng_ctx) + return -ENOMEM; + err = caam_init_rng(rng_ctx, dev); + if (err) + return err; + + dev_info(dev, "registering rng-caam\n"); + return hwrng_register(&caam_rng); +} + +module_init(caam_rng_init); +module_exit(caam_rng_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("FSL CAAM support for hw_random API"); +MODULE_AUTHOR("Freescale Semiconductor - NMG"); diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h new file mode 100644 index 000000000..acd7743e2 --- /dev/null +++ b/drivers/crypto/caam/compat.h @@ -0,0 +1,41 @@ +/* + * Copyright 2008-2011 Freescale Semiconductor, Inc. + */ + +#ifndef CAAM_COMPAT_H +#define CAAM_COMPAT_H + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/crypto.h> +#include <linux/hash.h> +#include <linux/hw_random.h> +#include <linux/of_platform.h> +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/spinlock.h> +#include <linux/rtnetlink.h> +#include <linux/in.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/debugfs.h> +#include <linux/circ_buf.h> +#include <net/xfrm.h> + +#include <crypto/algapi.h> +#include <crypto/null.h> +#include <crypto/aes.h> +#include <crypto/ctr.h> +#include <crypto/des.h> +#include <crypto/sha.h> +#include <crypto/md5.h> +#include <crypto/aead.h> +#include <crypto/authenc.h> +#include <crypto/scatterwalk.h> +#include <crypto/internal/skcipher.h> +#include <crypto/internal/hash.h> + +#endif /* !defined(CAAM_COMPAT_H) */ diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c new file mode 100644 index 000000000..efba4ccd4 --- /dev/null +++ b/drivers/crypto/caam/ctrl.c @@ -0,0 +1,729 @@ +/* * CAAM control-plane driver backend + * Controller-level driver, kernel property detection, initialization + * + * Copyright 2008-2012 Freescale Semiconductor, Inc. 
+ */ + +#include <linux/device.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#include "compat.h" +#include "regs.h" +#include "intern.h" +#include "jr.h" +#include "desc_constr.h" +#include "error.h" + +/* + * Descriptor to instantiate RNG State Handle 0 in normal mode and + * load the JDKEK, TDKEK and TDSK registers + */ +static void build_instantiation_desc(u32 *desc, int handle, int do_sk) +{ + u32 *jump_cmd, op_flags; + + init_job_desc(desc, 0); + + op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | + (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT; + + /* INIT RNG in non-test mode */ + append_operation(desc, op_flags); + + if (!handle && do_sk) { + /* + * For SH0, Secure Keys must be generated as well + */ + + /* wait for done */ + jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1); + set_jump_tgt_here(desc, jump_cmd); + + /* + * load 1 to clear written reg: + * resets the done interrrupt and returns the RNG to idle. + */ + append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW); + + /* Initialize State Handle */ + append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | + OP_ALG_AAI_RNG4_SK); + } + + append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT); +} + +/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */ +static void build_deinstantiation_desc(u32 *desc, int handle) +{ + init_job_desc(desc, 0); + + /* Uninstantiate State Handle 0 */ + append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | + (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL); + + append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT); +} + +/* + * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of + * the software (no JR/QI used). + * @ctrldev - pointer to device + * @status - descriptor status, after being run + * + * Return: - 0 if no error occurred + * - -ENODEV if the DECO couldn't be acquired + * - -EAGAIN if an error occurred while executing the descriptor + */ +static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, + u32 *status) +{ + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); + struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl; + struct caam_deco __iomem *deco = ctrlpriv->deco; + unsigned int timeout = 100000; + u32 deco_dbg_reg, flags; + int i; + + + if (ctrlpriv->virt_en == 1) { + setbits32(&ctrl->deco_rsr, DECORSR_JR0); + + while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) && + --timeout) + cpu_relax(); + + timeout = 100000; + } + + setbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE); + + while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) && + --timeout) + cpu_relax(); + + if (!timeout) { + dev_err(ctrldev, "failed to acquire DECO 0\n"); + clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE); + return -ENODEV; + } + + for (i = 0; i < desc_len(desc); i++) + wr_reg32(&deco->descbuf[i], *(desc + i)); + + flags = DECO_JQCR_WHL; + /* + * If the descriptor length is longer than 4 words, then the + * FOUR bit in JRCTRL register must be set. 
+ */ + if (desc_len(desc) >= 4) + flags |= DECO_JQCR_FOUR; + + /* Instruct the DECO to execute it */ + wr_reg32(&deco->jr_ctl_hi, flags); + + timeout = 10000000; + do { + deco_dbg_reg = rd_reg32(&deco->desc_dbg); + /* + * If an error occured in the descriptor, then + * the DECO status field will be set to 0x0D + */ + if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) == + DESC_DBG_DECO_STAT_HOST_ERR) + break; + cpu_relax(); + } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout); + + *status = rd_reg32(&deco->op_status_hi) & + DECO_OP_STATUS_HI_ERR_MASK; + + if (ctrlpriv->virt_en == 1) + clrbits32(&ctrl->deco_rsr, DECORSR_JR0); + + /* Mark the DECO as free */ + clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE); + + if (!timeout) + return -EAGAIN; + + return 0; +} + +/* + * instantiate_rng - builds and executes a descriptor on DECO0, + * which initializes the RNG block. + * @ctrldev - pointer to device + * @state_handle_mask - bitmask containing the instantiation status + * for the RNG4 state handles which exist in + * the RNG4 block: 1 if it's been instantiated + * by an external entry, 0 otherwise. + * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK; + * Caution: this can be done only once; if the keys need to be + * regenerated, a POR is required + * + * Return: - 0 if no error occurred + * - -ENOMEM if there isn't enough memory to allocate the descriptor + * - -ENODEV if DECO0 couldn't be acquired + * - -EAGAIN if an error occurred when executing the descriptor + * f.i. there was a RNG hardware error due to not "good enough" + * entropy being aquired. + */ +static int instantiate_rng(struct device *ctrldev, int state_handle_mask, + int gen_sk) +{ + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); + struct caam_ctrl __iomem *ctrl; + u32 *desc, status, rdsta_val; + int ret = 0, sh_idx; + + ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; + desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL); + if (!desc) + return -ENOMEM; + + for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) { + /* + * If the corresponding bit is set, this state handle + * was initialized by somebody else, so it's left alone. + */ + if ((1 << sh_idx) & state_handle_mask) + continue; + + /* Create the descriptor for instantiating RNG State Handle */ + build_instantiation_desc(desc, sh_idx, gen_sk); + + /* Try to run it through DECO0 */ + ret = run_descriptor_deco0(ctrldev, desc, &status); + + /* + * If ret is not 0, or descriptor status is not 0, then + * something went wrong. No need to try the next state + * handle (if available), bail out here. + * Also, if for some reason, the State Handle didn't get + * instantiated although the descriptor has finished + * without any error (HW optimizations for later + * CAAM eras), then try again. + */ + rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; + if (status || !(rdsta_val & (1 << sh_idx))) + ret = -EAGAIN; + if (ret) + break; + dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); + /* Clear the contents before recreating the descriptor */ + memset(desc, 0x00, CAAM_CMD_SZ * 7); + } + + kfree(desc); + + return ret; +} + +/* + * deinstantiate_rng - builds and executes a descriptor on DECO0, + * which deinitializes the RNG block. 
+ * @ctrldev - pointer to device + * @state_handle_mask - bitmask containing the instantiation status + * for the RNG4 state handles which exist in + * the RNG4 block: 1 if it's been instantiated + * + * Return: - 0 if no error occurred + * - -ENOMEM if there isn't enough memory to allocate the descriptor + * - -ENODEV if DECO0 couldn't be acquired + * - -EAGAIN if an error occurred when executing the descriptor + */ +static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask) +{ + u32 *desc, status; + int sh_idx, ret = 0; + + desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL); + if (!desc) + return -ENOMEM; + + for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) { + /* + * If the corresponding bit is set, then it means the state + * handle was initialized by us, and thus it needs to be + * deintialized as well + */ + if ((1 << sh_idx) & state_handle_mask) { + /* + * Create the descriptor for deinstantating this state + * handle + */ + build_deinstantiation_desc(desc, sh_idx); + + /* Try to run it through DECO0 */ + ret = run_descriptor_deco0(ctrldev, desc, &status); + + if (ret || status) { + dev_err(ctrldev, + "Failed to deinstantiate RNG4 SH%d\n", + sh_idx); + break; + } + dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx); + } + } + + kfree(desc); + + return ret; +} + +static int caam_remove(struct platform_device *pdev) +{ + struct device *ctrldev; + struct caam_drv_private *ctrlpriv; + struct caam_ctrl __iomem *ctrl; + int ring, ret = 0; + + ctrldev = &pdev->dev; + ctrlpriv = dev_get_drvdata(ctrldev); + ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; + + /* Remove platform devices for JobRs */ + for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { + if (ctrlpriv->jrpdev[ring]) + of_device_unregister(ctrlpriv->jrpdev[ring]); + } + + /* De-initialize RNG state handles initialized by this driver. */ + if (ctrlpriv->rng4_sh_init) + deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init); + + /* Shut down debug views */ +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(ctrlpriv->dfs_root); +#endif + + /* Unmap controller region */ + iounmap(&ctrl); + + return ret; +} + +/* + * kick_trng - sets the various parameters for enabling the initialization + * of the RNG4 block in CAAM + * @pdev - pointer to the platform device + * @ent_delay - Defines the length (in system clocks) of each entropy sample. + */ +static void kick_trng(struct platform_device *pdev, int ent_delay) +{ + struct device *ctrldev = &pdev->dev; + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); + struct caam_ctrl __iomem *ctrl; + struct rng4tst __iomem *r4tst; + u32 val; + + ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; + r4tst = &ctrl->r4tst[0]; + + /* put RNG4 into program mode */ + setbits32(&r4tst->rtmctl, RTMCTL_PRGM); + + /* + * Performance-wise, it does not make sense to + * set the delay to a value that is lower + * than the last one that worked (i.e. the state handles + * were instantiated properly. Thus, instead of wasting + * time trying to set the values controlling the sample + * frequency, the function simply returns. + */ + val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK) + >> RTSDCTL_ENT_DLY_SHIFT; + if (ent_delay <= val) { + /* put RNG4 into run mode */ + clrbits32(&r4tst->rtmctl, RTMCTL_PRGM); + return; + } + + val = rd_reg32(&r4tst->rtsdctl); + val = (val & ~RTSDCTL_ENT_DLY_MASK) | + (ent_delay << RTSDCTL_ENT_DLY_SHIFT); + wr_reg32(&r4tst->rtsdctl, val); + /* min. freq. 
count, equal to 1/4 of the entropy sample length */ + wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2); + /* disable maximum frequency count */ + wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE); + /* read the control register */ + val = rd_reg32(&r4tst->rtmctl); + /* + * select raw sampling in both entropy shifter + * and statistical checker + */ + setbits32(&val, RTMCTL_SAMP_MODE_RAW_ES_SC); + /* put RNG4 into run mode */ + clrbits32(&val, RTMCTL_PRGM); + /* write back the control register */ + wr_reg32(&r4tst->rtmctl, val); +} + +/** + * caam_get_era() - Return the ERA of the SEC on SoC, based + * on "sec-era" propery in the DTS. This property is updated by u-boot. + **/ +int caam_get_era(void) +{ + struct device_node *caam_node; + for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") { + const uint32_t *prop = (uint32_t *)of_get_property(caam_node, + "fsl,sec-era", + NULL); + return prop ? *prop : -ENOTSUPP; + } + + return -ENOTSUPP; +} +EXPORT_SYMBOL(caam_get_era); + +/* Probe routine for CAAM top (controller) level */ +static int caam_probe(struct platform_device *pdev) +{ + int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; + u64 caam_id; + struct device *dev; + struct device_node *nprop, *np; + struct caam_ctrl __iomem *ctrl; + struct caam_drv_private *ctrlpriv; +#ifdef CONFIG_DEBUG_FS + struct caam_perfmon *perfmon; +#endif + u32 scfgr, comp_params; + u32 cha_vid_ls; + int pg_size; + int BLOCK_OFFSET = 0; + + ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private), + GFP_KERNEL); + if (!ctrlpriv) + return -ENOMEM; + + dev = &pdev->dev; + dev_set_drvdata(dev, ctrlpriv); + ctrlpriv->pdev = pdev; + nprop = pdev->dev.of_node; + + /* Get configuration properties from device tree */ + /* First, get register page */ + ctrl = of_iomap(nprop, 0); + if (ctrl == NULL) { + dev_err(dev, "caam: of_iomap() failed\n"); + return -ENOMEM; + } + /* Finding the page size for using the CTPR_MS register */ + comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms); + pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT; + + /* Allocating the BLOCK_OFFSET based on the supported page size on + * the platform + */ + if (pg_size == 0) + BLOCK_OFFSET = PG_SIZE_4K; + else + BLOCK_OFFSET = PG_SIZE_64K; + + ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl; + ctrlpriv->assure = (struct caam_assurance __force *) + ((uint8_t *)ctrl + + BLOCK_OFFSET * ASSURE_BLOCK_NUMBER + ); + ctrlpriv->deco = (struct caam_deco __force *) + ((uint8_t *)ctrl + + BLOCK_OFFSET * DECO_BLOCK_NUMBER + ); + + /* Get the IRQ of the controller (for security violations only) */ + ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0); + + /* + * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel, + * long pointers in master configuration register + */ + setbits32(&ctrl->mcr, MCFGR_WDENABLE | + (sizeof(dma_addr_t) == sizeof(u64) ? 
MCFGR_LONG_PTR : 0)); + + /* + * Read the Compile Time paramters and SCFGR to determine + * if Virtualization is enabled for this platform + */ + scfgr = rd_reg32(&ctrl->scfgr); + + ctrlpriv->virt_en = 0; + if (comp_params & CTPR_MS_VIRT_EN_INCL) { + /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or + * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1 + */ + if ((comp_params & CTPR_MS_VIRT_EN_POR) || + (!(comp_params & CTPR_MS_VIRT_EN_POR) && + (scfgr & SCFGR_VIRT_EN))) + ctrlpriv->virt_en = 1; + } else { + /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */ + if (comp_params & CTPR_MS_VIRT_EN_POR) + ctrlpriv->virt_en = 1; + } + + if (ctrlpriv->virt_en == 1) + setbits32(&ctrl->jrstart, JRSTART_JR0_START | + JRSTART_JR1_START | JRSTART_JR2_START | + JRSTART_JR3_START); + + if (sizeof(dma_addr_t) == sizeof(u64)) + if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); + else + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)); + else + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + + /* + * Detect and enable JobRs + * First, find out how many ring spec'ed, allocate references + * for all, then go probe each one. + */ + rspec = 0; + for_each_available_child_of_node(nprop, np) + if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || + of_device_is_compatible(np, "fsl,sec4.0-job-ring")) + rspec++; + + ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev, + sizeof(struct platform_device *) * rspec, + GFP_KERNEL); + if (ctrlpriv->jrpdev == NULL) { + iounmap(&ctrl); + return -ENOMEM; + } + + ring = 0; + ctrlpriv->total_jobrs = 0; + for_each_available_child_of_node(nprop, np) + if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || + of_device_is_compatible(np, "fsl,sec4.0-job-ring")) { + ctrlpriv->jrpdev[ring] = + of_platform_device_create(np, NULL, dev); + if (!ctrlpriv->jrpdev[ring]) { + pr_warn("JR%d Platform device creation error\n", + ring); + continue; + } + ctrlpriv->jr[ring] = (struct caam_job_ring __force *) + ((uint8_t *)ctrl + + (ring + JR_BLOCK_NUMBER) * + BLOCK_OFFSET + ); + ctrlpriv->total_jobrs++; + ring++; + } + + /* Check to see if QI present. If so, enable */ + ctrlpriv->qi_present = + !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) & + CTPR_MS_QI_MASK); + if (ctrlpriv->qi_present) { + ctrlpriv->qi = (struct caam_queue_if __force *) + ((uint8_t *)ctrl + + BLOCK_OFFSET * QI_BLOCK_NUMBER + ); + /* This is all that's required to physically enable QI */ + wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN); + } + + /* If no QI and no rings specified, quit and go home */ + if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) { + dev_err(dev, "no queues configured, terminating\n"); + caam_remove(pdev); + return -ENOMEM; + } + + cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls); + + /* + * If SEC has RNG version >= 4 and RNG state handle has not been + * already instantiated, do RNG instantiation + */ + if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) { + ctrlpriv->rng4_sh_init = + rd_reg32(&ctrl->r4tst[0].rdsta); + /* + * If the secure keys (TDKEK, JDKEK, TDSK), were already + * generated, signal this to the function that is instantiating + * the state handles. An error would occur if RNG4 attempts + * to regenerate these keys before the next POR. + */ + gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1; + ctrlpriv->rng4_sh_init &= RDSTA_IFMASK; + do { + int inst_handles = + rd_reg32(&ctrl->r4tst[0].rdsta) & + RDSTA_IFMASK; + /* + * If either SH were instantiated by somebody else + * (e.g. 
u-boot) then it is assumed that the entropy + * parameters are properly set and thus the function + * setting these (kick_trng(...)) is skipped. + * Also, if a handle was instantiated, do not change + * the TRNG parameters. + */ + if (!(ctrlpriv->rng4_sh_init || inst_handles)) { + dev_info(dev, + "Entropy delay = %u\n", + ent_delay); + kick_trng(pdev, ent_delay); + ent_delay += 400; + } + /* + * if instantiate_rng(...) fails, the loop will rerun + * and the kick_trng(...) function will modfiy the + * upper and lower limits of the entropy sampling + * interval, leading to a sucessful initialization of + * the RNG. + */ + ret = instantiate_rng(dev, inst_handles, + gen_sk); + if (ret == -EAGAIN) + /* + * if here, the loop will rerun, + * so don't hog the CPU + */ + cpu_relax(); + } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX)); + if (ret) { + dev_err(dev, "failed to instantiate RNG"); + caam_remove(pdev); + return ret; + } + /* + * Set handles init'ed by this module as the complement of the + * already initialized ones + */ + ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK; + + /* Enable RDB bit so that RNG works faster */ + setbits32(&ctrl->scfgr, SCFGR_RDBENABLE); + } + + /* NOTE: RTIC detection ought to go here, around Si time */ + + caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 | + (u64)rd_reg32(&ctrl->perfmon.caam_id_ls); + + /* Report "alive" for developer to see */ + dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, + caam_get_era()); + dev_info(dev, "job rings = %d, qi = %d\n", + ctrlpriv->total_jobrs, ctrlpriv->qi_present); + +#ifdef CONFIG_DEBUG_FS + /* + * FIXME: needs better naming distinction, as some amalgamation of + * "caam" and nprop->full_name. The OF name isn't distinctive, + * but does separate instances + */ + perfmon = (struct caam_perfmon __force *)&ctrl->perfmon; + + ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL); + ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root); + + /* Controller-level - performance monitor counters */ + ctrlpriv->ctl_rq_dequeued = + debugfs_create_u64("rq_dequeued", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->req_dequeued); + ctrlpriv->ctl_ob_enc_req = + debugfs_create_u64("ob_rq_encrypted", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ob_enc_req); + ctrlpriv->ctl_ib_dec_req = + debugfs_create_u64("ib_rq_decrypted", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ib_dec_req); + ctrlpriv->ctl_ob_enc_bytes = + debugfs_create_u64("ob_bytes_encrypted", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ob_enc_bytes); + ctrlpriv->ctl_ob_prot_bytes = + debugfs_create_u64("ob_bytes_protected", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ob_prot_bytes); + ctrlpriv->ctl_ib_dec_bytes = + debugfs_create_u64("ib_bytes_decrypted", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ib_dec_bytes); + ctrlpriv->ctl_ib_valid_bytes = + debugfs_create_u64("ib_bytes_validated", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ib_valid_bytes); + + /* Controller level - global status values */ + ctrlpriv->ctl_faultaddr = + debugfs_create_u64("fault_addr", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->faultaddr); + ctrlpriv->ctl_faultdetail = + debugfs_create_u32("fault_detail", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->faultdetail); + ctrlpriv->ctl_faultstatus = + debugfs_create_u32("fault_status", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->status); + + /* Internal covering keys 
(useful in non-secure mode only) */ + ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0]; + ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32); + ctrlpriv->ctl_kek = debugfs_create_blob("kek", + S_IRUSR | + S_IRGRP | S_IROTH, + ctrlpriv->ctl, + &ctrlpriv->ctl_kek_wrap); + + ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0]; + ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32); + ctrlpriv->ctl_tkek = debugfs_create_blob("tkek", + S_IRUSR | + S_IRGRP | S_IROTH, + ctrlpriv->ctl, + &ctrlpriv->ctl_tkek_wrap); + + ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0]; + ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32); + ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk", + S_IRUSR | + S_IRGRP | S_IROTH, + ctrlpriv->ctl, + &ctrlpriv->ctl_tdsk_wrap); +#endif + return 0; +} + +static struct of_device_id caam_match[] = { + { + .compatible = "fsl,sec-v4.0", + }, + { + .compatible = "fsl,sec4.0", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, caam_match); + +static struct platform_driver caam_driver = { + .driver = { + .name = "caam", + .of_match_table = caam_match, + }, + .probe = caam_probe, + .remove = caam_remove, +}; + +module_platform_driver(caam_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("FSL CAAM request backend"); +MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h new file mode 100644 index 000000000..cac5402a4 --- /dev/null +++ b/drivers/crypto/caam/ctrl.h @@ -0,0 +1,13 @@ +/* + * CAAM control-plane driver backend public-level include definitions + * + * Copyright 2012 Freescale Semiconductor, Inc. + */ + +#ifndef CTRL_H +#define CTRL_H + +/* Prototypes for backend-level services exposed to APIs */ +int caam_get_era(void); + +#endif /* CTRL_H */ diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h new file mode 100644 index 000000000..d397ff9d5 --- /dev/null +++ b/drivers/crypto/caam/desc.h @@ -0,0 +1,1621 @@ +/* + * CAAM descriptor composition header + * Definitions to support CAAM descriptor instruction generation + * + * Copyright 2008-2011 Freescale Semiconductor, Inc. + */ + +#ifndef DESC_H +#define DESC_H + +struct sec4_sg_entry { + u64 ptr; +#define SEC4_SG_LEN_FIN 0x40000000 +#define SEC4_SG_LEN_EXT 0x80000000 + u32 len; + u8 reserved; + u8 buf_pool_id; + u16 offset; +}; + +/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */ +#define MAX_CAAM_DESCSIZE 64 + +/* Block size of any entity covered/uncovered with a KEK/TKEK */ +#define KEK_BLOCKSIZE 16 + +/* + * Supported descriptor command types as they show up + * inside a descriptor command word. 
+ */ +#define CMD_SHIFT 27 +#define CMD_MASK 0xf8000000 + +#define CMD_KEY (0x00 << CMD_SHIFT) +#define CMD_SEQ_KEY (0x01 << CMD_SHIFT) +#define CMD_LOAD (0x02 << CMD_SHIFT) +#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT) +#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT) +#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT) +#define CMD_STORE (0x0a << CMD_SHIFT) +#define CMD_SEQ_STORE (0x0b << CMD_SHIFT) +#define CMD_FIFO_STORE (0x0c << CMD_SHIFT) +#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT) +#define CMD_MOVE_LEN (0x0e << CMD_SHIFT) +#define CMD_MOVE (0x0f << CMD_SHIFT) +#define CMD_OPERATION (0x10 << CMD_SHIFT) +#define CMD_SIGNATURE (0x12 << CMD_SHIFT) +#define CMD_JUMP (0x14 << CMD_SHIFT) +#define CMD_MATH (0x15 << CMD_SHIFT) +#define CMD_DESC_HDR (0x16 << CMD_SHIFT) +#define CMD_SHARED_DESC_HDR (0x17 << CMD_SHIFT) +#define CMD_SEQ_IN_PTR (0x1e << CMD_SHIFT) +#define CMD_SEQ_OUT_PTR (0x1f << CMD_SHIFT) + +/* General-purpose class selector for all commands */ +#define CLASS_SHIFT 25 +#define CLASS_MASK (0x03 << CLASS_SHIFT) + +#define CLASS_NONE (0x00 << CLASS_SHIFT) +#define CLASS_1 (0x01 << CLASS_SHIFT) +#define CLASS_2 (0x02 << CLASS_SHIFT) +#define CLASS_BOTH (0x03 << CLASS_SHIFT) + +/* + * Descriptor header command constructs + * Covers shared, job, and trusted descriptor headers + */ + +/* + * Do Not Run - marks a descriptor inexecutable if there was + * a preceding error somewhere + */ +#define HDR_DNR 0x01000000 + +/* + * ONE - should always be set. Combination of ONE (always + * set) and ZRO (always clear) forms an endianness sanity check + */ +#define HDR_ONE 0x00800000 +#define HDR_ZRO 0x00008000 + +/* Start Index or SharedDesc Length */ +#define HDR_START_IDX_MASK 0x3f +#define HDR_START_IDX_SHIFT 16 + +/* If shared descriptor header, 6-bit length */ +#define HDR_DESCLEN_SHR_MASK 0x3f + +/* If non-shared header, 7-bit length */ +#define HDR_DESCLEN_MASK 0x7f + +/* This is a TrustedDesc (if not SharedDesc) */ +#define HDR_TRUSTED 0x00004000 + +/* Make into TrustedDesc (if not SharedDesc) */ +#define HDR_MAKE_TRUSTED 0x00002000 + +/* Save context if self-shared (if SharedDesc) */ +#define HDR_SAVECTX 0x00001000 + +/* Next item points to SharedDesc */ +#define HDR_SHARED 0x00001000 + +/* + * Reverse Execution Order - execute JobDesc first, then + * execute SharedDesc (normally SharedDesc goes first). 
+ */ +#define HDR_REVERSE 0x00000800 + +/* Propogate DNR property to SharedDesc */ +#define HDR_PROP_DNR 0x00000800 + +/* JobDesc/SharedDesc share property */ +#define HDR_SD_SHARE_MASK 0x03 +#define HDR_SD_SHARE_SHIFT 8 +#define HDR_JD_SHARE_MASK 0x07 +#define HDR_JD_SHARE_SHIFT 8 + +#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT) +#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT) +#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT) +#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT) +#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT) + +/* JobDesc/SharedDesc descriptor length */ +#define HDR_JD_LENGTH_MASK 0x7f +#define HDR_SD_LENGTH_MASK 0x3f + +/* + * KEY/SEQ_KEY Command Constructs + */ + +/* Key Destination Class: 01 = Class 1, 02 - Class 2 */ +#define KEY_DEST_CLASS_SHIFT 25 /* use CLASS_1 or CLASS_2 */ +#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT) + +/* Scatter-Gather Table/Variable Length Field */ +#define KEY_SGF 0x01000000 +#define KEY_VLF 0x01000000 + +/* Immediate - Key follows command in the descriptor */ +#define KEY_IMM 0x00800000 + +/* + * Encrypted - Key is encrypted either with the KEK, or + * with the TDKEK if TK is set + */ +#define KEY_ENC 0x00400000 + +/* + * No Write Back - Do not allow key to be FIFO STOREd + */ +#define KEY_NWB 0x00200000 + +/* + * Enhanced Encryption of Key + */ +#define KEY_EKT 0x00100000 + +/* + * Encrypted with Trusted Key + */ +#define KEY_TK 0x00008000 + +/* + * KDEST - Key Destination: 0 - class key register, + * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split-key + */ +#define KEY_DEST_SHIFT 16 +#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT) + +#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT) +#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT) +#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT) +#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT) + +/* Length in bytes */ +#define KEY_LENGTH_MASK 0x000003ff + +/* + * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs + */ + +/* + * Load/Store Destination: 0 = class independent CCB, + * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO + */ +#define LDST_CLASS_SHIFT 25 +#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT) +#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT) +#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT) +#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT) +#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT) + +/* Scatter-Gather Table/Variable Length Field */ +#define LDST_SGF 0x01000000 +#define LDST_VLF LDST_SGF + +/* Immediate - Key follows this command in descriptor */ +#define LDST_IMM_MASK 1 +#define LDST_IMM_SHIFT 23 +#define LDST_IMM (LDST_IMM_MASK << LDST_IMM_SHIFT) + +/* SRC/DST - Destination for LOAD, Source for STORE */ +#define LDST_SRCDST_SHIFT 16 +#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT) + +#define LDST_SRCDST_BYTE_CONTEXT (0x20 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_BYTE_KEY (0x40 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_BYTE_INFIFO (0x7c << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_BYTE_OUTFIFO (0x7e << LDST_SRCDST_SHIFT) + +#define LDST_SRCDST_WORD_MODE_REG (0x00 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_KEYSZ_REG (0x01 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DATASZ_REG (0x02 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_ICVSZ_REG (0x03 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_CHACTRL (0x06 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_IRQCTRL (0x07 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << 
LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_CLRW (0x08 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_STAT (0x09 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_CLASS_CTX (0x20 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DESCBUF_JOB (0x41 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT) + +/* Offset in source/destination */ +#define LDST_OFFSET_SHIFT 8 +#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT) + +/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */ +/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */ +#define LDOFF_CHG_SHARE_SHIFT 0 +#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT) +#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT) +#define LDOFF_CHG_SHARE_OK_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT) +#define LDOFF_CHG_SHARE_OK_NO_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT) + +#define LDOFF_ENABLE_AUTO_NFIFO (1 << 2) +#define LDOFF_DISABLE_AUTO_NFIFO (1 << 3) + +#define LDOFF_CHG_NONSEQLIODN_SHIFT 4 +#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT) +#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT) +#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT) +#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT) + +#define LDOFF_CHG_SEQLIODN_SHIFT 6 +#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT) +#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT) +#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT) +#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT) + +/* Data length in bytes */ +#define LDST_LEN_SHIFT 0 +#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT) + +/* Special Length definitions when dst=deco-ctrl */ +#define LDLEN_ENABLE_OSL_COUNT (1 << 7) +#define LDLEN_RST_CHA_OFIFO_PTR (1 << 6) +#define LDLEN_RST_OFIFO (1 << 5) +#define LDLEN_SET_OFIFO_OFF_VALID (1 << 4) +#define LDLEN_SET_OFIFO_OFF_RSVD (1 << 3) +#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0 +#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT) + +/* + * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE + * Command Constructs + */ + +/* + * Load Destination: 0 = skip (SEQ_FIFO_LOAD only), + * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both + * Store Source: 0 = normal, 1 = Class1key, 2 = Class2key + */ +#define FIFOLD_CLASS_SHIFT 25 +#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT) +#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT) +#define FIFOLD_CLASS_CLASS1 (0x01 << 
FIFOLD_CLASS_SHIFT) +#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT) +#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT) + +#define FIFOST_CLASS_SHIFT 25 +#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT) +#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT) +#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT) +#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT) + +/* + * Scatter-Gather Table/Variable Length Field + * If set for FIFO_LOAD, refers to a SG table. Within + * SEQ_FIFO_LOAD, is variable input sequence + */ +#define FIFOLDST_SGF_SHIFT 24 +#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT) +#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT) +#define FIFOLDST_SGF (1 << FIFOLDST_SGF_SHIFT) +#define FIFOLDST_VLF (1 << FIFOLDST_SGF_SHIFT) + +/* Immediate - Data follows command in descriptor */ +#define FIFOLD_IMM_SHIFT 23 +#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT) +#define FIFOLD_IMM (1 << FIFOLD_IMM_SHIFT) + +/* Continue - Not the last FIFO store to come */ +#define FIFOST_CONT_SHIFT 23 +#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT) + +/* + * Extended Length - use 32-bit extended length that + * follows the pointer field. Illegal with IMM set + */ +#define FIFOLDST_EXT_SHIFT 22 +#define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT) +#define FIFOLDST_EXT (1 << FIFOLDST_EXT_SHIFT) + +/* Input data type.*/ +#define FIFOLD_TYPE_SHIFT 16 +#define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */ +#define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT) + +/* PK types */ +#define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT) + +/* Other types. 
Need to OR in last/flush bits as desired */ +#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT) + +/* Last/Flush bits for use with "other" types above */ +#define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_NOINFOFIFO (0x0F << FIFOLD_TYPE_SHIFT) + +#define FIFOLDST_LEN_MASK 0xffff +#define FIFOLDST_EXT_LEN_MASK 0xffffffff + +/* Output data types */ +#define FIFOST_TYPE_SHIFT 16 +#define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT) + +#define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT) + +/* + * OPERATION Command Constructs + */ + +/* Operation type selectors - OP TYPE */ +#define OP_TYPE_SHIFT 24 +#define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT) + +#define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT) +#define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT) +#define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT) +#define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT) +#define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT) +#define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT) + +/* ProtocolID selectors - PROTID */ +#define OP_PCLID_SHIFT 16 +#define OP_PCLID_MASK (0xff << 16) + +/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */ +#define OP_PCLID_IKEV1_PRF (0x01 << OP_PCLID_SHIFT) +#define OP_PCLID_IKEV2_PRF (0x02 << OP_PCLID_SHIFT) +#define OP_PCLID_SSL30_PRF (0x08 << OP_PCLID_SHIFT) +#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT) 
+#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT) +#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT) +#define OP_PCLID_PRF (0x06 << OP_PCLID_SHIFT) +#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT) +#define OP_PCLID_SECRETKEY (0x11 << OP_PCLID_SHIFT) +#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT) +#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT) +#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT) + +/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */ +#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT) +#define OP_PCLID_SRTP (0x02 << OP_PCLID_SHIFT) +#define OP_PCLID_MACSEC (0x03 << OP_PCLID_SHIFT) +#define OP_PCLID_WIFI (0x04 << OP_PCLID_SHIFT) +#define OP_PCLID_WIMAX (0x05 << OP_PCLID_SHIFT) +#define OP_PCLID_SSL30 (0x08 << OP_PCLID_SHIFT) +#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT) +#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT) +#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT) +#define OP_PCLID_DTLS (0x0c << OP_PCLID_SHIFT) + +/* + * ProtocolInfo selectors + */ +#define OP_PCLINFO_MASK 0xffff + +/* for OP_PCLID_IPSEC */ +#define OP_PCL_IPSEC_CIPHER_MASK 0xff00 +#define OP_PCL_IPSEC_AUTH_MASK 0x00ff + +#define OP_PCL_IPSEC_DES_IV64 0x0100 +#define OP_PCL_IPSEC_DES 0x0200 +#define OP_PCL_IPSEC_3DES 0x0300 +#define OP_PCL_IPSEC_AES_CBC 0x0c00 +#define OP_PCL_IPSEC_AES_CTR 0x0d00 +#define OP_PCL_IPSEC_AES_XTS 0x1600 +#define OP_PCL_IPSEC_AES_CCM8 0x0e00 +#define OP_PCL_IPSEC_AES_CCM12 0x0f00 +#define OP_PCL_IPSEC_AES_CCM16 0x1000 +#define OP_PCL_IPSEC_AES_GCM8 0x1200 +#define OP_PCL_IPSEC_AES_GCM12 0x1300 +#define OP_PCL_IPSEC_AES_GCM16 0x1400 + +#define OP_PCL_IPSEC_HMAC_NULL 0x0000 +#define OP_PCL_IPSEC_HMAC_MD5_96 0x0001 +#define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002 +#define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005 +#define OP_PCL_IPSEC_HMAC_MD5_128 0x0006 +#define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007 +#define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c +#define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d +#define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e + +/* For SRTP - OP_PCLID_SRTP */ +#define OP_PCL_SRTP_CIPHER_MASK 0xff00 +#define OP_PCL_SRTP_AUTH_MASK 0x00ff + +#define OP_PCL_SRTP_AES_CTR 0x0d00 + +#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007 + +/* For SSL 3.0 - OP_PCLID_SSL30 */ +#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f +#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030 +#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031 +#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032 +#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033 +#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034 +#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c +#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090 +#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094 +#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004 +#define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009 +#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e +#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013 +#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018 +#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d +#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e +#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f + +#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035 +#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036 +#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037 +#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038 +#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039 +#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a +#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d +#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091 +#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095 +#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005 +#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a 
+#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f +#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014 +#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019 +#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020 +#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021 +#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022 + +#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023 + +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093 +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010 +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013 +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016 +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003 +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008 +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012 +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017 +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c + +#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029 + +#define OP_PCL_SSL30_DES_CBC_MD5 0x0022 + +#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008 +#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b +#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e +#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011 +#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014 +#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019 +#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026 + +#define OP_PCL_SSL30_DES_CBC_SHA 0x001e +#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009 +#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c +#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f +#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012 +#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015 +#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a + +#define OP_PCL_SSL30_RC4_128_MD5 0x0024 +#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004 +#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018 + +#define OP_PCL_SSL30_RC4_40_MD5 0x002b +#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003 +#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017 + +#define OP_PCL_SSL30_RC4_128_SHA 0x0020 +#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a +#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e +#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092 +#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005 +#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002 +#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007 +#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c +#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011 +#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016 + +#define OP_PCL_SSL30_RC4_40_SHA 0x0028 + + +/* For TLS 1.0 - OP_PCLID_TLS10 */ +#define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f +#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030 +#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031 +#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032 +#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033 +#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034 +#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c +#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090 +#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094 +#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004 +#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009 +#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e +#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013 +#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018 +#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d +#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e +#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f + +#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035 +#define 
OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036 +#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037 +#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038 +#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039 +#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a +#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d +#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091 +#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095 +#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005 +#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a +#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f +#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014 +#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019 +#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020 +#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021 +#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022 + +/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */ + +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c + +#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029 + +#define OP_PCL_TLS10_DES_CBC_MD5 0x0022 + +#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008 +#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b +#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e +#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011 +#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014 +#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019 +#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026 + + +#define OP_PCL_TLS10_DES_CBC_SHA 0x001e +#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009 +#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c +#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f +#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012 +#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015 +#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a + +#define OP_PCL_TLS10_RC4_128_MD5 0x0024 +#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004 +#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018 + +#define OP_PCL_TLS10_RC4_40_MD5 0x002b +#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003 +#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017 + +#define OP_PCL_TLS10_RC4_128_SHA 0x0020 +#define OP_PCL_TLS10_RC4_128_SHA_2 0x008a +#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e +#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092 +#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005 +#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002 +#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007 +#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c +#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011 +#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016 + +#define OP_PCL_TLS10_RC4_40_SHA 0x0028 + +#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35 +#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80 +#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84 +#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86 
+#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83 +#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85 +#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20 +#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24 +#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26 +#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23 +#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25 +#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60 +#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64 +#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66 +#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63 +#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65 + + + +/* For TLS 1.1 - OP_PCLID_TLS11 */ +#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f +#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030 +#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031 +#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032 +#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033 +#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034 +#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c +#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090 +#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094 +#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004 +#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009 +#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e +#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013 +#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018 +#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d +#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e +#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f + +#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035 +#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036 +#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037 +#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038 +#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039 +#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a +#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d +#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091 +#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095 +#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005 +#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a +#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f +#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014 +#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019 +#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020 +#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021 +#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022 + +/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */ + +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c + +#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029 + +#define OP_PCL_TLS11_DES_CBC_MD5 0x0022 + +#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008 +#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b +#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e +#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011 +#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014 +#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019 +#define OP_PCL_TLS11_DES40_CBC_SHA_7 
0x0026 + +#define OP_PCL_TLS11_DES_CBC_SHA 0x001e +#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009 +#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c +#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f +#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012 +#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015 +#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a + +#define OP_PCL_TLS11_RC4_128_MD5 0x0024 +#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004 +#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018 + +#define OP_PCL_TLS11_RC4_40_MD5 0x002b +#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003 +#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017 + +#define OP_PCL_TLS11_RC4_128_SHA 0x0020 +#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a +#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e +#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092 +#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005 +#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002 +#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007 +#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c +#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011 +#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016 + +#define OP_PCL_TLS11_RC4_40_SHA 0x0028 + +#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35 +#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80 +#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84 +#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86 +#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83 +#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85 +#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20 +#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24 +#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26 +#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23 +#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25 +#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60 +#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64 +#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66 +#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63 +#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65 + + +/* For TLS 1.2 - OP_PCLID_TLS12 */ +#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f +#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030 +#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031 +#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032 +#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033 +#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034 +#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c +#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090 +#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094 +#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004 +#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009 +#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e +#define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013 +#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018 +#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d +#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e +#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f + +#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035 +#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036 +#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037 +#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038 +#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039 +#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a +#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d +#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091 +#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095 +#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005 +#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a +#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f +#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014 +#define OP_PCL_TLS12_AES_256_CBC_SHA_14 
0xc019 +#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020 +#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021 +#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022 + +/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */ + +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c + +#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029 + +#define OP_PCL_TLS12_DES_CBC_MD5 0x0022 + +#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008 +#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b +#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e +#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011 +#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014 +#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019 +#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026 + +#define OP_PCL_TLS12_DES_CBC_SHA 0x001e +#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009 +#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c +#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f +#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012 +#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015 +#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a + +#define OP_PCL_TLS12_RC4_128_MD5 0x0024 +#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004 +#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018 + +#define OP_PCL_TLS12_RC4_40_MD5 0x002b +#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003 +#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017 + +#define OP_PCL_TLS12_RC4_128_SHA 0x0020 +#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a +#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e +#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092 +#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005 +#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002 +#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007 +#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c +#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011 +#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016 + +#define OP_PCL_TLS12_RC4_40_SHA 0x0028 + +/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */ +#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e +#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f +#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040 +#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067 +#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c + +/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */ +#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068 +#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069 +#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a +#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b +#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d + +/* AEAD_AES_xxx_CCM/GCM remain to be defined... 
*/ + +#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35 +#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80 +#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84 +#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86 +#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83 +#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85 +#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20 +#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24 +#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26 +#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23 +#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25 +#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60 +#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64 +#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66 +#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63 +#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65 + +/* For DTLS - OP_PCLID_DTLS */ + +#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f +#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030 +#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031 +#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032 +#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033 +#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034 +#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c +#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090 +#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094 +#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004 +#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009 +#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e +#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013 +#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018 +#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d +#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e +#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f + +#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035 +#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036 +#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037 +#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038 +#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039 +#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a +#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d +#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091 +#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095 +#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005 +#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a +#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f +#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014 +#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019 +#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020 +#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021 +#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022 + +/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */ + +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c + +#define 
OP_PCL_DTLS_DES40_CBC_MD5 0x0029 + +#define OP_PCL_DTLS_DES_CBC_MD5 0x0022 + +#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008 +#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b +#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e +#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011 +#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014 +#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019 +#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026 + + +#define OP_PCL_DTLS_DES_CBC_SHA 0x001e +#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009 +#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c +#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f +#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012 +#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015 +#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a + + +#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35 +#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80 +#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84 +#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86 +#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83 +#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85 +#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20 +#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24 +#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26 +#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23 +#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25 +#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60 +#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64 +#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66 +#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63 +#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65 + +/* 802.16 WiMAX protinfos */ +#define OP_PCL_WIMAX_OFDM 0x0201 +#define OP_PCL_WIMAX_OFDMA 0x0231 + +/* 802.11 WiFi protinfos */ +#define OP_PCL_WIFI 0xac04 + +/* MacSec protinfos */ +#define OP_PCL_MACSEC 0x0001 + +/* PKI unidirectional protocol protinfo bits */ +#define OP_PCL_PKPROT_TEST 0x0008 +#define OP_PCL_PKPROT_DECRYPT 0x0004 +#define OP_PCL_PKPROT_ECC 0x0002 +#define OP_PCL_PKPROT_F2M 0x0001 + +/* For non-protocol/alg-only op commands */ +#define OP_ALG_TYPE_SHIFT 24 +#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT) +#define OP_ALG_TYPE_CLASS1 2 +#define OP_ALG_TYPE_CLASS2 4 + +#define OP_ALG_ALGSEL_SHIFT 16 +#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_AES (0x10 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SHA256 (0x43 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SHA384 (0x44 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SHA512 (0x45 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_RNG (0x50 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SNOW (0x60 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SNOW_F8 (0x60 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT) + +#define OP_ALG_AAI_SHIFT 4 +#define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT) + +/* blockcipher AAI set */ +#define OP_ALG_AAI_CTR_MOD128 (0x00 << OP_ALG_AAI_SHIFT) +#define 
OP_ALG_AAI_CTR_MOD8 (0x01 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD16 (0x02 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD24 (0x03 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD32 (0x04 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD40 (0x05 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD48 (0x06 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD56 (0x07 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD64 (0x08 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD72 (0x09 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD80 (0x0a << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD88 (0x0b << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD96 (0x0c << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD104 (0x0d << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD112 (0x0e << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD120 (0x0f << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CBC (0x10 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_ECB (0x20 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CFB (0x30 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_OFB (0x40 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_XTS (0x50 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CMAC (0x60 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_XCBC_MAC (0x70 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CCM (0x80 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_GCM (0x90 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CBC_XCBCMAC (0xa0 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_XCBCMAC (0xb0 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CHECKODD (0x80 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_DK (0x100 << OP_ALG_AAI_SHIFT) + +/* randomizer AAI set */ +#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT) + +/* RNG4 AAI set */ +#define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT) + +/* hmac/smac AAI set */ +#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_SMAC (0x02 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_HMAC_PRECOMP (0x04 << OP_ALG_AAI_SHIFT) + +/* CRC AAI set*/ +#define OP_ALG_AAI_802 (0x01 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_3385 (0x02 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CUST_POLY (0x04 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_DIS (0x10 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_DOS (0x20 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_DOC (0x40 << OP_ALG_AAI_SHIFT) + +/* Kasumi/SNOW AAI set */ +#define OP_ALG_AAI_F8 (0xc0 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_F9 (0xc8 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT) + +#define OP_ALG_AS_SHIFT 2 +#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT) +#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT) +#define OP_ALG_AS_INIT (1 << OP_ALG_AS_SHIFT) +#define OP_ALG_AS_FINALIZE (2 << OP_ALG_AS_SHIFT) +#define OP_ALG_AS_INITFINAL (3 << OP_ALG_AS_SHIFT) + +#define OP_ALG_ICV_SHIFT 1 +#define OP_ALG_ICV_MASK (1 << OP_ALG_ICV_SHIFT) +#define OP_ALG_ICV_OFF (0 << OP_ALG_ICV_SHIFT) +#define OP_ALG_ICV_ON (1 << OP_ALG_ICV_SHIFT) + +#define OP_ALG_DIR_SHIFT 0 +#define OP_ALG_DIR_MASK 1 +#define OP_ALG_DECRYPT 0 +#define OP_ALG_ENCRYPT 1 + +/* PKHA algorithm type set */ +#define OP_ALG_PK 0x00800000 +#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */ + +/* PKHA mode clear memory 
functions */ +#define OP_ALG_PKMODE_A_RAM 0x80000 +#define OP_ALG_PKMODE_B_RAM 0x40000 +#define OP_ALG_PKMODE_E_RAM 0x20000 +#define OP_ALG_PKMODE_N_RAM 0x10000 +#define OP_ALG_PKMODE_CLEARMEM 0x00001 + +/* PKHA mode modular-arithmetic functions */ +#define OP_ALG_PKMODE_MOD_IN_MONTY 0x80000 +#define OP_ALG_PKMODE_MOD_OUT_MONTY 0x40000 +#define OP_ALG_PKMODE_MOD_F2M 0x20000 +#define OP_ALG_PKMODE_MOD_R2_IN 0x10000 +#define OP_ALG_PKMODE_PRJECTV 0x00800 +#define OP_ALG_PKMODE_TIME_EQ 0x400 +#define OP_ALG_PKMODE_OUT_B 0x000 +#define OP_ALG_PKMODE_OUT_A 0x100 +#define OP_ALG_PKMODE_MOD_ADD 0x002 +#define OP_ALG_PKMODE_MOD_SUB_AB 0x003 +#define OP_ALG_PKMODE_MOD_SUB_BA 0x004 +#define OP_ALG_PKMODE_MOD_MULT 0x005 +#define OP_ALG_PKMODE_MOD_EXPO 0x006 +#define OP_ALG_PKMODE_MOD_REDUCT 0x007 +#define OP_ALG_PKMODE_MOD_INV 0x008 +#define OP_ALG_PKMODE_MOD_ECC_ADD 0x009 +#define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a +#define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b +#define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c +#define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d +#define OP_ALG_PKMODE_MOD_GCD 0x00e +#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f + +/* PKHA mode copy-memory functions */ +#define OP_ALG_PKMODE_SRC_REG_SHIFT 13 +#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT) +#define OP_ALG_PKMODE_DST_REG_SHIFT 10 +#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT) +#define OP_ALG_PKMODE_SRC_SEG_SHIFT 8 +#define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT) +#define OP_ALG_PKMODE_DST_SEG_SHIFT 6 +#define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT) + +#define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT) +#define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT) +#define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT) +#define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT) +#define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT) +#define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT) +#define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT) +#define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT) +#define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT) +#define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT) +#define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT) +#define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT) +#define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT) +#define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT) +#define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT) +#define OP_ALG_PKMODE_CPYMEM_N_SZ 0x80 +#define OP_ALG_PKMODE_CPYMEM_SRC_SZ 0x81 + +/* + * SEQ_IN_PTR Command Constructs + */ + +/* Release Buffers */ +#define SQIN_RBS 0x04000000 + +/* Sequence pointer is really a descriptor */ +#define SQIN_INL 0x02000000 + +/* Sequence pointer is a scatter-gather table */ +#define SQIN_SGF 0x01000000 + +/* Appends to a previous pointer */ +#define SQIN_PRE 0x00800000 + +/* Use extended length following pointer */ +#define SQIN_EXT 0x00400000 + +/* Restore sequence with pointer/length */ +#define SQIN_RTO 0x00200000 + +/* Replace job descriptor */ +#define SQIN_RJD 0x00100000 + +#define SQIN_LEN_SHIFT 0 +#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT) + +/* + * SEQ_OUT_PTR Command Constructs + */ + +/* Sequence pointer is a scatter-gather table */ +#define SQOUT_SGF 0x01000000 + +/* Appends to a previous pointer */ +#define SQOUT_PRE SQIN_PRE + +/* Restore sequence with 
pointer/length */ +#define SQOUT_RTO SQIN_RTO + +/* Use extended length following pointer */ +#define SQOUT_EXT 0x00400000 + +#define SQOUT_LEN_SHIFT 0 +#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT) + + +/* + * SIGNATURE Command Constructs + */ + +/* TYPE field is all that's relevant */ +#define SIGN_TYPE_SHIFT 16 +#define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT) + +#define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT) +#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT) +#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT) +#define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT) +#define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT) +#define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT) + +/* + * MOVE Command Constructs + */ + +#define MOVE_AUX_SHIFT 25 +#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT) +#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT) +#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT) + +#define MOVE_WAITCOMP_SHIFT 24 +#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT) +#define MOVE_WAITCOMP (1 << MOVE_WAITCOMP_SHIFT) + +#define MOVE_SRC_SHIFT 20 +#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT) +#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT) +#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT) +#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT) +#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT) +#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT) +#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT) +#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT) +#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT) +#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT) +#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT) + +#define MOVE_DEST_SHIFT 16 +#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT) +#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT) +#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT) +#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT) +#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT) +#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT) +#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT) +#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT) +#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT) +#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT) +#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT) +#define MOVE_DEST_INFIFO_NOINFO (0x0a << MOVE_DEST_SHIFT) +#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT) +#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT) +#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT) + +#define MOVE_OFFSET_SHIFT 8 +#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT) + +#define MOVE_LEN_SHIFT 0 +#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT) + +#define MOVELEN_MRSEL_SHIFT 0 +#define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT) + +/* + * MATH Command Constructs + */ + +#define MATH_IFB_SHIFT 26 +#define MATH_IFB_MASK (1 << MATH_IFB_SHIFT) +#define MATH_IFB (1 << MATH_IFB_SHIFT) + +#define MATH_NFU_SHIFT 25 +#define MATH_NFU_MASK (1 << MATH_NFU_SHIFT) +#define MATH_NFU (1 << MATH_NFU_SHIFT) + +#define MATH_STL_SHIFT 24 +#define MATH_STL_MASK (1 << MATH_STL_SHIFT) +#define MATH_STL (1 << MATH_STL_SHIFT) + +/* Function selectors */ +#define MATH_FUN_SHIFT 20 +#define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT) +#define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT) +#define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT) +#define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT) +#define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT) +#define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT) +#define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT) +#define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT) +#define MATH_FUN_LSHIFT (0x07 << 
MATH_FUN_SHIFT) +#define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT) +#define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT) +#define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT) + +/* Source 0 selectors */ +#define MATH_SRC0_SHIFT 16 +#define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT) +#define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT) +#define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT) +#define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT) +#define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT) +#define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT) +#define MATH_SRC0_DPOVRD (0x07 << MATH_SRC0_SHIFT) +#define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT) +#define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT) +#define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT) +#define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT) +#define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT) + +/* Source 1 selectors */ +#define MATH_SRC1_SHIFT 12 +#define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT) +#define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT) +#define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT) +#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT) +#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT) +#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT) +#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT) +#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT) +#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT) +#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT) + +/* Destination selectors */ +#define MATH_DEST_SHIFT 8 +#define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT) +#define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT) +#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT) +#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT) +#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT) +#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT) +#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT) +#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT) +#define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT) +#define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT) + +/* Length selectors */ +#define MATH_LEN_SHIFT 0 +#define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT) +#define MATH_LEN_1BYTE 0x01 +#define MATH_LEN_2BYTE 0x02 +#define MATH_LEN_4BYTE 0x04 +#define MATH_LEN_8BYTE 0x08 + +/* + * JUMP Command Constructs + */ + +#define JUMP_CLASS_SHIFT 25 +#define JUMP_CLASS_MASK (3 << JUMP_CLASS_SHIFT) +#define JUMP_CLASS_NONE 0 +#define JUMP_CLASS_CLASS1 (1 << JUMP_CLASS_SHIFT) +#define JUMP_CLASS_CLASS2 (2 << JUMP_CLASS_SHIFT) +#define JUMP_CLASS_BOTH (3 << JUMP_CLASS_SHIFT) + +#define JUMP_JSL_SHIFT 24 +#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT) +#define JUMP_JSL (1 << JUMP_JSL_SHIFT) + +#define JUMP_TYPE_SHIFT 22 +#define JUMP_TYPE_MASK (0x03 << JUMP_TYPE_SHIFT) +#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT) +#define JUMP_TYPE_NONLOCAL (0x01 << JUMP_TYPE_SHIFT) +#define JUMP_TYPE_HALT (0x02 << JUMP_TYPE_SHIFT) +#define JUMP_TYPE_HALT_USER (0x03 << JUMP_TYPE_SHIFT) + +#define JUMP_TEST_SHIFT 16 +#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT) +#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT) +#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT) +#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT) +#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT) + +/* Condition codes. 
JSL bit is factored in */ +#define JUMP_COND_SHIFT 8 +#define JUMP_COND_MASK (0x100ff << JUMP_COND_SHIFT) +#define JUMP_COND_PK_0 (0x80 << JUMP_COND_SHIFT) +#define JUMP_COND_PK_GCD_1 (0x40 << JUMP_COND_SHIFT) +#define JUMP_COND_PK_PRIME (0x20 << JUMP_COND_SHIFT) +#define JUMP_COND_MATH_N (0x08 << JUMP_COND_SHIFT) +#define JUMP_COND_MATH_Z (0x04 << JUMP_COND_SHIFT) +#define JUMP_COND_MATH_C (0x02 << JUMP_COND_SHIFT) +#define JUMP_COND_MATH_NV (0x01 << JUMP_COND_SHIFT) + +#define JUMP_COND_JRP ((0x80 << JUMP_COND_SHIFT) | JUMP_JSL) +#define JUMP_COND_SHRD ((0x40 << JUMP_COND_SHIFT) | JUMP_JSL) +#define JUMP_COND_SELF ((0x20 << JUMP_COND_SHIFT) | JUMP_JSL) +#define JUMP_COND_CALM ((0x10 << JUMP_COND_SHIFT) | JUMP_JSL) +#define JUMP_COND_NIP ((0x08 << JUMP_COND_SHIFT) | JUMP_JSL) +#define JUMP_COND_NIFP ((0x04 << JUMP_COND_SHIFT) | JUMP_JSL) +#define JUMP_COND_NOP ((0x02 << JUMP_COND_SHIFT) | JUMP_JSL) +#define JUMP_COND_NCP ((0x01 << JUMP_COND_SHIFT) | JUMP_JSL) + +#define JUMP_OFFSET_SHIFT 0 +#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT) + +/* + * NFIFO ENTRY + * Data Constructs + * + */ +#define NFIFOENTRY_DEST_SHIFT 30 +#define NFIFOENTRY_DEST_MASK (3 << NFIFOENTRY_DEST_SHIFT) +#define NFIFOENTRY_DEST_DECO (0 << NFIFOENTRY_DEST_SHIFT) +#define NFIFOENTRY_DEST_CLASS1 (1 << NFIFOENTRY_DEST_SHIFT) +#define NFIFOENTRY_DEST_CLASS2 (2 << NFIFOENTRY_DEST_SHIFT) +#define NFIFOENTRY_DEST_BOTH (3 << NFIFOENTRY_DEST_SHIFT) + +#define NFIFOENTRY_LC2_SHIFT 29 +#define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT) +#define NFIFOENTRY_LC2 (1 << NFIFOENTRY_LC2_SHIFT) + +#define NFIFOENTRY_LC1_SHIFT 28 +#define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT) +#define NFIFOENTRY_LC1 (1 << NFIFOENTRY_LC1_SHIFT) + +#define NFIFOENTRY_FC2_SHIFT 27 +#define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT) +#define NFIFOENTRY_FC2 (1 << NFIFOENTRY_FC2_SHIFT) + +#define NFIFOENTRY_FC1_SHIFT 26 +#define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT) +#define NFIFOENTRY_FC1 (1 << NFIFOENTRY_FC1_SHIFT) + +#define NFIFOENTRY_STYPE_SHIFT 24 +#define NFIFOENTRY_STYPE_MASK (3 << NFIFOENTRY_STYPE_SHIFT) +#define NFIFOENTRY_STYPE_DFIFO (0 << NFIFOENTRY_STYPE_SHIFT) +#define NFIFOENTRY_STYPE_OFIFO (1 << NFIFOENTRY_STYPE_SHIFT) +#define NFIFOENTRY_STYPE_PAD (2 << NFIFOENTRY_STYPE_SHIFT) +#define NFIFOENTRY_STYPE_SNOOP (3 << NFIFOENTRY_STYPE_SHIFT) + +#define NFIFOENTRY_DTYPE_SHIFT 20 +#define NFIFOENTRY_DTYPE_MASK (0xF << NFIFOENTRY_DTYPE_SHIFT) + +#define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT) + +#define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT) +#define 
NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT) + + +#define NFIFOENTRY_BND_SHIFT 19 +#define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT) +#define NFIFOENTRY_BND (1 << NFIFOENTRY_BND_SHIFT) + +#define NFIFOENTRY_PTYPE_SHIFT 16 +#define NFIFOENTRY_PTYPE_MASK (0x7 << NFIFOENTRY_PTYPE_SHIFT) + +#define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT) +#define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT) +#define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT) +#define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT) +#define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT) +#define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT) +#define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT) +#define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT) + +#define NFIFOENTRY_OC_SHIFT 15 +#define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT) +#define NFIFOENTRY_OC (1 << NFIFOENTRY_OC_SHIFT) + +#define NFIFOENTRY_AST_SHIFT 14 +#define NFIFOENTRY_AST_MASK (1 << NFIFOENTRY_OC_SHIFT) +#define NFIFOENTRY_AST (1 << NFIFOENTRY_OC_SHIFT) + +#define NFIFOENTRY_BM_SHIFT 11 +#define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT) +#define NFIFOENTRY_BM (1 << NFIFOENTRY_BM_SHIFT) + +#define NFIFOENTRY_PS_SHIFT 10 +#define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT) +#define NFIFOENTRY_PS (1 << NFIFOENTRY_PS_SHIFT) + +#define NFIFOENTRY_DLEN_SHIFT 0 +#define NFIFOENTRY_DLEN_MASK (0xFFF << NFIFOENTRY_DLEN_SHIFT) + +#define NFIFOENTRY_PLEN_SHIFT 0 +#define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT) + +/* Append Load Immediate Command */ +#define FD_CMD_APPEND_LOAD_IMMEDIATE 0x80000000 + +/* Set SEQ LIODN equal to the Non-SEQ LIODN for the job */ +#define FD_CMD_SET_SEQ_LIODN_EQUAL_NONSEQ_LIODN 0x40000000 + +/* Frame Descriptor Command for Replacement Job Descriptor */ +#define FD_CMD_REPLACE_JOB_DESC 0x20000000 + +#endif /* DESC_H */ diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h new file mode 100644 index 000000000..9f79fd7bd --- /dev/null +++ b/drivers/crypto/caam/desc_constr.h @@ -0,0 +1,390 @@ +/* + * caam descriptor construction helper functions + * + * Copyright 2008-2012 Freescale Semiconductor, Inc. 
+ */ + +#include "desc.h" + +#define IMMEDIATE (1 << 23) +#define CAAM_CMD_SZ sizeof(u32) +#define CAAM_PTR_SZ sizeof(dma_addr_t) +#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE) +#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3) + +#ifdef DEBUG +#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ + &__func__[sizeof("append")]); } while (0) +#else +#define PRINT_POS +#endif + +#define SET_OK_NO_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \ + LDST_SRCDST_WORD_DECOCTRL | \ + (LDOFF_CHG_SHARE_OK_NO_PROP << \ + LDST_OFFSET_SHIFT)) +#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \ + LDST_SRCDST_WORD_DECOCTRL | \ + (LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) +#define ENABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \ + LDST_SRCDST_WORD_DECOCTRL | \ + (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) + +static inline int desc_len(u32 *desc) +{ + return *desc & HDR_DESCLEN_MASK; +} + +static inline int desc_bytes(void *desc) +{ + return desc_len(desc) * CAAM_CMD_SZ; +} + +static inline u32 *desc_end(u32 *desc) +{ + return desc + desc_len(desc); +} + +static inline void *sh_desc_pdb(u32 *desc) +{ + return desc + 1; +} + +static inline void init_desc(u32 *desc, u32 options) +{ + *desc = (options | HDR_ONE) + 1; +} + +static inline void init_sh_desc(u32 *desc, u32 options) +{ + PRINT_POS; + init_desc(desc, CMD_SHARED_DESC_HDR | options); +} + +static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes) +{ + u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; + + init_sh_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT) + pdb_len) | + options); +} + +static inline void init_job_desc(u32 *desc, u32 options) +{ + init_desc(desc, CMD_DESC_HDR | options); +} + +static inline void append_ptr(u32 *desc, dma_addr_t ptr) +{ + dma_addr_t *offset = (dma_addr_t *)desc_end(desc); + + *offset = ptr; + + (*desc) += CAAM_PTR_SZ / CAAM_CMD_SZ; +} + +static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len, + u32 options) +{ + PRINT_POS; + init_job_desc(desc, HDR_SHARED | options | + (len << HDR_START_IDX_SHIFT)); + append_ptr(desc, ptr); +} + +static inline void append_data(u32 *desc, void *data, int len) +{ + u32 *offset = desc_end(desc); + + if (len) /* avoid sparse warning: memcpy with byte count of 0 */ + memcpy(offset, data, len); + + (*desc) += (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; +} + +static inline void append_cmd(u32 *desc, u32 command) +{ + u32 *cmd = desc_end(desc); + + *cmd = command; + + (*desc)++; +} + +#define append_u32 append_cmd + +static inline void append_u64(u32 *desc, u64 data) +{ + u32 *offset = desc_end(desc); + + *offset = upper_32_bits(data); + *(++offset) = lower_32_bits(data); + + (*desc) += 2; +} + +/* Write command without affecting header, and return pointer to next word */ +static inline u32 *write_cmd(u32 *desc, u32 command) +{ + *desc = command; + + return desc + 1; +} + +static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len, + u32 command) +{ + append_cmd(desc, command | len); + append_ptr(desc, ptr); +} + +/* Write length after pointer, rather than inside command */ +static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr, + unsigned int len, u32 command) +{ + append_cmd(desc, command); + if (!(command & (SQIN_RTO | SQIN_PRE))) + append_ptr(desc, ptr); + append_cmd(desc, len); +} + +static inline void append_cmd_data(u32 *desc, void *data, int len, + u32 command) +{ + append_cmd(desc, command | IMMEDIATE | len); + append_data(desc, data, len); +} 
+ +#define APPEND_CMD_RET(cmd, op) \ +static inline u32 *append_##cmd(u32 *desc, u32 options) \ +{ \ + u32 *cmd = desc_end(desc); \ + PRINT_POS; \ + append_cmd(desc, CMD_##op | options); \ + return cmd; \ +} +APPEND_CMD_RET(jump, JUMP) +APPEND_CMD_RET(move, MOVE) + +static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd) +{ + *jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc)); +} + +static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd) +{ + *move_cmd &= ~MOVE_OFFSET_MASK; + *move_cmd = *move_cmd | ((desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) & + MOVE_OFFSET_MASK); +} + +#define APPEND_CMD(cmd, op) \ +static inline void append_##cmd(u32 *desc, u32 options) \ +{ \ + PRINT_POS; \ + append_cmd(desc, CMD_##op | options); \ +} +APPEND_CMD(operation, OPERATION) + +#define APPEND_CMD_LEN(cmd, op) \ +static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \ +{ \ + PRINT_POS; \ + append_cmd(desc, CMD_##op | len | options); \ +} + +APPEND_CMD_LEN(seq_load, SEQ_LOAD) +APPEND_CMD_LEN(seq_store, SEQ_STORE) +APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD) +APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE) + +#define APPEND_CMD_PTR(cmd, op) \ +static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \ + u32 options) \ +{ \ + PRINT_POS; \ + append_cmd_ptr(desc, ptr, len, CMD_##op | options); \ +} +APPEND_CMD_PTR(key, KEY) +APPEND_CMD_PTR(load, LOAD) +APPEND_CMD_PTR(fifo_load, FIFO_LOAD) +APPEND_CMD_PTR(fifo_store, FIFO_STORE) + +static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len, + u32 options) +{ + u32 cmd_src; + + cmd_src = options & LDST_SRCDST_MASK; + + append_cmd(desc, CMD_STORE | options | len); + + /* The following options do not require pointer */ + if (!(cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED || + cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB || + cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB_WE || + cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED_WE)) + append_ptr(desc, ptr); +} + +#define APPEND_SEQ_PTR_INTLEN(cmd, op) \ +static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \ + unsigned int len, \ + u32 options) \ +{ \ + PRINT_POS; \ + if (options & (SQIN_RTO | SQIN_PRE)) \ + append_cmd(desc, CMD_SEQ_##op##_PTR | len | options); \ + else \ + append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \ +} +APPEND_SEQ_PTR_INTLEN(in, IN) +APPEND_SEQ_PTR_INTLEN(out, OUT) + +#define APPEND_CMD_PTR_TO_IMM(cmd, op) \ +static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ + unsigned int len, u32 options) \ +{ \ + PRINT_POS; \ + append_cmd_data(desc, data, len, CMD_##op | options); \ +} +APPEND_CMD_PTR_TO_IMM(load, LOAD); +APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD); + +#define APPEND_CMD_PTR_EXTLEN(cmd, op) \ +static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \ + unsigned int len, u32 options) \ +{ \ + PRINT_POS; \ + append_cmd_ptr_extlen(desc, ptr, len, CMD_##op | SQIN_EXT | options); \ +} +APPEND_CMD_PTR_EXTLEN(seq_in_ptr, SEQ_IN_PTR) +APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR) + +/* + * Determine whether to store length internally or externally depending on + * the size of its type + */ +#define APPEND_CMD_PTR_LEN(cmd, op, type) \ +static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \ + type len, u32 options) \ +{ \ + PRINT_POS; \ + if (sizeof(type) > sizeof(u16)) \ + append_##cmd##_extlen(desc, ptr, len, options); \ + else \ + append_##cmd##_intlen(desc, ptr, len, options); \ +} +APPEND_CMD_PTR_LEN(seq_in_ptr, SEQ_IN_PTR, u32) +APPEND_CMD_PTR_LEN(seq_out_ptr, 
SEQ_OUT_PTR, u32) + +/* + * 2nd variant for commands whose specified immediate length differs + * from length of immediate data provided, e.g., split keys + */ +#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \ +static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ + unsigned int data_len, \ + unsigned int len, u32 options) \ +{ \ + PRINT_POS; \ + append_cmd(desc, CMD_##op | IMMEDIATE | len | options); \ + append_data(desc, data, data_len); \ +} +APPEND_CMD_PTR_TO_IMM2(key, KEY); + +#define APPEND_CMD_RAW_IMM(cmd, op, type) \ +static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \ + u32 options) \ +{ \ + PRINT_POS; \ + append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \ + append_cmd(desc, immediate); \ +} +APPEND_CMD_RAW_IMM(load, LOAD, u32); + +/* + * Append math command. Only the last part of destination and source need to + * be specified + */ +#define APPEND_MATH(op, desc, dest, src_0, src_1, len) \ +append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \ + MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32)len); + +#define append_math_add(desc, dest, src0, src1, len) \ + APPEND_MATH(ADD, desc, dest, src0, src1, len) +#define append_math_sub(desc, dest, src0, src1, len) \ + APPEND_MATH(SUB, desc, dest, src0, src1, len) +#define append_math_add_c(desc, dest, src0, src1, len) \ + APPEND_MATH(ADDC, desc, dest, src0, src1, len) +#define append_math_sub_b(desc, dest, src0, src1, len) \ + APPEND_MATH(SUBB, desc, dest, src0, src1, len) +#define append_math_and(desc, dest, src0, src1, len) \ + APPEND_MATH(AND, desc, dest, src0, src1, len) +#define append_math_or(desc, dest, src0, src1, len) \ + APPEND_MATH(OR, desc, dest, src0, src1, len) +#define append_math_xor(desc, dest, src0, src1, len) \ + APPEND_MATH(XOR, desc, dest, src0, src1, len) +#define append_math_lshift(desc, dest, src0, src1, len) \ + APPEND_MATH(LSHIFT, desc, dest, src0, src1, len) +#define append_math_rshift(desc, dest, src0, src1, len) \ + APPEND_MATH(RSHIFT, desc, dest, src0, src1, len) +#define append_math_ldshift(desc, dest, src0, src1, len) \ + APPEND_MATH(SHLD, desc, dest, src0, src1, len) + +/* Exactly one source is IMM. Data is passed in as u32 value */ +#define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \ +do { \ + APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \ + append_cmd(desc, data); \ +} while (0) + +#define append_math_add_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data) +#define append_math_sub_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data) +#define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data) +#define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data) +#define append_math_and_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data) +#define append_math_or_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data) +#define append_math_xor_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data) +#define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data) +#define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data) + +/* Exactly one source is IMM. 
Data is passed in as u64 value */ +#define APPEND_MATH_IMM_u64(op, desc, dest, src_0, src_1, data) \ +do { \ + u32 upper = (data >> 16) >> 16; \ + APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ * 2 | \ + (upper ? 0 : MATH_IFB)); \ + if (upper) \ + append_u64(desc, data); \ + else \ + append_u32(desc, data); \ +} while (0) + +#define append_math_add_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(ADD, desc, dest, src0, src1, data) +#define append_math_sub_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(SUB, desc, dest, src0, src1, data) +#define append_math_add_c_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(ADDC, desc, dest, src0, src1, data) +#define append_math_sub_b_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(SUBB, desc, dest, src0, src1, data) +#define append_math_and_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(AND, desc, dest, src0, src1, data) +#define append_math_or_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(OR, desc, dest, src0, src1, data) +#define append_math_xor_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(XOR, desc, dest, src0, src1, data) +#define append_math_lshift_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data) +#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data) diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c new file mode 100644 index 000000000..33e41ea83 --- /dev/null +++ b/drivers/crypto/caam/error.c @@ -0,0 +1,253 @@ +/* + * CAAM Error Reporting + * + * Copyright 2009-2011 Freescale Semiconductor, Inc. + */ + +#include "compat.h" +#include "regs.h" +#include "intern.h" +#include "desc.h" +#include "jr.h" +#include "error.h" + +static const struct { + u8 value; + const char *error_text; +} desc_error_list[] = { + { 0x00, "No error." }, + { 0x01, "SGT Length Error. The descriptor is trying to read more data than is contained in the SGT table." }, + { 0x02, "SGT Null Entry Error." }, + { 0x03, "Job Ring Control Error. There is a bad value in the Job Ring Control register." }, + { 0x04, "Invalid Descriptor Command. The Descriptor Command field is invalid." }, + { 0x05, "Reserved." }, + { 0x06, "Invalid KEY Command" }, + { 0x07, "Invalid LOAD Command" }, + { 0x08, "Invalid STORE Command" }, + { 0x09, "Invalid OPERATION Command" }, + { 0x0A, "Invalid FIFO LOAD Command" }, + { 0x0B, "Invalid FIFO STORE Command" }, + { 0x0C, "Invalid MOVE/MOVE_LEN Command" }, + { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is invalid because the target is not a Job Header Command, or the jump is from a Trusted Descriptor to a Job Descriptor, or because the target Descriptor contains a Shared Descriptor." }, + { 0x0E, "Invalid MATH Command" }, + { 0x0F, "Invalid SIGNATURE Command" }, + { 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO LOAD, or SEQ FIFO STORE decremented the input or output sequence length below 0. This error may result if a built-in PROTOCOL Command has encountered a malformed PDU." }, + { 0x11, "Skip data type invalid. The type must be 0xE or 0xF."}, + { 0x12, "Shared Descriptor Header Error" }, + { 0x13, "Header Error. Invalid length or parity, or certain other problems." }, + { 0x14, "Burster Error. Burster has gotten to an illegal state" }, + { 0x15, "Context Register Length Error. 
The descriptor is trying to read or write past the end of the Context Register. A SEQ LOAD or SEQ STORE with the VLF bit set was executed with too large a length in the variable length register (VSOL for SEQ STORE or VSIL for SEQ LOAD)." }, + { 0x16, "DMA Error" }, + { 0x17, "Reserved." }, + { 0x1A, "Job failed due to JR reset" }, + { 0x1B, "Job failed due to Fail Mode" }, + { 0x1C, "DECO Watchdog timer timeout error" }, + { 0x1D, "DECO tried to copy a key from another DECO but the other DECO's Key Registers were locked" }, + { 0x1E, "DECO attempted to copy data from a DECO that had an unmasked Descriptor error" }, + { 0x1F, "LIODN error. DECO was trying to share from itself or from another DECO but the two Non-SEQ LIODN values didn't match or the 'shared from' DECO's Descriptor required that the SEQ LIODNs be the same and they aren't." }, + { 0x20, "DECO has completed a reset initiated via the DRR register" }, + { 0x21, "Nonce error. When using EKT (CCM) key encryption option in the FIFO STORE Command, the Nonce counter reached its maximum value and this encryption mode can no longer be used." }, + { 0x22, "Meta data is too large (> 511 bytes) for TLS decap (input frame; block ciphers) and IPsec decap (output frame, when doing the next header byte update) and DCRC (output frame)." }, + { 0x23, "Read Input Frame error" }, + { 0x24, "JDKEK, TDKEK or TDSK not loaded error" }, + { 0x80, "DNR (do not run) error" }, + { 0x81, "undefined protocol command" }, + { 0x82, "invalid setting in PDB" }, + { 0x83, "Anti-replay LATE error" }, + { 0x84, "Anti-replay REPLAY error" }, + { 0x85, "Sequence number overflow" }, + { 0x86, "Sigver invalid signature" }, + { 0x87, "DSA Sign Illegal test descriptor" }, + { 0x88, "Protocol Format Error - A protocol has seen an error in the format of data received. When running RSA, this means that formatting with random padding was used, and did not follow the form: 0x00, 0x02, 8-to-N bytes of non-zero pad, 0x00, F data." }, + { 0x89, "Protocol Size Error - A protocol has seen an error in size. When running RSA, pdb size N < (size of F) when no formatting is used; or pdb size N < (F + 11) when formatting is used." 
}, + { 0xC1, "Blob Command error: Undefined mode" }, + { 0xC2, "Blob Command error: Secure Memory Blob mode error" }, + { 0xC4, "Blob Command error: Black Blob key or input size error" }, + { 0xC5, "Blob Command error: Invalid key destination" }, + { 0xC8, "Blob Command error: Trusted/Secure mode error" }, + { 0xF0, "IPsec TTL or hop limit field either came in as 0, or was decremented to 0" }, + { 0xF1, "3GPP HFN matches or exceeds the Threshold" }, +}; + +static const char * const cha_id_list[] = { + "", + "AES", + "DES", + "ARC4", + "MDHA", + "RNG", + "SNOW f8", + "Kasumi f8/9", + "PKHA", + "CRCA", + "SNOW f9", + "ZUCE", + "ZUCA", +}; + +static const char * const err_id_list[] = { + "No error.", + "Mode error.", + "Data size error.", + "Key size error.", + "PKHA A memory size error.", + "PKHA B memory size error.", + "Data arrived out of sequence error.", + "PKHA divide-by-zero error.", + "PKHA modulus even error.", + "DES key parity error.", + "ICV check failed.", + "Hardware error.", + "Unsupported CCM AAD size.", + "Class 1 CHA is not reset", + "Invalid CHA combination was selected", + "Invalid CHA selected.", +}; + +static const char * const rng_err_id_list[] = { + "", + "", + "", + "Instantiate", + "Not instantiated", + "Test instantiate", + "Prediction resistance", + "Prediction resistance and test request", + "Uninstantiate", + "Secure key generation", +}; + +static void report_ccb_status(struct device *jrdev, const u32 status, + const char *error) +{ + u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >> + JRSTA_CCBERR_CHAID_SHIFT; + u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; + u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >> + JRSTA_DECOERR_INDEX_SHIFT; + char *idx_str; + const char *cha_str = "unidentified cha_id value 0x"; + char cha_err_code[3] = { 0 }; + const char *err_str = "unidentified err_id value 0x"; + char err_err_code[3] = { 0 }; + + if (status & JRSTA_DECOERR_JUMP) + idx_str = "jump tgt desc idx"; + else + idx_str = "desc idx"; + + if (cha_id < ARRAY_SIZE(cha_id_list)) + cha_str = cha_id_list[cha_id]; + else + snprintf(cha_err_code, sizeof(cha_err_code), "%02x", cha_id); + + if ((cha_id << JRSTA_CCBERR_CHAID_SHIFT) == JRSTA_CCBERR_CHAID_RNG && + err_id < ARRAY_SIZE(rng_err_id_list) && + strlen(rng_err_id_list[err_id])) { + /* RNG-only error */ + err_str = rng_err_id_list[err_id]; + } else if (err_id < ARRAY_SIZE(err_id_list)) + err_str = err_id_list[err_id]; + else + snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id); + + /* + * CCB ICV check failures are part of normal operation life; + * we leave the upper layers to do what they want with them. 
+ */ + if (err_id != JRSTA_CCBERR_ERRID_ICVCHK) + dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", + status, error, idx_str, idx, + cha_str, cha_err_code, + err_str, err_err_code); +} + +static void report_jump_status(struct device *jrdev, const u32 status, + const char *error) +{ + dev_err(jrdev, "%08x: %s: %s() not implemented\n", + status, error, __func__); +} + +static void report_deco_status(struct device *jrdev, const u32 status, + const char *error) +{ + u8 err_id = status & JRSTA_DECOERR_ERROR_MASK; + u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >> + JRSTA_DECOERR_INDEX_SHIFT; + char *idx_str; + const char *err_str = "unidentified error value 0x"; + char err_err_code[3] = { 0 }; + int i; + + if (status & JRSTA_DECOERR_JUMP) + idx_str = "jump tgt desc idx"; + else + idx_str = "desc idx"; + + for (i = 0; i < ARRAY_SIZE(desc_error_list); i++) + if (desc_error_list[i].value == err_id) + break; + + if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text) + err_str = desc_error_list[i].error_text; + else + snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id); + + dev_err(jrdev, "%08x: %s: %s %d: %s%s\n", + status, error, idx_str, idx, err_str, err_err_code); +} + +static void report_jr_status(struct device *jrdev, const u32 status, + const char *error) +{ + dev_err(jrdev, "%08x: %s: %s() not implemented\n", + status, error, __func__); +} + +static void report_cond_code_status(struct device *jrdev, const u32 status, + const char *error) +{ + dev_err(jrdev, "%08x: %s: %s() not implemented\n", + status, error, __func__); +} + +void caam_jr_strstatus(struct device *jrdev, u32 status) +{ + static const struct stat_src { + void (*report_ssed)(struct device *jrdev, const u32 status, + const char *error); + const char *error; + } status_src[16] = { + { NULL, "No error" }, + { NULL, NULL }, + { report_ccb_status, "CCB" }, + { report_jump_status, "Jump" }, + { report_deco_status, "DECO" }, + { NULL, "Queue Manager Interface" }, + { report_jr_status, "Job Ring" }, + { report_cond_code_status, "Condition Code" }, + { NULL, NULL }, + { NULL, NULL }, + { NULL, NULL }, + { NULL, NULL }, + { NULL, NULL }, + { NULL, NULL }, + { NULL, NULL }, + { NULL, NULL }, + }; + u32 ssrc = status >> JRSTA_SSRC_SHIFT; + const char *error = status_src[ssrc].error; + + /* + * If there is an error handling function, call it to report the error. + * Otherwise print the error source name. + */ + if (status_src[ssrc].report_ssed) + status_src[ssrc].report_ssed(jrdev, status, error); + else if (error) + dev_err(jrdev, "%d: %s\n", ssrc, error); + else + dev_err(jrdev, "%d: unknown error source\n", ssrc); +} +EXPORT_SYMBOL(caam_jr_strstatus); diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h new file mode 100644 index 000000000..b6350b0d9 --- /dev/null +++ b/drivers/crypto/caam/error.h @@ -0,0 +1,11 @@ +/* + * CAAM Error Reporting code header + * + * Copyright 2009-2011 Freescale Semiconductor, Inc. + */ + +#ifndef CAAM_ERROR_H +#define CAAM_ERROR_H +#define CAAM_ERROR_STR_MAX 302 +void caam_jr_strstatus(struct device *jrdev, u32 status); +#endif /* CAAM_ERROR_H */ diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h new file mode 100644 index 000000000..89b94cc9e --- /dev/null +++ b/drivers/crypto/caam/intern.h @@ -0,0 +1,113 @@ +/* + * CAAM/SEC 4.x driver backend + * Private/internal definitions between modules + * + * Copyright 2008-2011 Freescale Semiconductor, Inc. 
+ * + */ + +#ifndef INTERN_H +#define INTERN_H + +/* Currently comes from Kconfig param as a ^2 (driver-required) */ +#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE) + +/* Kconfig params for interrupt coalescing if selected (else zero) */ +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_INTC +#define JOBR_INTC JRCFG_ICEN +#define JOBR_INTC_TIME_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD +#define JOBR_INTC_COUNT_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD +#else +#define JOBR_INTC 0 +#define JOBR_INTC_TIME_THLD 0 +#define JOBR_INTC_COUNT_THLD 0 +#endif + +/* + * Storage for tracking each in-process entry moving across a ring + * Each entry on an output ring needs one of these + */ +struct caam_jrentry_info { + void (*callbk)(struct device *dev, u32 *desc, u32 status, void *arg); + void *cbkarg; /* Argument per ring entry */ + u32 *desc_addr_virt; /* Stored virt addr for postprocessing */ + dma_addr_t desc_addr_dma; /* Stored bus addr for done matching */ + u32 desc_size; /* Stored size for postprocessing, header derived */ +}; + +/* Private sub-storage for a single JobR */ +struct caam_drv_private_jr { + struct list_head list_node; /* Job Ring device list */ + struct device *dev; + int ridx; + struct caam_job_ring __iomem *rregs; /* JobR's register space */ + struct tasklet_struct irqtask; + int irq; /* One per queue */ + + /* Number of scatterlist crypt transforms active on the JobR */ + atomic_t tfm_count ____cacheline_aligned; + + /* Job ring info */ + int ringsize; /* Size of rings (assume input = output) */ + struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */ + spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */ + int inp_ring_write_index; /* Input index "tail" */ + int head; /* entinfo (s/w ring) head index */ + dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */ + spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */ + int out_ring_read_index; /* Output index "tail" */ + int tail; /* entinfo (s/w ring) tail index */ + struct jr_outentry *outring; /* Base of output ring, DMA-safe */ +}; + +/* + * Driver-private storage for a single CAAM block instance + */ +struct caam_drv_private { + + struct device *dev; + struct platform_device **jrpdev; /* Alloc'ed array per sub-device */ + struct platform_device *pdev; + + /* Physical-presence section */ + struct caam_ctrl __iomem *ctrl; /* controller region */ + struct caam_deco __iomem *deco; /* DECO/CCB views */ + struct caam_assurance __iomem *assure; + struct caam_queue_if __iomem *qi; /* QI control region */ + struct caam_job_ring __iomem *jr[4]; /* JobR's register space */ + + /* + * Detected geometry block. Filled in from device tree if powerpc, + * or from register-based version detection code + */ + u8 total_jobrs; /* Total Job Rings in device */ + u8 qi_present; /* Nonzero if QI present in device */ + int secvio_irq; /* Security violation interrupt number */ + int virt_en; /* Virtualization enabled in CAAM */ + +#define RNG4_MAX_HANDLES 2 + /* RNG4 block */ + u32 rng4_sh_init; /* This bitmap shows which of the State + Handles of the RNG4 block are initialized + by this driver */ + + /* + * debugfs entries for developer view into driver/device + * variables at runtime. 
+ */ +#ifdef CONFIG_DEBUG_FS + struct dentry *dfs_root; + struct dentry *ctl; /* controller dir */ + struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req; + struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes; + struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes; + struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus; + + struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap; + struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk; +#endif +}; + +void caam_jr_algapi_init(struct device *dev); +void caam_jr_algapi_remove(struct device *dev); +#endif /* INTERN_H */ diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c new file mode 100644 index 000000000..b8b5d47ac --- /dev/null +++ b/drivers/crypto/caam/jr.c @@ -0,0 +1,550 @@ +/* + * CAAM/SEC 4.x transport/backend driver + * JobR backend functionality + * + * Copyright 2008-2012 Freescale Semiconductor, Inc. + */ + +#include <linux/of_irq.h> +#include <linux/of_address.h> + +#include "compat.h" +#include "regs.h" +#include "jr.h" +#include "desc.h" +#include "intern.h" + +struct jr_driver_data { + /* List of Physical JobR's with the Driver */ + struct list_head jr_list; + spinlock_t jr_alloc_lock; /* jr_list lock */ +} ____cacheline_aligned; + +static struct jr_driver_data driver_data; + +static int caam_reset_hw_jr(struct device *dev) +{ + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + unsigned int timeout = 100000; + + /* + * mask interrupts since we are going to poll + * for reset completion status + */ + setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); + + /* initiate flush (required prior to reset) */ + wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); + while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) == + JRINT_ERR_HALT_INPROGRESS) && --timeout) + cpu_relax(); + + if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) != + JRINT_ERR_HALT_COMPLETE || timeout == 0) { + dev_err(dev, "failed to flush job ring %d\n", jrp->ridx); + return -EIO; + } + + /* initiate reset */ + timeout = 100000; + wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); + while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout) + cpu_relax(); + + if (timeout == 0) { + dev_err(dev, "failed to reset job ring %d\n", jrp->ridx); + return -EIO; + } + + /* unmask interrupts */ + clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); + + return 0; +} + +/* + * Shutdown JobR independent of platform property code + */ +int caam_jr_shutdown(struct device *dev) +{ + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + dma_addr_t inpbusaddr, outbusaddr; + int ret; + + ret = caam_reset_hw_jr(dev); + + tasklet_kill(&jrp->irqtask); + + /* Release interrupt */ + free_irq(jrp->irq, dev); + + /* Free rings */ + inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); + outbusaddr = rd_reg64(&jrp->rregs->outring_base); + dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, + jrp->inpring, inpbusaddr); + dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, + jrp->outring, outbusaddr); + kfree(jrp->entinfo); + + return ret; +} + +static int caam_jr_remove(struct platform_device *pdev) +{ + int ret; + struct device *jrdev; + struct caam_drv_private_jr *jrpriv; + + jrdev = &pdev->dev; + jrpriv = dev_get_drvdata(jrdev); + + /* + * Return EBUSY if job ring already allocated. 
+ */ + if (atomic_read(&jrpriv->tfm_count)) { + dev_err(jrdev, "Device is busy\n"); + return -EBUSY; + } + + /* Remove the node from Physical JobR list maintained by driver */ + spin_lock(&driver_data.jr_alloc_lock); + list_del(&jrpriv->list_node); + spin_unlock(&driver_data.jr_alloc_lock); + + /* Release ring */ + ret = caam_jr_shutdown(jrdev); + if (ret) + dev_err(jrdev, "Failed to shut down job ring\n"); + irq_dispose_mapping(jrpriv->irq); + + return ret; +} + +/* Main per-ring interrupt handler */ +static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) +{ + struct device *dev = st_dev; + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + u32 irqstate; + + /* + * Check the output ring for ready responses, kick + * tasklet if jobs done. + */ + irqstate = rd_reg32(&jrp->rregs->jrintstatus); + if (!irqstate) + return IRQ_NONE; + + /* + * If JobR error, we got more development work to do + * Flag a bug now, but we really need to shut down and + * restart the queue (and fix code). + */ + if (irqstate & JRINT_JR_ERROR) { + dev_err(dev, "job ring error: irqstate: %08x\n", irqstate); + BUG(); + } + + /* mask valid interrupts */ + setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); + + /* Have valid interrupt at this point, just ACK and trigger */ + wr_reg32(&jrp->rregs->jrintstatus, irqstate); + + preempt_disable(); + tasklet_schedule(&jrp->irqtask); + preempt_enable(); + + return IRQ_HANDLED; +} + +/* Deferred service handler, run as interrupt-fired tasklet */ +static void caam_jr_dequeue(unsigned long devarg) +{ + int hw_idx, sw_idx, i, head, tail; + struct device *dev = (struct device *)devarg; + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); + u32 *userdesc, userstatus; + void *userarg; + + while (rd_reg32(&jrp->rregs->outring_used)) { + + head = ACCESS_ONCE(jrp->head); + + spin_lock(&jrp->outlock); + + sw_idx = tail = jrp->tail; + hw_idx = jrp->out_ring_read_index; + + for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { + sw_idx = (tail + i) & (JOBR_DEPTH - 1); + + if (jrp->outring[hw_idx].desc == + jrp->entinfo[sw_idx].desc_addr_dma) + break; /* found */ + } + /* we should never fail to find a matching descriptor */ + BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); + + /* Unmap just-run descriptor so we can post-process */ + dma_unmap_single(dev, jrp->outring[hw_idx].desc, + jrp->entinfo[sw_idx].desc_size, + DMA_TO_DEVICE); + + /* mark completed, avoid matching on a recycled desc addr */ + jrp->entinfo[sw_idx].desc_addr_dma = 0; + + /* Stash callback params for use outside of lock */ + usercall = jrp->entinfo[sw_idx].callbk; + userarg = jrp->entinfo[sw_idx].cbkarg; + userdesc = jrp->entinfo[sw_idx].desc_addr_virt; + userstatus = jrp->outring[hw_idx].jrstatus; + + /* set done */ + wr_reg32(&jrp->rregs->outring_rmvd, 1); + + jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) & + (JOBR_DEPTH - 1); + + /* + * if this job completed out-of-order, do not increment + * the tail. 
Otherwise, increment tail by 1 plus the + * number of subsequent jobs already completed out-of-order + */ + if (sw_idx == tail) { + do { + tail = (tail + 1) & (JOBR_DEPTH - 1); + } while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 && + jrp->entinfo[tail].desc_addr_dma == 0); + + jrp->tail = tail; + } + + spin_unlock(&jrp->outlock); + + /* Finally, execute user's callback */ + usercall(dev, userdesc, userstatus, userarg); + } + + /* reenable / unmask IRQs */ + clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); +} + +/** + * caam_jr_alloc() - Alloc a job ring for someone to use as needed. + * + * returns : pointer to the newly allocated physical + * JobR dev can be written to if successful. + **/ +struct device *caam_jr_alloc(void) +{ + struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL; + struct device *dev = NULL; + int min_tfm_cnt = INT_MAX; + int tfm_cnt; + + spin_lock(&driver_data.jr_alloc_lock); + + if (list_empty(&driver_data.jr_list)) { + spin_unlock(&driver_data.jr_alloc_lock); + return ERR_PTR(-ENODEV); + } + + list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) { + tfm_cnt = atomic_read(&jrpriv->tfm_count); + if (tfm_cnt < min_tfm_cnt) { + min_tfm_cnt = tfm_cnt; + min_jrpriv = jrpriv; + } + if (!min_tfm_cnt) + break; + } + + if (min_jrpriv) { + atomic_inc(&min_jrpriv->tfm_count); + dev = min_jrpriv->dev; + } + spin_unlock(&driver_data.jr_alloc_lock); + + return dev; +} +EXPORT_SYMBOL(caam_jr_alloc); + +/** + * caam_jr_free() - Free the Job Ring + * @rdev - points to the dev that identifies the Job ring to + * be released. + **/ +void caam_jr_free(struct device *rdev) +{ + struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev); + + atomic_dec(&jrpriv->tfm_count); +} +EXPORT_SYMBOL(caam_jr_free); + +/** + * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK, + * -EBUSY if the queue is full, -EIO if it cannot map the caller's + * descriptor. + * @dev: device of the job ring to be used. This device should have + * been assigned prior by caam_jr_register(). + * @desc: points to a job descriptor that execute our request. All + * descriptors (and all referenced data) must be in a DMAable + * region, and all data references must be physical addresses + * accessible to CAAM (i.e. within a PAMU window granted + * to it). + * @cbk: pointer to a callback function to be invoked upon completion + * of this request. This has the form: + * callback(struct device *dev, u32 *desc, u32 stat, void *arg) + * where: + * @dev: contains the job ring device that processed this + * response. + * @desc: descriptor that initiated the request, same as + * "desc" being argued to caam_jr_enqueue(). + * @status: untranslated status received from CAAM. See the + * reference manual for a detailed description of + * error meaning, or see the JRSTA definitions in the + * register header file + * @areq: optional pointer to an argument passed with the + * original request + * @areq: optional pointer to a user argument for use at callback + * time. 
+ **/ +int caam_jr_enqueue(struct device *dev, u32 *desc, + void (*cbk)(struct device *dev, u32 *desc, + u32 status, void *areq), + void *areq) +{ + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + struct caam_jrentry_info *head_entry; + int head, tail, desc_size; + dma_addr_t desc_dma; + + desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32); + desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, desc_dma)) { + dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n"); + return -EIO; + } + + spin_lock_bh(&jrp->inplock); + + head = jrp->head; + tail = ACCESS_ONCE(jrp->tail); + + if (!rd_reg32(&jrp->rregs->inpring_avail) || + CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { + spin_unlock_bh(&jrp->inplock); + dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE); + return -EBUSY; + } + + head_entry = &jrp->entinfo[head]; + head_entry->desc_addr_virt = desc; + head_entry->desc_size = desc_size; + head_entry->callbk = (void *)cbk; + head_entry->cbkarg = areq; + head_entry->desc_addr_dma = desc_dma; + + jrp->inpring[jrp->inp_ring_write_index] = desc_dma; + + smp_wmb(); + + jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) & + (JOBR_DEPTH - 1); + jrp->head = (head + 1) & (JOBR_DEPTH - 1); + + wr_reg32(&jrp->rregs->inpring_jobadd, 1); + + spin_unlock_bh(&jrp->inplock); + + return 0; +} +EXPORT_SYMBOL(caam_jr_enqueue); + +/* + * Init JobR independent of platform property detection + */ +static int caam_jr_init(struct device *dev) +{ + struct caam_drv_private_jr *jrp; + dma_addr_t inpbusaddr, outbusaddr; + int i, error; + + jrp = dev_get_drvdata(dev); + + tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev); + + /* Connect job ring interrupt handler. */ + error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, + dev_name(dev), dev); + if (error) { + dev_err(dev, "can't connect JobR %d interrupt (%d)\n", + jrp->ridx, jrp->irq); + goto out_kill_deq; + } + + error = caam_reset_hw_jr(dev); + if (error) + goto out_free_irq; + + error = -ENOMEM; + jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, + &inpbusaddr, GFP_KERNEL); + if (!jrp->inpring) + goto out_free_irq; + + jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) * + JOBR_DEPTH, &outbusaddr, GFP_KERNEL); + if (!jrp->outring) + goto out_free_inpring; + + jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH, + GFP_KERNEL); + if (!jrp->entinfo) + goto out_free_outring; + + for (i = 0; i < JOBR_DEPTH; i++) + jrp->entinfo[i].desc_addr_dma = !0; + + /* Setup rings */ + jrp->inp_ring_write_index = 0; + jrp->out_ring_read_index = 0; + jrp->head = 0; + jrp->tail = 0; + + wr_reg64(&jrp->rregs->inpring_base, inpbusaddr); + wr_reg64(&jrp->rregs->outring_base, outbusaddr); + wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH); + wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH); + + jrp->ringsize = JOBR_DEPTH; + + spin_lock_init(&jrp->inplock); + spin_lock_init(&jrp->outlock); + + /* Select interrupt coalescing parameters */ + setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC | + (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | + (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); + + return 0; + +out_free_outring: + dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, + jrp->outring, outbusaddr); +out_free_inpring: + dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, + jrp->inpring, inpbusaddr); + dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx); +out_free_irq: + free_irq(jrp->irq, dev); +out_kill_deq: + 
tasklet_kill(&jrp->irqtask); + return error; +} + + +/* + * Probe routine for each detected JobR subsystem. + */ +static int caam_jr_probe(struct platform_device *pdev) +{ + struct device *jrdev; + struct device_node *nprop; + struct caam_job_ring __iomem *ctrl; + struct caam_drv_private_jr *jrpriv; + static int total_jobrs; + int error; + + jrdev = &pdev->dev; + jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr), + GFP_KERNEL); + if (!jrpriv) + return -ENOMEM; + + dev_set_drvdata(jrdev, jrpriv); + + /* save ring identity relative to detection */ + jrpriv->ridx = total_jobrs++; + + nprop = pdev->dev.of_node; + /* Get configuration properties from device tree */ + /* First, get register page */ + ctrl = of_iomap(nprop, 0); + if (!ctrl) { + dev_err(jrdev, "of_iomap() failed\n"); + return -ENOMEM; + } + + jrpriv->rregs = (struct caam_job_ring __force *)ctrl; + + if (sizeof(dma_addr_t) == sizeof(u64)) + if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring")) + dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40)); + else + dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36)); + else + dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32)); + + /* Identify the interrupt */ + jrpriv->irq = irq_of_parse_and_map(nprop, 0); + + /* Now do the platform independent part */ + error = caam_jr_init(jrdev); /* now turn on hardware */ + if (error) { + irq_dispose_mapping(jrpriv->irq); + return error; + } + + jrpriv->dev = jrdev; + spin_lock(&driver_data.jr_alloc_lock); + list_add_tail(&jrpriv->list_node, &driver_data.jr_list); + spin_unlock(&driver_data.jr_alloc_lock); + + atomic_set(&jrpriv->tfm_count, 0); + + return 0; +} + +static struct of_device_id caam_jr_match[] = { + { + .compatible = "fsl,sec-v4.0-job-ring", + }, + { + .compatible = "fsl,sec4.0-job-ring", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, caam_jr_match); + +static struct platform_driver caam_jr_driver = { + .driver = { + .name = "caam_jr", + .of_match_table = caam_jr_match, + }, + .probe = caam_jr_probe, + .remove = caam_jr_remove, +}; + +static int __init jr_driver_init(void) +{ + spin_lock_init(&driver_data.jr_alloc_lock); + INIT_LIST_HEAD(&driver_data.jr_list); + return platform_driver_register(&caam_jr_driver); +} + +static void __exit jr_driver_exit(void) +{ + platform_driver_unregister(&caam_jr_driver); +} + +module_init(jr_driver_init); +module_exit(jr_driver_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("FSL CAAM JR request backend"); +MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h new file mode 100644 index 000000000..97113a6d6 --- /dev/null +++ b/drivers/crypto/caam/jr.h @@ -0,0 +1,18 @@ +/* + * CAAM public-level include definitions for the JobR backend + * + * Copyright 2008-2011 Freescale Semiconductor, Inc. + */ + +#ifndef JR_H +#define JR_H + +/* Prototypes for backend-level services exposed to APIs */ +struct device *caam_jr_alloc(void); +void caam_jr_free(struct device *rdev); +int caam_jr_enqueue(struct device *dev, u32 *desc, + void (*cbk)(struct device *dev, u32 *desc, u32 status, + void *areq), + void *areq); + +#endif /* JR_H */ diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c new file mode 100644 index 000000000..e1eaf4ff9 --- /dev/null +++ b/drivers/crypto/caam/key_gen.c @@ -0,0 +1,123 @@ +/* + * CAAM/SEC 4.x functions for handling key-generation jobs + * + * Copyright 2008-2011 Freescale Semiconductor, Inc. 
+ * + */ +#include "compat.h" +#include "jr.h" +#include "error.h" +#include "desc_constr.h" +#include "key_gen.h" + +void split_key_done(struct device *dev, u32 *desc, u32 err, + void *context) +{ + struct split_key_result *res = context; + +#ifdef DEBUG + dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); +#endif + + if (err) + caam_jr_strstatus(dev, err); + + res->err = err; + + complete(&res->completion); +} +EXPORT_SYMBOL(split_key_done); +/* +get a split ipad/opad key + +Split key generation----------------------------------------------- + +[00] 0xb0810008 jobdesc: stidx=1 share=never len=8 +[01] 0x04000014 key: class2->keyreg len=20 + @0xffe01000 +[03] 0x84410014 operation: cls2-op sha1 hmac init dec +[04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm +[05] 0xa4000001 jump: class2 local all ->1 [06] +[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40 + @0xffe04000 +*/ +int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, + int split_key_pad_len, const u8 *key_in, u32 keylen, + u32 alg_op) +{ + u32 *desc; + struct split_key_result result; + dma_addr_t dma_addr_in, dma_addr_out; + int ret = -ENOMEM; + + desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); + if (!desc) { + dev_err(jrdev, "unable to allocate key input memory\n"); + return ret; + } + + dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, dma_addr_in)) { + dev_err(jrdev, "unable to map key input memory\n"); + goto out_free; + } + + dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(jrdev, dma_addr_out)) { + dev_err(jrdev, "unable to map key output memory\n"); + goto out_unmap_in; + } + + init_job_desc(desc, 0); + append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); + + /* Sets MDHA up into an HMAC-INIT */ + append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT); + + /* + * do a FIFO_LOAD of zero, this will trigger the internal key expansion + * into both pads inside MDHA + */ + append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB | + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2); + + /* + * FIFO_STORE with the explicit split-key content store + * (0x26 output type) + */ + append_fifo_store(desc, dma_addr_out, split_key_len, + LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); +#endif + + result.err = 0; + init_completion(&result.completion); + + ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); + if (!ret) { + /* in progress */ + wait_for_completion_interruptible(&result.completion); + ret = result.err; +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key_out, + split_key_pad_len, 1); +#endif + } + + dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len, + DMA_FROM_DEVICE); +out_unmap_in: + dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE); +out_free: + kfree(desc); + return ret; +} +EXPORT_SYMBOL(gen_split_key); diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h new file mode 100644 index 000000000..c5588f6d8 --- /dev/null +++ b/drivers/crypto/caam/key_gen.h @@ -0,0 +1,17 @@ +/* + * CAAM/SEC 4.x definitions for handling key-generation jobs + * + * Copyright 2008-2011 Freescale Semiconductor, 
Inc. + * + */ + +struct split_key_result { + struct completion completion; + int err; +}; + +void split_key_done(struct device *dev, u32 *desc, u32 err, void *context); + +int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, + int split_key_pad_len, const u8 *key_in, u32 keylen, + u32 alg_op); diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h new file mode 100644 index 000000000..3a87c0cf8 --- /dev/null +++ b/drivers/crypto/caam/pdb.h @@ -0,0 +1,402 @@ +/* + * CAAM Protocol Data Block (PDB) definition header file + * + * Copyright 2008-2012 Freescale Semiconductor, Inc. + * + */ + +#ifndef CAAM_PDB_H +#define CAAM_PDB_H + +/* + * PDB- IPSec ESP Header Modification Options + */ +#define PDBHMO_ESP_DECAP_SHIFT 12 +#define PDBHMO_ESP_ENCAP_SHIFT 4 +/* + * Encap and Decap - Decrement TTL (Hop Limit) - Based on the value of the + * Options Byte IP version (IPvsn) field: + * if IPv4, decrement the inner IP header TTL field (byte 8); + * if IPv6 decrement the inner IP header Hop Limit field (byte 7). +*/ +#define PDBHMO_ESP_DECAP_DEC_TTL (0x02 << PDBHMO_ESP_DECAP_SHIFT) +#define PDBHMO_ESP_ENCAP_DEC_TTL (0x02 << PDBHMO_ESP_ENCAP_SHIFT) +/* + * Decap - DiffServ Copy - Copy the IPv4 TOS or IPv6 Traffic Class byte + * from the outer IP header to the inner IP header. + */ +#define PDBHMO_ESP_DIFFSERV (0x01 << PDBHMO_ESP_DECAP_SHIFT) +/* + * Encap- Copy DF bit -if an IPv4 tunnel mode outer IP header is coming from + * the PDB, copy the DF bit from the inner IP header to the outer IP header. + */ +#define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT) + +/* + * PDB - IPSec ESP Encap/Decap Options + */ +#define PDBOPTS_ESP_ARSNONE 0x00 /* no antireplay window */ +#define PDBOPTS_ESP_ARS32 0x40 /* 32-entry antireplay window */ +#define PDBOPTS_ESP_ARS64 0xc0 /* 64-entry antireplay window */ +#define PDBOPTS_ESP_IVSRC 0x20 /* IV comes from internal random gen */ +#define PDBOPTS_ESP_ESN 0x10 /* extended sequence included */ +#define PDBOPTS_ESP_OUTFMT 0x08 /* output only decapsulation (decap) */ +#define PDBOPTS_ESP_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */ +#define PDBOPTS_ESP_INCIPHDR 0x04 /* Prepend IP header to output frame */ +#define PDBOPTS_ESP_IPVSN 0x02 /* process IPv6 header */ +#define PDBOPTS_ESP_AOFL 0x04 /* adjust out frame len (decap, SEC>=5.3)*/ +#define PDBOPTS_ESP_TUNNEL 0x01 /* tunnel mode next-header byte */ +#define PDBOPTS_ESP_IPV6 0x02 /* ip header version is V6 */ +#define PDBOPTS_ESP_DIFFSERV 0x40 /* copy TOS/TC from inner iphdr */ +#define PDBOPTS_ESP_UPDATE_CSUM 0x80 /* encap-update ip header checksum */ +#define PDBOPTS_ESP_VERIFY_CSUM 0x20 /* decap-validate ip header checksum */ + +/* + * General IPSec encap/decap PDB definitions + */ +struct ipsec_encap_cbc { + u32 iv[4]; +}; + +struct ipsec_encap_ctr { + u32 ctr_nonce; + u32 ctr_initial; + u32 iv[2]; +}; + +struct ipsec_encap_ccm { + u32 salt; /* lower 24 bits */ + u8 b0_flags; + u8 ctr_flags; + u16 ctr_initial; + u32 iv[2]; +}; + +struct ipsec_encap_gcm { + u32 salt; /* lower 24 bits */ + u32 rsvd1; + u32 iv[2]; +}; + +struct ipsec_encap_pdb { + u8 hmo_rsvd; + u8 ip_nh; + u8 ip_nh_offset; + u8 options; + u32 seq_num_ext_hi; + u32 seq_num; + union { + struct ipsec_encap_cbc cbc; + struct ipsec_encap_ctr ctr; + struct ipsec_encap_ccm ccm; + struct ipsec_encap_gcm gcm; + }; + u32 spi; + u16 rsvd1; + u16 ip_hdr_len; + u32 ip_hdr[0]; /* optional IP Header content */ +}; + +struct ipsec_decap_cbc { + u32 rsvd[2]; +}; + +struct ipsec_decap_ctr { + u32 salt; + u32 ctr_initial; +}; + 
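As a brief aside on how these protocol data blocks are consumed: a PDB sits between the shared-descriptor header word and the first command, which is the layout that init_sh_desc_pdb() and sh_desc_pdb() in desc_constr.h provide for. A minimal sketch using struct ipsec_encap_pdb from above (the optional trailing ip_hdr[] area is not handled here, and the zero options word is a placeholder):

/* Sketch only -- shows the intended PDB placement; not part of the patch. */
static void example_embed_ipsec_pdb(u32 *sh_desc,
				    const struct ipsec_encap_pdb *encap_pdb)
{
	size_t pdb_bytes = sizeof(*encap_pdb);	/* fixed part, no IP header */

	/* header word + reserved PDB area; start index points past the PDB */
	init_sh_desc_pdb(sh_desc, 0, pdb_bytes);

	/* copy the PDB into the words immediately after the header */
	memcpy(sh_desc_pdb(sh_desc), encap_pdb, pdb_bytes);
}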
+struct ipsec_decap_ccm { + u32 salt; + u8 iv_flags; + u8 ctr_flags; + u16 ctr_initial; +}; + +struct ipsec_decap_gcm { + u32 salt; + u32 resvd; +}; + +struct ipsec_decap_pdb { + u16 hmo_ip_hdr_len; + u8 ip_nh_offset; + u8 options; + union { + struct ipsec_decap_cbc cbc; + struct ipsec_decap_ctr ctr; + struct ipsec_decap_ccm ccm; + struct ipsec_decap_gcm gcm; + }; + u32 seq_num_ext_hi; + u32 seq_num; + u32 anti_replay[2]; + u32 end_index[0]; +}; + +/* + * IPSec ESP Datapath Protocol Override Register (DPOVRD) + */ +struct ipsec_deco_dpovrd { +#define IPSEC_ENCAP_DECO_DPOVRD_USE 0x80 + u8 ovrd_ecn; + u8 ip_hdr_len; + u8 nh_offset; + u8 next_header; /* reserved if decap */ +}; + +/* + * IEEE 802.11i WiFi Protocol Data Block + */ +#define WIFI_PDBOPTS_FCS 0x01 +#define WIFI_PDBOPTS_AR 0x40 + +struct wifi_encap_pdb { + u16 mac_hdr_len; + u8 rsvd; + u8 options; + u8 iv_flags; + u8 pri; + u16 pn1; + u32 pn2; + u16 frm_ctrl_mask; + u16 seq_ctrl_mask; + u8 rsvd1[2]; + u8 cnst; + u8 key_id; + u8 ctr_flags; + u8 rsvd2; + u16 ctr_init; +}; + +struct wifi_decap_pdb { + u16 mac_hdr_len; + u8 rsvd; + u8 options; + u8 iv_flags; + u8 pri; + u16 pn1; + u32 pn2; + u16 frm_ctrl_mask; + u16 seq_ctrl_mask; + u8 rsvd1[4]; + u8 ctr_flags; + u8 rsvd2; + u16 ctr_init; +}; + +/* + * IEEE 802.16 WiMAX Protocol Data Block + */ +#define WIMAX_PDBOPTS_FCS 0x01 +#define WIMAX_PDBOPTS_AR 0x40 /* decap only */ + +struct wimax_encap_pdb { + u8 rsvd[3]; + u8 options; + u32 nonce; + u8 b0_flags; + u8 ctr_flags; + u16 ctr_init; + /* begin DECO writeback region */ + u32 pn; + /* end DECO writeback region */ +}; + +struct wimax_decap_pdb { + u8 rsvd[3]; + u8 options; + u32 nonce; + u8 iv_flags; + u8 ctr_flags; + u16 ctr_init; + /* begin DECO writeback region */ + u32 pn; + u8 rsvd1[2]; + u16 antireplay_len; + u64 antireplay_scorecard; + /* end DECO writeback region */ +}; + +/* + * IEEE 801.AE MacSEC Protocol Data Block + */ +#define MACSEC_PDBOPTS_FCS 0x01 +#define MACSEC_PDBOPTS_AR 0x40 /* used in decap only */ + +struct macsec_encap_pdb { + u16 aad_len; + u8 rsvd; + u8 options; + u64 sci; + u16 ethertype; + u8 tci_an; + u8 rsvd1; + /* begin DECO writeback region */ + u32 pn; + /* end DECO writeback region */ +}; + +struct macsec_decap_pdb { + u16 aad_len; + u8 rsvd; + u8 options; + u64 sci; + u8 rsvd1[3]; + /* begin DECO writeback region */ + u8 antireplay_len; + u32 pn; + u64 antireplay_scorecard; + /* end DECO writeback region */ +}; + +/* + * SSL/TLS/DTLS Protocol Data Blocks + */ + +#define TLS_PDBOPTS_ARS32 0x40 +#define TLS_PDBOPTS_ARS64 0xc0 +#define TLS_PDBOPTS_OUTFMT 0x08 +#define TLS_PDBOPTS_IV_WRTBK 0x02 /* 1.1/1.2/DTLS only */ +#define TLS_PDBOPTS_EXP_RND_IV 0x01 /* 1.1/1.2/DTLS only */ + +struct tls_block_encap_pdb { + u8 type; + u8 version[2]; + u8 options; + u64 seq_num; + u32 iv[4]; +}; + +struct tls_stream_encap_pdb { + u8 type; + u8 version[2]; + u8 options; + u64 seq_num; + u8 i; + u8 j; + u8 rsvd1[2]; +}; + +struct dtls_block_encap_pdb { + u8 type; + u8 version[2]; + u8 options; + u16 epoch; + u16 seq_num[3]; + u32 iv[4]; +}; + +struct tls_block_decap_pdb { + u8 rsvd[3]; + u8 options; + u64 seq_num; + u32 iv[4]; +}; + +struct tls_stream_decap_pdb { + u8 rsvd[3]; + u8 options; + u64 seq_num; + u8 i; + u8 j; + u8 rsvd1[2]; +}; + +struct dtls_block_decap_pdb { + u8 rsvd[3]; + u8 options; + u16 epoch; + u16 seq_num[3]; + u32 iv[4]; + u64 antireplay_scorecard; +}; + +/* + * SRTP Protocol Data Blocks + */ +#define SRTP_PDBOPTS_MKI 0x08 +#define SRTP_PDBOPTS_AR 0x40 + +struct srtp_encap_pdb { + u8 x_len; + u8 
mki_len; + u8 n_tag; + u8 options; + u32 cnst0; + u8 rsvd[2]; + u16 cnst1; + u16 salt[7]; + u16 cnst2; + u32 rsvd1; + u32 roc; + u32 opt_mki; +}; + +struct srtp_decap_pdb { + u8 x_len; + u8 mki_len; + u8 n_tag; + u8 options; + u32 cnst0; + u8 rsvd[2]; + u16 cnst1; + u16 salt[7]; + u16 cnst2; + u16 rsvd1; + u16 seq_num; + u32 roc; + u64 antireplay_scorecard; +}; + +/* + * DSA/ECDSA Protocol Data Blocks + * Two of these exist: DSA-SIGN, and DSA-VERIFY. They are similar + * except for the treatment of "w" for verify, "s" for sign, + * and the placement of "a,b". + */ +#define DSA_PDB_SGF_SHIFT 24 +#define DSA_PDB_SGF_MASK (0xff << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_Q (0x80 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_R (0x40 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_G (0x20 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_W (0x10 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_S (0x10 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_F (0x08 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_C (0x04 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_D (0x02 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_AB_SIGN (0x02 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_AB_VERIFY (0x01 << DSA_PDB_SGF_SHIFT) + +#define DSA_PDB_L_SHIFT 7 +#define DSA_PDB_L_MASK (0x3ff << DSA_PDB_L_SHIFT) + +#define DSA_PDB_N_MASK 0x7f + +struct dsa_sign_pdb { + u32 sgf_ln; /* Use DSA_PDB_ defintions per above */ + u8 *q; + u8 *r; + u8 *g; /* or Gx,y */ + u8 *s; + u8 *f; + u8 *c; + u8 *d; + u8 *ab; /* ECC only */ + u8 *u; +}; + +struct dsa_verify_pdb { + u32 sgf_ln; + u8 *q; + u8 *r; + u8 *g; /* or Gx,y */ + u8 *w; /* or Wx,y */ + u8 *f; + u8 *c; + u8 *d; + u8 *tmp; /* temporary data block */ + u8 *ab; /* only used if ECC processing */ +}; + +#endif diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h new file mode 100644 index 000000000..378ddc17f --- /dev/null +++ b/drivers/crypto/caam/regs.h @@ -0,0 +1,780 @@ +/* + * CAAM hardware register-level view + * + * Copyright 2008-2011 Freescale Semiconductor, Inc. + */ + +#ifndef REGS_H +#define REGS_H + +#include <linux/types.h> +#include <linux/io.h> + +/* + * Architecture-specific register access methods + * + * CAAM's bus-addressable registers are 64 bits internally. + * They have been wired to be safely accessible on 32-bit + * architectures, however. Registers were organized such + * that (a) they can be contained in 32 bits, (b) if not, then they + * can be treated as two 32-bit entities, or finally (c) if they + * must be treated as a single 64-bit value, then this can safely + * be done with two 32-bit cycles. + * + * For 32-bit operations on 64-bit values, CAAM follows the same + * 64-bit register access conventions as it's predecessors, in that + * writes are "triggered" by a write to the register at the numerically + * higher address, thus, a full 64-bit write cycle requires a write + * to the lower address, followed by a write to the higher address, + * which will latch/execute the write cycle. + * + * For example, let's assume a SW reset of CAAM through the master + * configuration register. + * - SWRST is in bit 31 of MCFG. + * - MCFG begins at base+0x0000. + * - Bits 63-32 are a 32-bit word at base+0x0000 (numerically-lower) + * - Bits 31-0 are a 32-bit word at base+0x0004 (numerically-higher) + * + * (and on Power, the convention is 0-31, 32-63, I know...) + * + * Assuming a 64-bit write to this MCFG to perform a software reset + * would then require a write of 0 to base+0x0000, followed by a + * write of 0x80000000 to base+0x0004, which would "execute" the + * reset. 
+ * + * Of course, since MCFG 63-32 is all zero, we could cheat and simply + * write 0x8000000 to base+0x0004, and the reset would work fine. + * However, since CAAM does contain some write-and-read-intended + * 64-bit registers, this code defines 64-bit access methods for + * the sake of internal consistency and simplicity, and so that a + * clean transition to 64-bit is possible when it becomes necessary. + * + * There are limitations to this that the developer must recognize. + * 32-bit architectures cannot enforce an atomic-64 operation, + * Therefore: + * + * - On writes, since the HW is assumed to latch the cycle on the + * write of the higher-numeric-address word, then ordered + * writes work OK. + * + * - For reads, where a register contains a relevant value of more + * that 32 bits, the hardware employs logic to latch the other + * "half" of the data until read, ensuring an accurate value. + * This is of particular relevance when dealing with CAAM's + * performance counters. + * + */ + +#ifdef __BIG_ENDIAN +#define wr_reg32(reg, data) out_be32(reg, data) +#define rd_reg32(reg) in_be32(reg) +#ifdef CONFIG_64BIT +#define wr_reg64(reg, data) out_be64(reg, data) +#define rd_reg64(reg) in_be64(reg) +#endif +#else +#ifdef __LITTLE_ENDIAN +#define wr_reg32(reg, data) __raw_writel(data, reg) +#define rd_reg32(reg) __raw_readl(reg) +#ifdef CONFIG_64BIT +#define wr_reg64(reg, data) __raw_writeq(data, reg) +#define rd_reg64(reg) __raw_readq(reg) +#endif +#endif +#endif + +#ifndef CONFIG_64BIT +#ifdef __BIG_ENDIAN +static inline void wr_reg64(u64 __iomem *reg, u64 data) +{ + wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32); + wr_reg32((u32 __iomem *)reg + 1, data & 0x00000000ffffffffull); +} + +static inline u64 rd_reg64(u64 __iomem *reg) +{ + return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) | + ((u64)rd_reg32((u32 __iomem *)reg + 1)); +} +#else +#ifdef __LITTLE_ENDIAN +static inline void wr_reg64(u64 __iomem *reg, u64 data) +{ + wr_reg32((u32 __iomem *)reg + 1, (data & 0xffffffff00000000ull) >> 32); + wr_reg32((u32 __iomem *)reg, data & 0x00000000ffffffffull); +} + +static inline u64 rd_reg64(u64 __iomem *reg) +{ + return (((u64)rd_reg32((u32 __iomem *)reg + 1)) << 32) | + ((u64)rd_reg32((u32 __iomem *)reg)); +} +#endif +#endif +#endif + +/* + * jr_outentry + * Represents each entry in a JobR output ring + */ +struct jr_outentry { + dma_addr_t desc;/* Pointer to completed descriptor */ + u32 jrstatus; /* Status for completed descriptor */ +} __packed; + +/* + * caam_perfmon - Performance Monitor/Secure Memory Status/ + * CAAM Global Status/Component Version IDs + * + * Spans f00-fff wherever instantiated + */ + +/* Number of DECOs */ +#define CHA_NUM_MS_DECONUM_SHIFT 24 +#define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT) + +/* CHA Version IDs */ +#define CHA_ID_LS_AES_SHIFT 0 +#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT) + +#define CHA_ID_LS_DES_SHIFT 4 +#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT) + +#define CHA_ID_LS_ARC4_SHIFT 8 +#define CHA_ID_LS_ARC4_MASK (0xfull << CHA_ID_LS_ARC4_SHIFT) + +#define CHA_ID_LS_MD_SHIFT 12 +#define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT) + +#define CHA_ID_LS_RNG_SHIFT 16 +#define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT) + +#define CHA_ID_LS_SNW8_SHIFT 20 +#define CHA_ID_LS_SNW8_MASK (0xfull << CHA_ID_LS_SNW8_SHIFT) + +#define CHA_ID_LS_KAS_SHIFT 24 +#define CHA_ID_LS_KAS_MASK (0xfull << CHA_ID_LS_KAS_SHIFT) + +#define CHA_ID_LS_PK_SHIFT 28 +#define CHA_ID_LS_PK_MASK 
(0xfull << CHA_ID_LS_PK_SHIFT) + +#define CHA_ID_MS_CRC_SHIFT 0 +#define CHA_ID_MS_CRC_MASK (0xfull << CHA_ID_MS_CRC_SHIFT) + +#define CHA_ID_MS_SNW9_SHIFT 4 +#define CHA_ID_MS_SNW9_MASK (0xfull << CHA_ID_MS_SNW9_SHIFT) + +#define CHA_ID_MS_DECO_SHIFT 24 +#define CHA_ID_MS_DECO_MASK (0xfull << CHA_ID_MS_DECO_SHIFT) + +#define CHA_ID_MS_JR_SHIFT 28 +#define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT) + +struct sec_vid { + u16 ip_id; + u8 maj_rev; + u8 min_rev; +}; + +struct caam_perfmon { + /* Performance Monitor Registers f00-f9f */ + u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */ + u64 ob_enc_req; /* PC_OB_ENC_REQ - Outbound Encrypt Requests */ + u64 ib_dec_req; /* PC_IB_DEC_REQ - Inbound Decrypt Requests */ + u64 ob_enc_bytes; /* PC_OB_ENCRYPT - Outbound Bytes Encrypted */ + u64 ob_prot_bytes; /* PC_OB_PROTECT - Outbound Bytes Protected */ + u64 ib_dec_bytes; /* PC_IB_DECRYPT - Inbound Bytes Decrypted */ + u64 ib_valid_bytes; /* PC_IB_VALIDATED Inbound Bytes Validated */ + u64 rsvd[13]; + + /* CAAM Hardware Instantiation Parameters fa0-fbf */ + u32 cha_rev_ms; /* CRNR - CHA Rev No. Most significant half*/ + u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/ +#define CTPR_MS_QI_SHIFT 25 +#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT) +#define CTPR_MS_VIRT_EN_INCL 0x00000001 +#define CTPR_MS_VIRT_EN_POR 0x00000002 +#define CTPR_MS_PG_SZ_MASK 0x10 +#define CTPR_MS_PG_SZ_SHIFT 4 + u32 comp_parms_ms; /* CTPR - Compile Parameters Register */ + u32 comp_parms_ls; /* CTPR - Compile Parameters Register */ + u64 rsvd1[2]; + + /* CAAM Global Status fc0-fdf */ + u64 faultaddr; /* FAR - Fault Address */ + u32 faultliodn; /* FALR - Fault Address LIODN */ + u32 faultdetail; /* FADR - Fault Addr Detail */ + u32 rsvd2; + u32 status; /* CSTA - CAAM Status */ + u64 rsvd3; + + /* Component Instantiation Parameters fe0-fff */ + u32 rtic_id; /* RVID - RTIC Version ID */ + u32 ccb_id; /* CCBVID - CCB Version ID */ + u32 cha_id_ms; /* CHAVID - CHA Version ID Most Significant*/ + u32 cha_id_ls; /* CHAVID - CHA Version ID Least Significant*/ + u32 cha_num_ms; /* CHANUM - CHA Number Most Significant */ + u32 cha_num_ls; /* CHANUM - CHA Number Least Significant*/ + u32 caam_id_ms; /* CAAMVID - CAAM Version ID MS */ + u32 caam_id_ls; /* CAAMVID - CAAM Version ID LS */ +}; + +/* LIODN programming for DMA configuration */ +#define MSTRID_LOCK_LIODN 0x80000000 +#define MSTRID_LOCK_MAKETRUSTED 0x00010000 /* only for JR masterid */ + +#define MSTRID_LIODN_MASK 0x0fff +struct masterid { + u32 liodn_ms; /* lock and make-trusted control bits */ + u32 liodn_ls; /* LIODN for non-sequence and seq access */ +}; + +/* Partition ID for DMA configuration */ +struct partid { + u32 rsvd1; + u32 pidr; /* partition ID, DECO */ +}; + +/* RNGB test mode (replicated twice in some configurations) */ +/* Padded out to 0x100 */ +struct rngtst { + u32 mode; /* RTSTMODEx - Test mode */ + u32 rsvd1[3]; + u32 reset; /* RTSTRESETx - Test reset control */ + u32 rsvd2[3]; + u32 status; /* RTSTSSTATUSx - Test status */ + u32 rsvd3; + u32 errstat; /* RTSTERRSTATx - Test error status */ + u32 rsvd4; + u32 errctl; /* RTSTERRCTLx - Test error control */ + u32 rsvd5; + u32 entropy; /* RTSTENTROPYx - Test entropy */ + u32 rsvd6[15]; + u32 verifctl; /* RTSTVERIFCTLx - Test verification control */ + u32 rsvd7; + u32 verifstat; /* RTSTVERIFSTATx - Test verification status */ + u32 rsvd8; + u32 verifdata; /* RTSTVERIFDx - Test verification data */ + u32 rsvd9; + u32 xkey; /* RTSTXKEYx - Test XKEY */ + u32 rsvd10; + u32 
oscctctl; /* RTSTOSCCTCTLx - Test osc. counter control */ + u32 rsvd11; + u32 oscct; /* RTSTOSCCTx - Test oscillator counter */ + u32 rsvd12; + u32 oscctstat; /* RTSTODCCTSTATx - Test osc counter status */ + u32 rsvd13[2]; + u32 ofifo[4]; /* RTSTOFIFOx - Test output FIFO */ + u32 rsvd14[15]; +}; + +/* RNG4 TRNG test registers */ +struct rng4tst { +#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */ +#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_SC 0 /* use von Neumann data in + both entropy shifter and + statistical checker */ +#define RTMCTL_SAMP_MODE_RAW_ES_SC 1 /* use raw data in both + entropy shifter and + statistical checker */ +#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_RAW_SC 2 /* use von Neumann data in + entropy shifter, raw data + in statistical checker */ +#define RTMCTL_SAMP_MODE_INVALID 3 /* invalid combination */ + u32 rtmctl; /* misc. control register */ + u32 rtscmisc; /* statistical check misc. register */ + u32 rtpkrrng; /* poker range register */ + union { + u32 rtpkrmax; /* PRGM=1: poker max. limit register */ + u32 rtpkrsq; /* PRGM=0: poker square calc. result register */ + }; +#define RTSDCTL_ENT_DLY_SHIFT 16 +#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT) +#define RTSDCTL_ENT_DLY_MIN 3200 +#define RTSDCTL_ENT_DLY_MAX 12800 + u32 rtsdctl; /* seed control register */ + union { + u32 rtsblim; /* PRGM=1: sparse bit limit register */ + u32 rttotsam; /* PRGM=0: total samples register */ + }; + u32 rtfrqmin; /* frequency count min. limit register */ +#define RTFRQMAX_DISABLE (1 << 20) + union { + u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */ + u32 rtfrqcnt; /* PRGM=0: freq. count register */ + }; + u32 rsvd1[40]; +#define RDSTA_SKVT 0x80000000 +#define RDSTA_SKVN 0x40000000 +#define RDSTA_IF0 0x00000001 +#define RDSTA_IF1 0x00000002 +#define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0) + u32 rdsta; + u32 rsvd2[15]; +}; + +/* + * caam_ctrl - basic core configuration + * starts base + 0x0000 padded out to 0x1000 + */ + +#define KEK_KEY_SIZE 8 +#define TKEK_KEY_SIZE 8 +#define TDSK_KEY_SIZE 8 + +#define DECO_RESET 1 /* Use with DECO reset/availability regs */ +#define DECO_RESET_0 (DECO_RESET << 0) +#define DECO_RESET_1 (DECO_RESET << 1) +#define DECO_RESET_2 (DECO_RESET << 2) +#define DECO_RESET_3 (DECO_RESET << 3) +#define DECO_RESET_4 (DECO_RESET << 4) + +struct caam_ctrl { + /* Basic Configuration Section 000-01f */ + /* Read/Writable */ + u32 rsvd1; + u32 mcr; /* MCFG Master Config Register */ + u32 rsvd2; + u32 scfgr; /* SCFGR, Security Config Register */ + + /* Bus Access Configuration Section 010-11f */ + /* Read/Writable */ + struct masterid jr_mid[4]; /* JRxLIODNR - JobR LIODN setup */ + u32 rsvd3[11]; + u32 jrstart; /* JRSTART - Job Ring Start Register */ + struct masterid rtic_mid[4]; /* RTICxLIODNR - RTIC LIODN setup */ + u32 rsvd4[5]; + u32 deco_rsr; /* DECORSR - Deco Request Source */ + u32 rsvd11; + u32 deco_rq; /* DECORR - DECO Request */ + struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */ + u32 rsvd5[22]; + + /* DECO Availability/Reset Section 120-3ff */ + u32 deco_avail; /* DAR - DECO availability */ + u32 deco_reset; /* DRR - DECO reset */ + u32 rsvd6[182]; + + /* Key Encryption/Decryption Configuration 400-5ff */ + /* Read/Writable only while in Non-secure mode */ + u32 kek[KEK_KEY_SIZE]; /* JDKEKR - Key Encryption Key */ + u32 tkek[TKEK_KEY_SIZE]; /* TDKEKR - Trusted Desc KEK */ + u32 tdsk[TDSK_KEY_SIZE]; /* TDSKR - Trusted Desc Signing Key */ + u32 rsvd7[32]; + u64 sknonce; /* SKNR - Secure Key Nonce */ + u32 
rsvd8[70]; + + /* RNG Test/Verification/Debug Access 600-7ff */ + /* (Useful in Test/Debug modes only...) */ + union { + struct rngtst rtst[2]; + struct rng4tst r4tst[2]; + }; + + u32 rsvd9[448]; + + /* Performance Monitor f00-fff */ + struct caam_perfmon perfmon; +}; + +/* + * Controller master config register defs + */ +#define MCFGR_SWRESET 0x80000000 /* software reset */ +#define MCFGR_WDENABLE 0x40000000 /* DECO watchdog enable */ +#define MCFGR_WDFAIL 0x20000000 /* DECO watchdog force-fail */ +#define MCFGR_DMA_RESET 0x10000000 +#define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */ +#define SCFGR_RDBENABLE 0x00000400 +#define SCFGR_VIRT_EN 0x00008000 +#define DECORR_RQD0ENABLE 0x00000001 /* Enable DECO0 for direct access */ +#define DECORSR_JR0 0x00000001 /* JR to supply TZ, SDID, ICID */ +#define DECORSR_VALID 0x80000000 +#define DECORR_DEN0 0x00010000 /* DECO0 available for access*/ + +/* AXI read cache control */ +#define MCFGR_ARCACHE_SHIFT 12 +#define MCFGR_ARCACHE_MASK (0xf << MCFGR_ARCACHE_SHIFT) + +/* AXI write cache control */ +#define MCFGR_AWCACHE_SHIFT 8 +#define MCFGR_AWCACHE_MASK (0xf << MCFGR_AWCACHE_SHIFT) + +/* AXI pipeline depth */ +#define MCFGR_AXIPIPE_SHIFT 4 +#define MCFGR_AXIPIPE_MASK (0xf << MCFGR_AXIPIPE_SHIFT) + +#define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */ +#define MCFGR_BURST_64 0x00000001 /* Max burst size */ + +/* JRSTART register offsets */ +#define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */ +#define JRSTART_JR1_START 0x00000002 /* Start Job ring 1 */ +#define JRSTART_JR2_START 0x00000004 /* Start Job ring 2 */ +#define JRSTART_JR3_START 0x00000008 /* Start Job ring 3 */ + +/* + * caam_job_ring - direct job ring setup + * 1-4 possible per instantiation, base + 1000/2000/3000/4000 + * Padded out to 0x1000 + */ +struct caam_job_ring { + /* Input ring */ + u64 inpring_base; /* IRBAx - Input desc ring baseaddr */ + u32 rsvd1; + u32 inpring_size; /* IRSx - Input ring size */ + u32 rsvd2; + u32 inpring_avail; /* IRSAx - Input ring room remaining */ + u32 rsvd3; + u32 inpring_jobadd; /* IRJAx - Input ring jobs added */ + + /* Output Ring */ + u64 outring_base; /* ORBAx - Output status ring base addr */ + u32 rsvd4; + u32 outring_size; /* ORSx - Output ring size */ + u32 rsvd5; + u32 outring_rmvd; /* ORJRx - Output ring jobs removed */ + u32 rsvd6; + u32 outring_used; /* ORSFx - Output ring slots full */ + + /* Status/Configuration */ + u32 rsvd7; + u32 jroutstatus; /* JRSTAx - JobR output status */ + u32 rsvd8; + u32 jrintstatus; /* JRINTx - JobR interrupt status */ + u32 rconfig_hi; /* JRxCFG - Ring configuration */ + u32 rconfig_lo; + + /* Indices. 
CAAM maintains as "heads" of each queue */ + u32 rsvd9; + u32 inp_rdidx; /* IRRIx - Input ring read index */ + u32 rsvd10; + u32 out_wtidx; /* ORWIx - Output ring write index */ + + /* Command/control */ + u32 rsvd11; + u32 jrcommand; /* JRCRx - JobR command */ + + u32 rsvd12[932]; + + /* Performance Monitor f00-fff */ + struct caam_perfmon perfmon; +}; + +#define JR_RINGSIZE_MASK 0x03ff +/* + * jrstatus - Job Ring Output Status + * All values in lo word + * Also note, same values written out as status through QI + * in the command/status field of a frame descriptor + */ +#define JRSTA_SSRC_SHIFT 28 +#define JRSTA_SSRC_MASK 0xf0000000 + +#define JRSTA_SSRC_NONE 0x00000000 +#define JRSTA_SSRC_CCB_ERROR 0x20000000 +#define JRSTA_SSRC_JUMP_HALT_USER 0x30000000 +#define JRSTA_SSRC_DECO 0x40000000 +#define JRSTA_SSRC_JRERROR 0x60000000 +#define JRSTA_SSRC_JUMP_HALT_CC 0x70000000 + +#define JRSTA_DECOERR_JUMP 0x08000000 +#define JRSTA_DECOERR_INDEX_SHIFT 8 +#define JRSTA_DECOERR_INDEX_MASK 0xff00 +#define JRSTA_DECOERR_ERROR_MASK 0x00ff + +#define JRSTA_DECOERR_NONE 0x00 +#define JRSTA_DECOERR_LINKLEN 0x01 +#define JRSTA_DECOERR_LINKPTR 0x02 +#define JRSTA_DECOERR_JRCTRL 0x03 +#define JRSTA_DECOERR_DESCCMD 0x04 +#define JRSTA_DECOERR_ORDER 0x05 +#define JRSTA_DECOERR_KEYCMD 0x06 +#define JRSTA_DECOERR_LOADCMD 0x07 +#define JRSTA_DECOERR_STORECMD 0x08 +#define JRSTA_DECOERR_OPCMD 0x09 +#define JRSTA_DECOERR_FIFOLDCMD 0x0a +#define JRSTA_DECOERR_FIFOSTCMD 0x0b +#define JRSTA_DECOERR_MOVECMD 0x0c +#define JRSTA_DECOERR_JUMPCMD 0x0d +#define JRSTA_DECOERR_MATHCMD 0x0e +#define JRSTA_DECOERR_SHASHCMD 0x0f +#define JRSTA_DECOERR_SEQCMD 0x10 +#define JRSTA_DECOERR_DECOINTERNAL 0x11 +#define JRSTA_DECOERR_SHDESCHDR 0x12 +#define JRSTA_DECOERR_HDRLEN 0x13 +#define JRSTA_DECOERR_BURSTER 0x14 +#define JRSTA_DECOERR_DESCSIGNATURE 0x15 +#define JRSTA_DECOERR_DMA 0x16 +#define JRSTA_DECOERR_BURSTFIFO 0x17 +#define JRSTA_DECOERR_JRRESET 0x1a +#define JRSTA_DECOERR_JOBFAIL 0x1b +#define JRSTA_DECOERR_DNRERR 0x80 +#define JRSTA_DECOERR_UNDEFPCL 0x81 +#define JRSTA_DECOERR_PDBERR 0x82 +#define JRSTA_DECOERR_ANRPLY_LATE 0x83 +#define JRSTA_DECOERR_ANRPLY_REPLAY 0x84 +#define JRSTA_DECOERR_SEQOVF 0x85 +#define JRSTA_DECOERR_INVSIGN 0x86 +#define JRSTA_DECOERR_DSASIGN 0x87 + +#define JRSTA_CCBERR_JUMP 0x08000000 +#define JRSTA_CCBERR_INDEX_MASK 0xff00 +#define JRSTA_CCBERR_INDEX_SHIFT 8 +#define JRSTA_CCBERR_CHAID_MASK 0x00f0 +#define JRSTA_CCBERR_CHAID_SHIFT 4 +#define JRSTA_CCBERR_ERRID_MASK 0x000f + +#define JRSTA_CCBERR_CHAID_AES (0x01 << JRSTA_CCBERR_CHAID_SHIFT) +#define JRSTA_CCBERR_CHAID_DES (0x02 << JRSTA_CCBERR_CHAID_SHIFT) +#define JRSTA_CCBERR_CHAID_ARC4 (0x03 << JRSTA_CCBERR_CHAID_SHIFT) +#define JRSTA_CCBERR_CHAID_MD (0x04 << JRSTA_CCBERR_CHAID_SHIFT) +#define JRSTA_CCBERR_CHAID_RNG (0x05 << JRSTA_CCBERR_CHAID_SHIFT) +#define JRSTA_CCBERR_CHAID_SNOW (0x06 << JRSTA_CCBERR_CHAID_SHIFT) +#define JRSTA_CCBERR_CHAID_KASUMI (0x07 << JRSTA_CCBERR_CHAID_SHIFT) +#define JRSTA_CCBERR_CHAID_PK (0x08 << JRSTA_CCBERR_CHAID_SHIFT) +#define JRSTA_CCBERR_CHAID_CRC (0x09 << JRSTA_CCBERR_CHAID_SHIFT) + +#define JRSTA_CCBERR_ERRID_NONE 0x00 +#define JRSTA_CCBERR_ERRID_MODE 0x01 +#define JRSTA_CCBERR_ERRID_DATASIZ 0x02 +#define JRSTA_CCBERR_ERRID_KEYSIZ 0x03 +#define JRSTA_CCBERR_ERRID_PKAMEMSZ 0x04 +#define JRSTA_CCBERR_ERRID_PKBMEMSZ 0x05 +#define JRSTA_CCBERR_ERRID_SEQUENCE 0x06 +#define JRSTA_CCBERR_ERRID_PKDIVZRO 0x07 +#define JRSTA_CCBERR_ERRID_PKMODEVN 0x08 +#define JRSTA_CCBERR_ERRID_KEYPARIT 0x09 +#define 
JRSTA_CCBERR_ERRID_ICVCHK 0x0a +#define JRSTA_CCBERR_ERRID_HARDWARE 0x0b +#define JRSTA_CCBERR_ERRID_CCMAAD 0x0c +#define JRSTA_CCBERR_ERRID_INVCHA 0x0f + +#define JRINT_ERR_INDEX_MASK 0x3fff0000 +#define JRINT_ERR_INDEX_SHIFT 16 +#define JRINT_ERR_TYPE_MASK 0xf00 +#define JRINT_ERR_TYPE_SHIFT 8 +#define JRINT_ERR_HALT_MASK 0xc +#define JRINT_ERR_HALT_SHIFT 2 +#define JRINT_ERR_HALT_INPROGRESS 0x4 +#define JRINT_ERR_HALT_COMPLETE 0x8 +#define JRINT_JR_ERROR 0x02 +#define JRINT_JR_INT 0x01 + +#define JRINT_ERR_TYPE_WRITE 1 +#define JRINT_ERR_TYPE_BAD_INPADDR 3 +#define JRINT_ERR_TYPE_BAD_OUTADDR 4 +#define JRINT_ERR_TYPE_INV_INPWRT 5 +#define JRINT_ERR_TYPE_INV_OUTWRT 6 +#define JRINT_ERR_TYPE_RESET 7 +#define JRINT_ERR_TYPE_REMOVE_OFL 8 +#define JRINT_ERR_TYPE_ADD_OFL 9 + +#define JRCFG_SOE 0x04 +#define JRCFG_ICEN 0x02 +#define JRCFG_IMSK 0x01 +#define JRCFG_ICDCT_SHIFT 8 +#define JRCFG_ICTT_SHIFT 16 + +#define JRCR_RESET 0x01 + +/* + * caam_assurance - Assurance Controller View + * base + 0x6000 padded out to 0x1000 + */ + +struct rtic_element { + u64 address; + u32 rsvd; + u32 length; +}; + +struct rtic_block { + struct rtic_element element[2]; +}; + +struct rtic_memhash { + u32 memhash_be[32]; + u32 memhash_le[32]; +}; + +struct caam_assurance { + /* Status/Command/Watchdog */ + u32 rsvd1; + u32 status; /* RSTA - Status */ + u32 rsvd2; + u32 cmd; /* RCMD - Command */ + u32 rsvd3; + u32 ctrl; /* RCTL - Control */ + u32 rsvd4; + u32 throttle; /* RTHR - Throttle */ + u32 rsvd5[2]; + u64 watchdog; /* RWDOG - Watchdog Timer */ + u32 rsvd6; + u32 rend; /* REND - Endian corrections */ + u32 rsvd7[50]; + + /* Block access/configuration @ 100/110/120/130 */ + struct rtic_block memblk[4]; /* Memory Blocks A-D */ + u32 rsvd8[32]; + + /* Block hashes @ 200/300/400/500 */ + struct rtic_memhash hash[4]; /* Block hash values A-D */ + u32 rsvd_3[640]; +}; + +/* + * caam_queue_if - QI configuration and control + * starts base + 0x7000, padded out to 0x1000 long + */ + +struct caam_queue_if { + u32 qi_control_hi; /* QICTL - QI Control */ + u32 qi_control_lo; + u32 rsvd1; + u32 qi_status; /* QISTA - QI Status */ + u32 qi_deq_cfg_hi; /* QIDQC - QI Dequeue Configuration */ + u32 qi_deq_cfg_lo; + u32 qi_enq_cfg_hi; /* QISEQC - QI Enqueue Command */ + u32 qi_enq_cfg_lo; + u32 rsvd2[1016]; +}; + +/* QI control bits - low word */ +#define QICTL_DQEN 0x01 /* Enable frame pop */ +#define QICTL_STOP 0x02 /* Stop dequeue/enqueue */ +#define QICTL_SOE 0x04 /* Stop on error */ + +/* QI control bits - high word */ +#define QICTL_MBSI 0x01 +#define QICTL_MHWSI 0x02 +#define QICTL_MWSI 0x04 +#define QICTL_MDWSI 0x08 +#define QICTL_CBSI 0x10 /* CtrlDataByteSwapInput */ +#define QICTL_CHWSI 0x20 /* CtrlDataHalfSwapInput */ +#define QICTL_CWSI 0x40 /* CtrlDataWordSwapInput */ +#define QICTL_CDWSI 0x80 /* CtrlDataDWordSwapInput */ +#define QICTL_MBSO 0x0100 +#define QICTL_MHWSO 0x0200 +#define QICTL_MWSO 0x0400 +#define QICTL_MDWSO 0x0800 +#define QICTL_CBSO 0x1000 /* CtrlDataByteSwapOutput */ +#define QICTL_CHWSO 0x2000 /* CtrlDataHalfSwapOutput */ +#define QICTL_CWSO 0x4000 /* CtrlDataWordSwapOutput */ +#define QICTL_CDWSO 0x8000 /* CtrlDataDWordSwapOutput */ +#define QICTL_DMBS 0x010000 +#define QICTL_EPO 0x020000 + +/* QI status bits */ +#define QISTA_PHRDERR 0x01 /* PreHeader Read Error */ +#define QISTA_CFRDERR 0x02 /* Compound Frame Read Error */ +#define QISTA_OFWRERR 0x04 /* Output Frame Read Error */ +#define QISTA_BPDERR 0x08 /* Buffer Pool Depleted */ +#define QISTA_BTSERR 0x10 /* Buffer Undersize */ +#define 
QISTA_CFWRERR 0x20 /* Compound Frame Write Err */ +#define QISTA_STOPD 0x80000000 /* QI Stopped (see QICTL) */ + +/* deco_sg_table - DECO view of scatter/gather table */ +struct deco_sg_table { + u64 addr; /* Segment Address */ + u32 elen; /* E, F bits + 30-bit length */ + u32 bpid_offset; /* Buffer Pool ID + 16-bit length */ +}; + +/* + * caam_deco - descriptor controller - CHA cluster block + * + * Only accessible when direct DECO access is turned on + * (done in DECORR, via MID programmed in DECOxMID + * + * 5 typical, base + 0x8000/9000/a000/b000 + * Padded out to 0x1000 long + */ +struct caam_deco { + u32 rsvd1; + u32 cls1_mode; /* CxC1MR - Class 1 Mode */ + u32 rsvd2; + u32 cls1_keysize; /* CxC1KSR - Class 1 Key Size */ + u32 cls1_datasize_hi; /* CxC1DSR - Class 1 Data Size */ + u32 cls1_datasize_lo; + u32 rsvd3; + u32 cls1_icvsize; /* CxC1ICVSR - Class 1 ICV size */ + u32 rsvd4[5]; + u32 cha_ctrl; /* CCTLR - CHA control */ + u32 rsvd5; + u32 irq_crtl; /* CxCIRQ - CCB interrupt done/error/clear */ + u32 rsvd6; + u32 clr_written; /* CxCWR - Clear-Written */ + u32 ccb_status_hi; /* CxCSTA - CCB Status/Error */ + u32 ccb_status_lo; + u32 rsvd7[3]; + u32 aad_size; /* CxAADSZR - Current AAD Size */ + u32 rsvd8; + u32 cls1_iv_size; /* CxC1IVSZR - Current Class 1 IV Size */ + u32 rsvd9[7]; + u32 pkha_a_size; /* PKASZRx - Size of PKHA A */ + u32 rsvd10; + u32 pkha_b_size; /* PKBSZRx - Size of PKHA B */ + u32 rsvd11; + u32 pkha_n_size; /* PKNSZRx - Size of PKHA N */ + u32 rsvd12; + u32 pkha_e_size; /* PKESZRx - Size of PKHA E */ + u32 rsvd13[24]; + u32 cls1_ctx[16]; /* CxC1CTXR - Class 1 Context @100 */ + u32 rsvd14[48]; + u32 cls1_key[8]; /* CxC1KEYR - Class 1 Key @200 */ + u32 rsvd15[121]; + u32 cls2_mode; /* CxC2MR - Class 2 Mode */ + u32 rsvd16; + u32 cls2_keysize; /* CxX2KSR - Class 2 Key Size */ + u32 cls2_datasize_hi; /* CxC2DSR - Class 2 Data Size */ + u32 cls2_datasize_lo; + u32 rsvd17; + u32 cls2_icvsize; /* CxC2ICVSZR - Class 2 ICV Size */ + u32 rsvd18[56]; + u32 cls2_ctx[18]; /* CxC2CTXR - Class 2 Context @500 */ + u32 rsvd19[46]; + u32 cls2_key[32]; /* CxC2KEYR - Class2 Key @600 */ + u32 rsvd20[84]; + u32 inp_infofifo_hi; /* CxIFIFO - Input Info FIFO @7d0 */ + u32 inp_infofifo_lo; + u32 rsvd21[2]; + u64 inp_datafifo; /* CxDFIFO - Input Data FIFO */ + u32 rsvd22[2]; + u64 out_datafifo; /* CxOFIFO - Output Data FIFO */ + u32 rsvd23[2]; + u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */ + u32 jr_ctl_lo; + u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */ +#define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF + u32 op_status_hi; /* DxOPSTA - DECO Operation Status */ + u32 op_status_lo; + u32 rsvd24[2]; + u32 liodn; /* DxLSR - DECO LIODN Status - non-seq */ + u32 td_liodn; /* DxLSR - DECO LIODN Status - trustdesc */ + u32 rsvd26[6]; + u64 math[4]; /* DxMTH - Math register */ + u32 rsvd27[8]; + struct deco_sg_table gthr_tbl[4]; /* DxGTR - Gather Tables */ + u32 rsvd28[16]; + struct deco_sg_table sctr_tbl[4]; /* DxSTR - Scatter Tables */ + u32 rsvd29[48]; + u32 descbuf[64]; /* DxDESB - Descriptor buffer */ + u32 rscvd30[193]; +#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000 +#define DESC_DBG_DECO_STAT_VALID 0x80000000 +#define DESC_DBG_DECO_STAT_MASK 0x00F00000 + u32 desc_dbg; /* DxDDR - DECO Debug Register */ + u32 rsvd31[126]; +}; + +#define DECO_JQCR_WHL 0x20000000 +#define DECO_JQCR_FOUR 0x10000000 + +#define JR_BLOCK_NUMBER 1 +#define ASSURE_BLOCK_NUMBER 6 +#define QI_BLOCK_NUMBER 7 +#define DECO_BLOCK_NUMBER 8 +#define PG_SIZE_4K 0x1000 +#define PG_SIZE_64K 0x10000 +#endif /* 
REGS_H */ diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h new file mode 100644 index 000000000..3b918218a --- /dev/null +++ b/drivers/crypto/caam/sg_sw_sec4.h @@ -0,0 +1,118 @@ +/* + * CAAM/SEC 4.x functions for using scatterlists in caam driver + * + * Copyright 2008-2011 Freescale Semiconductor, Inc. + * + */ + +struct sec4_sg_entry; + +/* + * convert single dma address to h/w link table format + */ +static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr, + dma_addr_t dma, u32 len, u32 offset) +{ + sec4_sg_ptr->ptr = dma; + sec4_sg_ptr->len = len; + sec4_sg_ptr->reserved = 0; + sec4_sg_ptr->buf_pool_id = 0; + sec4_sg_ptr->offset = offset; +#ifdef DEBUG + print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ", + DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr, + sizeof(struct sec4_sg_entry), 1); +#endif +} + +/* + * convert scatterlist to h/w link table format + * but does not have final bit; instead, returns last entry + */ +static inline struct sec4_sg_entry * +sg_to_sec4_sg(struct scatterlist *sg, int sg_count, + struct sec4_sg_entry *sec4_sg_ptr, u32 offset) +{ + while (sg_count) { + dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), + sg_dma_len(sg), offset); + sec4_sg_ptr++; + sg = sg_next(sg); + sg_count--; + } + return sec4_sg_ptr - 1; +} + +/* + * convert scatterlist to h/w link table format + * scatterlist must have been previously dma mapped + */ +static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count, + struct sec4_sg_entry *sec4_sg_ptr, + u32 offset) +{ + sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset); + sec4_sg_ptr->len |= SEC4_SG_LEN_FIN; +} + +/* count number of elements in scatterlist */ +static inline int __sg_count(struct scatterlist *sg_list, int nbytes, + bool *chained) +{ + struct scatterlist *sg = sg_list; + int sg_nents = 0; + + while (nbytes > 0) { + sg_nents++; + nbytes -= sg->length; + if (!sg_is_last(sg) && (sg + 1)->length == 0) + *chained = true; + sg = sg_next(sg); + } + + return sg_nents; +} + +/* derive number of elements in scatterlist, but return 0 for 1 */ +static inline int sg_count(struct scatterlist *sg_list, int nbytes, + bool *chained) +{ + int sg_nents = __sg_count(sg_list, nbytes, chained); + + if (likely(sg_nents == 1)) + return 0; + + return sg_nents; +} + +static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg, + unsigned int nents, enum dma_data_direction dir, + bool chained) +{ + if (unlikely(chained)) { + int i; + for (i = 0; i < nents; i++) { + dma_map_sg(dev, sg, 1, dir); + sg = sg_next(sg); + } + } else { + dma_map_sg(dev, sg, nents, dir); + } + return nents; +} + +static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg, + unsigned int nents, enum dma_data_direction dir, + bool chained) +{ + if (unlikely(chained)) { + int i; + for (i = 0; i < nents; i++) { + dma_unmap_sg(dev, sg, 1, dir); + sg = sg_next(sg); + } + } else { + dma_unmap_sg(dev, sg, nents, dir); + } + return nents; +} diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig new file mode 100644 index 000000000..7639ffc36 --- /dev/null +++ b/drivers/crypto/ccp/Kconfig @@ -0,0 +1,24 @@ +config CRYPTO_DEV_CCP_DD + tristate "Cryptographic Coprocessor device driver" + depends on CRYPTO_DEV_CCP + default m + select HW_RANDOM + help + Provides the interface to use the AMD Cryptographic Coprocessor + which can be used to accelerate or offload encryption operations + such as SHA, AES and more. If you choose 'M' here, this module + will be called ccp. 
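+# Example (illustrative comment, Kconfig syntax): with CRYPTO_DEV_CCP enabled
+# (see the 'depends on' line above), the 'default m' settings here and in the
+# entry below yield CONFIG_CRYPTO_DEV_CCP_DD=m and CONFIG_CRYPTO_DEV_CCP_CRYPTO=m,
+# which the Makefile later in this commit builds as the ccp and ccp-crypto modules.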
+ +config CRYPTO_DEV_CCP_CRYPTO + tristate "Encryption and hashing acceleration support" + depends on CRYPTO_DEV_CCP_DD + default m + select CRYPTO_ALGAPI + select CRYPTO_HASH + select CRYPTO_BLKCIPHER + select CRYPTO_AUTHENC + help + Support for using the cryptographic API with the AMD Cryptographic + Coprocessor. This module supports acceleration and offload of SHA + and AES algorithms. If you choose 'M' here, this module will be + called ccp_crypto. diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile new file mode 100644 index 000000000..55a1f3951 --- /dev/null +++ b/drivers/crypto/ccp/Makefile @@ -0,0 +1,10 @@ +obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o +ccp-objs := ccp-dev.o ccp-ops.o ccp-platform.o +ccp-$(CONFIG_PCI) += ccp-pci.o + +obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o +ccp-crypto-objs := ccp-crypto-main.o \ + ccp-crypto-aes.o \ + ccp-crypto-aes-cmac.o \ + ccp-crypto-aes-xts.o \ + ccp-crypto-sha.o diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c new file mode 100644 index 000000000..ea7e84469 --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c @@ -0,0 +1,367 @@ +/* + * AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support + * + * Copyright (C) 2013 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky <thomas.lendacky@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/delay.h> +#include <linux/scatterlist.h> +#include <linux/crypto.h> +#include <crypto/algapi.h> +#include <crypto/aes.h> +#include <crypto/hash.h> +#include <crypto/internal/hash.h> +#include <crypto/scatterwalk.h> + +#include "ccp-crypto.h" + +static int ccp_aes_cmac_complete(struct crypto_async_request *async_req, + int ret) +{ + struct ahash_request *req = ahash_request_cast(async_req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); + unsigned int digest_size = crypto_ahash_digestsize(tfm); + + if (ret) + goto e_free; + + if (rctx->hash_rem) { + /* Save remaining data to buffer */ + unsigned int offset = rctx->nbytes - rctx->hash_rem; + + scatterwalk_map_and_copy(rctx->buf, rctx->src, + offset, rctx->hash_rem, 0); + rctx->buf_count = rctx->hash_rem; + } else { + rctx->buf_count = 0; + } + + /* Update result area if supplied */ + if (req->result) + memcpy(req->result, rctx->iv, digest_size); + +e_free: + sg_free_table(&rctx->data_sg); + + return ret; +} + +static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes, + unsigned int final) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); + struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); + struct scatterlist *sg, *cmac_key_sg = NULL; + unsigned int block_size = + crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); + unsigned int need_pad, sg_count; + gfp_t gfp; + u64 len; + int ret; + + if (!ctx->u.aes.key_len) + return -EINVAL; + + if (nbytes) + rctx->null_msg = 0; + + len = (u64)rctx->buf_count + (u64)nbytes; + + if (!final && (len <= block_size)) { + scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src, + 0, nbytes, 0); + rctx->buf_count += nbytes; + + return 0; + } + + rctx->src = req->src; + rctx->nbytes = nbytes; + + rctx->final = final; + rctx->hash_rem = final ? 
0 : len & (block_size - 1); + rctx->hash_cnt = len - rctx->hash_rem; + if (!final && !rctx->hash_rem) { + /* CCP can't do zero length final, so keep some data around */ + rctx->hash_cnt -= block_size; + rctx->hash_rem = block_size; + } + + if (final && (rctx->null_msg || (len & (block_size - 1)))) + need_pad = 1; + else + need_pad = 0; + + sg_init_one(&rctx->iv_sg, rctx->iv, sizeof(rctx->iv)); + + /* Build the data scatterlist table - allocate enough entries for all + * possible data pieces (buffer, input data, padding) + */ + sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2; + gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? + GFP_KERNEL : GFP_ATOMIC; + ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp); + if (ret) + return ret; + + sg = NULL; + if (rctx->buf_count) { + sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); + sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg); + } + + if (nbytes) + sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src); + + if (need_pad) { + int pad_length = block_size - (len & (block_size - 1)); + + rctx->hash_cnt += pad_length; + + memset(rctx->pad, 0, sizeof(rctx->pad)); + rctx->pad[0] = 0x80; + sg_init_one(&rctx->pad_sg, rctx->pad, pad_length); + sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg); + } + if (sg) { + sg_mark_end(sg); + sg = rctx->data_sg.sgl; + } + + /* Initialize the K1/K2 scatterlist */ + if (final) + cmac_key_sg = (need_pad) ? &ctx->u.aes.k2_sg + : &ctx->u.aes.k1_sg; + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_AES; + rctx->cmd.u.aes.type = ctx->u.aes.type; + rctx->cmd.u.aes.mode = ctx->u.aes.mode; + rctx->cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT; + rctx->cmd.u.aes.key = &ctx->u.aes.key_sg; + rctx->cmd.u.aes.key_len = ctx->u.aes.key_len; + rctx->cmd.u.aes.iv = &rctx->iv_sg; + rctx->cmd.u.aes.iv_len = AES_BLOCK_SIZE; + rctx->cmd.u.aes.src = sg; + rctx->cmd.u.aes.src_len = rctx->hash_cnt; + rctx->cmd.u.aes.dst = NULL; + rctx->cmd.u.aes.cmac_key = cmac_key_sg; + rctx->cmd.u.aes.cmac_key_len = ctx->u.aes.kn_len; + rctx->cmd.u.aes.cmac_final = final; + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; +} + +static int ccp_aes_cmac_init(struct ahash_request *req) +{ + struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); + + memset(rctx, 0, sizeof(*rctx)); + + rctx->null_msg = 1; + + return 0; +} + +static int ccp_aes_cmac_update(struct ahash_request *req) +{ + return ccp_do_cmac_update(req, req->nbytes, 0); +} + +static int ccp_aes_cmac_final(struct ahash_request *req) +{ + return ccp_do_cmac_update(req, 0, 1); +} + +static int ccp_aes_cmac_finup(struct ahash_request *req) +{ + return ccp_do_cmac_update(req, req->nbytes, 1); +} + +static int ccp_aes_cmac_digest(struct ahash_request *req) +{ + int ret; + + ret = ccp_aes_cmac_init(req); + if (ret) + return ret; + + return ccp_aes_cmac_finup(req); +} + +static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int key_len) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct ccp_crypto_ahash_alg *alg = + ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm)); + u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo; + u64 rb_hi = 0x00, rb_lo = 0x87; + __be64 *gk; + int ret; + + switch (key_len) { + case AES_KEYSIZE_128: + ctx->u.aes.type = CCP_AES_TYPE_128; + break; + case AES_KEYSIZE_192: + ctx->u.aes.type = CCP_AES_TYPE_192; + break; + case AES_KEYSIZE_256: + ctx->u.aes.type = CCP_AES_TYPE_256; + break; + default: + 
crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + ctx->u.aes.mode = alg->mode; + + /* Set to zero until complete */ + ctx->u.aes.key_len = 0; + + /* Set the key for the AES cipher used to generate the keys */ + ret = crypto_cipher_setkey(ctx->u.aes.tfm_cipher, key, key_len); + if (ret) + return ret; + + /* Encrypt a block of zeroes - use key area in context */ + memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key)); + crypto_cipher_encrypt_one(ctx->u.aes.tfm_cipher, ctx->u.aes.key, + ctx->u.aes.key); + + /* Generate K1 and K2 */ + k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key)); + k0_lo = be64_to_cpu(*((__be64 *)ctx->u.aes.key + 1)); + + k1_hi = (k0_hi << 1) | (k0_lo >> 63); + k1_lo = k0_lo << 1; + if (ctx->u.aes.key[0] & 0x80) { + k1_hi ^= rb_hi; + k1_lo ^= rb_lo; + } + gk = (__be64 *)ctx->u.aes.k1; + *gk = cpu_to_be64(k1_hi); + gk++; + *gk = cpu_to_be64(k1_lo); + + k2_hi = (k1_hi << 1) | (k1_lo >> 63); + k2_lo = k1_lo << 1; + if (ctx->u.aes.k1[0] & 0x80) { + k2_hi ^= rb_hi; + k2_lo ^= rb_lo; + } + gk = (__be64 *)ctx->u.aes.k2; + *gk = cpu_to_be64(k2_hi); + gk++; + *gk = cpu_to_be64(k2_lo); + + ctx->u.aes.kn_len = sizeof(ctx->u.aes.k1); + sg_init_one(&ctx->u.aes.k1_sg, ctx->u.aes.k1, sizeof(ctx->u.aes.k1)); + sg_init_one(&ctx->u.aes.k2_sg, ctx->u.aes.k2, sizeof(ctx->u.aes.k2)); + + /* Save the supplied key */ + memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key)); + memcpy(ctx->u.aes.key, key, key_len); + ctx->u.aes.key_len = key_len; + sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); + + return ret; +} + +static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct crypto_cipher *cipher_tfm; + + ctx->complete = ccp_aes_cmac_complete; + ctx->u.aes.key_len = 0; + + crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx)); + + cipher_tfm = crypto_alloc_cipher("aes", 0, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(cipher_tfm)) { + pr_warn("could not load aes cipher driver\n"); + return PTR_ERR(cipher_tfm); + } + ctx->u.aes.tfm_cipher = cipher_tfm; + + return 0; +} + +static void ccp_aes_cmac_cra_exit(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->u.aes.tfm_cipher) + crypto_free_cipher(ctx->u.aes.tfm_cipher); + ctx->u.aes.tfm_cipher = NULL; +} + +int ccp_register_aes_cmac_algs(struct list_head *head) +{ + struct ccp_crypto_ahash_alg *ccp_alg; + struct ahash_alg *alg; + struct hash_alg_common *halg; + struct crypto_alg *base; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + ccp_alg->mode = CCP_AES_MODE_CMAC; + + alg = &ccp_alg->alg; + alg->init = ccp_aes_cmac_init; + alg->update = ccp_aes_cmac_update; + alg->final = ccp_aes_cmac_final; + alg->finup = ccp_aes_cmac_finup; + alg->digest = ccp_aes_cmac_digest; + alg->setkey = ccp_aes_cmac_setkey; + + halg = &alg->halg; + halg->digestsize = AES_BLOCK_SIZE; + + base = &halg->base; + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)"); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp"); + base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK; + base->cra_blocksize = AES_BLOCK_SIZE; + base->cra_ctxsize = sizeof(struct ccp_ctx); + base->cra_priority = CCP_CRA_PRIORITY; + base->cra_type = &crypto_ahash_type; + base->cra_init = ccp_aes_cmac_cra_init; + base->cra_exit = 
ccp_aes_cmac_cra_exit; + base->cra_module = THIS_MODULE; + + ret = crypto_register_ahash(alg); + if (ret) { + pr_err("%s ahash algorithm registration error (%d)\n", + base->cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c new file mode 100644 index 000000000..52c7395cb --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c @@ -0,0 +1,277 @@ +/* + * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support + * + * Copyright (C) 2013 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky <thomas.lendacky@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/delay.h> +#include <linux/scatterlist.h> +#include <linux/crypto.h> +#include <crypto/algapi.h> +#include <crypto/aes.h> +#include <crypto/scatterwalk.h> + +#include "ccp-crypto.h" + +struct ccp_aes_xts_def { + const char *name; + const char *drv_name; +}; + +static struct ccp_aes_xts_def aes_xts_algs[] = { + { + .name = "xts(aes)", + .drv_name = "xts-aes-ccp", + }, +}; + +struct ccp_unit_size_map { + unsigned int size; + u32 value; +}; + +static struct ccp_unit_size_map unit_size_map[] = { + { + .size = 4096, + .value = CCP_XTS_AES_UNIT_SIZE_4096, + }, + { + .size = 2048, + .value = CCP_XTS_AES_UNIT_SIZE_2048, + }, + { + .size = 1024, + .value = CCP_XTS_AES_UNIT_SIZE_1024, + }, + { + .size = 512, + .value = CCP_XTS_AES_UNIT_SIZE_512, + }, + { + .size = 256, + .value = CCP_XTS_AES_UNIT_SIZE__LAST, + }, + { + .size = 128, + .value = CCP_XTS_AES_UNIT_SIZE__LAST, + }, + { + .size = 64, + .value = CCP_XTS_AES_UNIT_SIZE__LAST, + }, + { + .size = 32, + .value = CCP_XTS_AES_UNIT_SIZE__LAST, + }, + { + .size = 16, + .value = CCP_XTS_AES_UNIT_SIZE_16, + }, + { + .size = 1, + .value = CCP_XTS_AES_UNIT_SIZE__LAST, + }, +}; + +static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret) +{ + struct ablkcipher_request *req = ablkcipher_request_cast(async_req); + struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); + + if (ret) + return ret; + + memcpy(req->info, rctx->iv, AES_BLOCK_SIZE); + + return 0; +} + +static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm)); + + /* Only support 128-bit AES key with a 128-bit Tweak key, + * otherwise use the fallback + */ + switch (key_len) { + case AES_KEYSIZE_128 * 2: + memcpy(ctx->u.aes.key, key, key_len); + break; + } + ctx->u.aes.key_len = key_len / 2; + sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); + + return crypto_ablkcipher_setkey(ctx->u.aes.tfm_ablkcipher, key, + key_len); +} + +static int ccp_aes_xts_crypt(struct ablkcipher_request *req, + unsigned int encrypt) +{ + struct crypto_tfm *tfm = + crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); + struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); + unsigned int unit; + int ret; + + if (!ctx->u.aes.key_len) + return -EINVAL; + + if (req->nbytes & (AES_BLOCK_SIZE - 1)) + return -EINVAL; + + if (!req->info) + return -EINVAL; + + for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) + if (!(req->nbytes & (unit_size_map[unit].size - 1))) + break; + + if 
((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) || + (ctx->u.aes.key_len != AES_KEYSIZE_128)) { + /* Use the fallback to process the request for any + * unsupported unit sizes or key sizes + */ + ablkcipher_request_set_tfm(req, ctx->u.aes.tfm_ablkcipher); + ret = (encrypt) ? crypto_ablkcipher_encrypt(req) : + crypto_ablkcipher_decrypt(req); + ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); + + return ret; + } + + memcpy(rctx->iv, req->info, AES_BLOCK_SIZE); + sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE); + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_XTS_AES_128; + rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT + : CCP_AES_ACTION_DECRYPT; + rctx->cmd.u.xts.unit_size = unit_size_map[unit].value; + rctx->cmd.u.xts.key = &ctx->u.aes.key_sg; + rctx->cmd.u.xts.key_len = ctx->u.aes.key_len; + rctx->cmd.u.xts.iv = &rctx->iv_sg; + rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE; + rctx->cmd.u.xts.src = req->src; + rctx->cmd.u.xts.src_len = req->nbytes; + rctx->cmd.u.xts.dst = req->dst; + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; +} + +static int ccp_aes_xts_encrypt(struct ablkcipher_request *req) +{ + return ccp_aes_xts_crypt(req, 1); +} + +static int ccp_aes_xts_decrypt(struct ablkcipher_request *req) +{ + return ccp_aes_xts_crypt(req, 0); +} + +static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_ablkcipher *fallback_tfm; + + ctx->complete = ccp_aes_xts_complete; + ctx->u.aes.key_len = 0; + + fallback_tfm = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), 0, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback_tfm)) { + pr_warn("could not load fallback driver %s\n", + crypto_tfm_alg_name(tfm)); + return PTR_ERR(fallback_tfm); + } + ctx->u.aes.tfm_ablkcipher = fallback_tfm; + + tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx) + + fallback_tfm->base.crt_ablkcipher.reqsize; + + return 0; +} + +static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->u.aes.tfm_ablkcipher) + crypto_free_ablkcipher(ctx->u.aes.tfm_ablkcipher); + ctx->u.aes.tfm_ablkcipher = NULL; +} + +static int ccp_register_aes_xts_alg(struct list_head *head, + const struct ccp_aes_xts_def *def) +{ + struct ccp_crypto_ablkcipher_alg *ccp_alg; + struct crypto_alg *alg; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + alg = &ccp_alg->alg; + + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->drv_name); + alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK; + alg->cra_blocksize = AES_BLOCK_SIZE; + alg->cra_ctxsize = sizeof(struct ccp_ctx); + alg->cra_priority = CCP_CRA_PRIORITY; + alg->cra_type = &crypto_ablkcipher_type; + alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey; + alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt; + alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt; + alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2; + alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2; + alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE; + alg->cra_init = ccp_aes_xts_cra_init; + alg->cra_exit = ccp_aes_xts_cra_exit; + alg->cra_module = THIS_MODULE; + + ret = crypto_register_alg(alg); + if (ret) { + 
pr_err("%s ablkcipher algorithm registration error (%d)\n", + alg->cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return 0; +} + +int ccp_register_aes_xts_algs(struct list_head *head) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(aes_xts_algs); i++) { + ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c new file mode 100644 index 000000000..7984f9108 --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-aes.c @@ -0,0 +1,368 @@ +/* + * AMD Cryptographic Coprocessor (CCP) AES crypto API support + * + * Copyright (C) 2013 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky <thomas.lendacky@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/delay.h> +#include <linux/scatterlist.h> +#include <linux/crypto.h> +#include <crypto/algapi.h> +#include <crypto/aes.h> +#include <crypto/ctr.h> +#include <crypto/scatterwalk.h> + +#include "ccp-crypto.h" + +static int ccp_aes_complete(struct crypto_async_request *async_req, int ret) +{ + struct ablkcipher_request *req = ablkcipher_request_cast(async_req); + struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); + + if (ret) + return ret; + + if (ctx->u.aes.mode != CCP_AES_MODE_ECB) + memcpy(req->info, rctx->iv, AES_BLOCK_SIZE); + + return 0; +} + +static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm)); + struct ccp_crypto_ablkcipher_alg *alg = + ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm)); + + switch (key_len) { + case AES_KEYSIZE_128: + ctx->u.aes.type = CCP_AES_TYPE_128; + break; + case AES_KEYSIZE_192: + ctx->u.aes.type = CCP_AES_TYPE_192; + break; + case AES_KEYSIZE_256: + ctx->u.aes.type = CCP_AES_TYPE_256; + break; + default: + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + ctx->u.aes.mode = alg->mode; + ctx->u.aes.key_len = key_len; + + memcpy(ctx->u.aes.key, key, key_len); + sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); + + return 0; +} + +static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); + struct scatterlist *iv_sg = NULL; + unsigned int iv_len = 0; + int ret; + + if (!ctx->u.aes.key_len) + return -EINVAL; + + if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) || + (ctx->u.aes.mode == CCP_AES_MODE_CBC) || + (ctx->u.aes.mode == CCP_AES_MODE_CFB)) && + (req->nbytes & (AES_BLOCK_SIZE - 1))) + return -EINVAL; + + if (ctx->u.aes.mode != CCP_AES_MODE_ECB) { + if (!req->info) + return -EINVAL; + + memcpy(rctx->iv, req->info, AES_BLOCK_SIZE); + iv_sg = &rctx->iv_sg; + iv_len = AES_BLOCK_SIZE; + sg_init_one(iv_sg, rctx->iv, iv_len); + } + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_AES; + rctx->cmd.u.aes.type = ctx->u.aes.type; + rctx->cmd.u.aes.mode = ctx->u.aes.mode; + rctx->cmd.u.aes.action = + (encrypt) ? 
CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT; + rctx->cmd.u.aes.key = &ctx->u.aes.key_sg; + rctx->cmd.u.aes.key_len = ctx->u.aes.key_len; + rctx->cmd.u.aes.iv = iv_sg; + rctx->cmd.u.aes.iv_len = iv_len; + rctx->cmd.u.aes.src = req->src; + rctx->cmd.u.aes.src_len = req->nbytes; + rctx->cmd.u.aes.dst = req->dst; + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; +} + +static int ccp_aes_encrypt(struct ablkcipher_request *req) +{ + return ccp_aes_crypt(req, true); +} + +static int ccp_aes_decrypt(struct ablkcipher_request *req) +{ + return ccp_aes_crypt(req, false); +} + +static int ccp_aes_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->complete = ccp_aes_complete; + ctx->u.aes.key_len = 0; + + tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx); + + return 0; +} + +static void ccp_aes_cra_exit(struct crypto_tfm *tfm) +{ +} + +static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req, + int ret) +{ + struct ablkcipher_request *req = ablkcipher_request_cast(async_req); + struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); + + /* Restore the original pointer */ + req->info = rctx->rfc3686_info; + + return ccp_aes_complete(async_req, ret); +} + +static int ccp_aes_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm)); + + if (key_len < CTR_RFC3686_NONCE_SIZE) + return -EINVAL; + + key_len -= CTR_RFC3686_NONCE_SIZE; + memcpy(ctx->u.aes.nonce, key + key_len, CTR_RFC3686_NONCE_SIZE); + + return ccp_aes_setkey(tfm, key, key_len); +} + +static int ccp_aes_rfc3686_crypt(struct ablkcipher_request *req, bool encrypt) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); + u8 *iv; + + /* Initialize the CTR block */ + iv = rctx->rfc3686_iv; + memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE); + + iv += CTR_RFC3686_NONCE_SIZE; + memcpy(iv, req->info, CTR_RFC3686_IV_SIZE); + + iv += CTR_RFC3686_IV_SIZE; + *(__be32 *)iv = cpu_to_be32(1); + + /* Point to the new IV */ + rctx->rfc3686_info = req->info; + req->info = rctx->rfc3686_iv; + + return ccp_aes_crypt(req, encrypt); +} + +static int ccp_aes_rfc3686_encrypt(struct ablkcipher_request *req) +{ + return ccp_aes_rfc3686_crypt(req, true); +} + +static int ccp_aes_rfc3686_decrypt(struct ablkcipher_request *req) +{ + return ccp_aes_rfc3686_crypt(req, false); +} + +static int ccp_aes_rfc3686_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->complete = ccp_aes_rfc3686_complete; + ctx->u.aes.key_len = 0; + + tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx); + + return 0; +} + +static void ccp_aes_rfc3686_cra_exit(struct crypto_tfm *tfm) +{ +} + +static struct crypto_alg ccp_aes_defaults = { + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct ccp_ctx), + .cra_priority = CCP_CRA_PRIORITY, + .cra_type = &crypto_ablkcipher_type, + .cra_init = ccp_aes_cra_init, + .cra_exit = ccp_aes_cra_exit, + .cra_module = THIS_MODULE, + .cra_ablkcipher = { + .setkey = ccp_aes_setkey, + .encrypt = ccp_aes_encrypt, + .decrypt = ccp_aes_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + }, +}; + +static struct crypto_alg ccp_aes_rfc3686_defaults = { + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + 
CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = CTR_RFC3686_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct ccp_ctx), + .cra_priority = CCP_CRA_PRIORITY, + .cra_type = &crypto_ablkcipher_type, + .cra_init = ccp_aes_rfc3686_cra_init, + .cra_exit = ccp_aes_rfc3686_cra_exit, + .cra_module = THIS_MODULE, + .cra_ablkcipher = { + .setkey = ccp_aes_rfc3686_setkey, + .encrypt = ccp_aes_rfc3686_encrypt, + .decrypt = ccp_aes_rfc3686_decrypt, + .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + }, +}; + +struct ccp_aes_def { + enum ccp_aes_mode mode; + const char *name; + const char *driver_name; + unsigned int blocksize; + unsigned int ivsize; + struct crypto_alg *alg_defaults; +}; + +static struct ccp_aes_def aes_algs[] = { + { + .mode = CCP_AES_MODE_ECB, + .name = "ecb(aes)", + .driver_name = "ecb-aes-ccp", + .blocksize = AES_BLOCK_SIZE, + .ivsize = 0, + .alg_defaults = &ccp_aes_defaults, + }, + { + .mode = CCP_AES_MODE_CBC, + .name = "cbc(aes)", + .driver_name = "cbc-aes-ccp", + .blocksize = AES_BLOCK_SIZE, + .ivsize = AES_BLOCK_SIZE, + .alg_defaults = &ccp_aes_defaults, + }, + { + .mode = CCP_AES_MODE_CFB, + .name = "cfb(aes)", + .driver_name = "cfb-aes-ccp", + .blocksize = AES_BLOCK_SIZE, + .ivsize = AES_BLOCK_SIZE, + .alg_defaults = &ccp_aes_defaults, + }, + { + .mode = CCP_AES_MODE_OFB, + .name = "ofb(aes)", + .driver_name = "ofb-aes-ccp", + .blocksize = 1, + .ivsize = AES_BLOCK_SIZE, + .alg_defaults = &ccp_aes_defaults, + }, + { + .mode = CCP_AES_MODE_CTR, + .name = "ctr(aes)", + .driver_name = "ctr-aes-ccp", + .blocksize = 1, + .ivsize = AES_BLOCK_SIZE, + .alg_defaults = &ccp_aes_defaults, + }, + { + .mode = CCP_AES_MODE_CTR, + .name = "rfc3686(ctr(aes))", + .driver_name = "rfc3686-ctr-aes-ccp", + .blocksize = 1, + .ivsize = CTR_RFC3686_IV_SIZE, + .alg_defaults = &ccp_aes_rfc3686_defaults, + }, +}; + +static int ccp_register_aes_alg(struct list_head *head, + const struct ccp_aes_def *def) +{ + struct ccp_crypto_ablkcipher_alg *ccp_alg; + struct crypto_alg *alg; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + ccp_alg->mode = def->mode; + + /* Copy the defaults and override as necessary */ + alg = &ccp_alg->alg; + *alg = *def->alg_defaults; + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->driver_name); + alg->cra_blocksize = def->blocksize; + alg->cra_ablkcipher.ivsize = def->ivsize; + + ret = crypto_register_alg(alg); + if (ret) { + pr_err("%s ablkcipher algorithm registration error (%d)\n", + alg->cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return 0; +} + +int ccp_register_aes_algs(struct list_head *head) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { + ret = ccp_register_aes_alg(head, &aes_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c new file mode 100644 index 000000000..bdec01ec6 --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -0,0 +1,391 @@ +/* + * AMD Cryptographic Coprocessor (CCP) crypto API support + * + * Copyright (C) 2013 Advanced Micro Devices, Inc. 
+ * + * Author: Tom Lendacky <thomas.lendacky@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/ccp.h> +#include <linux/scatterlist.h> +#include <crypto/internal/hash.h> + +#include "ccp-crypto.h" + +MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0.0"); +MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support"); + +static unsigned int aes_disable; +module_param(aes_disable, uint, 0444); +MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value"); + +static unsigned int sha_disable; +module_param(sha_disable, uint, 0444); +MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value"); + +/* List heads for the supported algorithms */ +static LIST_HEAD(hash_algs); +static LIST_HEAD(cipher_algs); + +/* For any tfm, requests for that tfm must be returned on the order + * received. With multiple queues available, the CCP can process more + * than one cmd at a time. Therefore we must maintain a cmd list to insure + * the proper ordering of requests on a given tfm. + */ +struct ccp_crypto_queue { + struct list_head cmds; + struct list_head *backlog; + unsigned int cmd_count; +}; + +#define CCP_CRYPTO_MAX_QLEN 100 + +static struct ccp_crypto_queue req_queue; +static spinlock_t req_queue_lock; + +struct ccp_crypto_cmd { + struct list_head entry; + + struct ccp_cmd *cmd; + + /* Save the crypto_tfm and crypto_async_request addresses + * separately to avoid any reference to a possibly invalid + * crypto_async_request structure after invoking the request + * callback + */ + struct crypto_async_request *req; + struct crypto_tfm *tfm; + + /* Used for held command processing to determine state */ + int ret; +}; + +struct ccp_crypto_cpu { + struct work_struct work; + struct completion completion; + struct ccp_crypto_cmd *crypto_cmd; + int err; +}; + +static inline bool ccp_crypto_success(int err) +{ + if (err && (err != -EINPROGRESS) && (err != -EBUSY)) + return false; + + return true; +} + +static struct ccp_crypto_cmd *ccp_crypto_cmd_complete( + struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog) +{ + struct ccp_crypto_cmd *held = NULL, *tmp; + unsigned long flags; + + *backlog = NULL; + + spin_lock_irqsave(&req_queue_lock, flags); + + /* Held cmds will be after the current cmd in the queue so start + * searching for a cmd with a matching tfm for submission. + */ + tmp = crypto_cmd; + list_for_each_entry_continue(tmp, &req_queue.cmds, entry) { + if (crypto_cmd->tfm != tmp->tfm) + continue; + held = tmp; + break; + } + + /* Process the backlog: + * Because cmds can be executed from any point in the cmd list + * special precautions have to be taken when handling the backlog. 
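+ * In particular, if req_queue.backlog currently points at the entry of the
+ * cmd being completed, it is advanced past that entry before the entry is
+ * removed from the list below, so the *backlog returned to the caller never
+ * references a cmd that is about to be deleted.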
+ */ + if (req_queue.backlog != &req_queue.cmds) { + /* Skip over this cmd if it is the next backlog cmd */ + if (req_queue.backlog == &crypto_cmd->entry) + req_queue.backlog = crypto_cmd->entry.next; + + *backlog = container_of(req_queue.backlog, + struct ccp_crypto_cmd, entry); + req_queue.backlog = req_queue.backlog->next; + + /* Skip over this cmd if it is now the next backlog cmd */ + if (req_queue.backlog == &crypto_cmd->entry) + req_queue.backlog = crypto_cmd->entry.next; + } + + /* Remove the cmd entry from the list of cmds */ + req_queue.cmd_count--; + list_del(&crypto_cmd->entry); + + spin_unlock_irqrestore(&req_queue_lock, flags); + + return held; +} + +static void ccp_crypto_complete(void *data, int err) +{ + struct ccp_crypto_cmd *crypto_cmd = data; + struct ccp_crypto_cmd *held, *next, *backlog; + struct crypto_async_request *req = crypto_cmd->req; + struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm); + int ret; + + if (err == -EINPROGRESS) { + /* Only propagate the -EINPROGRESS if necessary */ + if (crypto_cmd->ret == -EBUSY) { + crypto_cmd->ret = -EINPROGRESS; + req->complete(req, -EINPROGRESS); + } + + return; + } + + /* Operation has completed - update the queue before invoking + * the completion callbacks and retrieve the next cmd (cmd with + * a matching tfm) that can be submitted to the CCP. + */ + held = ccp_crypto_cmd_complete(crypto_cmd, &backlog); + if (backlog) { + backlog->ret = -EINPROGRESS; + backlog->req->complete(backlog->req, -EINPROGRESS); + } + + /* Transition the state from -EBUSY to -EINPROGRESS first */ + if (crypto_cmd->ret == -EBUSY) + req->complete(req, -EINPROGRESS); + + /* Completion callbacks */ + ret = err; + if (ctx->complete) + ret = ctx->complete(req, ret); + req->complete(req, ret); + + /* Submit the next cmd */ + while (held) { + /* Since we have already queued the cmd, we must indicate that + * we can backlog so as not to "lose" this request. + */ + held->cmd->flags |= CCP_CMD_MAY_BACKLOG; + ret = ccp_enqueue_cmd(held->cmd); + if (ccp_crypto_success(ret)) + break; + + /* Error occurred, report it and get the next entry */ + ctx = crypto_tfm_ctx(held->req->tfm); + if (ctx->complete) + ret = ctx->complete(held->req, ret); + held->req->complete(held->req, ret); + + next = ccp_crypto_cmd_complete(held, &backlog); + if (backlog) { + backlog->ret = -EINPROGRESS; + backlog->req->complete(backlog->req, -EINPROGRESS); + } + + kfree(held); + held = next; + } + + kfree(crypto_cmd); +} + +static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) +{ + struct ccp_crypto_cmd *active = NULL, *tmp; + unsigned long flags; + bool free_cmd = true; + int ret; + + spin_lock_irqsave(&req_queue_lock, flags); + + /* Check if the cmd can/should be queued */ + if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { + ret = -EBUSY; + if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) + goto e_lock; + } + + /* Look for an entry with the same tfm. If there is a cmd + * with the same tfm in the list then the current cmd cannot + * be submitted to the CCP yet. 
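+ * The cmd is only added to the list in that case; ccp_crypto_complete()
+ * will submit it to the CCP once the earlier cmd for the same tfm
+ * finishes, preserving per-tfm request ordering.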
+ */ + list_for_each_entry(tmp, &req_queue.cmds, entry) { + if (crypto_cmd->tfm != tmp->tfm) + continue; + active = tmp; + break; + } + + ret = -EINPROGRESS; + if (!active) { + ret = ccp_enqueue_cmd(crypto_cmd->cmd); + if (!ccp_crypto_success(ret)) + goto e_lock; /* Error, don't queue it */ + if ((ret == -EBUSY) && + !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) + goto e_lock; /* Not backlogging, don't queue it */ + } + + if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { + ret = -EBUSY; + if (req_queue.backlog == &req_queue.cmds) + req_queue.backlog = &crypto_cmd->entry; + } + crypto_cmd->ret = ret; + + req_queue.cmd_count++; + list_add_tail(&crypto_cmd->entry, &req_queue.cmds); + + free_cmd = false; + +e_lock: + spin_unlock_irqrestore(&req_queue_lock, flags); + + if (free_cmd) + kfree(crypto_cmd); + + return ret; +} + +/** + * ccp_crypto_enqueue_request - queue an crypto async request for processing + * by the CCP + * + * @req: crypto_async_request struct to be processed + * @cmd: ccp_cmd struct to be sent to the CCP + */ +int ccp_crypto_enqueue_request(struct crypto_async_request *req, + struct ccp_cmd *cmd) +{ + struct ccp_crypto_cmd *crypto_cmd; + gfp_t gfp; + + gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; + + crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp); + if (!crypto_cmd) + return -ENOMEM; + + /* The tfm pointer must be saved and not referenced from the + * crypto_async_request (req) pointer because it is used after + * completion callback for the request and the req pointer + * might not be valid anymore. + */ + crypto_cmd->cmd = cmd; + crypto_cmd->req = req; + crypto_cmd->tfm = req->tfm; + + cmd->callback = ccp_crypto_complete; + cmd->data = crypto_cmd; + + if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) + cmd->flags |= CCP_CMD_MAY_BACKLOG; + else + cmd->flags &= ~CCP_CMD_MAY_BACKLOG; + + return ccp_crypto_enqueue_cmd(crypto_cmd); +} + +struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table, + struct scatterlist *sg_add) +{ + struct scatterlist *sg, *sg_last = NULL; + + for (sg = table->sgl; sg; sg = sg_next(sg)) + if (!sg_page(sg)) + break; + BUG_ON(!sg); + + for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) { + sg_set_page(sg, sg_page(sg_add), sg_add->length, + sg_add->offset); + sg_last = sg; + } + BUG_ON(sg_add); + + return sg_last; +} + +static int ccp_register_algs(void) +{ + int ret; + + if (!aes_disable) { + ret = ccp_register_aes_algs(&cipher_algs); + if (ret) + return ret; + + ret = ccp_register_aes_cmac_algs(&hash_algs); + if (ret) + return ret; + + ret = ccp_register_aes_xts_algs(&cipher_algs); + if (ret) + return ret; + } + + if (!sha_disable) { + ret = ccp_register_sha_algs(&hash_algs); + if (ret) + return ret; + } + + return 0; +} + +static void ccp_unregister_algs(void) +{ + struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp; + struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp; + + list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) { + crypto_unregister_ahash(&ahash_alg->alg); + list_del(&ahash_alg->entry); + kfree(ahash_alg); + } + + list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) { + crypto_unregister_alg(&ablk_alg->alg); + list_del(&ablk_alg->entry); + kfree(ablk_alg); + } +} + +static int ccp_crypto_init(void) +{ + int ret; + + ret = ccp_present(); + if (ret) + return ret; + + spin_lock_init(&req_queue_lock); + INIT_LIST_HEAD(&req_queue.cmds); + req_queue.backlog = &req_queue.cmds; + req_queue.cmd_count = 0; + + ret = ccp_register_algs(); + if (ret) + 
ccp_unregister_algs(); + + return ret; +} + +static void ccp_crypto_exit(void) +{ + ccp_unregister_algs(); +} + +module_init(ccp_crypto_init); +module_exit(ccp_crypto_exit); diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c new file mode 100644 index 000000000..507b34e0c --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -0,0 +1,438 @@ +/* + * AMD Cryptographic Coprocessor (CCP) SHA crypto API support + * + * Copyright (C) 2013 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky <thomas.lendacky@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/delay.h> +#include <linux/scatterlist.h> +#include <linux/crypto.h> +#include <crypto/algapi.h> +#include <crypto/hash.h> +#include <crypto/internal/hash.h> +#include <crypto/sha.h> +#include <crypto/scatterwalk.h> + +#include "ccp-crypto.h" + +static int ccp_sha_complete(struct crypto_async_request *async_req, int ret) +{ + struct ahash_request *req = ahash_request_cast(async_req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); + unsigned int digest_size = crypto_ahash_digestsize(tfm); + + if (ret) + goto e_free; + + if (rctx->hash_rem) { + /* Save remaining data to buffer */ + unsigned int offset = rctx->nbytes - rctx->hash_rem; + + scatterwalk_map_and_copy(rctx->buf, rctx->src, + offset, rctx->hash_rem, 0); + rctx->buf_count = rctx->hash_rem; + } else { + rctx->buf_count = 0; + } + + /* Update result area if supplied */ + if (req->result) + memcpy(req->result, rctx->ctx, digest_size); + +e_free: + sg_free_table(&rctx->data_sg); + + return ret; +} + +static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes, + unsigned int final) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); + struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); + struct scatterlist *sg; + unsigned int block_size = + crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); + unsigned int sg_count; + gfp_t gfp; + u64 len; + int ret; + + len = (u64)rctx->buf_count + (u64)nbytes; + + if (!final && (len <= block_size)) { + scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src, + 0, nbytes, 0); + rctx->buf_count += nbytes; + + return 0; + } + + rctx->src = req->src; + rctx->nbytes = nbytes; + + rctx->final = final; + rctx->hash_rem = final ? 0 : len & (block_size - 1); + rctx->hash_cnt = len - rctx->hash_rem; + if (!final && !rctx->hash_rem) { + /* CCP can't do zero length final, so keep some data around */ + rctx->hash_cnt -= block_size; + rctx->hash_rem = block_size; + } + + /* Initialize the context scatterlist */ + sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx)); + + sg = NULL; + if (rctx->buf_count && nbytes) { + /* Build the data scatterlist table - allocate enough entries + * for both data pieces (buffer and input data) + */ + gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
+ GFP_KERNEL : GFP_ATOMIC; + sg_count = sg_nents(req->src) + 1; + ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp); + if (ret) + return ret; + + sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); + sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg); + sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src); + sg_mark_end(sg); + + sg = rctx->data_sg.sgl; + } else if (rctx->buf_count) { + sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); + + sg = &rctx->buf_sg; + } else if (nbytes) { + sg = req->src; + } + + rctx->msg_bits += (rctx->hash_cnt << 3); /* Total in bits */ + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_SHA; + rctx->cmd.u.sha.type = rctx->type; + rctx->cmd.u.sha.ctx = &rctx->ctx_sg; + rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx); + rctx->cmd.u.sha.src = sg; + rctx->cmd.u.sha.src_len = rctx->hash_cnt; + rctx->cmd.u.sha.opad = ctx->u.sha.key_len ? + &ctx->u.sha.opad_sg : NULL; + rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ? + ctx->u.sha.opad_count : 0; + rctx->cmd.u.sha.first = rctx->first; + rctx->cmd.u.sha.final = rctx->final; + rctx->cmd.u.sha.msg_bits = rctx->msg_bits; + + rctx->first = 0; + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; +} + +static int ccp_sha_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); + struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_crypto_ahash_alg *alg = + ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm)); + unsigned int block_size = + crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); + + memset(rctx, 0, sizeof(*rctx)); + + rctx->type = alg->type; + rctx->first = 1; + + if (ctx->u.sha.key_len) { + /* Buffer the HMAC key for first update */ + memcpy(rctx->buf, ctx->u.sha.ipad, block_size); + rctx->buf_count = block_size; + } + + return 0; +} + +static int ccp_sha_update(struct ahash_request *req) +{ + return ccp_do_sha_update(req, req->nbytes, 0); +} + +static int ccp_sha_final(struct ahash_request *req) +{ + return ccp_do_sha_update(req, 0, 1); +} + +static int ccp_sha_finup(struct ahash_request *req) +{ + return ccp_do_sha_update(req, req->nbytes, 1); +} + +static int ccp_sha_digest(struct ahash_request *req) +{ + int ret; + + ret = ccp_sha_init(req); + if (ret) + return ret; + + return ccp_sha_finup(req); +} + +static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int key_len) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct crypto_shash *shash = ctx->u.sha.hmac_tfm; + + SHASH_DESC_ON_STACK(sdesc, shash); + + unsigned int block_size = crypto_shash_blocksize(shash); + unsigned int digest_size = crypto_shash_digestsize(shash); + int i, ret; + + /* Set to zero until complete */ + ctx->u.sha.key_len = 0; + + /* Clear key area to provide zero padding for keys smaller + * than the block size + */ + memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key)); + + if (key_len > block_size) { + /* Must hash the input key */ + sdesc->tfm = shash; + sdesc->flags = crypto_ahash_get_flags(tfm) & + CRYPTO_TFM_REQ_MAY_SLEEP; + + ret = crypto_shash_digest(sdesc, key, key_len, + ctx->u.sha.key); + if (ret) { + crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + key_len = digest_size; + } else { + memcpy(ctx->u.sha.key, key, key_len); + } + + for (i = 0; i < block_size; i++) { + ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ 0x36; + ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c; + } + + 
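+ /* These pads implement the standard HMAC construction,
+ * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)): ccp_sha_init()
+ * buffers the ipad block as the first block of data to be hashed,
+ * while the opad block is handed to the CCP through the cmd's
+ * opad/opad_len fields for the final (outer) hash.
+ */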
sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size); + ctx->u.sha.opad_count = block_size; + + ctx->u.sha.key_len = key_len; + + return 0; +} + +static int ccp_sha_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + + ctx->complete = ccp_sha_complete; + ctx->u.sha.key_len = 0; + + crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx)); + + return 0; +} + +static void ccp_sha_cra_exit(struct crypto_tfm *tfm) +{ +} + +static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm); + struct crypto_shash *hmac_tfm; + + hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0); + if (IS_ERR(hmac_tfm)) { + pr_warn("could not load driver %s needed for HMAC support\n", + alg->child_alg); + return PTR_ERR(hmac_tfm); + } + + ctx->u.sha.hmac_tfm = hmac_tfm; + + return ccp_sha_cra_init(tfm); +} + +static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->u.sha.hmac_tfm) + crypto_free_shash(ctx->u.sha.hmac_tfm); + + ccp_sha_cra_exit(tfm); +} + +struct ccp_sha_def { + const char *name; + const char *drv_name; + enum ccp_sha_type type; + u32 digest_size; + u32 block_size; +}; + +static struct ccp_sha_def sha_algs[] = { + { + .name = "sha1", + .drv_name = "sha1-ccp", + .type = CCP_SHA_TYPE_1, + .digest_size = SHA1_DIGEST_SIZE, + .block_size = SHA1_BLOCK_SIZE, + }, + { + .name = "sha224", + .drv_name = "sha224-ccp", + .type = CCP_SHA_TYPE_224, + .digest_size = SHA224_DIGEST_SIZE, + .block_size = SHA224_BLOCK_SIZE, + }, + { + .name = "sha256", + .drv_name = "sha256-ccp", + .type = CCP_SHA_TYPE_256, + .digest_size = SHA256_DIGEST_SIZE, + .block_size = SHA256_BLOCK_SIZE, + }, +}; + +static int ccp_register_hmac_alg(struct list_head *head, + const struct ccp_sha_def *def, + const struct ccp_crypto_ahash_alg *base_alg) +{ + struct ccp_crypto_ahash_alg *ccp_alg; + struct ahash_alg *alg; + struct hash_alg_common *halg; + struct crypto_alg *base; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + /* Copy the base algorithm and only change what's necessary */ + *ccp_alg = *base_alg; + INIT_LIST_HEAD(&ccp_alg->entry); + + strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME); + + alg = &ccp_alg->alg; + alg->setkey = ccp_sha_setkey; + + halg = &alg->halg; + + base = &halg->base; + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s", + def->drv_name); + base->cra_init = ccp_hmac_sha_cra_init; + base->cra_exit = ccp_hmac_sha_cra_exit; + + ret = crypto_register_ahash(alg); + if (ret) { + pr_err("%s ahash algorithm registration error (%d)\n", + base->cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return ret; +} + +static int ccp_register_sha_alg(struct list_head *head, + const struct ccp_sha_def *def) +{ + struct ccp_crypto_ahash_alg *ccp_alg; + struct ahash_alg *alg; + struct hash_alg_common *halg; + struct crypto_alg *base; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + ccp_alg->type = def->type; + + alg = &ccp_alg->alg; + alg->init = ccp_sha_init; + alg->update = ccp_sha_update; + alg->final = ccp_sha_final; + alg->finup = ccp_sha_finup; + alg->digest = ccp_sha_digest; + + halg =
&alg->halg; + halg->digestsize = def->digest_size; + + base = &halg->base; + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->drv_name); + base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK; + base->cra_blocksize = def->block_size; + base->cra_ctxsize = sizeof(struct ccp_ctx); + base->cra_priority = CCP_CRA_PRIORITY; + base->cra_type = &crypto_ahash_type; + base->cra_init = ccp_sha_cra_init; + base->cra_exit = ccp_sha_cra_exit; + base->cra_module = THIS_MODULE; + + ret = crypto_register_ahash(alg); + if (ret) { + pr_err("%s ahash algorithm registration error (%d)\n", + base->cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + ret = ccp_register_hmac_alg(head, def, ccp_alg); + + return ret; +} + +int ccp_register_sha_algs(struct list_head *head) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(sha_algs); i++) { + ret = ccp_register_sha_alg(head, &sha_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h new file mode 100644 index 000000000..76a96f0f4 --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -0,0 +1,194 @@ +/* + * AMD Cryptographic Coprocessor (CCP) crypto API support + * + * Copyright (C) 2013 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky <thomas.lendacky@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __CCP_CRYPTO_H__ +#define __CCP_CRYPTO_H__ + +#include <linux/list.h> +#include <linux/wait.h> +#include <linux/pci.h> +#include <linux/ccp.h> +#include <linux/crypto.h> +#include <crypto/algapi.h> +#include <crypto/aes.h> +#include <crypto/ctr.h> +#include <crypto/hash.h> +#include <crypto/sha.h> + +#define CCP_CRA_PRIORITY 300 + +struct ccp_crypto_ablkcipher_alg { + struct list_head entry; + + u32 mode; + + struct crypto_alg alg; +}; + +struct ccp_crypto_ahash_alg { + struct list_head entry; + + const __be32 *init; + u32 type; + u32 mode; + + /* Child algorithm used for HMAC, CMAC, etc */ + char child_alg[CRYPTO_MAX_ALG_NAME]; + + struct ahash_alg alg; +}; + +static inline struct ccp_crypto_ablkcipher_alg * + ccp_crypto_ablkcipher_alg(struct crypto_tfm *tfm) +{ + struct crypto_alg *alg = tfm->__crt_alg; + + return container_of(alg, struct ccp_crypto_ablkcipher_alg, alg); +} + +static inline struct ccp_crypto_ahash_alg * + ccp_crypto_ahash_alg(struct crypto_tfm *tfm) +{ + struct crypto_alg *alg = tfm->__crt_alg; + struct ahash_alg *ahash_alg; + + ahash_alg = container_of(alg, struct ahash_alg, halg.base); + + return container_of(ahash_alg, struct ccp_crypto_ahash_alg, alg); +} + +/***** AES related defines *****/ +struct ccp_aes_ctx { + /* Fallback cipher for XTS with unsupported unit sizes */ + struct crypto_ablkcipher *tfm_ablkcipher; + + /* Cipher used to generate CMAC K1/K2 keys */ + struct crypto_cipher *tfm_cipher; + + enum ccp_engine engine; + enum ccp_aes_type type; + enum ccp_aes_mode mode; + + struct scatterlist key_sg; + unsigned int key_len; + u8 key[AES_MAX_KEY_SIZE]; + + u8 nonce[CTR_RFC3686_NONCE_SIZE]; + + /* CMAC key structures */ + struct scatterlist k1_sg; + struct scatterlist k2_sg; + unsigned int kn_len; + u8 k1[AES_BLOCK_SIZE]; + u8 k2[AES_BLOCK_SIZE]; +}; + +struct ccp_aes_req_ctx { + struct scatterlist 
iv_sg; + u8 iv[AES_BLOCK_SIZE]; + + /* Fields used for RFC3686 requests */ + u8 *rfc3686_info; + u8 rfc3686_iv[AES_BLOCK_SIZE]; + + struct ccp_cmd cmd; +}; + +struct ccp_aes_cmac_req_ctx { + unsigned int null_msg; + unsigned int final; + + struct scatterlist *src; + unsigned int nbytes; + + u64 hash_cnt; + unsigned int hash_rem; + + struct sg_table data_sg; + + struct scatterlist iv_sg; + u8 iv[AES_BLOCK_SIZE]; + + struct scatterlist buf_sg; + unsigned int buf_count; + u8 buf[AES_BLOCK_SIZE]; + + struct scatterlist pad_sg; + unsigned int pad_count; + u8 pad[AES_BLOCK_SIZE]; + + struct ccp_cmd cmd; +}; + +/***** SHA related defines *****/ +#define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE +#define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE + +struct ccp_sha_ctx { + struct scatterlist opad_sg; + unsigned int opad_count; + + unsigned int key_len; + u8 key[MAX_SHA_BLOCK_SIZE]; + u8 ipad[MAX_SHA_BLOCK_SIZE]; + u8 opad[MAX_SHA_BLOCK_SIZE]; + struct crypto_shash *hmac_tfm; +}; + +struct ccp_sha_req_ctx { + enum ccp_sha_type type; + + u64 msg_bits; + + unsigned int first; + unsigned int final; + + struct scatterlist *src; + unsigned int nbytes; + + u64 hash_cnt; + unsigned int hash_rem; + + struct sg_table data_sg; + + struct scatterlist ctx_sg; + u8 ctx[MAX_SHA_CONTEXT_SIZE]; + + struct scatterlist buf_sg; + unsigned int buf_count; + u8 buf[MAX_SHA_BLOCK_SIZE]; + + /* CCP driver command */ + struct ccp_cmd cmd; +}; + +/***** Common Context Structure *****/ +struct ccp_ctx { + int (*complete)(struct crypto_async_request *req, int ret); + + union { + struct ccp_aes_ctx aes; + struct ccp_sha_ctx sha; + } u; +}; + +int ccp_crypto_enqueue_request(struct crypto_async_request *req, + struct ccp_cmd *cmd); +struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table, + struct scatterlist *sg_add); + +int ccp_register_aes_algs(struct list_head *head); +int ccp_register_aes_cmac_algs(struct list_head *head); +int ccp_register_aes_xts_algs(struct list_head *head); +int ccp_register_sha_algs(struct list_head *head); + +#endif diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c new file mode 100644 index 000000000..861bacc1b --- /dev/null +++ b/drivers/crypto/ccp/ccp-dev.c @@ -0,0 +1,654 @@ +/* + * AMD Cryptographic Coprocessor (CCP) driver + * + * Copyright (C) 2013 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky <thomas.lendacky@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/delay.h> +#include <linux/hw_random.h> +#include <linux/cpu.h> +#ifdef CONFIG_X86 +#include <asm/cpu_device_id.h> +#endif +#include <linux/ccp.h> + +#include "ccp-dev.h" + +MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0.0"); +MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver"); + +struct ccp_tasklet_data { + struct completion completion; + struct ccp_cmd *cmd; +}; + +static struct ccp_device *ccp_dev; +static inline struct ccp_device *ccp_get_device(void) +{ + return ccp_dev; +} + +static inline void ccp_add_device(struct ccp_device *ccp) +{ + ccp_dev = ccp; +} + +static inline void ccp_del_device(struct ccp_device *ccp) +{ + ccp_dev = NULL; +} + +/** + * ccp_present - check if a CCP device is present + * + * Returns zero if a CCP device is present, -ENODEV otherwise. + */ +int ccp_present(void) +{ + if (ccp_get_device()) + return 0; + + return -ENODEV; +} +EXPORT_SYMBOL_GPL(ccp_present); + +/** + * ccp_enqueue_cmd - queue an operation for processing by the CCP + * + * @cmd: ccp_cmd struct to be processed + * + * Queue a cmd to be processed by the CCP. If queueing the cmd + * would exceed the defined length of the cmd queue the cmd will + * only be queued if the CCP_CMD_MAY_BACKLOG flag is set and will + * result in a return code of -EBUSY. + * + * The callback routine specified in the ccp_cmd struct will be + * called to notify the caller of completion (if the cmd was not + * backlogged) or advancement out of the backlog. If the cmd has + * advanced out of the backlog the "err" value of the callback + * will be -EINPROGRESS. Any other "err" value during callback is + * the result of the operation. 
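+ *
+ * A minimal caller sketch (illustrative only; my_callback() and
+ * my_context are hypothetical names, not part of this driver), once
+ * the engine-specific fields in cmd->u have been filled in:
+ *
+ *   cmd->engine = CCP_ENGINE_SHA;
+ *   cmd->callback = my_callback;
+ *   cmd->data = my_context;
+ *   cmd->flags |= CCP_CMD_MAY_BACKLOG;
+ *   ret = ccp_enqueue_cmd(cmd);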
+ * + * The cmd has been successfully queued if: + * the return code is -EINPROGRESS or + * the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set + */ +int ccp_enqueue_cmd(struct ccp_cmd *cmd) +{ + struct ccp_device *ccp = ccp_get_device(); + unsigned long flags; + unsigned int i; + int ret; + + if (!ccp) + return -ENODEV; + + /* Caller must supply a callback routine */ + if (!cmd->callback) + return -EINVAL; + + cmd->ccp = ccp; + + spin_lock_irqsave(&ccp->cmd_lock, flags); + + i = ccp->cmd_q_count; + + if (ccp->cmd_count >= MAX_CMD_QLEN) { + ret = -EBUSY; + if (cmd->flags & CCP_CMD_MAY_BACKLOG) + list_add_tail(&cmd->entry, &ccp->backlog); + } else { + ret = -EINPROGRESS; + ccp->cmd_count++; + list_add_tail(&cmd->entry, &ccp->cmd); + + /* Find an idle queue */ + if (!ccp->suspending) { + for (i = 0; i < ccp->cmd_q_count; i++) { + if (ccp->cmd_q[i].active) + continue; + + break; + } + } + } + + spin_unlock_irqrestore(&ccp->cmd_lock, flags); + + /* If we found an idle queue, wake it up */ + if (i < ccp->cmd_q_count) + wake_up_process(ccp->cmd_q[i].kthread); + + return ret; +} +EXPORT_SYMBOL_GPL(ccp_enqueue_cmd); + +static void ccp_do_cmd_backlog(struct work_struct *work) +{ + struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work); + struct ccp_device *ccp = cmd->ccp; + unsigned long flags; + unsigned int i; + + cmd->callback(cmd->data, -EINPROGRESS); + + spin_lock_irqsave(&ccp->cmd_lock, flags); + + ccp->cmd_count++; + list_add_tail(&cmd->entry, &ccp->cmd); + + /* Find an idle queue */ + for (i = 0; i < ccp->cmd_q_count; i++) { + if (ccp->cmd_q[i].active) + continue; + + break; + } + + spin_unlock_irqrestore(&ccp->cmd_lock, flags); + + /* If we found an idle queue, wake it up */ + if (i < ccp->cmd_q_count) + wake_up_process(ccp->cmd_q[i].kthread); +} + +static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q) +{ + struct ccp_device *ccp = cmd_q->ccp; + struct ccp_cmd *cmd = NULL; + struct ccp_cmd *backlog = NULL; + unsigned long flags; + + spin_lock_irqsave(&ccp->cmd_lock, flags); + + cmd_q->active = 0; + + if (ccp->suspending) { + cmd_q->suspended = 1; + + spin_unlock_irqrestore(&ccp->cmd_lock, flags); + wake_up_interruptible(&ccp->suspend_queue); + + return NULL; + } + + if (ccp->cmd_count) { + cmd_q->active = 1; + + cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); + list_del(&cmd->entry); + + ccp->cmd_count--; + } + + if (!list_empty(&ccp->backlog)) { + backlog = list_first_entry(&ccp->backlog, struct ccp_cmd, + entry); + list_del(&backlog->entry); + } + + spin_unlock_irqrestore(&ccp->cmd_lock, flags); + + if (backlog) { + INIT_WORK(&backlog->work, ccp_do_cmd_backlog); + schedule_work(&backlog->work); + } + + return cmd; +} + +static void ccp_do_cmd_complete(unsigned long data) +{ + struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data; + struct ccp_cmd *cmd = tdata->cmd; + + cmd->callback(cmd->data, cmd->ret); + complete(&tdata->completion); +} + +static int ccp_cmd_queue_thread(void *data) +{ + struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data; + struct ccp_cmd *cmd; + struct ccp_tasklet_data tdata; + struct tasklet_struct tasklet; + + tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata); + + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { + schedule(); + + set_current_state(TASK_INTERRUPTIBLE); + + cmd = ccp_dequeue_cmd(cmd_q); + if (!cmd) + continue; + + __set_current_state(TASK_RUNNING); + + /* Execute the command */ + cmd->ret = ccp_run_cmd(cmd_q, cmd); + + /* Schedule the completion 
callback */ + tdata.cmd = cmd; + init_completion(&tdata.completion); + tasklet_schedule(&tasklet); + wait_for_completion(&tdata.completion); + } + + __set_current_state(TASK_RUNNING); + + return 0; +} + +static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng); + u32 trng_value; + int len = min_t(int, sizeof(trng_value), max); + + /* + * Locking is provided by the caller so we can update device + * hwrng-related fields safely + */ + trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG); + if (!trng_value) { + /* Zero is returned if no data is available or if a + * bad-entropy error is present. Assume an error if + * we exceed TRNG_RETRIES reads of zero. + */ + if (ccp->hwrng_retries++ > TRNG_RETRIES) + return -EIO; + + return 0; + } + + /* Reset the counter and save the rng value */ + ccp->hwrng_retries = 0; + memcpy(data, &trng_value, len); + + return len; +} + +/** + * ccp_alloc_struct - allocate and initialize the ccp_device struct + * + * @dev: device struct of the CCP + */ +struct ccp_device *ccp_alloc_struct(struct device *dev) +{ + struct ccp_device *ccp; + + ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL); + if (!ccp) + return NULL; + ccp->dev = dev; + + INIT_LIST_HEAD(&ccp->cmd); + INIT_LIST_HEAD(&ccp->backlog); + + spin_lock_init(&ccp->cmd_lock); + mutex_init(&ccp->req_mutex); + mutex_init(&ccp->ksb_mutex); + ccp->ksb_count = KSB_COUNT; + ccp->ksb_start = 0; + + return ccp; +} + +/** + * ccp_init - initialize the CCP device + * + * @ccp: ccp_device struct + */ +int ccp_init(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + struct ccp_cmd_queue *cmd_q; + struct dma_pool *dma_pool; + char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; + unsigned int qmr, qim, i; + int ret; + + /* Find available queues */ + qim = 0; + qmr = ioread32(ccp->io_regs + Q_MASK_REG); + for (i = 0; i < MAX_HW_QUEUES; i++) { + if (!(qmr & (1 << i))) + continue; + + /* Allocate a dma pool for this queue */ + snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i); + dma_pool = dma_pool_create(dma_pool_name, dev, + CCP_DMAPOOL_MAX_SIZE, + CCP_DMAPOOL_ALIGN, 0); + if (!dma_pool) { + dev_err(dev, "unable to allocate dma pool\n"); + ret = -ENOMEM; + goto e_pool; + } + + cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; + ccp->cmd_q_count++; + + cmd_q->ccp = ccp; + cmd_q->id = i; + cmd_q->dma_pool = dma_pool; + + /* Reserve 2 KSB regions for the queue */ + cmd_q->ksb_key = KSB_START + ccp->ksb_start++; + cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++; + ccp->ksb_count -= 2; + + /* Preset some register values and masks that are queue + * number dependent + */ + cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE + + (CMD_Q_STATUS_INCR * i); + cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE + + (CMD_Q_STATUS_INCR * i); + cmd_q->int_ok = 1 << (i * 2); + cmd_q->int_err = 1 << ((i * 2) + 1); + + cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status)); + + init_waitqueue_head(&cmd_q->int_queue); + + /* Build queue interrupt mask (two interrupts per queue) */ + qim |= cmd_q->int_ok | cmd_q->int_err; + +#ifdef CONFIG_ARM64 + /* For arm64 set the recommended queue cache settings */ + iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE + + (CMD_Q_CACHE_INC * i)); +#endif + + dev_dbg(dev, "queue #%u available\n", i); + } + if (ccp->cmd_q_count == 0) { + dev_notice(dev, "no command queues available\n"); + ret = -EIO; + goto e_pool; + } + dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count); + + /*
Disable and clear interrupts until ready */ + iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG); + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + + ioread32(cmd_q->reg_int_status); + ioread32(cmd_q->reg_status); + } + iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG); + + /* Request an irq */ + ret = ccp->get_irq(ccp); + if (ret) { + dev_err(dev, "unable to allocate an IRQ\n"); + goto e_pool; + } + + /* Initialize the queues used to wait for KSB space and suspend */ + init_waitqueue_head(&ccp->ksb_queue); + init_waitqueue_head(&ccp->suspend_queue); + + /* Create a kthread for each queue */ + for (i = 0; i < ccp->cmd_q_count; i++) { + struct task_struct *kthread; + + cmd_q = &ccp->cmd_q[i]; + + kthread = kthread_create(ccp_cmd_queue_thread, cmd_q, + "ccp-q%u", cmd_q->id); + if (IS_ERR(kthread)) { + dev_err(dev, "error creating queue thread (%ld)\n", + PTR_ERR(kthread)); + ret = PTR_ERR(kthread); + goto e_kthread; + } + + cmd_q->kthread = kthread; + wake_up_process(kthread); + } + + /* Register the RNG */ + ccp->hwrng.name = "ccp-rng"; + ccp->hwrng.read = ccp_trng_read; + ret = hwrng_register(&ccp->hwrng); + if (ret) { + dev_err(dev, "error registering hwrng (%d)\n", ret); + goto e_kthread; + } + + /* Make the device struct available before enabling interrupts */ + ccp_add_device(ccp); + + /* Enable interrupts */ + iowrite32(qim, ccp->io_regs + IRQ_MASK_REG); + + return 0; + +e_kthread: + for (i = 0; i < ccp->cmd_q_count; i++) + if (ccp->cmd_q[i].kthread) + kthread_stop(ccp->cmd_q[i].kthread); + + ccp->free_irq(ccp); + +e_pool: + for (i = 0; i < ccp->cmd_q_count; i++) + dma_pool_destroy(ccp->cmd_q[i].dma_pool); + + return ret; +} + +/** + * ccp_destroy - tear down the CCP device + * + * @ccp: ccp_device struct + */ +void ccp_destroy(struct ccp_device *ccp) +{ + struct ccp_cmd_queue *cmd_q; + struct ccp_cmd *cmd; + unsigned int qim, i; + + /* Remove general access to the device struct */ + ccp_del_device(ccp); + + /* Unregister the RNG */ + hwrng_unregister(&ccp->hwrng); + + /* Stop the queue kthreads */ + for (i = 0; i < ccp->cmd_q_count; i++) + if (ccp->cmd_q[i].kthread) + kthread_stop(ccp->cmd_q[i].kthread); + + /* Build queue interrupt mask (two interrupt masks per queue) */ + qim = 0; + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + qim |= cmd_q->int_ok | cmd_q->int_err; + } + + /* Disable and clear interrupts */ + iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG); + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + + ioread32(cmd_q->reg_int_status); + ioread32(cmd_q->reg_status); + } + iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG); + + ccp->free_irq(ccp); + + for (i = 0; i < ccp->cmd_q_count; i++) + dma_pool_destroy(ccp->cmd_q[i].dma_pool); + + /* Flush the cmd and backlog queue */ + while (!list_empty(&ccp->cmd)) { + /* Invoke the callback directly with an error code */ + cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); + list_del(&cmd->entry); + cmd->callback(cmd->data, -ENODEV); + } + while (!list_empty(&ccp->backlog)) { + /* Invoke the callback directly with an error code */ + cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); + list_del(&cmd->entry); + cmd->callback(cmd->data, -ENODEV); + } +} + +/** + * ccp_irq_handler - handle interrupts generated by the CCP device + * + * @irq: the irq associated with the interrupt + * @data: the data value supplied when the irq was created + */ +irqreturn_t ccp_irq_handler(int irq, void *data) +{ + struct device *dev = data; + struct ccp_device *ccp = dev_get_drvdata(dev); + 
struct ccp_cmd_queue *cmd_q; + u32 q_int, status; + unsigned int i; + + status = ioread32(ccp->io_regs + IRQ_STATUS_REG); + + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + + q_int = status & (cmd_q->int_ok | cmd_q->int_err); + if (q_int) { + cmd_q->int_status = status; + cmd_q->q_status = ioread32(cmd_q->reg_status); + cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); + + /* On error, only save the first error value */ + if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error) + cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); + + cmd_q->int_rcvd = 1; + + /* Acknowledge the interrupt and wake the kthread */ + iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG); + wake_up_interruptible(&cmd_q->int_queue); + } + } + + return IRQ_HANDLED; +} + +#ifdef CONFIG_PM +bool ccp_queues_suspended(struct ccp_device *ccp) +{ + unsigned int suspended = 0; + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&ccp->cmd_lock, flags); + + for (i = 0; i < ccp->cmd_q_count; i++) + if (ccp->cmd_q[i].suspended) + suspended++; + + spin_unlock_irqrestore(&ccp->cmd_lock, flags); + + return ccp->cmd_q_count == suspended; +} +#endif + +#ifdef CONFIG_X86 +static const struct x86_cpu_id ccp_support[] = { + { X86_VENDOR_AMD, 22, }, + { }, +}; +#endif + +static int __init ccp_mod_init(void) +{ +#ifdef CONFIG_X86 + struct cpuinfo_x86 *cpuinfo = &boot_cpu_data; + int ret; + + if (!x86_match_cpu(ccp_support)) + return -ENODEV; + + switch (cpuinfo->x86) { + case 22: + if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63)) + return -ENODEV; + + ret = ccp_pci_init(); + if (ret) + return ret; + + /* Don't leave the driver loaded if init failed */ + if (!ccp_get_device()) { + ccp_pci_exit(); + return -ENODEV; + } + + return 0; + + break; + } +#endif + +#ifdef CONFIG_ARM64 + int ret; + + ret = ccp_platform_init(); + if (ret) + return ret; + + /* Don't leave the driver loaded if init failed */ + if (!ccp_get_device()) { + ccp_platform_exit(); + return -ENODEV; + } + + return 0; +#endif + + return -ENODEV; +} + +static void __exit ccp_mod_exit(void) +{ +#ifdef CONFIG_X86 + struct cpuinfo_x86 *cpuinfo = &boot_cpu_data; + + switch (cpuinfo->x86) { + case 22: + ccp_pci_exit(); + break; + } +#endif + +#ifdef CONFIG_ARM64 + ccp_platform_exit(); +#endif +} + +module_init(ccp_mod_init); +module_exit(ccp_mod_exit); diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h new file mode 100644 index 000000000..6ff89031f --- /dev/null +++ b/drivers/crypto/ccp/ccp-dev.h @@ -0,0 +1,276 @@ +/* + * AMD Cryptographic Coprocessor (CCP) driver + * + * Copyright (C) 2013 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky <thomas.lendacky@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __CCP_DEV_H__ +#define __CCP_DEV_H__ + +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/wait.h> +#include <linux/dmapool.h> +#include <linux/hw_random.h> +#include <linux/bitops.h> + +#define MAX_DMAPOOL_NAME_LEN 32 + +#define MAX_HW_QUEUES 5 +#define MAX_CMD_QLEN 100 + +#define TRNG_RETRIES 10 + +#define CACHE_NONE 0x00 +#define CACHE_WB_NO_ALLOC 0xb7 + +/****** Register Mappings ******/ +#define Q_MASK_REG 0x000 +#define TRNG_OUT_REG 0x00c +#define IRQ_MASK_REG 0x040 +#define IRQ_STATUS_REG 0x200 + +#define DEL_CMD_Q_JOB 0x124 +#define DEL_Q_ACTIVE 0x00000200 +#define DEL_Q_ID_SHIFT 6 + +#define CMD_REQ0 0x180 +#define CMD_REQ_INCR 0x04 + +#define CMD_Q_STATUS_BASE 0x210 +#define CMD_Q_INT_STATUS_BASE 0x214 +#define CMD_Q_STATUS_INCR 0x20 + +#define CMD_Q_CACHE_BASE 0x228 +#define CMD_Q_CACHE_INC 0x20 + +#define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f) +#define CMD_Q_DEPTH(__qs) (((__qs) >> 12) & 0x0000000f) + +/****** REQ0 Related Values ******/ +#define REQ0_WAIT_FOR_WRITE 0x00000004 +#define REQ0_INT_ON_COMPLETE 0x00000002 +#define REQ0_STOP_ON_COMPLETE 0x00000001 + +#define REQ0_CMD_Q_SHIFT 9 +#define REQ0_JOBID_SHIFT 3 + +/****** REQ1 Related Values ******/ +#define REQ1_PROTECT_SHIFT 27 +#define REQ1_ENGINE_SHIFT 23 +#define REQ1_KEY_KSB_SHIFT 2 + +#define REQ1_EOM 0x00000002 +#define REQ1_INIT 0x00000001 + +/* AES Related Values */ +#define REQ1_AES_TYPE_SHIFT 21 +#define REQ1_AES_MODE_SHIFT 18 +#define REQ1_AES_ACTION_SHIFT 17 +#define REQ1_AES_CFB_SIZE_SHIFT 10 + +/* XTS-AES Related Values */ +#define REQ1_XTS_AES_SIZE_SHIFT 10 + +/* SHA Related Values */ +#define REQ1_SHA_TYPE_SHIFT 21 + +/* RSA Related Values */ +#define REQ1_RSA_MOD_SIZE_SHIFT 10 + +/* Pass-Through Related Values */ +#define REQ1_PT_BW_SHIFT 12 +#define REQ1_PT_BS_SHIFT 10 + +/* ECC Related Values */ +#define REQ1_ECC_AFFINE_CONVERT 0x00200000 +#define REQ1_ECC_FUNCTION_SHIFT 18 + +/****** REQ4 Related Values ******/ +#define REQ4_KSB_SHIFT 18 +#define REQ4_MEMTYPE_SHIFT 16 + +/****** REQ6 Related Values ******/ +#define REQ6_MEMTYPE_SHIFT 16 + +/****** Key Storage Block ******/ +#define KSB_START 77 +#define KSB_END 127 +#define KSB_COUNT (KSB_END - KSB_START + 1) +#define CCP_KSB_BITS 256 +#define CCP_KSB_BYTES 32 + +#define CCP_JOBID_MASK 0x0000003f + +#define CCP_DMAPOOL_MAX_SIZE 64 +#define CCP_DMAPOOL_ALIGN BIT(5) + +#define CCP_REVERSE_BUF_SIZE 64 + +#define CCP_AES_KEY_KSB_COUNT 1 +#define CCP_AES_CTX_KSB_COUNT 1 + +#define CCP_XTS_AES_KEY_KSB_COUNT 1 +#define CCP_XTS_AES_CTX_KSB_COUNT 1 + +#define CCP_SHA_KSB_COUNT 1 + +#define CCP_RSA_MAX_WIDTH 4096 + +#define CCP_PASSTHRU_BLOCKSIZE 256 +#define CCP_PASSTHRU_MASKSIZE 32 +#define CCP_PASSTHRU_KSB_COUNT 1 + +#define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */ +#define CCP_ECC_MAX_OPERANDS 6 +#define CCP_ECC_MAX_OUTPUTS 3 +#define CCP_ECC_SRC_BUF_SIZE 448 +#define CCP_ECC_DST_BUF_SIZE 192 +#define CCP_ECC_OPERAND_SIZE 64 +#define CCP_ECC_OUTPUT_SIZE 64 +#define CCP_ECC_RESULT_OFFSET 60 +#define CCP_ECC_RESULT_SUCCESS 0x0001 + +struct ccp_device; +struct ccp_cmd; + +struct ccp_cmd_queue { + struct ccp_device *ccp; + + /* Queue identifier */ + u32 id; + + /* Queue dma pool */ + struct dma_pool *dma_pool; + + /* Queue reserved KSB regions */ + u32 ksb_key; + u32 ksb_ctx; + + /* Queue processing thread */ + struct task_struct *kthread; + unsigned int active; + unsigned int suspended; + + /* Number of free command slots available */ + unsigned int 
free_slots; + + /* Interrupt masks */ + u32 int_ok; + u32 int_err; + + /* Register addresses for queue */ + void __iomem *reg_status; + void __iomem *reg_int_status; + + /* Status values from job */ + u32 int_status; + u32 q_status; + u32 q_int_status; + u32 cmd_error; + + /* Interrupt wait queue */ + wait_queue_head_t int_queue; + unsigned int int_rcvd; +} ____cacheline_aligned; + +struct ccp_device { + struct device *dev; + + /* + * Bus specific device information + */ + void *dev_specific; + int (*get_irq)(struct ccp_device *ccp); + void (*free_irq)(struct ccp_device *ccp); + unsigned int irq; + + /* + * I/O area used for device communication. The register mapping + * starts at an offset into the mapped bar. + * The CMD_REQx registers and the Delete_Cmd_Queue_Job register + * need to be protected while a command queue thread is accessing + * them. + */ + struct mutex req_mutex ____cacheline_aligned; + void __iomem *io_map; + void __iomem *io_regs; + + /* + * Master lists that all cmds are queued on. Because there can be + * more than one CCP command queue that can process a cmd, a separate + * backlog list is needed so that the backlog completion call + * completes before the cmd is available for execution. + */ + spinlock_t cmd_lock ____cacheline_aligned; + unsigned int cmd_count; + struct list_head cmd; + struct list_head backlog; + + /* + * The command queues. These represent the queues available on the + * CCP for processing cmds + */ + struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES]; + unsigned int cmd_q_count; + + /* + * Support for the CCP True RNG + */ + struct hwrng hwrng; + unsigned int hwrng_retries; + + /* + * A counter used to generate job-ids for cmds submitted to the CCP + */ + atomic_t current_id ____cacheline_aligned; + + /* + * The CCP uses key storage blocks (KSB) to maintain context for certain + * operations. To prevent multiple cmds from using the same KSB range + * a command queue reserves a KSB range for the duration of the cmd. + * Each queue will, however, reserve 2 KSB blocks for operations that + * only require single KSB entries (e.g. AES context/iv and key) in order + * to avoid allocation contention. This will reserve at most 10 KSB + * entries, leaving 40 KSB entries available for dynamic allocation. + */ + struct mutex ksb_mutex ____cacheline_aligned; + DECLARE_BITMAP(ksb, KSB_COUNT); + wait_queue_head_t ksb_queue; + unsigned int ksb_avail; + unsigned int ksb_count; + u32 ksb_start; + + /* Suspend support */ + unsigned int suspending; + wait_queue_head_t suspend_queue; + + /* DMA caching attribute support */ + unsigned int axcache; +}; + +int ccp_pci_init(void); +void ccp_pci_exit(void); + +int ccp_platform_init(void); +void ccp_platform_exit(void); + +struct ccp_device *ccp_alloc_struct(struct device *dev); +int ccp_init(struct ccp_device *ccp); +void ccp_destroy(struct ccp_device *ccp); +bool ccp_queues_suspended(struct ccp_device *ccp); + +irqreturn_t ccp_irq_handler(int irq, void *data); + +int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd); + +#endif diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c new file mode 100644 index 000000000..71f2e3c89 --- /dev/null +++ b/drivers/crypto/ccp/ccp-ops.c @@ -0,0 +1,2126 @@ +/* + * AMD Cryptographic Coprocessor (CCP) driver + * + * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * + * Author: Tom Lendacky <thomas.lendacky@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> +#include <linux/kthread.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/delay.h> +#include <linux/ccp.h> +#include <linux/scatterlist.h> +#include <crypto/scatterwalk.h> +#include <crypto/sha.h> + +#include "ccp-dev.h" + +enum ccp_memtype { + CCP_MEMTYPE_SYSTEM = 0, + CCP_MEMTYPE_KSB, + CCP_MEMTYPE_LOCAL, + CCP_MEMTYPE__LAST, +}; + +struct ccp_dma_info { + dma_addr_t address; + unsigned int offset; + unsigned int length; + enum dma_data_direction dir; +}; + +struct ccp_dm_workarea { + struct device *dev; + struct dma_pool *dma_pool; + unsigned int length; + + u8 *address; + struct ccp_dma_info dma; +}; + +struct ccp_sg_workarea { + struct scatterlist *sg; + unsigned int nents; + unsigned int length; + + struct scatterlist *dma_sg; + struct device *dma_dev; + unsigned int dma_count; + enum dma_data_direction dma_dir; + + unsigned int sg_used; + + u64 bytes_left; +}; + +struct ccp_data { + struct ccp_sg_workarea sg_wa; + struct ccp_dm_workarea dm_wa; +}; + +struct ccp_mem { + enum ccp_memtype type; + union { + struct ccp_dma_info dma; + u32 ksb; + } u; +}; + +struct ccp_aes_op { + enum ccp_aes_type type; + enum ccp_aes_mode mode; + enum ccp_aes_action action; +}; + +struct ccp_xts_aes_op { + enum ccp_aes_action action; + enum ccp_xts_aes_unit_size unit_size; +}; + +struct ccp_sha_op { + enum ccp_sha_type type; + u64 msg_bits; +}; + +struct ccp_rsa_op { + u32 mod_size; + u32 input_len; +}; + +struct ccp_passthru_op { + enum ccp_passthru_bitwise bit_mod; + enum ccp_passthru_byteswap byte_swap; +}; + +struct ccp_ecc_op { + enum ccp_ecc_function function; +}; + +struct ccp_op { + struct ccp_cmd_queue *cmd_q; + + u32 jobid; + u32 ioc; + u32 soc; + u32 ksb_key; + u32 ksb_ctx; + u32 init; + u32 eom; + + struct ccp_mem src; + struct ccp_mem dst; + + union { + struct ccp_aes_op aes; + struct ccp_xts_aes_op xts; + struct ccp_sha_op sha; + struct ccp_rsa_op rsa; + struct ccp_passthru_op passthru; + struct ccp_ecc_op ecc; + } u; +}; + +/* SHA initial context values */ +static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { + cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), + cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), + cpu_to_be32(SHA1_H4), 0, 0, 0, +}; + +static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { + cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), + cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), + cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), + cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), +}; + +static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { + cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), + cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), + cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), + cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), +}; + +/* The CCP cannot perform zero-length sha operations so the caller + * is required to buffer data for the final operation. However, a + * sha operation for a message with a total length of zero is valid + * so known values are required to supply the result. 
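+ * (These are simply the well-known digests of the empty message,
+ * e.g. SHA-256("") begins with e3 b0 c4 42, zero-padded out to
+ * CCP_SHA_CTXSIZE bytes for the shorter digest sizes.)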
+ */ +static const u8 ccp_sha1_zero[CCP_SHA_CTXSIZE] = { + 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, + 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, + 0xaf, 0xd8, 0x07, 0x09, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ccp_sha224_zero[CCP_SHA_CTXSIZE] = { + 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, + 0x47, 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, + 0x15, 0xa2, 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, + 0xc5, 0xb3, 0xe4, 0x2f, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ccp_sha256_zero[CCP_SHA_CTXSIZE] = { + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, + 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, + 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, + 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55, +}; + +static u32 ccp_addr_lo(struct ccp_dma_info *info) +{ + return lower_32_bits(info->address + info->offset); +} + +static u32 ccp_addr_hi(struct ccp_dma_info *info) +{ + return upper_32_bits(info->address + info->offset) & 0x0000ffff; +} + +static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count) +{ + struct ccp_cmd_queue *cmd_q = op->cmd_q; + struct ccp_device *ccp = cmd_q->ccp; + void __iomem *cr_addr; + u32 cr0, cmd; + unsigned int i; + int ret = 0; + + /* We could read a status register to see how many free slots + * are actually available, but reading that register resets it + * and you could lose some error information. + */ + cmd_q->free_slots--; + + cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT) + | (op->jobid << REQ0_JOBID_SHIFT) + | REQ0_WAIT_FOR_WRITE; + + if (op->soc) + cr0 |= REQ0_STOP_ON_COMPLETE + | REQ0_INT_ON_COMPLETE; + + if (op->ioc || !cmd_q->free_slots) + cr0 |= REQ0_INT_ON_COMPLETE; + + /* Start at CMD_REQ1 */ + cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR; + + mutex_lock(&ccp->req_mutex); + + /* Write CMD_REQ1 through CMD_REQx first */ + for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR) + iowrite32(*(cr + i), cr_addr); + + /* Tell the CCP to start */ + wmb(); + iowrite32(cr0, ccp->io_regs + CMD_REQ0); + + mutex_unlock(&ccp->req_mutex); + + if (cr0 & REQ0_INT_ON_COMPLETE) { + /* Wait for the job to complete */ + ret = wait_event_interruptible(cmd_q->int_queue, + cmd_q->int_rcvd); + if (ret || cmd_q->cmd_error) { + /* On error delete all related jobs from the queue */ + cmd = (cmd_q->id << DEL_Q_ID_SHIFT) + | op->jobid; + + iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB); + + if (!ret) + ret = -EIO; + } else if (op->soc) { + /* Delete just head job from the queue on SoC */ + cmd = DEL_Q_ACTIVE + | (cmd_q->id << DEL_Q_ID_SHIFT) + | op->jobid; + + iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB); + } + + cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status); + + cmd_q->int_rcvd = 0; + } + + return ret; +} + +static int ccp_perform_aes(struct ccp_op *op) +{ + u32 cr[6]; + + /* Fill out the register contents for REQ1 through REQ6 */ + cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT) + | (op->u.aes.type << REQ1_AES_TYPE_SHIFT) + | (op->u.aes.mode << REQ1_AES_MODE_SHIFT) + | (op->u.aes.action << REQ1_AES_ACTION_SHIFT) + | (op->ksb_key << REQ1_KEY_KSB_SHIFT); + cr[1] = op->src.u.dma.length - 1; + cr[2] = ccp_addr_lo(&op->src.u.dma); + cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) + | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->src.u.dma); + cr[4] = ccp_addr_lo(&op->dst.u.dma); + cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->dst.u.dma); + + if (op->u.aes.mode == CCP_AES_MODE_CFB) + cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT); + + if (op->eom) + cr[0] |= REQ1_EOM; + + 
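+ /* The driver sets op->init only on the first chunk of a cmd and
+ * op->eom only on the last chunk of the data; ccp_process_data()
+ * clears op->init after the first pass.
+ */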
if (op->init) + cr[0] |= REQ1_INIT; + + return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); +} + +static int ccp_perform_xts_aes(struct ccp_op *op) +{ + u32 cr[6]; + + /* Fill out the register contents for REQ1 through REQ6 */ + cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT) + | (op->u.xts.action << REQ1_AES_ACTION_SHIFT) + | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT) + | (op->ksb_key << REQ1_KEY_KSB_SHIFT); + cr[1] = op->src.u.dma.length - 1; + cr[2] = ccp_addr_lo(&op->src.u.dma); + cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) + | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->src.u.dma); + cr[4] = ccp_addr_lo(&op->dst.u.dma); + cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->dst.u.dma); + + if (op->eom) + cr[0] |= REQ1_EOM; + + if (op->init) + cr[0] |= REQ1_INIT; + + return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); +} + +static int ccp_perform_sha(struct ccp_op *op) +{ + u32 cr[6]; + + /* Fill out the register contents for REQ1 through REQ6 */ + cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT) + | (op->u.sha.type << REQ1_SHA_TYPE_SHIFT) + | REQ1_INIT; + cr[1] = op->src.u.dma.length - 1; + cr[2] = ccp_addr_lo(&op->src.u.dma); + cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) + | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->src.u.dma); + + if (op->eom) { + cr[0] |= REQ1_EOM; + cr[4] = lower_32_bits(op->u.sha.msg_bits); + cr[5] = upper_32_bits(op->u.sha.msg_bits); + } else { + cr[4] = 0; + cr[5] = 0; + } + + return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); +} + +static int ccp_perform_rsa(struct ccp_op *op) +{ + u32 cr[6]; + + /* Fill out the register contents for REQ1 through REQ6 */ + cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT) + | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT) + | (op->ksb_key << REQ1_KEY_KSB_SHIFT) + | REQ1_EOM; + cr[1] = op->u.rsa.input_len - 1; + cr[2] = ccp_addr_lo(&op->src.u.dma); + cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) + | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->src.u.dma); + cr[4] = ccp_addr_lo(&op->dst.u.dma); + cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->dst.u.dma); + + return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); +} + +static int ccp_perform_passthru(struct ccp_op *op) +{ + u32 cr[6]; + + /* Fill out the register contents for REQ1 through REQ6 */ + cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT) + | (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT) + | (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT); + + if (op->src.type == CCP_MEMTYPE_SYSTEM) + cr[1] = op->src.u.dma.length - 1; + else + cr[1] = op->dst.u.dma.length - 1; + + if (op->src.type == CCP_MEMTYPE_SYSTEM) { + cr[2] = ccp_addr_lo(&op->src.u.dma); + cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->src.u.dma); + + if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP) + cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT); + } else { + cr[2] = op->src.u.ksb * CCP_KSB_BYTES; + cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT); + } + + if (op->dst.type == CCP_MEMTYPE_SYSTEM) { + cr[4] = ccp_addr_lo(&op->dst.u.dma); + cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->dst.u.dma); + } else { + cr[4] = op->dst.u.ksb * CCP_KSB_BYTES; + cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT); + } + + if (op->eom) + cr[0] |= REQ1_EOM; + + return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); +} + +static int ccp_perform_ecc(struct ccp_op *op) +{ + u32 cr[6]; + + /* Fill out the register contents for REQ1 through REQ6 */ + cr[0] = REQ1_ECC_AFFINE_CONVERT + | (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT) + | 
(op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT) + | REQ1_EOM; + cr[1] = op->src.u.dma.length - 1; + cr[2] = ccp_addr_lo(&op->src.u.dma); + cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->src.u.dma); + cr[4] = ccp_addr_lo(&op->dst.u.dma); + cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->dst.u.dma); + + return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); +} + +static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count) +{ + int start; + + for (;;) { + mutex_lock(&ccp->ksb_mutex); + + start = (u32)bitmap_find_next_zero_area(ccp->ksb, + ccp->ksb_count, + ccp->ksb_start, + count, 0); + if (start <= ccp->ksb_count) { + bitmap_set(ccp->ksb, start, count); + + mutex_unlock(&ccp->ksb_mutex); + break; + } + + ccp->ksb_avail = 0; + + mutex_unlock(&ccp->ksb_mutex); + + /* Wait for KSB entries to become available */ + if (wait_event_interruptible(ccp->ksb_queue, ccp->ksb_avail)) + return 0; + } + + return KSB_START + start; +} + +static void ccp_free_ksb(struct ccp_device *ccp, unsigned int start, + unsigned int count) +{ + if (!start) + return; + + mutex_lock(&ccp->ksb_mutex); + + bitmap_clear(ccp->ksb, start - KSB_START, count); + + ccp->ksb_avail = 1; + + mutex_unlock(&ccp->ksb_mutex); + + wake_up_interruptible_all(&ccp->ksb_queue); +} + +static u32 ccp_gen_jobid(struct ccp_device *ccp) +{ + return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK; +} + +static void ccp_sg_free(struct ccp_sg_workarea *wa) +{ + if (wa->dma_count) + dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir); + + wa->dma_count = 0; +} + +static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, + struct scatterlist *sg, u64 len, + enum dma_data_direction dma_dir) +{ + memset(wa, 0, sizeof(*wa)); + + wa->sg = sg; + if (!sg) + return 0; + + wa->nents = sg_nents(sg); + wa->length = sg->length; + wa->bytes_left = len; + wa->sg_used = 0; + + if (len == 0) + return 0; + + if (dma_dir == DMA_NONE) + return 0; + + wa->dma_sg = sg; + wa->dma_dev = dev; + wa->dma_dir = dma_dir; + wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); + if (!wa->dma_count) + return -ENOMEM; + + return 0; +} + +static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len) +{ + unsigned int nbytes = min_t(u64, len, wa->bytes_left); + + if (!wa->sg) + return; + + wa->sg_used += nbytes; + wa->bytes_left -= nbytes; + if (wa->sg_used == wa->sg->length) { + wa->sg = sg_next(wa->sg); + wa->sg_used = 0; + } +} + +static void ccp_dm_free(struct ccp_dm_workarea *wa) +{ + if (wa->length <= CCP_DMAPOOL_MAX_SIZE) { + if (wa->address) + dma_pool_free(wa->dma_pool, wa->address, + wa->dma.address); + } else { + if (wa->dma.address) + dma_unmap_single(wa->dev, wa->dma.address, wa->length, + wa->dma.dir); + kfree(wa->address); + } + + wa->address = NULL; + wa->dma.address = 0; +} + +static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa, + struct ccp_cmd_queue *cmd_q, + unsigned int len, + enum dma_data_direction dir) +{ + memset(wa, 0, sizeof(*wa)); + + if (!len) + return 0; + + wa->dev = cmd_q->ccp->dev; + wa->length = len; + + if (len <= CCP_DMAPOOL_MAX_SIZE) { + wa->dma_pool = cmd_q->dma_pool; + + wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL, + &wa->dma.address); + if (!wa->address) + return -ENOMEM; + + wa->dma.length = CCP_DMAPOOL_MAX_SIZE; + + memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE); + } else { + wa->address = kzalloc(len, GFP_KERNEL); + if (!wa->address) + return -ENOMEM; + + wa->dma.address = dma_map_single(wa->dev, wa->address, len, + dir); 
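+ /* A zero handle means the streaming mapping of the kzalloc'd
+ * buffer failed, so give up.
+ */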
+ if (!wa->dma.address) + return -ENOMEM; + + wa->dma.length = len; + } + wa->dma.dir = dir; + + return 0; +} + +static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, + struct scatterlist *sg, unsigned int sg_offset, + unsigned int len) +{ + WARN_ON(!wa->address); + + scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, + 0); +} + +static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, + struct scatterlist *sg, unsigned int sg_offset, + unsigned int len) +{ + WARN_ON(!wa->address); + + scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, + 1); +} + +static void ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa, + struct scatterlist *sg, + unsigned int len, unsigned int se_len, + bool sign_extend) +{ + unsigned int nbytes, sg_offset, dm_offset, ksb_len, i; + u8 buffer[CCP_REVERSE_BUF_SIZE]; + + BUG_ON(se_len > sizeof(buffer)); + + sg_offset = len; + dm_offset = 0; + nbytes = len; + while (nbytes) { + ksb_len = min_t(unsigned int, nbytes, se_len); + sg_offset -= ksb_len; + + scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 0); + for (i = 0; i < ksb_len; i++) + wa->address[dm_offset + i] = buffer[ksb_len - i - 1]; + + dm_offset += ksb_len; + nbytes -= ksb_len; + + if ((ksb_len != se_len) && sign_extend) { + /* Must sign-extend to nearest sign-extend length */ + if (wa->address[dm_offset - 1] & 0x80) + memset(wa->address + dm_offset, 0xff, + se_len - ksb_len); + } + } +} + +static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa, + struct scatterlist *sg, + unsigned int len) +{ + unsigned int nbytes, sg_offset, dm_offset, ksb_len, i; + u8 buffer[CCP_REVERSE_BUF_SIZE]; + + sg_offset = 0; + dm_offset = len; + nbytes = len; + while (nbytes) { + ksb_len = min_t(unsigned int, nbytes, sizeof(buffer)); + dm_offset -= ksb_len; + + for (i = 0; i < ksb_len; i++) + buffer[ksb_len - i - 1] = wa->address[dm_offset + i]; + scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 1); + + sg_offset += ksb_len; + nbytes -= ksb_len; + } +} + +static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q) +{ + ccp_dm_free(&data->dm_wa); + ccp_sg_free(&data->sg_wa); +} + +static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q, + struct scatterlist *sg, u64 sg_len, + unsigned int dm_len, + enum dma_data_direction dir) +{ + int ret; + + memset(data, 0, sizeof(*data)); + + ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len, + dir); + if (ret) + goto e_err; + + ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir); + if (ret) + goto e_err; + + return 0; + +e_err: + ccp_free_data(data, cmd_q); + + return ret; +} + +static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from) +{ + struct ccp_sg_workarea *sg_wa = &data->sg_wa; + struct ccp_dm_workarea *dm_wa = &data->dm_wa; + unsigned int buf_count, nbytes; + + /* Clear the buffer if setting it */ + if (!from) + memset(dm_wa->address, 0, dm_wa->length); + + if (!sg_wa->sg) + return 0; + + /* Perform the copy operation + * nbytes will always be <= UINT_MAX because dm_wa->length is + * an unsigned int + */ + nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length); + scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used, + nbytes, from); + + /* Update the structures and generate the count */ + buf_count = 0; + while (sg_wa->bytes_left && (buf_count < dm_wa->length)) { + nbytes = min(sg_wa->sg->length - sg_wa->sg_used, + dm_wa->length - buf_count); + nbytes = min_t(u64, sg_wa->bytes_left, 
nbytes); + + buf_count += nbytes; + ccp_update_sg_workarea(sg_wa, nbytes); + } + + return buf_count; +} + +static unsigned int ccp_fill_queue_buf(struct ccp_data *data) +{ + return ccp_queue_buf(data, 0); +} + +static unsigned int ccp_empty_queue_buf(struct ccp_data *data) +{ + return ccp_queue_buf(data, 1); +} + +static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, + struct ccp_op *op, unsigned int block_size, + bool blocksize_op) +{ + unsigned int sg_src_len, sg_dst_len, op_len; + + /* The CCP can only DMA from/to one address each per operation. This + * requires that we find the smallest DMA area between the source + * and destination. The resulting len values will always be <= UINT_MAX + * because the dma length is an unsigned int. + */ + sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used; + sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len); + + if (dst) { + sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used; + sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len); + op_len = min(sg_src_len, sg_dst_len); + } else { + op_len = sg_src_len; + } + + /* The data operation length will be at least block_size in length + * or the smaller of available sg room remaining for the source or + * the destination + */ + op_len = max(op_len, block_size); + + /* Unless we have to buffer data, there's no reason to wait */ + op->soc = 0; + + if (sg_src_len < block_size) { + /* Not enough data in the sg element, so it + * needs to be buffered into a blocksize chunk + */ + int cp_len = ccp_fill_queue_buf(src); + + op->soc = 1; + op->src.u.dma.address = src->dm_wa.dma.address; + op->src.u.dma.offset = 0; + op->src.u.dma.length = (blocksize_op) ? block_size : cp_len; + } else { + /* Enough data in the sg element, but we need to + * adjust for any previously copied data + */ + op->src.u.dma.address = sg_dma_address(src->sg_wa.sg); + op->src.u.dma.offset = src->sg_wa.sg_used; + op->src.u.dma.length = op_len & ~(block_size - 1); + + ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length); + } + + if (dst) { + if (sg_dst_len < block_size) { + /* Not enough room in the sg element or we're on the + * last piece of data (when using padding), so the + * output needs to be buffered into a blocksize chunk + */ + op->soc = 1; + op->dst.u.dma.address = dst->dm_wa.dma.address; + op->dst.u.dma.offset = 0; + op->dst.u.dma.length = op->src.u.dma.length; + } else { + /* Enough room in the sg element, but we need to + * adjust for any previously used area + */ + op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg); + op->dst.u.dma.offset = dst->sg_wa.sg_used; + op->dst.u.dma.length = op->src.u.dma.length; + } + } +} + +static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst, + struct ccp_op *op) +{ + op->init = 0; + + if (dst) { + if (op->dst.u.dma.address == dst->dm_wa.dma.address) + ccp_empty_queue_buf(dst); + else + ccp_update_sg_workarea(&dst->sg_wa, + op->dst.u.dma.length); + } +} + +static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q, + struct ccp_dm_workarea *wa, u32 jobid, u32 ksb, + u32 byte_swap, bool from) +{ + struct ccp_op op; + + memset(&op, 0, sizeof(op)); + + op.cmd_q = cmd_q; + op.jobid = jobid; + op.eom = 1; + + if (from) { + op.soc = 1; + op.src.type = CCP_MEMTYPE_KSB; + op.src.u.ksb = ksb; + op.dst.type = CCP_MEMTYPE_SYSTEM; + op.dst.u.dma.address = wa->dma.address; + op.dst.u.dma.length = wa->length; + } else { + op.src.type = CCP_MEMTYPE_SYSTEM; + op.src.u.dma.address = wa->dma.address; + op.src.u.dma.length = wa->length; + 
op.dst.type = CCP_MEMTYPE_KSB; + op.dst.u.ksb = ksb; + } + + op.u.passthru.byte_swap = byte_swap; + + return ccp_perform_passthru(&op); +} + +static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q, + struct ccp_dm_workarea *wa, u32 jobid, u32 ksb, + u32 byte_swap) +{ + return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, false); +} + +static int ccp_copy_from_ksb(struct ccp_cmd_queue *cmd_q, + struct ccp_dm_workarea *wa, u32 jobid, u32 ksb, + u32 byte_swap) +{ + return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, true); +} + +static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, + struct ccp_cmd *cmd) +{ + struct ccp_aes_engine *aes = &cmd->u.aes; + struct ccp_dm_workarea key, ctx; + struct ccp_data src; + struct ccp_op op; + unsigned int dm_offset; + int ret; + + if (!((aes->key_len == AES_KEYSIZE_128) || + (aes->key_len == AES_KEYSIZE_192) || + (aes->key_len == AES_KEYSIZE_256))) + return -EINVAL; + + if (aes->src_len & (AES_BLOCK_SIZE - 1)) + return -EINVAL; + + if (aes->iv_len != AES_BLOCK_SIZE) + return -EINVAL; + + if (!aes->key || !aes->iv || !aes->src) + return -EINVAL; + + if (aes->cmac_final) { + if (aes->cmac_key_len != AES_BLOCK_SIZE) + return -EINVAL; + + if (!aes->cmac_key) + return -EINVAL; + } + + BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1); + BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1); + + ret = -EIO; + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = ccp_gen_jobid(cmd_q->ccp); + op.ksb_key = cmd_q->ksb_key; + op.ksb_ctx = cmd_q->ksb_ctx; + op.init = 1; + op.u.aes.type = aes->type; + op.u.aes.mode = aes->mode; + op.u.aes.action = aes->action; + + /* All supported key sizes fit in a single (32-byte) KSB entry + * and must be in little endian format. Use the 256-bit byte + * swap passthru option to convert from big endian to little + * endian. + */ + ret = ccp_init_dm_workarea(&key, cmd_q, + CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES, + DMA_TO_DEVICE); + if (ret) + return ret; + + dm_offset = CCP_KSB_BYTES - aes->key_len; + ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); + ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key, + CCP_PASSTHRU_BYTESWAP_256BIT); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_key; + } + + /* The AES context fits in a single (32-byte) KSB entry and + * must be in little endian format. Use the 256-bit byte swap + * passthru option to convert from big endian to little endian. 
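+ *
+ * As with the key above, the IV is copied into the tail of the
+ * 32-byte workarea (dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE
+ * below), i.e. it is right-justified within the KSB entry.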
+ */ + ret = ccp_init_dm_workarea(&ctx, cmd_q, + CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES, + DMA_BIDIRECTIONAL); + if (ret) + goto e_key; + + dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; + ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); + ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_ctx; + } + + /* Send data to the CCP AES engine */ + ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, + AES_BLOCK_SIZE, DMA_TO_DEVICE); + if (ret) + goto e_ctx; + + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true); + if (aes->cmac_final && !src.sg_wa.bytes_left) { + op.eom = 1; + + /* Push the K1/K2 key to the CCP now */ + ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, + op.ksb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_src; + } + + ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0, + aes->cmac_key_len); + ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_src; + } + } + + ret = ccp_perform_aes(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_src; + } + + ccp_process_data(&src, NULL, &op); + } + + /* Retrieve the AES context - convert from LE to BE using + * 32-byte (256-bit) byteswapping + */ + ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_src; + } + + /* ...but we only need AES_BLOCK_SIZE bytes */ + dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; + ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); + +e_src: + ccp_free_data(&src, cmd_q); + +e_ctx: + ccp_dm_free(&ctx); + +e_key: + ccp_dm_free(&key); + + return ret; +} + +static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_aes_engine *aes = &cmd->u.aes; + struct ccp_dm_workarea key, ctx; + struct ccp_data src, dst; + struct ccp_op op; + unsigned int dm_offset; + bool in_place = false; + int ret; + + if (aes->mode == CCP_AES_MODE_CMAC) + return ccp_run_aes_cmac_cmd(cmd_q, cmd); + + if (!((aes->key_len == AES_KEYSIZE_128) || + (aes->key_len == AES_KEYSIZE_192) || + (aes->key_len == AES_KEYSIZE_256))) + return -EINVAL; + + if (((aes->mode == CCP_AES_MODE_ECB) || + (aes->mode == CCP_AES_MODE_CBC) || + (aes->mode == CCP_AES_MODE_CFB)) && + (aes->src_len & (AES_BLOCK_SIZE - 1))) + return -EINVAL; + + if (!aes->key || !aes->src || !aes->dst) + return -EINVAL; + + if (aes->mode != CCP_AES_MODE_ECB) { + if (aes->iv_len != AES_BLOCK_SIZE) + return -EINVAL; + + if (!aes->iv) + return -EINVAL; + } + + BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1); + BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1); + + ret = -EIO; + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = ccp_gen_jobid(cmd_q->ccp); + op.ksb_key = cmd_q->ksb_key; + op.ksb_ctx = cmd_q->ksb_ctx; + op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1; + op.u.aes.type = aes->type; + op.u.aes.mode = aes->mode; + op.u.aes.action = aes->action; + + /* All supported key sizes fit in a single (32-byte) KSB entry + * and must be in little endian format. Use the 256-bit byte + * swap passthru option to convert from big endian to little + * endian. 
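+ * The key is copied to the end of the 32-byte workarea so the
+ * byte swap leaves it little-endian within the KSB entry.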
+ */ + ret = ccp_init_dm_workarea(&key, cmd_q, + CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES, + DMA_TO_DEVICE); + if (ret) + return ret; + + dm_offset = CCP_KSB_BYTES - aes->key_len; + ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); + ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key, + CCP_PASSTHRU_BYTESWAP_256BIT); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_key; + } + + /* The AES context fits in a single (32-byte) KSB entry and + * must be in little endian format. Use the 256-bit byte swap + * passthru option to convert from big endian to little endian. + */ + ret = ccp_init_dm_workarea(&ctx, cmd_q, + CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES, + DMA_BIDIRECTIONAL); + if (ret) + goto e_key; + + if (aes->mode != CCP_AES_MODE_ECB) { + /* Load the AES context - convert to LE */ + dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; + ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); + ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_ctx; + } + } + + /* Prepare the input and output data workareas. For in-place + * operations we need to set the dma direction to BIDIRECTIONAL + * and copy the src workarea to the dst workarea. + */ + if (sg_virt(aes->src) == sg_virt(aes->dst)) + in_place = true; + + ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, + AES_BLOCK_SIZE, + in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + goto e_ctx; + + if (in_place) { + dst = src; + } else { + ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len, + AES_BLOCK_SIZE, DMA_FROM_DEVICE); + if (ret) + goto e_src; + } + + /* Send data to the CCP AES engine */ + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true); + if (!src.sg_wa.bytes_left) { + op.eom = 1; + + /* Since we don't retrieve the AES context in ECB + * mode we have to wait for the operation to complete + * on the last piece of data + */ + if (aes->mode == CCP_AES_MODE_ECB) + op.soc = 1; + } + + ret = ccp_perform_aes(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + ccp_process_data(&src, &dst, &op); + } + + if (aes->mode != CCP_AES_MODE_ECB) { + /* Retrieve the AES context - convert from LE to BE using + * 32-byte (256-bit) byteswapping + */ + ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + /* ...but we only need AES_BLOCK_SIZE bytes */ + dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; + ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); + } + +e_dst: + if (!in_place) + ccp_free_data(&dst, cmd_q); + +e_src: + ccp_free_data(&src, cmd_q); + +e_ctx: + ccp_dm_free(&ctx); + +e_key: + ccp_dm_free(&key); + + return ret; +} + +static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, + struct ccp_cmd *cmd) +{ + struct ccp_xts_aes_engine *xts = &cmd->u.xts; + struct ccp_dm_workarea key, ctx; + struct ccp_data src, dst; + struct ccp_op op; + unsigned int unit_size, dm_offset; + bool in_place = false; + int ret; + + switch (xts->unit_size) { + case CCP_XTS_AES_UNIT_SIZE_16: + unit_size = 16; + break; + case CCP_XTS_AES_UNIT_SIZE_512: + unit_size = 512; + break; + case CCP_XTS_AES_UNIT_SIZE_1024: + unit_size = 1024; + break; + case CCP_XTS_AES_UNIT_SIZE_2048: + unit_size = 2048; + break; + case CCP_XTS_AES_UNIT_SIZE_4096: + unit_size = 4096; + break; + + default: + return -EINVAL; + } + + if (xts->key_len != AES_KEYSIZE_128) + return
-EINVAL; + + if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1))) + return -EINVAL; + + if (xts->iv_len != AES_BLOCK_SIZE) + return -EINVAL; + + if (!xts->key || !xts->iv || !xts->src || !xts->dst) + return -EINVAL; + + BUILD_BUG_ON(CCP_XTS_AES_KEY_KSB_COUNT != 1); + BUILD_BUG_ON(CCP_XTS_AES_CTX_KSB_COUNT != 1); + + ret = -EIO; + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = ccp_gen_jobid(cmd_q->ccp); + op.ksb_key = cmd_q->ksb_key; + op.ksb_ctx = cmd_q->ksb_ctx; + op.init = 1; + op.u.xts.action = xts->action; + op.u.xts.unit_size = xts->unit_size; + + /* All supported key sizes fit in a single (32-byte) KSB entry + * and must be in little endian format. Use the 256-bit byte + * swap passthru option to convert from big endian to little + * endian. + */ + ret = ccp_init_dm_workarea(&key, cmd_q, + CCP_XTS_AES_KEY_KSB_COUNT * CCP_KSB_BYTES, + DMA_TO_DEVICE); + if (ret) + return ret; + + dm_offset = CCP_KSB_BYTES - AES_KEYSIZE_128; + ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); + ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len); + ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key, + CCP_PASSTHRU_BYTESWAP_256BIT); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_key; + } + + /* The AES context fits in a single (32-byte) KSB entry and + * for XTS is already in little endian format so no byte swapping + * is needed. + */ + ret = ccp_init_dm_workarea(&ctx, cmd_q, + CCP_XTS_AES_CTX_KSB_COUNT * CCP_KSB_BYTES, + DMA_BIDIRECTIONAL); + if (ret) + goto e_key; + + ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len); + ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_ctx; + } + + /* Prepare the input and output data workareas. For in-place + * operations we need to set the dma direction to BIDIRECTIONAL + * and copy the src workarea to the dst workarea. + */ + if (sg_virt(xts->src) == sg_virt(xts->dst)) + in_place = true; + + ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len, + unit_size, + in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + goto e_ctx; + + if (in_place) { + dst = src; + } else { + ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len, + unit_size, DMA_FROM_DEVICE); + if (ret) + goto e_src; + } + + /* Send data to the CCP AES engine */ + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, &dst, &op, unit_size, true); + if (!src.sg_wa.bytes_left) + op.eom = 1; + + ret = ccp_perform_xts_aes(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + ccp_process_data(&src, &dst, &op); + } + + /* Retrieve the AES context - convert from LE to BE using + * 32-byte (256-bit) byteswapping + */ + ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + /* ...but we only need AES_BLOCK_SIZE bytes */ + dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; + ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len); + +e_dst: + if (!in_place) + ccp_free_data(&dst, cmd_q); + +e_src: + ccp_free_data(&src, cmd_q); + +e_ctx: + ccp_dm_free(&ctx); + +e_key: + ccp_dm_free(&key); + + return ret; +} + +static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sha_engine *sha = &cmd->u.sha; + struct ccp_dm_workarea ctx; + struct ccp_data src; + struct ccp_op op; + int ret; + + if (sha->ctx_len != CCP_SHA_CTXSIZE) + return -EINVAL; + + if (!sha->ctx) + return -EINVAL; + + if (!sha->final && (sha->src_len & (CCP_SHA_BLOCKSIZE - 1))) + return -EINVAL; + + if (!sha->src_len) { + const u8 *sha_zero; + + /* Not final, just return */ + if (!sha->final) + return 0; + + /* CCP can't do a zero length sha operation so the caller + * must buffer the data. + */ + if (sha->msg_bits) + return -EINVAL; + + /* A sha operation for a message with a total length of zero, + * return known result. + */ + switch (sha->type) { + case CCP_SHA_TYPE_1: + sha_zero = ccp_sha1_zero; + break; + case CCP_SHA_TYPE_224: + sha_zero = ccp_sha224_zero; + break; + case CCP_SHA_TYPE_256: + sha_zero = ccp_sha256_zero; + break; + default: + return -EINVAL; + } + + scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0, + sha->ctx_len, 1); + + return 0; + } + + if (!sha->src) + return -EINVAL; + + BUILD_BUG_ON(CCP_SHA_KSB_COUNT != 1); + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = ccp_gen_jobid(cmd_q->ccp); + op.ksb_ctx = cmd_q->ksb_ctx; + op.u.sha.type = sha->type; + op.u.sha.msg_bits = sha->msg_bits; + + /* The SHA context fits in a single (32-byte) KSB entry and + * must be in little endian format. Use the 256-bit byte swap + * passthru option to convert from big endian to little endian. 
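+ * For the first block the context is seeded from the ccp_sha*_init
+ * tables; otherwise the caller's intermediate digest is loaded.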
+ */ + ret = ccp_init_dm_workarea(&ctx, cmd_q, + CCP_SHA_KSB_COUNT * CCP_KSB_BYTES, + DMA_BIDIRECTIONAL); + if (ret) + return ret; + + if (sha->first) { + const __be32 *init; + + switch (sha->type) { + case CCP_SHA_TYPE_1: + init = ccp_sha1_init; + break; + case CCP_SHA_TYPE_224: + init = ccp_sha224_init; + break; + case CCP_SHA_TYPE_256: + init = ccp_sha256_init; + break; + default: + ret = -EINVAL; + goto e_ctx; + } + memcpy(ctx.address, init, CCP_SHA_CTXSIZE); + } else { + ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); + } + + ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_ctx; + } + + /* Send data to the CCP SHA engine */ + ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len, + CCP_SHA_BLOCKSIZE, DMA_TO_DEVICE); + if (ret) + goto e_ctx; + + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, NULL, &op, CCP_SHA_BLOCKSIZE, false); + if (sha->final && !src.sg_wa.bytes_left) + op.eom = 1; + + ret = ccp_perform_sha(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + + ccp_process_data(&src, NULL, &op); + } + + /* Retrieve the SHA context - convert from LE to BE using + * 32-byte (256-bit) byteswapping to BE + */ + ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + + ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); + + if (sha->final && sha->opad) { + /* HMAC operation, recursively perform final SHA */ + struct ccp_cmd hmac_cmd; + struct scatterlist sg; + u64 block_size, digest_size; + u8 *hmac_buf; + + switch (sha->type) { + case CCP_SHA_TYPE_1: + block_size = SHA1_BLOCK_SIZE; + digest_size = SHA1_DIGEST_SIZE; + break; + case CCP_SHA_TYPE_224: + block_size = SHA224_BLOCK_SIZE; + digest_size = SHA224_DIGEST_SIZE; + break; + case CCP_SHA_TYPE_256: + block_size = SHA256_BLOCK_SIZE; + digest_size = SHA256_DIGEST_SIZE; + break; + default: + ret = -EINVAL; + goto e_data; + } + + if (sha->opad_len != block_size) { + ret = -EINVAL; + goto e_data; + } + + hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL); + if (!hmac_buf) { + ret = -ENOMEM; + goto e_data; + } + sg_init_one(&sg, hmac_buf, block_size + digest_size); + + scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); + memcpy(hmac_buf + block_size, ctx.address, digest_size); + + memset(&hmac_cmd, 0, sizeof(hmac_cmd)); + hmac_cmd.engine = CCP_ENGINE_SHA; + hmac_cmd.u.sha.type = sha->type; + hmac_cmd.u.sha.ctx = sha->ctx; + hmac_cmd.u.sha.ctx_len = sha->ctx_len; + hmac_cmd.u.sha.src = &sg; + hmac_cmd.u.sha.src_len = block_size + digest_size; + hmac_cmd.u.sha.opad = NULL; + hmac_cmd.u.sha.opad_len = 0; + hmac_cmd.u.sha.first = 1; + hmac_cmd.u.sha.final = 1; + hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3; + + ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd); + if (ret) + cmd->engine_error = hmac_cmd.engine_error; + + kfree(hmac_buf); + } + +e_data: + ccp_free_data(&src, cmd_q); + +e_ctx: + ccp_dm_free(&ctx); + + return ret; +} + +static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_rsa_engine *rsa = &cmd->u.rsa; + struct ccp_dm_workarea exp, src; + struct ccp_data dst; + struct ccp_op op; + unsigned int ksb_count, i_len, o_len; + int ret; + + if (rsa->key_size > CCP_RSA_MAX_WIDTH) + return -EINVAL; + + if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst) + return -EINVAL; + + /* The RSA modulus must precede the message being 
acted upon, so + * it must be copied to a DMA area where the message and the + * modulus can be concatenated. Therefore the input buffer + * length required is twice the output buffer length (which + * must be a multiple of 256-bits). + */ + o_len = ((rsa->key_size + 255) / 256) * 32; + i_len = o_len * 2; + + ksb_count = o_len / CCP_KSB_BYTES; + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = ccp_gen_jobid(cmd_q->ccp); + op.ksb_key = ccp_alloc_ksb(cmd_q->ccp, ksb_count); + if (!op.ksb_key) + return -EIO; + + /* The RSA exponent may span multiple (32-byte) KSB entries and must + * be in little endian format. Reverse copy each 32-byte chunk + * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk) + * and each byte within that chunk and do not perform any byte swap + * operations on the passthru operation. + */ + ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE); + if (ret) + goto e_ksb; + + ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES, + false); + ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_exp; + } + + /* Concatenate the modulus and the message. Both the modulus and + * the operands must be in little endian format. Since the input + * is in big endian format it must be converted. + */ + ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE); + if (ret) + goto e_exp; + + ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES, + false); + src.address += o_len; /* Adjust the address for the copy operation */ + ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES, + false); + src.address -= o_len; /* Reset the address to original value */ + + /* Prepare the output area for the operation */ + ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len, + o_len, DMA_FROM_DEVICE); + if (ret) + goto e_src; + + op.soc = 1; + op.src.u.dma.address = src.dma.address; + op.src.u.dma.offset = 0; + op.src.u.dma.length = i_len; + op.dst.u.dma.address = dst.dm_wa.dma.address; + op.dst.u.dma.offset = 0; + op.dst.u.dma.length = o_len; + + op.u.rsa.mod_size = rsa->key_size; + op.u.rsa.input_len = i_len; + + ret = ccp_perform_rsa(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + ccp_reverse_get_dm_area(&dst.dm_wa, rsa->dst, rsa->mod_len); + +e_dst: + ccp_free_data(&dst, cmd_q); + +e_src: + ccp_dm_free(&src); + +e_exp: + ccp_dm_free(&exp); + +e_ksb: + ccp_free_ksb(cmd_q->ccp, op.ksb_key, ksb_count); + + return ret; +} + +static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, + struct ccp_cmd *cmd) +{ + struct ccp_passthru_engine *pt = &cmd->u.passthru; + struct ccp_dm_workarea mask; + struct ccp_data src, dst; + struct ccp_op op; + bool in_place = false; + unsigned int i; + int ret; + + if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) + return -EINVAL; + + if (!pt->src || !pt->dst) + return -EINVAL; + + if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { + if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) + return -EINVAL; + if (!pt->mask) + return -EINVAL; + } + + BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1); + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = ccp_gen_jobid(cmd_q->ccp); + + if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { + /* Load the mask */ + op.ksb_key = cmd_q->ksb_key; + + ret = ccp_init_dm_workarea(&mask, cmd_q, + CCP_PASSTHRU_KSB_COUNT * + CCP_KSB_BYTES, + DMA_TO_DEVICE); + if (ret) + return ret; + + ccp_set_dm_area(&mask, 0, pt->mask, 0, 
pt->mask_len); + ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_mask; + } + } + + /* Prepare the input and output data workareas. For in-place + * operations we need to set the dma direction to BIDIRECTIONAL + * and copy the src workarea to the dst workarea. + */ + if (sg_virt(pt->src) == sg_virt(pt->dst)) + in_place = true; + + ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len, + CCP_PASSTHRU_MASKSIZE, + in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + goto e_mask; + + if (in_place) { + dst = src; + } else { + ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len, + CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE); + if (ret) + goto e_src; + } + + /* Send data to the CCP Passthru engine + * Because the CCP engine works on a single source and destination + * dma address at a time, each entry in the source scatterlist + * (after the dma_map_sg call) must be less than or equal to the + * (remaining) length in the destination scatterlist entry and the + * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE + */ + dst.sg_wa.sg_used = 0; + for (i = 1; i <= src.sg_wa.dma_count; i++) { + if (!dst.sg_wa.sg || + (dst.sg_wa.sg->length < src.sg_wa.sg->length)) { + ret = -EINVAL; + goto e_dst; + } + + if (i == src.sg_wa.dma_count) { + op.eom = 1; + op.soc = 1; + } + + op.src.type = CCP_MEMTYPE_SYSTEM; + op.src.u.dma.address = sg_dma_address(src.sg_wa.sg); + op.src.u.dma.offset = 0; + op.src.u.dma.length = sg_dma_len(src.sg_wa.sg); + + op.dst.type = CCP_MEMTYPE_SYSTEM; + op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg); + op.dst.u.dma.offset = dst.sg_wa.sg_used; + op.dst.u.dma.length = op.src.u.dma.length; + + ret = ccp_perform_passthru(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + dst.sg_wa.sg_used += src.sg_wa.sg->length; + if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) { + dst.sg_wa.sg = sg_next(dst.sg_wa.sg); + dst.sg_wa.sg_used = 0; + } + src.sg_wa.sg = sg_next(src.sg_wa.sg); + } + +e_dst: + if (!in_place) + ccp_free_data(&dst, cmd_q); + +e_src: + ccp_free_data(&src, cmd_q); + +e_mask: + if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) + ccp_dm_free(&mask); + + return ret; +} + +static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_ecc_engine *ecc = &cmd->u.ecc; + struct ccp_dm_workarea src, dst; + struct ccp_op op; + int ret; + u8 *save; + + if (!ecc->u.mm.operand_1 || + (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES)) + return -EINVAL; + + if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) + if (!ecc->u.mm.operand_2 || + (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES)) + return -EINVAL; + + if (!ecc->u.mm.result || + (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES)) + return -EINVAL; + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = ccp_gen_jobid(cmd_q->ccp); + + /* Concatenate the modulus and the operands. Both the modulus and + * the operands must be in little endian format. Since the input + * is in big endian format it must be converted and placed in a + * fixed length buffer. 
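+ * Each value is reverse-copied into its own CCP_ECC_OPERAND_SIZE
+ * slot of the source buffer.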
+ */ + ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE, + DMA_TO_DEVICE); + if (ret) + return ret; + + /* Save the workarea address since it is updated in order to perform + * the concatenation + */ + save = src.address; + + /* Copy the ECC modulus */ + ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len, + CCP_ECC_OPERAND_SIZE, false); + src.address += CCP_ECC_OPERAND_SIZE; + + /* Copy the first operand */ + ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1, + ecc->u.mm.operand_1_len, + CCP_ECC_OPERAND_SIZE, false); + src.address += CCP_ECC_OPERAND_SIZE; + + if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) { + /* Copy the second operand */ + ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2, + ecc->u.mm.operand_2_len, + CCP_ECC_OPERAND_SIZE, false); + src.address += CCP_ECC_OPERAND_SIZE; + } + + /* Restore the workarea address */ + src.address = save; + + /* Prepare the output area for the operation */ + ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE, + DMA_FROM_DEVICE); + if (ret) + goto e_src; + + op.soc = 1; + op.src.u.dma.address = src.dma.address; + op.src.u.dma.offset = 0; + op.src.u.dma.length = src.length; + op.dst.u.dma.address = dst.dma.address; + op.dst.u.dma.offset = 0; + op.dst.u.dma.length = dst.length; + + op.u.ecc.function = cmd->u.ecc.function; + + ret = ccp_perform_ecc(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + ecc->ecc_result = le16_to_cpup( + (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET)); + if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) { + ret = -EIO; + goto e_dst; + } + + /* Save the ECC result */ + ccp_reverse_get_dm_area(&dst, ecc->u.mm.result, CCP_ECC_MODULUS_BYTES); + +e_dst: + ccp_dm_free(&dst); + +e_src: + ccp_dm_free(&src); + + return ret; +} + +static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_ecc_engine *ecc = &cmd->u.ecc; + struct ccp_dm_workarea src, dst; + struct ccp_op op; + int ret; + u8 *save; + + if (!ecc->u.pm.point_1.x || + (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) || + !ecc->u.pm.point_1.y || + (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES)) + return -EINVAL; + + if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { + if (!ecc->u.pm.point_2.x || + (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) || + !ecc->u.pm.point_2.y || + (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES)) + return -EINVAL; + } else { + if (!ecc->u.pm.domain_a || + (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES)) + return -EINVAL; + + if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) + if (!ecc->u.pm.scalar || + (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES)) + return -EINVAL; + } + + if (!ecc->u.pm.result.x || + (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) || + !ecc->u.pm.result.y || + (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES)) + return -EINVAL; + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = ccp_gen_jobid(cmd_q->ccp); + + /* Concatenate the modulus and the operands. Both the modulus and + * the operands must be in little endian format. Since the input + * is in big endian format it must be converted and placed in a + * fixed length buffer. 
+ */ + ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE, + DMA_TO_DEVICE); + if (ret) + return ret; + + /* Save the workarea address since it is updated in order to perform + * the concatenation + */ + save = src.address; + + /* Copy the ECC modulus */ + ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len, + CCP_ECC_OPERAND_SIZE, false); + src.address += CCP_ECC_OPERAND_SIZE; + + /* Copy the first point X and Y coordinate */ + ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x, + ecc->u.pm.point_1.x_len, + CCP_ECC_OPERAND_SIZE, false); + src.address += CCP_ECC_OPERAND_SIZE; + ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y, + ecc->u.pm.point_1.y_len, + CCP_ECC_OPERAND_SIZE, false); + src.address += CCP_ECC_OPERAND_SIZE; + + /* Set the first point Z coordinate to 1 */ + *src.address = 0x01; + src.address += CCP_ECC_OPERAND_SIZE; + + if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { + /* Copy the second point X and Y coordinate */ + ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x, + ecc->u.pm.point_2.x_len, + CCP_ECC_OPERAND_SIZE, false); + src.address += CCP_ECC_OPERAND_SIZE; + ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y, + ecc->u.pm.point_2.y_len, + CCP_ECC_OPERAND_SIZE, false); + src.address += CCP_ECC_OPERAND_SIZE; + + /* Set the second point Z coordinate to 1 */ + *src.address = 0x01; + src.address += CCP_ECC_OPERAND_SIZE; + } else { + /* Copy the Domain "a" parameter */ + ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a, + ecc->u.pm.domain_a_len, + CCP_ECC_OPERAND_SIZE, false); + src.address += CCP_ECC_OPERAND_SIZE; + + if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) { + /* Copy the scalar value */ + ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar, + ecc->u.pm.scalar_len, + CCP_ECC_OPERAND_SIZE, false); + src.address += CCP_ECC_OPERAND_SIZE; + } + } + + /* Restore the workarea address */ + src.address = save; + + /* Prepare the output area for the operation */ + ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE, + DMA_FROM_DEVICE); + if (ret) + goto e_src; + + op.soc = 1; + op.src.u.dma.address = src.dma.address; + op.src.u.dma.offset = 0; + op.src.u.dma.length = src.length; + op.dst.u.dma.address = dst.dma.address; + op.dst.u.dma.offset = 0; + op.dst.u.dma.length = dst.length; + + op.u.ecc.function = cmd->u.ecc.function; + + ret = ccp_perform_ecc(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + ecc->ecc_result = le16_to_cpup( + (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET)); + if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) { + ret = -EIO; + goto e_dst; + } + + /* Save the workarea address since it is updated as we walk through + * to copy the point math result + */ + save = dst.address; + + /* Save the ECC result X and Y coordinates */ + ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.x, + CCP_ECC_MODULUS_BYTES); + dst.address += CCP_ECC_OUTPUT_SIZE; + ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.y, + CCP_ECC_MODULUS_BYTES); + dst.address += CCP_ECC_OUTPUT_SIZE; + + /* Restore the workarea address */ + dst.address = save; + +e_dst: + ccp_dm_free(&dst); + +e_src: + ccp_dm_free(&src); + + return ret; +} + +static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_ecc_engine *ecc = &cmd->u.ecc; + + ecc->ecc_result = 0; + + if (!ecc->mod || + (ecc->mod_len > CCP_ECC_MODULUS_BYTES)) + return -EINVAL; + + switch (ecc->function) { + case CCP_ECC_FUNCTION_MMUL_384BIT: + case CCP_ECC_FUNCTION_MADD_384BIT: + case CCP_ECC_FUNCTION_MINV_384BIT: + return ccp_run_ecc_mm_cmd(cmd_q,
cmd); + + case CCP_ECC_FUNCTION_PADD_384BIT: + case CCP_ECC_FUNCTION_PMUL_384BIT: + case CCP_ECC_FUNCTION_PDBL_384BIT: + return ccp_run_ecc_pm_cmd(cmd_q, cmd); + + default: + return -EINVAL; + } +} + +int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + int ret; + + cmd->engine_error = 0; + cmd_q->cmd_error = 0; + cmd_q->int_rcvd = 0; + cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status)); + + switch (cmd->engine) { + case CCP_ENGINE_AES: + ret = ccp_run_aes_cmd(cmd_q, cmd); + break; + case CCP_ENGINE_XTS_AES_128: + ret = ccp_run_xts_aes_cmd(cmd_q, cmd); + break; + case CCP_ENGINE_SHA: + ret = ccp_run_sha_cmd(cmd_q, cmd); + break; + case CCP_ENGINE_RSA: + ret = ccp_run_rsa_cmd(cmd_q, cmd); + break; + case CCP_ENGINE_PASSTHRU: + ret = ccp_run_passthru_cmd(cmd_q, cmd); + break; + case CCP_ENGINE_ECC: + ret = ccp_run_ecc_cmd(cmd_q, cmd); + break; + default: + ret = -EINVAL; + } + + return ret; +} diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c new file mode 100644 index 000000000..af190d479 --- /dev/null +++ b/drivers/crypto/ccp/ccp-pci.c @@ -0,0 +1,340 @@ +/* + * AMD Cryptographic Coprocessor (CCP) driver + * + * Copyright (C) 2013 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky <thomas.lendacky@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> +#include <linux/dma-mapping.h> +#include <linux/kthread.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/ccp.h> + +#include "ccp-dev.h" + +#define IO_BAR 2 +#define IO_OFFSET 0x20000 + +#define MSIX_VECTORS 2 + +struct ccp_msix { + u32 vector; + char name[16]; +}; + +struct ccp_pci { + int msix_count; + struct ccp_msix msix[MSIX_VECTORS]; +}; + +static int ccp_get_msix_irqs(struct ccp_device *ccp) +{ + struct ccp_pci *ccp_pci = ccp->dev_specific; + struct device *dev = ccp->dev; + struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct msix_entry msix_entry[MSIX_VECTORS]; + unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1; + int v, ret; + + for (v = 0; v < ARRAY_SIZE(msix_entry); v++) + msix_entry[v].entry = v; + + ret = pci_enable_msix_range(pdev, msix_entry, 1, v); + if (ret < 0) + return ret; + + ccp_pci->msix_count = ret; + for (v = 0; v < ccp_pci->msix_count; v++) { + /* Set the interrupt names and request the irqs */ + snprintf(ccp_pci->msix[v].name, name_len, "ccp-%u", v); + ccp_pci->msix[v].vector = msix_entry[v].vector; + ret = request_irq(ccp_pci->msix[v].vector, ccp_irq_handler, + 0, ccp_pci->msix[v].name, dev); + if (ret) { + dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n", + ret); + goto e_irq; + } + } + + return 0; + +e_irq: + while (v--) + free_irq(ccp_pci->msix[v].vector, dev); + + pci_disable_msix(pdev); + + ccp_pci->msix_count = 0; + + return ret; +} + +static int ccp_get_msi_irq(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + int ret; + + ret = pci_enable_msi(pdev); + if (ret) + return ret; + + ccp->irq = pdev->irq; + ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev); + if (ret) { + dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret); + goto e_msi; + } + + return 0; + +e_msi: + 
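+ /* request_irq() failed, so undo the MSI enable */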
pci_disable_msi(pdev); + + return ret; +} + +static int ccp_get_irqs(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + int ret; + + ret = ccp_get_msix_irqs(ccp); + if (!ret) + return 0; + + /* Couldn't get MSI-X vectors, try MSI */ + dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret); + ret = ccp_get_msi_irq(ccp); + if (!ret) + return 0; + + /* Couldn't get MSI interrupt */ + dev_notice(dev, "could not enable MSI (%d)\n", ret); + + return ret; +} + +static void ccp_free_irqs(struct ccp_device *ccp) +{ + struct ccp_pci *ccp_pci = ccp->dev_specific; + struct device *dev = ccp->dev; + struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + + if (ccp_pci->msix_count) { + while (ccp_pci->msix_count--) + free_irq(ccp_pci->msix[ccp_pci->msix_count].vector, + dev); + pci_disable_msix(pdev); + } else { + free_irq(ccp->irq, dev); + pci_disable_msi(pdev); + } +} + +static int ccp_find_mmio_area(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + resource_size_t io_len; + unsigned long io_flags; + + io_flags = pci_resource_flags(pdev, IO_BAR); + io_len = pci_resource_len(pdev, IO_BAR); + if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800))) + return IO_BAR; + + return -EIO; +} + +static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct ccp_device *ccp; + struct ccp_pci *ccp_pci; + struct device *dev = &pdev->dev; + unsigned int bar; + int ret; + + ret = -ENOMEM; + ccp = ccp_alloc_struct(dev); + if (!ccp) + goto e_err; + + ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL); + if (!ccp_pci) + goto e_err; + + ccp->dev_specific = ccp_pci; + ccp->get_irq = ccp_get_irqs; + ccp->free_irq = ccp_free_irqs; + + ret = pci_request_regions(pdev, "ccp"); + if (ret) { + dev_err(dev, "pci_request_regions failed (%d)\n", ret); + goto e_err; + } + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(dev, "pci_enable_device failed (%d)\n", ret); + goto e_regions; + } + + pci_set_master(pdev); + + ret = ccp_find_mmio_area(ccp); + if (ret < 0) + goto e_device; + bar = ret; + + ret = -EIO; + ccp->io_map = pci_iomap(pdev, bar, 0); + if (!ccp->io_map) { + dev_err(dev, "pci_iomap failed\n"); + goto e_device; + } + ccp->io_regs = ccp->io_map + IO_OFFSET; + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (ret) { + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", + ret); + goto e_iomap; + } + } + + dev_set_drvdata(dev, ccp); + + ret = ccp_init(ccp); + if (ret) + goto e_iomap; + + dev_notice(dev, "enabled\n"); + + return 0; + +e_iomap: + pci_iounmap(pdev, ccp->io_map); + +e_device: + pci_disable_device(pdev); + +e_regions: + pci_release_regions(pdev); + +e_err: + dev_notice(dev, "initialization failed\n"); + return ret; +} + +static void ccp_pci_remove(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct ccp_device *ccp = dev_get_drvdata(dev); + + if (!ccp) + return; + + ccp_destroy(ccp); + + pci_iounmap(pdev, ccp->io_map); + + pci_disable_device(pdev); + + pci_release_regions(pdev); + + dev_notice(dev, "disabled\n"); +} + +#ifdef CONFIG_PM +static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct device *dev = &pdev->dev; + struct ccp_device *ccp = dev_get_drvdata(dev); + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&ccp->cmd_lock, flags); + + ccp->suspending = 1; + + /* Wake all the queue kthreads to prepare for 
suspend */ + for (i = 0; i < ccp->cmd_q_count; i++) + wake_up_process(ccp->cmd_q[i].kthread); + + spin_unlock_irqrestore(&ccp->cmd_lock, flags); + + /* Wait for all queue kthreads to say they're done */ + while (!ccp_queues_suspended(ccp)) + wait_event_interruptible(ccp->suspend_queue, + ccp_queues_suspended(ccp)); + + return 0; +} + +static int ccp_pci_resume(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct ccp_device *ccp = dev_get_drvdata(dev); + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&ccp->cmd_lock, flags); + + ccp->suspending = 0; + + /* Wake up all the kthreads */ + for (i = 0; i < ccp->cmd_q_count; i++) { + ccp->cmd_q[i].suspended = 0; + wake_up_process(ccp->cmd_q[i].kthread); + } + + spin_unlock_irqrestore(&ccp->cmd_lock, flags); + + return 0; +} +#endif + +static const struct pci_device_id ccp_pci_table[] = { + { PCI_VDEVICE(AMD, 0x1537), }, + /* Last entry must be zero */ + { 0, } +}; +MODULE_DEVICE_TABLE(pci, ccp_pci_table); + +static struct pci_driver ccp_pci_driver = { + .name = "AMD Cryptographic Coprocessor", + .id_table = ccp_pci_table, + .probe = ccp_pci_probe, + .remove = ccp_pci_remove, +#ifdef CONFIG_PM + .suspend = ccp_pci_suspend, + .resume = ccp_pci_resume, +#endif +}; + +int ccp_pci_init(void) +{ + return pci_register_driver(&ccp_pci_driver); +} + +void ccp_pci_exit(void) +{ + pci_unregister_driver(&ccp_pci_driver); +} diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c new file mode 100644 index 000000000..b1c20b2b5 --- /dev/null +++ b/drivers/crypto/ccp/ccp-platform.c @@ -0,0 +1,314 @@ +/* + * AMD Cryptographic Coprocessor (CCP) driver + * + * Copyright (C) 2014 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky <thomas.lendacky@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/ioport.h> +#include <linux/dma-mapping.h> +#include <linux/kthread.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/ccp.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/acpi.h> + +#include "ccp-dev.h" + +struct ccp_platform { + int use_acpi; + int coherent; +}; + +static int ccp_get_irq(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + int ret; + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + + ccp->irq = ret; + ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev); + if (ret) { + dev_notice(dev, "unable to allocate IRQ (%d)\n", ret); + return ret; + } + + return 0; +} + +static int ccp_get_irqs(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + int ret; + + ret = ccp_get_irq(ccp); + if (!ret) + return 0; + + /* Couldn't get an interrupt */ + dev_notice(dev, "could not enable interrupts (%d)\n", ret); + + return ret; +} + +static void ccp_free_irqs(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + + free_irq(ccp->irq, dev); +} + +static struct resource *ccp_find_mmio_area(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct resource *ior; + + ior = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (ior && (resource_size(ior) >= 0x800)) + return ior; + + return NULL; +} + +#ifdef CONFIG_ACPI +static int ccp_acpi_support(struct ccp_device *ccp) +{ + struct ccp_platform *ccp_platform = ccp->dev_specific; + struct acpi_device *adev = ACPI_COMPANION(ccp->dev); + acpi_handle handle; + acpi_status status; + unsigned long long data; + int cca; + + /* Retrieve the device cache coherency value */ + handle = adev->handle; + do { + status = acpi_evaluate_integer(handle, "_CCA", NULL, &data); + if (!ACPI_FAILURE(status)) { + cca = data; + break; + } + } while (!ACPI_FAILURE(status)); + + if (ACPI_FAILURE(status)) { + dev_err(ccp->dev, "error obtaining acpi coherency value\n"); + return -EINVAL; + } + + ccp_platform->coherent = !!cca; + + return 0; +} +#else /* CONFIG_ACPI */ +static int ccp_acpi_support(struct ccp_device *ccp) +{ + return -EINVAL; +} +#endif + +#ifdef CONFIG_OF +static int ccp_of_support(struct ccp_device *ccp) +{ + struct ccp_platform *ccp_platform = ccp->dev_specific; + + ccp_platform->coherent = of_dma_is_coherent(ccp->dev->of_node); + + return 0; +} +#else +static int ccp_of_support(struct ccp_device *ccp) +{ + return -EINVAL; +} +#endif + +static int ccp_platform_probe(struct platform_device *pdev) +{ + struct ccp_device *ccp; + struct ccp_platform *ccp_platform; + struct device *dev = &pdev->dev; + struct acpi_device *adev = ACPI_COMPANION(dev); + struct resource *ior; + int ret; + + ret = -ENOMEM; + ccp = ccp_alloc_struct(dev); + if (!ccp) + goto e_err; + + ccp_platform = devm_kzalloc(dev, sizeof(*ccp_platform), GFP_KERNEL); + if (!ccp_platform) + goto e_err; + + ccp->dev_specific = ccp_platform; + ccp->get_irq = ccp_get_irqs; + ccp->free_irq = ccp_free_irqs; + + ccp_platform->use_acpi = (!adev || acpi_disabled) ? 
0 : 1; + + ior = ccp_find_mmio_area(ccp); + ccp->io_map = devm_ioremap_resource(dev, ior); + if (IS_ERR(ccp->io_map)) { + ret = PTR_ERR(ccp->io_map); + goto e_err; + } + ccp->io_regs = ccp->io_map; + + if (!dev->dma_mask) + dev->dma_mask = &dev->coherent_dma_mask; + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (ret) { + dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); + goto e_err; + } + + if (ccp_platform->use_acpi) + ret = ccp_acpi_support(ccp); + else + ret = ccp_of_support(ccp); + if (ret) + goto e_err; + + if (ccp_platform->coherent) + ccp->axcache = CACHE_WB_NO_ALLOC; + else + ccp->axcache = CACHE_NONE; + + dev_set_drvdata(dev, ccp); + + ret = ccp_init(ccp); + if (ret) + goto e_err; + + dev_notice(dev, "enabled\n"); + + return 0; + +e_err: + dev_notice(dev, "initialization failed\n"); + return ret; +} + +static int ccp_platform_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ccp_device *ccp = dev_get_drvdata(dev); + + ccp_destroy(ccp); + + dev_notice(dev, "disabled\n"); + + return 0; +} + +#ifdef CONFIG_PM +static int ccp_platform_suspend(struct platform_device *pdev, + pm_message_t state) +{ + struct device *dev = &pdev->dev; + struct ccp_device *ccp = dev_get_drvdata(dev); + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&ccp->cmd_lock, flags); + + ccp->suspending = 1; + + /* Wake all the queue kthreads to prepare for suspend */ + for (i = 0; i < ccp->cmd_q_count; i++) + wake_up_process(ccp->cmd_q[i].kthread); + + spin_unlock_irqrestore(&ccp->cmd_lock, flags); + + /* Wait for all queue kthreads to say they're done */ + while (!ccp_queues_suspended(ccp)) + wait_event_interruptible(ccp->suspend_queue, + ccp_queues_suspended(ccp)); + + return 0; +} + +static int ccp_platform_resume(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ccp_device *ccp = dev_get_drvdata(dev); + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&ccp->cmd_lock, flags); + + ccp->suspending = 0; + + /* Wake up all the kthreads */ + for (i = 0; i < ccp->cmd_q_count; i++) { + ccp->cmd_q[i].suspended = 0; + wake_up_process(ccp->cmd_q[i].kthread); + } + + spin_unlock_irqrestore(&ccp->cmd_lock, flags); + + return 0; +} +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id ccp_acpi_match[] = { + { "AMDI0C00", 0 }, + { }, +}; +#endif + +#ifdef CONFIG_OF +static const struct of_device_id ccp_of_match[] = { + { .compatible = "amd,ccp-seattle-v1a" }, + { }, +}; +#endif + +static struct platform_driver ccp_platform_driver = { + .driver = { + .name = "AMD Cryptographic Coprocessor", +#ifdef CONFIG_ACPI + .acpi_match_table = ccp_acpi_match, +#endif +#ifdef CONFIG_OF + .of_match_table = ccp_of_match, +#endif + }, + .probe = ccp_platform_probe, + .remove = ccp_platform_remove, +#ifdef CONFIG_PM + .suspend = ccp_platform_suspend, + .resume = ccp_platform_resume, +#endif +}; + +int ccp_platform_init(void) +{ + return platform_driver_register(&ccp_platform_driver); +} + +void ccp_platform_exit(void) +{ + platform_driver_unregister(&ccp_platform_driver); +} diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c new file mode 100644 index 000000000..fe538e528 --- /dev/null +++ b/drivers/crypto/geode-aes.c @@ -0,0 +1,590 @@ + /* Copyright (C) 2004-2006, Advanced Micro Devices, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> +#include <linux/crypto.h> +#include <linux/spinlock.h> +#include <crypto/algapi.h> +#include <crypto/aes.h> + +#include <linux/io.h> +#include <linux/delay.h> + +#include "geode-aes.h" + +/* Static structures */ + +static void __iomem *_iobase; +static spinlock_t lock; + +/* Write a 128 bit field (either a writable key or IV) */ +static inline void +_writefield(u32 offset, void *value) +{ + int i; + for (i = 0; i < 4; i++) + iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4)); +} + +/* Read a 128 bit field (either a writable key or IV) */ +static inline void +_readfield(u32 offset, void *value) +{ + int i; + for (i = 0; i < 4; i++) + ((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4)); +} + +static int +do_crypt(void *src, void *dst, int len, u32 flags) +{ + u32 status; + u32 counter = AES_OP_TIMEOUT; + + iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG); + iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG); + iowrite32(len, _iobase + AES_LENA_REG); + + /* Start the operation */ + iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG); + + do { + status = ioread32(_iobase + AES_INTR_REG); + cpu_relax(); + } while (!(status & AES_INTRA_PENDING) && --counter); + + /* Clear the event */ + iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG); + return counter ? 0 : 1; +} + +static unsigned int +geode_aes_crypt(struct geode_aes_op *op) +{ + u32 flags = 0; + unsigned long iflags; + int ret; + + if (op->len == 0) + return 0; + + /* If the source and destination is the same, then + * we need to turn on the coherent flags, otherwise + * we don't need to worry + */ + + flags |= (AES_CTRL_DCA | AES_CTRL_SCA); + + if (op->dir == AES_DIR_ENCRYPT) + flags |= AES_CTRL_ENCRYPT; + + /* Start the critical section */ + + spin_lock_irqsave(&lock, iflags); + + if (op->mode == AES_MODE_CBC) { + flags |= AES_CTRL_CBC; + _writefield(AES_WRITEIV0_REG, op->iv); + } + + if (!(op->flags & AES_FLAGS_HIDDENKEY)) { + flags |= AES_CTRL_WRKEY; + _writefield(AES_WRITEKEY0_REG, op->key); + } + + ret = do_crypt(op->src, op->dst, op->len, flags); + BUG_ON(ret); + + if (op->mode == AES_MODE_CBC) + _readfield(AES_WRITEIV0_REG, op->iv); + + spin_unlock_irqrestore(&lock, iflags); + + return op->len; +} + +/* CRYPTO-API Functions */ + +static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key, + unsigned int len) +{ + struct geode_aes_op *op = crypto_tfm_ctx(tfm); + unsigned int ret; + + op->keylen = len; + + if (len == AES_KEYSIZE_128) { + memcpy(op->key, key, len); + return 0; + } + + if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) { + /* not supported at all */ + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + /* + * The requested key size is not supported by HW, do a fallback + */ + op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; + op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); + + ret = crypto_cipher_setkey(op->fallback.cip, key, len); + if (ret) { + tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; + tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK); + } + return ret; +} + +static int geode_setkey_blk(struct crypto_tfm *tfm, 
const u8 *key, + unsigned int len) +{ + struct geode_aes_op *op = crypto_tfm_ctx(tfm); + unsigned int ret; + + op->keylen = len; + + if (len == AES_KEYSIZE_128) { + memcpy(op->key, key, len); + return 0; + } + + if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) { + /* not supported at all */ + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + /* + * The requested key size is not supported by HW, do a fallback + */ + op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; + op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); + + ret = crypto_blkcipher_setkey(op->fallback.blk, key, len); + if (ret) { + tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; + tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK); + } + return ret; +} + +static int fallback_blk_dec(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + unsigned int ret; + struct crypto_blkcipher *tfm; + struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); + + tfm = desc->tfm; + desc->tfm = op->fallback.blk; + + ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes); + + desc->tfm = tfm; + return ret; +} +static int fallback_blk_enc(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + unsigned int ret; + struct crypto_blkcipher *tfm; + struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); + + tfm = desc->tfm; + desc->tfm = op->fallback.blk; + + ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes); + + desc->tfm = tfm; + return ret; +} + +static void +geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct geode_aes_op *op = crypto_tfm_ctx(tfm); + + if (unlikely(op->keylen != AES_KEYSIZE_128)) { + crypto_cipher_encrypt_one(op->fallback.cip, out, in); + return; + } + + op->src = (void *) in; + op->dst = (void *) out; + op->mode = AES_MODE_ECB; + op->flags = 0; + op->len = AES_BLOCK_SIZE; + op->dir = AES_DIR_ENCRYPT; + + geode_aes_crypt(op); +} + + +static void +geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct geode_aes_op *op = crypto_tfm_ctx(tfm); + + if (unlikely(op->keylen != AES_KEYSIZE_128)) { + crypto_cipher_decrypt_one(op->fallback.cip, out, in); + return; + } + + op->src = (void *) in; + op->dst = (void *) out; + op->mode = AES_MODE_ECB; + op->flags = 0; + op->len = AES_BLOCK_SIZE; + op->dir = AES_DIR_DECRYPT; + + geode_aes_crypt(op); +} + +static int fallback_init_cip(struct crypto_tfm *tfm) +{ + const char *name = crypto_tfm_alg_name(tfm); + struct geode_aes_op *op = crypto_tfm_ctx(tfm); + + op->fallback.cip = crypto_alloc_cipher(name, 0, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); + + if (IS_ERR(op->fallback.cip)) { + printk(KERN_ERR "Error allocating fallback algo %s\n", name); + return PTR_ERR(op->fallback.cip); + } + + return 0; +} + +static void fallback_exit_cip(struct crypto_tfm *tfm) +{ + struct geode_aes_op *op = crypto_tfm_ctx(tfm); + + crypto_free_cipher(op->fallback.cip); + op->fallback.cip = NULL; +} + +static struct crypto_alg geode_alg = { + .cra_name = "aes", + .cra_driver_name = "geode-aes", + .cra_priority = 300, + .cra_alignmask = 15, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER | + CRYPTO_ALG_NEED_FALLBACK, + .cra_init = fallback_init_cip, + .cra_exit = fallback_exit_cip, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct geode_aes_op), + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = 
AES_MAX_KEY_SIZE, + .cia_setkey = geode_setkey_cip, + .cia_encrypt = geode_encrypt, + .cia_decrypt = geode_decrypt + } + } +}; + +static int +geode_cbc_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; + int err, ret; + + if (unlikely(op->keylen != AES_KEYSIZE_128)) + return fallback_blk_dec(desc, dst, src, nbytes); + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + op->iv = walk.iv; + + while ((nbytes = walk.nbytes)) { + op->src = walk.src.virt.addr, + op->dst = walk.dst.virt.addr; + op->mode = AES_MODE_CBC; + op->len = nbytes - (nbytes % AES_BLOCK_SIZE); + op->dir = AES_DIR_DECRYPT; + + ret = geode_aes_crypt(op); + + nbytes -= ret; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + return err; +} + +static int +geode_cbc_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; + int err, ret; + + if (unlikely(op->keylen != AES_KEYSIZE_128)) + return fallback_blk_enc(desc, dst, src, nbytes); + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + op->iv = walk.iv; + + while ((nbytes = walk.nbytes)) { + op->src = walk.src.virt.addr, + op->dst = walk.dst.virt.addr; + op->mode = AES_MODE_CBC; + op->len = nbytes - (nbytes % AES_BLOCK_SIZE); + op->dir = AES_DIR_ENCRYPT; + + ret = geode_aes_crypt(op); + nbytes -= ret; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + return err; +} + +static int fallback_init_blk(struct crypto_tfm *tfm) +{ + const char *name = crypto_tfm_alg_name(tfm); + struct geode_aes_op *op = crypto_tfm_ctx(tfm); + + op->fallback.blk = crypto_alloc_blkcipher(name, 0, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); + + if (IS_ERR(op->fallback.blk)) { + printk(KERN_ERR "Error allocating fallback algo %s\n", name); + return PTR_ERR(op->fallback.blk); + } + + return 0; +} + +static void fallback_exit_blk(struct crypto_tfm *tfm) +{ + struct geode_aes_op *op = crypto_tfm_ctx(tfm); + + crypto_free_blkcipher(op->fallback.blk); + op->fallback.blk = NULL; +} + +static struct crypto_alg geode_cbc_alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-geode", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_init = fallback_init_blk, + .cra_exit = fallback_exit_blk, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct geode_aes_op), + .cra_alignmask = 15, + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_u = { + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = geode_setkey_blk, + .encrypt = geode_cbc_encrypt, + .decrypt = geode_cbc_decrypt, + .ivsize = AES_BLOCK_SIZE, + } + } +}; + +static int +geode_ecb_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; + int err, ret; + + if (unlikely(op->keylen != AES_KEYSIZE_128)) + return fallback_blk_dec(desc, dst, src, nbytes); + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while ((nbytes = walk.nbytes)) { + op->src = walk.src.virt.addr, + op->dst = walk.dst.virt.addr; + 
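+ /* Decrypt only the whole AES blocks available in this walk chunk */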
op->mode = AES_MODE_ECB; + op->len = nbytes - (nbytes % AES_BLOCK_SIZE); + op->dir = AES_DIR_DECRYPT; + + ret = geode_aes_crypt(op); + nbytes -= ret; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + return err; +} + +static int +geode_ecb_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; + int err, ret; + + if (unlikely(op->keylen != AES_KEYSIZE_128)) + return fallback_blk_enc(desc, dst, src, nbytes); + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while ((nbytes = walk.nbytes)) { + op->src = walk.src.virt.addr, + op->dst = walk.dst.virt.addr; + op->mode = AES_MODE_ECB; + op->len = nbytes - (nbytes % AES_BLOCK_SIZE); + op->dir = AES_DIR_ENCRYPT; + + ret = geode_aes_crypt(op); + nbytes -= ret; + ret = blkcipher_walk_done(desc, &walk, nbytes); + } + + return err; +} + +static struct crypto_alg geode_ecb_alg = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-geode", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_init = fallback_init_blk, + .cra_exit = fallback_exit_blk, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct geode_aes_op), + .cra_alignmask = 15, + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_u = { + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = geode_setkey_blk, + .encrypt = geode_ecb_encrypt, + .decrypt = geode_ecb_decrypt, + } + } +}; + +static void geode_aes_remove(struct pci_dev *dev) +{ + crypto_unregister_alg(&geode_alg); + crypto_unregister_alg(&geode_ecb_alg); + crypto_unregister_alg(&geode_cbc_alg); + + pci_iounmap(dev, _iobase); + _iobase = NULL; + + pci_release_regions(dev); + pci_disable_device(dev); +} + + +static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + int ret; + ret = pci_enable_device(dev); + if (ret) + return ret; + + ret = pci_request_regions(dev, "geode-aes"); + if (ret) + goto eenable; + + _iobase = pci_iomap(dev, 0, 0); + + if (_iobase == NULL) { + ret = -ENOMEM; + goto erequest; + } + + spin_lock_init(&lock); + + /* Clear any pending activity */ + iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG); + + ret = crypto_register_alg(&geode_alg); + if (ret) + goto eiomap; + + ret = crypto_register_alg(&geode_ecb_alg); + if (ret) + goto ealg; + + ret = crypto_register_alg(&geode_cbc_alg); + if (ret) + goto eecb; + + dev_notice(&dev->dev, "GEODE AES engine enabled.\n"); + return 0; + + eecb: + crypto_unregister_alg(&geode_ecb_alg); + + ealg: + crypto_unregister_alg(&geode_alg); + + eiomap: + pci_iounmap(dev, _iobase); + + erequest: + pci_release_regions(dev); + + eenable: + pci_disable_device(dev); + + dev_err(&dev->dev, "GEODE AES initialization failed.\n"); + return ret; +} + +static struct pci_device_id geode_aes_tbl[] = { + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), } , + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, geode_aes_tbl); + +static struct pci_driver geode_aes_driver = { + .name = "Geode LX AES", + .id_table = geode_aes_tbl, + .probe = geode_aes_probe, + .remove = geode_aes_remove, +}; + +module_pci_driver(geode_aes_driver); + +MODULE_AUTHOR("Advanced Micro Devices, Inc."); +MODULE_DESCRIPTION("Geode LX Hardware AES driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/geode-aes.h 
b/drivers/crypto/geode-aes.h new file mode 100644 index 000000000..f442ca972 --- /dev/null +++ b/drivers/crypto/geode-aes.h @@ -0,0 +1,73 @@ +/* Copyright (C) 2003-2006, Advanced Micro Devices, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _GEODE_AES_H_ +#define _GEODE_AES_H_ + +/* driver logic flags */ +#define AES_MODE_ECB 0 +#define AES_MODE_CBC 1 + +#define AES_DIR_DECRYPT 0 +#define AES_DIR_ENCRYPT 1 + +#define AES_FLAGS_HIDDENKEY (1 << 0) + +/* Register definitions */ + +#define AES_CTRLA_REG 0x0000 + +#define AES_CTRL_START 0x01 +#define AES_CTRL_DECRYPT 0x00 +#define AES_CTRL_ENCRYPT 0x02 +#define AES_CTRL_WRKEY 0x04 +#define AES_CTRL_DCA 0x08 +#define AES_CTRL_SCA 0x10 +#define AES_CTRL_CBC 0x20 + +#define AES_INTR_REG 0x0008 + +#define AES_INTRA_PENDING (1 << 16) +#define AES_INTRB_PENDING (1 << 17) + +#define AES_INTR_PENDING (AES_INTRA_PENDING | AES_INTRB_PENDING) +#define AES_INTR_MASK 0x07 + +#define AES_SOURCEA_REG 0x0010 +#define AES_DSTA_REG 0x0014 +#define AES_LENA_REG 0x0018 +#define AES_WRITEKEY0_REG 0x0030 +#define AES_WRITEIV0_REG 0x0040 + +/* A very large counter that is used to gracefully bail out of an + * operation in case of trouble + */ + +#define AES_OP_TIMEOUT 0x50000 + +struct geode_aes_op { + + void *src; + void *dst; + + u32 mode; + u32 dir; + u32 flags; + int len; + + u8 key[AES_KEYSIZE_128]; + u8 *iv; + + union { + struct crypto_blkcipher *blk; + struct crypto_cipher *cip; + } fallback; + u32 keylen; +}; + +#endif diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c new file mode 100644 index 000000000..8d2a77284 --- /dev/null +++ b/drivers/crypto/hifn_795x.c @@ -0,0 +1,2801 @@ +/* + * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/mod_devicetable.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/mm.h> +#include <linux/dma-mapping.h> +#include <linux/scatterlist.h> +#include <linux/highmem.h> +#include <linux/crypto.h> +#include <linux/hw_random.h> +#include <linux/ktime.h> + +#include <crypto/algapi.h> +#include <crypto/des.h> + +#include <asm/kmap_types.h> + +//#define HIFN_DEBUG + +#ifdef HIFN_DEBUG +#define dprintk(f, a...) printk(f, ##a) +#else +#define dprintk(f, a...) 
do {} while (0) +#endif + +static char hifn_pll_ref[sizeof("extNNN")] = "ext"; +module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444); +MODULE_PARM_DESC(hifn_pll_ref, + "PLL reference clock (pci[freq] or ext[freq], default ext)"); + +static atomic_t hifn_dev_number; + +#define ACRYPTO_OP_DECRYPT 0 +#define ACRYPTO_OP_ENCRYPT 1 +#define ACRYPTO_OP_HMAC 2 +#define ACRYPTO_OP_RNG 3 + +#define ACRYPTO_MODE_ECB 0 +#define ACRYPTO_MODE_CBC 1 +#define ACRYPTO_MODE_CFB 2 +#define ACRYPTO_MODE_OFB 3 + +#define ACRYPTO_TYPE_AES_128 0 +#define ACRYPTO_TYPE_AES_192 1 +#define ACRYPTO_TYPE_AES_256 2 +#define ACRYPTO_TYPE_3DES 3 +#define ACRYPTO_TYPE_DES 4 + +#define PCI_VENDOR_ID_HIFN 0x13A3 +#define PCI_DEVICE_ID_HIFN_7955 0x0020 +#define PCI_DEVICE_ID_HIFN_7956 0x001d + +/* I/O region sizes */ + +#define HIFN_BAR0_SIZE 0x1000 +#define HIFN_BAR1_SIZE 0x2000 +#define HIFN_BAR2_SIZE 0x8000 + +/* DMA registres */ + +#define HIFN_DMA_CRA 0x0C /* DMA Command Ring Address */ +#define HIFN_DMA_SDRA 0x1C /* DMA Source Data Ring Address */ +#define HIFN_DMA_RRA 0x2C /* DMA Result Ring Address */ +#define HIFN_DMA_DDRA 0x3C /* DMA Destination Data Ring Address */ +#define HIFN_DMA_STCTL 0x40 /* DMA Status and Control */ +#define HIFN_DMA_INTREN 0x44 /* DMA Interrupt Enable */ +#define HIFN_DMA_CFG1 0x48 /* DMA Configuration #1 */ +#define HIFN_DMA_CFG2 0x6C /* DMA Configuration #2 */ +#define HIFN_CHIP_ID 0x98 /* Chip ID */ + +/* + * Processing Unit Registers (offset from BASEREG0) + */ +#define HIFN_0_PUDATA 0x00 /* Processing Unit Data */ +#define HIFN_0_PUCTRL 0x04 /* Processing Unit Control */ +#define HIFN_0_PUISR 0x08 /* Processing Unit Interrupt Status */ +#define HIFN_0_PUCNFG 0x0c /* Processing Unit Configuration */ +#define HIFN_0_PUIER 0x10 /* Processing Unit Interrupt Enable */ +#define HIFN_0_PUSTAT 0x14 /* Processing Unit Status/Chip ID */ +#define HIFN_0_FIFOSTAT 0x18 /* FIFO Status */ +#define HIFN_0_FIFOCNFG 0x1c /* FIFO Configuration */ +#define HIFN_0_SPACESIZE 0x20 /* Register space size */ + +/* Processing Unit Control Register (HIFN_0_PUCTRL) */ +#define HIFN_PUCTRL_CLRSRCFIFO 0x0010 /* clear source fifo */ +#define HIFN_PUCTRL_STOP 0x0008 /* stop pu */ +#define HIFN_PUCTRL_LOCKRAM 0x0004 /* lock ram */ +#define HIFN_PUCTRL_DMAENA 0x0002 /* enable dma */ +#define HIFN_PUCTRL_RESET 0x0001 /* Reset processing unit */ + +/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */ +#define HIFN_PUISR_CMDINVAL 0x8000 /* Invalid command interrupt */ +#define HIFN_PUISR_DATAERR 0x4000 /* Data error interrupt */ +#define HIFN_PUISR_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ +#define HIFN_PUISR_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ +#define HIFN_PUISR_DSTOVER 0x0200 /* Destination overrun interrupt */ +#define HIFN_PUISR_SRCCMD 0x0080 /* Source command interrupt */ +#define HIFN_PUISR_SRCCTX 0x0040 /* Source context interrupt */ +#define HIFN_PUISR_SRCDATA 0x0020 /* Source data interrupt */ +#define HIFN_PUISR_DSTDATA 0x0010 /* Destination data interrupt */ +#define HIFN_PUISR_DSTRESULT 0x0004 /* Destination result interrupt */ + +/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */ +#define HIFN_PUCNFG_DRAMMASK 0xe000 /* DRAM size mask */ +#define HIFN_PUCNFG_DSZ_256K 0x0000 /* 256k dram */ +#define HIFN_PUCNFG_DSZ_512K 0x2000 /* 512k dram */ +#define HIFN_PUCNFG_DSZ_1M 0x4000 /* 1m dram */ +#define HIFN_PUCNFG_DSZ_2M 0x6000 /* 2m dram */ +#define HIFN_PUCNFG_DSZ_4M 0x8000 /* 4m dram */ +#define HIFN_PUCNFG_DSZ_8M 0xa000 /* 8m dram */ +#define 
HIFN_PUNCFG_DSZ_16M 0xc000 /* 16m dram */ +#define HIFN_PUCNFG_DSZ_32M 0xe000 /* 32m dram */ +#define HIFN_PUCNFG_DRAMREFRESH 0x1800 /* DRAM refresh rate mask */ +#define HIFN_PUCNFG_DRFR_512 0x0000 /* 512 divisor of ECLK */ +#define HIFN_PUCNFG_DRFR_256 0x0800 /* 256 divisor of ECLK */ +#define HIFN_PUCNFG_DRFR_128 0x1000 /* 128 divisor of ECLK */ +#define HIFN_PUCNFG_TCALLPHASES 0x0200 /* your guess is as good as mine... */ +#define HIFN_PUCNFG_TCDRVTOTEM 0x0100 /* your guess is as good as mine... */ +#define HIFN_PUCNFG_BIGENDIAN 0x0080 /* DMA big endian mode */ +#define HIFN_PUCNFG_BUS32 0x0040 /* Bus width 32bits */ +#define HIFN_PUCNFG_BUS16 0x0000 /* Bus width 16 bits */ +#define HIFN_PUCNFG_CHIPID 0x0020 /* Allow chipid from PUSTAT */ +#define HIFN_PUCNFG_DRAM 0x0010 /* Context RAM is DRAM */ +#define HIFN_PUCNFG_SRAM 0x0000 /* Context RAM is SRAM */ +#define HIFN_PUCNFG_COMPSING 0x0004 /* Enable single compression context */ +#define HIFN_PUCNFG_ENCCNFG 0x0002 /* Encryption configuration */ + +/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */ +#define HIFN_PUIER_CMDINVAL 0x8000 /* Invalid command interrupt */ +#define HIFN_PUIER_DATAERR 0x4000 /* Data error interrupt */ +#define HIFN_PUIER_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ +#define HIFN_PUIER_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ +#define HIFN_PUIER_DSTOVER 0x0200 /* Destination overrun interrupt */ +#define HIFN_PUIER_SRCCMD 0x0080 /* Source command interrupt */ +#define HIFN_PUIER_SRCCTX 0x0040 /* Source context interrupt */ +#define HIFN_PUIER_SRCDATA 0x0020 /* Source data interrupt */ +#define HIFN_PUIER_DSTDATA 0x0010 /* Destination data interrupt */ +#define HIFN_PUIER_DSTRESULT 0x0004 /* Destination result interrupt */ + +/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */ +#define HIFN_PUSTAT_CMDINVAL 0x8000 /* Invalid command interrupt */ +#define HIFN_PUSTAT_DATAERR 0x4000 /* Data error interrupt */ +#define HIFN_PUSTAT_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ +#define HIFN_PUSTAT_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ +#define HIFN_PUSTAT_DSTOVER 0x0200 /* Destination overrun interrupt */ +#define HIFN_PUSTAT_SRCCMD 0x0080 /* Source command interrupt */ +#define HIFN_PUSTAT_SRCCTX 0x0040 /* Source context interrupt */ +#define HIFN_PUSTAT_SRCDATA 0x0020 /* Source data interrupt */ +#define HIFN_PUSTAT_DSTDATA 0x0010 /* Destination data interrupt */ +#define HIFN_PUSTAT_DSTRESULT 0x0004 /* Destination result interrupt */ +#define HIFN_PUSTAT_CHIPREV 0x00ff /* Chip revision mask */ +#define HIFN_PUSTAT_CHIPENA 0xff00 /* Chip enabled mask */ +#define HIFN_PUSTAT_ENA_2 0x1100 /* Level 2 enabled */ +#define HIFN_PUSTAT_ENA_1 0x1000 /* Level 1 enabled */ +#define HIFN_PUSTAT_ENA_0 0x3000 /* Level 0 enabled */ +#define HIFN_PUSTAT_REV_2 0x0020 /* 7751 PT6/2 */ +#define HIFN_PUSTAT_REV_3 0x0030 /* 7751 PT6/3 */ + +/* FIFO Status Register (HIFN_0_FIFOSTAT) */ +#define HIFN_FIFOSTAT_SRC 0x7f00 /* Source FIFO available */ +#define HIFN_FIFOSTAT_DST 0x007f /* Destination FIFO available */ + +/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */ +#define HIFN_FIFOCNFG_THRESHOLD 0x0400 /* must be written as 1 */ + +/* + * DMA Interface Registers (offset from BASEREG1) + */ +#define HIFN_1_DMA_CRAR 0x0c /* DMA Command Ring Address */ +#define HIFN_1_DMA_SRAR 0x1c /* DMA Source Ring Address */ +#define HIFN_1_DMA_RRAR 0x2c /* DMA Result Ring Address */ +#define HIFN_1_DMA_DRAR 0x3c /* DMA Destination Ring Address */ +#define HIFN_1_DMA_CSR 0x40 /* DMA Status and 
Control */ +#define HIFN_1_DMA_IER 0x44 /* DMA Interrupt Enable */ +#define HIFN_1_DMA_CNFG 0x48 /* DMA Configuration */ +#define HIFN_1_PLL 0x4c /* 795x: PLL config */ +#define HIFN_1_7811_RNGENA 0x60 /* 7811: rng enable */ +#define HIFN_1_7811_RNGCFG 0x64 /* 7811: rng config */ +#define HIFN_1_7811_RNGDAT 0x68 /* 7811: rng data */ +#define HIFN_1_7811_RNGSTS 0x6c /* 7811: rng status */ +#define HIFN_1_7811_MIPSRST 0x94 /* 7811: MIPS reset */ +#define HIFN_1_REVID 0x98 /* Revision ID */ +#define HIFN_1_UNLOCK_SECRET1 0xf4 +#define HIFN_1_UNLOCK_SECRET2 0xfc +#define HIFN_1_PUB_RESET 0x204 /* Public/RNG Reset */ +#define HIFN_1_PUB_BASE 0x300 /* Public Base Address */ +#define HIFN_1_PUB_OPLEN 0x304 /* Public Operand Length */ +#define HIFN_1_PUB_OP 0x308 /* Public Operand */ +#define HIFN_1_PUB_STATUS 0x30c /* Public Status */ +#define HIFN_1_PUB_IEN 0x310 /* Public Interrupt enable */ +#define HIFN_1_RNG_CONFIG 0x314 /* RNG config */ +#define HIFN_1_RNG_DATA 0x318 /* RNG data */ +#define HIFN_1_PUB_MEM 0x400 /* start of Public key memory */ +#define HIFN_1_PUB_MEMEND 0xbff /* end of Public key memory */ + +/* DMA Status and Control Register (HIFN_1_DMA_CSR) */ +#define HIFN_DMACSR_D_CTRLMASK 0xc0000000 /* Destinition Ring Control */ +#define HIFN_DMACSR_D_CTRL_NOP 0x00000000 /* Dest. Control: no-op */ +#define HIFN_DMACSR_D_CTRL_DIS 0x40000000 /* Dest. Control: disable */ +#define HIFN_DMACSR_D_CTRL_ENA 0x80000000 /* Dest. Control: enable */ +#define HIFN_DMACSR_D_ABORT 0x20000000 /* Destinition Ring PCIAbort */ +#define HIFN_DMACSR_D_DONE 0x10000000 /* Destinition Ring Done */ +#define HIFN_DMACSR_D_LAST 0x08000000 /* Destinition Ring Last */ +#define HIFN_DMACSR_D_WAIT 0x04000000 /* Destinition Ring Waiting */ +#define HIFN_DMACSR_D_OVER 0x02000000 /* Destinition Ring Overflow */ +#define HIFN_DMACSR_R_CTRL 0x00c00000 /* Result Ring Control */ +#define HIFN_DMACSR_R_CTRL_NOP 0x00000000 /* Result Control: no-op */ +#define HIFN_DMACSR_R_CTRL_DIS 0x00400000 /* Result Control: disable */ +#define HIFN_DMACSR_R_CTRL_ENA 0x00800000 /* Result Control: enable */ +#define HIFN_DMACSR_R_ABORT 0x00200000 /* Result Ring PCI Abort */ +#define HIFN_DMACSR_R_DONE 0x00100000 /* Result Ring Done */ +#define HIFN_DMACSR_R_LAST 0x00080000 /* Result Ring Last */ +#define HIFN_DMACSR_R_WAIT 0x00040000 /* Result Ring Waiting */ +#define HIFN_DMACSR_R_OVER 0x00020000 /* Result Ring Overflow */ +#define HIFN_DMACSR_S_CTRL 0x0000c000 /* Source Ring Control */ +#define HIFN_DMACSR_S_CTRL_NOP 0x00000000 /* Source Control: no-op */ +#define HIFN_DMACSR_S_CTRL_DIS 0x00004000 /* Source Control: disable */ +#define HIFN_DMACSR_S_CTRL_ENA 0x00008000 /* Source Control: enable */ +#define HIFN_DMACSR_S_ABORT 0x00002000 /* Source Ring PCI Abort */ +#define HIFN_DMACSR_S_DONE 0x00001000 /* Source Ring Done */ +#define HIFN_DMACSR_S_LAST 0x00000800 /* Source Ring Last */ +#define HIFN_DMACSR_S_WAIT 0x00000400 /* Source Ring Waiting */ +#define HIFN_DMACSR_ILLW 0x00000200 /* Illegal write (7811 only) */ +#define HIFN_DMACSR_ILLR 0x00000100 /* Illegal read (7811 only) */ +#define HIFN_DMACSR_C_CTRL 0x000000c0 /* Command Ring Control */ +#define HIFN_DMACSR_C_CTRL_NOP 0x00000000 /* Command Control: no-op */ +#define HIFN_DMACSR_C_CTRL_DIS 0x00000040 /* Command Control: disable */ +#define HIFN_DMACSR_C_CTRL_ENA 0x00000080 /* Command Control: enable */ +#define HIFN_DMACSR_C_ABORT 0x00000020 /* Command Ring PCI Abort */ +#define HIFN_DMACSR_C_DONE 0x00000010 /* Command Ring Done */ +#define HIFN_DMACSR_C_LAST 0x00000008 /* 
Command Ring Last */ +#define HIFN_DMACSR_C_WAIT 0x00000004 /* Command Ring Waiting */ +#define HIFN_DMACSR_PUBDONE 0x00000002 /* Public op done (7951 only) */ +#define HIFN_DMACSR_ENGINE 0x00000001 /* Command Ring Engine IRQ */ + +/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */ +#define HIFN_DMAIER_D_ABORT 0x20000000 /* Destination Ring PCIAbort */ +#define HIFN_DMAIER_D_DONE 0x10000000 /* Destination Ring Done */ +#define HIFN_DMAIER_D_LAST 0x08000000 /* Destination Ring Last */ +#define HIFN_DMAIER_D_WAIT 0x04000000 /* Destination Ring Waiting */ +#define HIFN_DMAIER_D_OVER 0x02000000 /* Destination Ring Overflow */ +#define HIFN_DMAIER_R_ABORT 0x00200000 /* Result Ring PCI Abort */ +#define HIFN_DMAIER_R_DONE 0x00100000 /* Result Ring Done */ +#define HIFN_DMAIER_R_LAST 0x00080000 /* Result Ring Last */ +#define HIFN_DMAIER_R_WAIT 0x00040000 /* Result Ring Waiting */ +#define HIFN_DMAIER_R_OVER 0x00020000 /* Result Ring Overflow */ +#define HIFN_DMAIER_S_ABORT 0x00002000 /* Source Ring PCI Abort */ +#define HIFN_DMAIER_S_DONE 0x00001000 /* Source Ring Done */ +#define HIFN_DMAIER_S_LAST 0x00000800 /* Source Ring Last */ +#define HIFN_DMAIER_S_WAIT 0x00000400 /* Source Ring Waiting */ +#define HIFN_DMAIER_ILLW 0x00000200 /* Illegal write (7811 only) */ +#define HIFN_DMAIER_ILLR 0x00000100 /* Illegal read (7811 only) */ +#define HIFN_DMAIER_C_ABORT 0x00000020 /* Command Ring PCI Abort */ +#define HIFN_DMAIER_C_DONE 0x00000010 /* Command Ring Done */ +#define HIFN_DMAIER_C_LAST 0x00000008 /* Command Ring Last */ +#define HIFN_DMAIER_C_WAIT 0x00000004 /* Command Ring Waiting */ +#define HIFN_DMAIER_PUBDONE 0x00000002 /* public op done (7951 only) */ +#define HIFN_DMAIER_ENGINE 0x00000001 /* Engine IRQ */ + +/* DMA Configuration Register (HIFN_1_DMA_CNFG) */ +#define HIFN_DMACNFG_BIGENDIAN 0x10000000 /* big endian mode */ +#define HIFN_DMACNFG_POLLFREQ 0x00ff0000 /* Poll frequency mask */ +#define HIFN_DMACNFG_UNLOCK 0x00000800 +#define HIFN_DMACNFG_POLLINVAL 0x00000700 /* Invalid Poll Scalar */ +#define HIFN_DMACNFG_LAST 0x00000010 /* Host control LAST bit */ +#define HIFN_DMACNFG_MODE 0x00000004 /* DMA mode */ +#define HIFN_DMACNFG_DMARESET 0x00000002 /* DMA Reset # */ +#define HIFN_DMACNFG_MSTRESET 0x00000001 /* Master Reset # */ + +/* PLL configuration register */ +#define HIFN_PLL_REF_CLK_HBI 0x00000000 /* HBI reference clock */ +#define HIFN_PLL_REF_CLK_PLL 0x00000001 /* PLL reference clock */ +#define HIFN_PLL_BP 0x00000002 /* Reference clock bypass */ +#define HIFN_PLL_PK_CLK_HBI 0x00000000 /* PK engine HBI clock */ +#define HIFN_PLL_PK_CLK_PLL 0x00000008 /* PK engine PLL clock */ +#define HIFN_PLL_PE_CLK_HBI 0x00000000 /* PE engine HBI clock */ +#define HIFN_PLL_PE_CLK_PLL 0x00000010 /* PE engine PLL clock */ +#define HIFN_PLL_RESERVED_1 0x00000400 /* Reserved bit, must be 1 */ +#define HIFN_PLL_ND_SHIFT 11 /* Clock multiplier shift */ +#define HIFN_PLL_ND_MULT_2 0x00000000 /* PLL clock multiplier 2 */ +#define HIFN_PLL_ND_MULT_4 0x00000800 /* PLL clock multiplier 4 */ +#define HIFN_PLL_ND_MULT_6 0x00001000 /* PLL clock multiplier 6 */ +#define HIFN_PLL_ND_MULT_8 0x00001800 /* PLL clock multiplier 8 */ +#define HIFN_PLL_ND_MULT_10 0x00002000 /* PLL clock multiplier 10 */ +#define HIFN_PLL_ND_MULT_12 0x00002800 /* PLL clock multiplier 12 */ +#define HIFN_PLL_IS_1_8 0x00000000 /* charge pump (mult. 1-8) */ +#define HIFN_PLL_IS_9_12 0x00010000 /* charge pump (mult. 
9-12) */ + +#define HIFN_PLL_FCK_MAX 266 /* Maximum PLL frequency */ + +/* Public key reset register (HIFN_1_PUB_RESET) */ +#define HIFN_PUBRST_RESET 0x00000001 /* reset public/rng unit */ + +/* Public base address register (HIFN_1_PUB_BASE) */ +#define HIFN_PUBBASE_ADDR 0x00003fff /* base address */ + +/* Public operand length register (HIFN_1_PUB_OPLEN) */ +#define HIFN_PUBOPLEN_MOD_M 0x0000007f /* modulus length mask */ +#define HIFN_PUBOPLEN_MOD_S 0 /* modulus length shift */ +#define HIFN_PUBOPLEN_EXP_M 0x0003ff80 /* exponent length mask */ +#define HIFN_PUBOPLEN_EXP_S 7 /* exponent length shift */ +#define HIFN_PUBOPLEN_RED_M 0x003c0000 /* reducend length mask */ +#define HIFN_PUBOPLEN_RED_S 18 /* reducend length shift */ + +/* Public operation register (HIFN_1_PUB_OP) */ +#define HIFN_PUBOP_AOFFSET_M 0x0000007f /* A offset mask */ +#define HIFN_PUBOP_AOFFSET_S 0 /* A offset shift */ +#define HIFN_PUBOP_BOFFSET_M 0x00000f80 /* B offset mask */ +#define HIFN_PUBOP_BOFFSET_S 7 /* B offset shift */ +#define HIFN_PUBOP_MOFFSET_M 0x0003f000 /* M offset mask */ +#define HIFN_PUBOP_MOFFSET_S 12 /* M offset shift */ +#define HIFN_PUBOP_OP_MASK 0x003c0000 /* Opcode: */ +#define HIFN_PUBOP_OP_NOP 0x00000000 /* NOP */ +#define HIFN_PUBOP_OP_ADD 0x00040000 /* ADD */ +#define HIFN_PUBOP_OP_ADDC 0x00080000 /* ADD w/carry */ +#define HIFN_PUBOP_OP_SUB 0x000c0000 /* SUB */ +#define HIFN_PUBOP_OP_SUBC 0x00100000 /* SUB w/carry */ +#define HIFN_PUBOP_OP_MODADD 0x00140000 /* Modular ADD */ +#define HIFN_PUBOP_OP_MODSUB 0x00180000 /* Modular SUB */ +#define HIFN_PUBOP_OP_INCA 0x001c0000 /* INC A */ +#define HIFN_PUBOP_OP_DECA 0x00200000 /* DEC A */ +#define HIFN_PUBOP_OP_MULT 0x00240000 /* MULT */ +#define HIFN_PUBOP_OP_MODMULT 0x00280000 /* Modular MULT */ +#define HIFN_PUBOP_OP_MODRED 0x002c0000 /* Modular RED */ +#define HIFN_PUBOP_OP_MODEXP 0x00300000 /* Modular EXP */ + +/* Public status register (HIFN_1_PUB_STATUS) */ +#define HIFN_PUBSTS_DONE 0x00000001 /* operation done */ +#define HIFN_PUBSTS_CARRY 0x00000002 /* carry */ + +/* Public interrupt enable register (HIFN_1_PUB_IEN) */ +#define HIFN_PUBIEN_DONE 0x00000001 /* operation done interrupt */ + +/* Random number generator config register (HIFN_1_RNG_CONFIG) */ +#define HIFN_RNGCFG_ENA 0x00000001 /* enable rng */ + +#define HIFN_NAMESIZE 32 +#define HIFN_MAX_RESULT_ORDER 5 + +#define HIFN_D_CMD_RSIZE 24*1 +#define HIFN_D_SRC_RSIZE 80*1 +#define HIFN_D_DST_RSIZE 80*1 +#define HIFN_D_RES_RSIZE 24*1 + +#define HIFN_D_DST_DALIGN 4 + +#define HIFN_QUEUE_LENGTH (HIFN_D_CMD_RSIZE - 1) + +#define AES_MIN_KEY_SIZE 16 +#define AES_MAX_KEY_SIZE 32 + +#define HIFN_DES_KEY_LENGTH 8 +#define HIFN_3DES_KEY_LENGTH 24 +#define HIFN_MAX_CRYPT_KEY_LENGTH AES_MAX_KEY_SIZE +#define HIFN_IV_LENGTH 8 +#define HIFN_AES_IV_LENGTH 16 +#define HIFN_MAX_IV_LENGTH HIFN_AES_IV_LENGTH + +#define HIFN_MAC_KEY_LENGTH 64 +#define HIFN_MD5_LENGTH 16 +#define HIFN_SHA1_LENGTH 20 +#define HIFN_MAC_TRUNC_LENGTH 12 + +#define HIFN_MAX_COMMAND (8 + 8 + 8 + 64 + 260) +#define HIFN_MAX_RESULT (8 + 4 + 4 + 20 + 4) +#define HIFN_USED_RESULT 12 + +struct hifn_desc +{ + volatile __le32 l; + volatile __le32 p; +}; + +struct hifn_dma { + struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1]; + struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1]; + struct hifn_desc dstr[HIFN_D_DST_RSIZE+1]; + struct hifn_desc resr[HIFN_D_RES_RSIZE+1]; + + u8 command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND]; + u8 result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT]; + + /* + * Our current positions for insertion and removal from the 
descriptor + * rings. + */ + volatile int cmdi, srci, dsti, resi; + volatile int cmdu, srcu, dstu, resu; + int cmdk, srck, dstk, resk; +}; + +#define HIFN_FLAG_CMD_BUSY (1<<0) +#define HIFN_FLAG_SRC_BUSY (1<<1) +#define HIFN_FLAG_DST_BUSY (1<<2) +#define HIFN_FLAG_RES_BUSY (1<<3) +#define HIFN_FLAG_OLD_KEY (1<<4) + +#define HIFN_DEFAULT_ACTIVE_NUM 5 + +struct hifn_device +{ + char name[HIFN_NAMESIZE]; + + int irq; + + struct pci_dev *pdev; + void __iomem *bar[3]; + + void *desc_virt; + dma_addr_t desc_dma; + + u32 dmareg; + + void *sa[HIFN_D_RES_RSIZE]; + + spinlock_t lock; + + u32 flags; + int active, started; + struct delayed_work work; + unsigned long reset; + unsigned long success; + unsigned long prev_success; + + u8 snum; + + struct tasklet_struct tasklet; + + struct crypto_queue queue; + struct list_head alg_list; + + unsigned int pk_clk_freq; + +#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG + unsigned int rng_wait_time; + ktime_t rngtime; + struct hwrng rng; +#endif +}; + +#define HIFN_D_LENGTH 0x0000ffff +#define HIFN_D_NOINVALID 0x01000000 +#define HIFN_D_MASKDONEIRQ 0x02000000 +#define HIFN_D_DESTOVER 0x04000000 +#define HIFN_D_OVER 0x08000000 +#define HIFN_D_LAST 0x20000000 +#define HIFN_D_JUMP 0x40000000 +#define HIFN_D_VALID 0x80000000 + +struct hifn_base_command +{ + volatile __le16 masks; + volatile __le16 session_num; + volatile __le16 total_source_count; + volatile __le16 total_dest_count; +}; + +#define HIFN_BASE_CMD_COMP 0x0100 /* enable compression engine */ +#define HIFN_BASE_CMD_PAD 0x0200 /* enable padding engine */ +#define HIFN_BASE_CMD_MAC 0x0400 /* enable MAC engine */ +#define HIFN_BASE_CMD_CRYPT 0x0800 /* enable crypt engine */ +#define HIFN_BASE_CMD_DECODE 0x2000 +#define HIFN_BASE_CMD_SRCLEN_M 0xc000 +#define HIFN_BASE_CMD_SRCLEN_S 14 +#define HIFN_BASE_CMD_DSTLEN_M 0x3000 +#define HIFN_BASE_CMD_DSTLEN_S 12 +#define HIFN_BASE_CMD_LENMASK_HI 0x30000 +#define HIFN_BASE_CMD_LENMASK_LO 0x0ffff + +/* + * Structure to help build up the command data structure. + */ +struct hifn_crypt_command +{ + volatile __le16 masks; + volatile __le16 header_skip; + volatile __le16 source_count; + volatile __le16 reserved; +}; + +#define HIFN_CRYPT_CMD_ALG_MASK 0x0003 /* algorithm: */ +#define HIFN_CRYPT_CMD_ALG_DES 0x0000 /* DES */ +#define HIFN_CRYPT_CMD_ALG_3DES 0x0001 /* 3DES */ +#define HIFN_CRYPT_CMD_ALG_RC4 0x0002 /* RC4 */ +#define HIFN_CRYPT_CMD_ALG_AES 0x0003 /* AES */ +#define HIFN_CRYPT_CMD_MODE_MASK 0x0018 /* Encrypt mode: */ +#define HIFN_CRYPT_CMD_MODE_ECB 0x0000 /* ECB */ +#define HIFN_CRYPT_CMD_MODE_CBC 0x0008 /* CBC */ +#define HIFN_CRYPT_CMD_MODE_CFB 0x0010 /* CFB */ +#define HIFN_CRYPT_CMD_MODE_OFB 0x0018 /* OFB */ +#define HIFN_CRYPT_CMD_CLR_CTX 0x0040 /* clear context */ +#define HIFN_CRYPT_CMD_KSZ_MASK 0x0600 /* AES key size: */ +#define HIFN_CRYPT_CMD_KSZ_128 0x0000 /* 128 bit */ +#define HIFN_CRYPT_CMD_KSZ_192 0x0200 /* 192 bit */ +#define HIFN_CRYPT_CMD_KSZ_256 0x0400 /* 256 bit */ +#define HIFN_CRYPT_CMD_NEW_KEY 0x0800 /* expect new key */ +#define HIFN_CRYPT_CMD_NEW_IV 0x1000 /* expect new iv */ +#define HIFN_CRYPT_CMD_SRCLEN_M 0xc000 +#define HIFN_CRYPT_CMD_SRCLEN_S 14 + +/* + * Structure to help build up the command data structure. 
+ */ +struct hifn_mac_command +{ + volatile __le16 masks; + volatile __le16 header_skip; + volatile __le16 source_count; + volatile __le16 reserved; +}; + +#define HIFN_MAC_CMD_ALG_MASK 0x0001 +#define HIFN_MAC_CMD_ALG_SHA1 0x0000 +#define HIFN_MAC_CMD_ALG_MD5 0x0001 +#define HIFN_MAC_CMD_MODE_MASK 0x000c +#define HIFN_MAC_CMD_MODE_HMAC 0x0000 +#define HIFN_MAC_CMD_MODE_SSL_MAC 0x0004 +#define HIFN_MAC_CMD_MODE_HASH 0x0008 +#define HIFN_MAC_CMD_MODE_FULL 0x0004 +#define HIFN_MAC_CMD_TRUNC 0x0010 +#define HIFN_MAC_CMD_RESULT 0x0020 +#define HIFN_MAC_CMD_APPEND 0x0040 +#define HIFN_MAC_CMD_SRCLEN_M 0xc000 +#define HIFN_MAC_CMD_SRCLEN_S 14 + +/* + * MAC POS IPsec initiates authentication after encryption on encodes + * and before decryption on decodes. + */ +#define HIFN_MAC_CMD_POS_IPSEC 0x0200 +#define HIFN_MAC_CMD_NEW_KEY 0x0800 + +struct hifn_comp_command +{ + volatile __le16 masks; + volatile __le16 header_skip; + volatile __le16 source_count; + volatile __le16 reserved; +}; + +#define HIFN_COMP_CMD_SRCLEN_M 0xc000 +#define HIFN_COMP_CMD_SRCLEN_S 14 +#define HIFN_COMP_CMD_ONE 0x0100 /* must be one */ +#define HIFN_COMP_CMD_CLEARHIST 0x0010 /* clear history */ +#define HIFN_COMP_CMD_UPDATEHIST 0x0008 /* update history */ +#define HIFN_COMP_CMD_LZS_STRIP0 0x0004 /* LZS: strip zero */ +#define HIFN_COMP_CMD_MPPC_RESTART 0x0004 /* MPPC: restart */ +#define HIFN_COMP_CMD_ALG_MASK 0x0001 /* compression mode: */ +#define HIFN_COMP_CMD_ALG_MPPC 0x0001 /* MPPC */ +#define HIFN_COMP_CMD_ALG_LZS 0x0000 /* LZS */ + +struct hifn_base_result +{ + volatile __le16 flags; + volatile __le16 session; + volatile __le16 src_cnt; /* 15:0 of source count */ + volatile __le16 dst_cnt; /* 15:0 of dest count */ +}; + +#define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */ +#define HIFN_BASE_RES_SRCLEN_M 0xc000 /* 17:16 of source count */ +#define HIFN_BASE_RES_SRCLEN_S 14 +#define HIFN_BASE_RES_DSTLEN_M 0x3000 /* 17:16 of dest count */ +#define HIFN_BASE_RES_DSTLEN_S 12 + +struct hifn_comp_result +{ + volatile __le16 flags; + volatile __le16 crc; +}; + +#define HIFN_COMP_RES_LCB_M 0xff00 /* longitudinal check byte */ +#define HIFN_COMP_RES_LCB_S 8 +#define HIFN_COMP_RES_RESTART 0x0004 /* MPPC: restart */ +#define HIFN_COMP_RES_ENDMARKER 0x0002 /* LZS: end marker seen */ +#define HIFN_COMP_RES_SRC_NOTZERO 0x0001 /* source expired */ + +struct hifn_mac_result +{ + volatile __le16 flags; + volatile __le16 reserved; + /* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */ +}; + +#define HIFN_MAC_RES_MISCOMPARE 0x0002 /* compare failed */ +#define HIFN_MAC_RES_SRC_NOTZERO 0x0001 /* source expired */ + +struct hifn_crypt_result +{ + volatile __le16 flags; + volatile __le16 reserved; +}; + +#define HIFN_CRYPT_RES_SRC_NOTZERO 0x0001 /* source expired */ + +#ifndef HIFN_POLL_FREQUENCY +#define HIFN_POLL_FREQUENCY 0x1 +#endif + +#ifndef HIFN_POLL_SCALAR +#define HIFN_POLL_SCALAR 0x0 +#endif + +#define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */ +#define HIFN_MAX_DMALEN 0x3ffff /* maximum dma length */ + +struct hifn_crypto_alg +{ + struct list_head entry; + struct crypto_alg alg; + struct hifn_device *dev; +}; + +#define ASYNC_SCATTERLIST_CACHE 16 + +#define ASYNC_FLAGS_MISALIGNED (1<<0) + +struct hifn_cipher_walk +{ + struct scatterlist cache[ASYNC_SCATTERLIST_CACHE]; + u32 flags; + int num; +}; + +struct hifn_context +{ + u8 key[HIFN_MAX_CRYPT_KEY_LENGTH]; + struct hifn_device *dev; + unsigned int keysize; +}; + +struct hifn_request_context +{ + u8 *iv; + unsigned int ivsize; + u8 op, type, 
mode, unused; + struct hifn_cipher_walk walk; +}; + +#define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg) + +static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg) +{ + u32 ret; + + ret = readl(dev->bar[0] + reg); + + return ret; +} + +static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg) +{ + u32 ret; + + ret = readl(dev->bar[1] + reg); + + return ret; +} + +static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val) +{ + writel((__force u32)cpu_to_le32(val), dev->bar[0] + reg); +} + +static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val) +{ + writel((__force u32)cpu_to_le32(val), dev->bar[1] + reg); +} + +static void hifn_wait_puc(struct hifn_device *dev) +{ + int i; + u32 ret; + + for (i=10000; i > 0; --i) { + ret = hifn_read_0(dev, HIFN_0_PUCTRL); + if (!(ret & HIFN_PUCTRL_RESET)) + break; + + udelay(1); + } + + if (!i) + dprintk("%s: Failed to reset PUC unit.\n", dev->name); +} + +static void hifn_reset_puc(struct hifn_device *dev) +{ + hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); + hifn_wait_puc(dev); +} + +static void hifn_stop_device(struct hifn_device *dev) +{ + hifn_write_1(dev, HIFN_1_DMA_CSR, + HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS | + HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS); + hifn_write_0(dev, HIFN_0_PUIER, 0); + hifn_write_1(dev, HIFN_1_DMA_IER, 0); +} + +static void hifn_reset_dma(struct hifn_device *dev, int full) +{ + hifn_stop_device(dev); + + /* + * Setting poll frequency and others to 0. + */ + hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); + mdelay(1); + + /* + * Reset DMA. + */ + if (full) { + hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE); + mdelay(1); + } else { + hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE | + HIFN_DMACNFG_MSTRESET); + hifn_reset_puc(dev); + } + + hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); + + hifn_reset_puc(dev); +} + +static u32 hifn_next_signature(u_int32_t a, u_int cnt) +{ + int i; + u32 v; + + for (i = 0; i < cnt; i++) { + + /* get the parity */ + v = a & 0x80080125; + v ^= v >> 16; + v ^= v >> 8; + v ^= v >> 4; + v ^= v >> 2; + v ^= v >> 1; + + a = (v & 1) ^ (a << 1); + } + + return a; +} + +static struct pci2id { + u_short pci_vendor; + u_short pci_prod; + char card_id[13]; +} pci2id[] = { + { + PCI_VENDOR_ID_HIFN, + PCI_DEVICE_ID_HIFN_7955, + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00 } + }, + { + PCI_VENDOR_ID_HIFN, + PCI_DEVICE_ID_HIFN_7956, + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00 } + } +}; + +#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG +static int hifn_rng_data_present(struct hwrng *rng, int wait) +{ + struct hifn_device *dev = (struct hifn_device *)rng->priv; + s64 nsec; + + nsec = ktime_to_ns(ktime_sub(ktime_get(), dev->rngtime)); + nsec -= dev->rng_wait_time; + if (nsec <= 0) + return 1; + if (!wait) + return 0; + ndelay(nsec); + return 1; +} + +static int hifn_rng_data_read(struct hwrng *rng, u32 *data) +{ + struct hifn_device *dev = (struct hifn_device *)rng->priv; + + *data = hifn_read_1(dev, HIFN_1_RNG_DATA); + dev->rngtime = ktime_get(); + return 4; +} + +static int hifn_register_rng(struct hifn_device *dev) +{ + /* + * We must wait at least 256 Pk_clk cycles between two reads of the rng. 
+ */ + dev->rng_wait_time = DIV_ROUND_UP_ULL(NSEC_PER_SEC, + dev->pk_clk_freq) * 256; + + dev->rng.name = dev->name; + dev->rng.data_present = hifn_rng_data_present, + dev->rng.data_read = hifn_rng_data_read, + dev->rng.priv = (unsigned long)dev; + + return hwrng_register(&dev->rng); +} + +static void hifn_unregister_rng(struct hifn_device *dev) +{ + hwrng_unregister(&dev->rng); +} +#else +#define hifn_register_rng(dev) 0 +#define hifn_unregister_rng(dev) +#endif + +static int hifn_init_pubrng(struct hifn_device *dev) +{ + int i; + + hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) | + HIFN_PUBRST_RESET); + + for (i=100; i > 0; --i) { + mdelay(1); + + if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0) + break; + } + + if (!i) + dprintk("Chip %s: Failed to initialise public key engine.\n", + dev->name); + else { + hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE); + dev->dmareg |= HIFN_DMAIER_PUBDONE; + hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); + + dprintk("Chip %s: Public key engine has been successfully " + "initialised.\n", dev->name); + } + + /* + * Enable RNG engine. + */ + + hifn_write_1(dev, HIFN_1_RNG_CONFIG, + hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA); + dprintk("Chip %s: RNG engine has been successfully initialised.\n", + dev->name); + +#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG + /* First value must be discarded */ + hifn_read_1(dev, HIFN_1_RNG_DATA); + dev->rngtime = ktime_get(); +#endif + return 0; +} + +static int hifn_enable_crypto(struct hifn_device *dev) +{ + u32 dmacfg, addr; + char *offtbl = NULL; + int i; + + for (i = 0; i < ARRAY_SIZE(pci2id); i++) { + if (pci2id[i].pci_vendor == dev->pdev->vendor && + pci2id[i].pci_prod == dev->pdev->device) { + offtbl = pci2id[i].card_id; + break; + } + } + + if (offtbl == NULL) { + dprintk("Chip %s: Unknown card!\n", dev->name); + return -ENODEV; + } + + dmacfg = hifn_read_1(dev, HIFN_1_DMA_CNFG); + + hifn_write_1(dev, HIFN_1_DMA_CNFG, + HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET | + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); + mdelay(1); + addr = hifn_read_1(dev, HIFN_1_UNLOCK_SECRET1); + mdelay(1); + hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0); + mdelay(1); + + for (i=0; i<12; ++i) { + addr = hifn_next_signature(addr, offtbl[i] + 0x101); + hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr); + + mdelay(1); + } + hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg); + + dprintk("Chip %s: %s.\n", dev->name, pci_name(dev->pdev)); + + return 0; +} + +static void hifn_init_dma(struct hifn_device *dev) +{ + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + u32 dptr = dev->desc_dma; + int i; + + for (i=0; i<HIFN_D_CMD_RSIZE; ++i) + dma->cmdr[i].p = __cpu_to_le32(dptr + + offsetof(struct hifn_dma, command_bufs[i][0])); + for (i=0; i<HIFN_D_RES_RSIZE; ++i) + dma->resr[i].p = __cpu_to_le32(dptr + + offsetof(struct hifn_dma, result_bufs[i][0])); + + /* + * Setup LAST descriptors. + */ + dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr + + offsetof(struct hifn_dma, cmdr[0])); + dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr + + offsetof(struct hifn_dma, srcr[0])); + dma->dstr[HIFN_D_DST_RSIZE].p = __cpu_to_le32(dptr + + offsetof(struct hifn_dma, dstr[0])); + dma->resr[HIFN_D_RES_RSIZE].p = __cpu_to_le32(dptr + + offsetof(struct hifn_dma, resr[0])); + + dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0; + dma->cmdi = dma->srci = dma->dsti = dma->resi = 0; + dma->cmdk = dma->srck = dma->dstk = dma->resk = 0; +} + +/* + * Initialize the PLL. 
We need to know the frequency of the reference clock + * to calculate the optimal multiplier. For PCI we assume 66MHz, since that + * allows us to operate without the risk of overclocking the chip. If it + * actually uses 33MHz, the chip will operate at half the speed, this can be + * overriden by specifying the frequency as module parameter (pci33). + * + * Unfortunately the PCI clock is not very suitable since the HIFN needs a + * stable clock and the PCI clock frequency may vary, so the default is the + * external clock. There is no way to find out its frequency, we default to + * 66MHz since according to Mike Ham of HiFn, almost every board in existence + * has an external crystal populated at 66MHz. + */ +static void hifn_init_pll(struct hifn_device *dev) +{ + unsigned int freq, m; + u32 pllcfg; + + pllcfg = HIFN_1_PLL | HIFN_PLL_RESERVED_1; + + if (strncmp(hifn_pll_ref, "ext", 3) == 0) + pllcfg |= HIFN_PLL_REF_CLK_PLL; + else + pllcfg |= HIFN_PLL_REF_CLK_HBI; + + if (hifn_pll_ref[3] != '\0') + freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10); + else { + freq = 66; + printk(KERN_INFO "hifn795x: assuming %uMHz clock speed, " + "override with hifn_pll_ref=%.3s<frequency>\n", + freq, hifn_pll_ref); + } + + m = HIFN_PLL_FCK_MAX / freq; + + pllcfg |= (m / 2 - 1) << HIFN_PLL_ND_SHIFT; + if (m <= 8) + pllcfg |= HIFN_PLL_IS_1_8; + else + pllcfg |= HIFN_PLL_IS_9_12; + + /* Select clock source and enable clock bypass */ + hifn_write_1(dev, HIFN_1_PLL, pllcfg | + HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI | HIFN_PLL_BP); + + /* Let the chip lock to the input clock */ + mdelay(10); + + /* Disable clock bypass */ + hifn_write_1(dev, HIFN_1_PLL, pllcfg | + HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI); + + /* Switch the engines to the PLL */ + hifn_write_1(dev, HIFN_1_PLL, pllcfg | + HIFN_PLL_PK_CLK_PLL | HIFN_PLL_PE_CLK_PLL); + + /* + * The Fpk_clk runs at half the total speed. Its frequency is needed to + * calculate the minimum time between two reads of the rng. Since 33MHz + * is actually 33.333... we overestimate the frequency here, resulting + * in slightly larger intervals. + */ + dev->pk_clk_freq = 1000000 * (freq + 1) * m / 2; +} + +static void hifn_init_registers(struct hifn_device *dev) +{ + u32 dptr = dev->desc_dma; + + /* Initialization magic... 
*/ + hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); + hifn_write_0(dev, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD); + hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER); + + /* write all 4 ring address registers */ + hifn_write_1(dev, HIFN_1_DMA_CRAR, dptr + + offsetof(struct hifn_dma, cmdr[0])); + hifn_write_1(dev, HIFN_1_DMA_SRAR, dptr + + offsetof(struct hifn_dma, srcr[0])); + hifn_write_1(dev, HIFN_1_DMA_DRAR, dptr + + offsetof(struct hifn_dma, dstr[0])); + hifn_write_1(dev, HIFN_1_DMA_RRAR, dptr + + offsetof(struct hifn_dma, resr[0])); + + mdelay(2); +#if 0 + hifn_write_1(dev, HIFN_1_DMA_CSR, + HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS | + HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS | + HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST | + HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER | + HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST | + HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER | + HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST | + HIFN_DMACSR_S_WAIT | + HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST | + HIFN_DMACSR_C_WAIT | + HIFN_DMACSR_ENGINE | + HIFN_DMACSR_PUBDONE); +#else + hifn_write_1(dev, HIFN_1_DMA_CSR, + HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | + HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA | + HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST | + HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER | + HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST | + HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER | + HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST | + HIFN_DMACSR_S_WAIT | + HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST | + HIFN_DMACSR_C_WAIT | + HIFN_DMACSR_ENGINE | + HIFN_DMACSR_PUBDONE); +#endif + hifn_read_1(dev, HIFN_1_DMA_CSR); + + dev->dmareg |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT | + HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER | + HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT | + HIFN_DMAIER_ENGINE; + dev->dmareg &= ~HIFN_DMAIER_C_WAIT; + + hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); + hifn_read_1(dev, HIFN_1_DMA_IER); +#if 0 + hifn_write_0(dev, HIFN_0_PUCNFG, HIFN_PUCNFG_ENCCNFG | + HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES | + HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 | + HIFN_PUCNFG_DRAM); +#else + hifn_write_0(dev, HIFN_0_PUCNFG, 0x10342); +#endif + hifn_init_pll(dev); + + hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); + hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST | + ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) | + ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL)); +} + +static int hifn_setup_base_command(struct hifn_device *dev, u8 *buf, + unsigned dlen, unsigned slen, u16 mask, u8 snum) +{ + struct hifn_base_command *base_cmd; + u8 *buf_pos = buf; + + base_cmd = (struct hifn_base_command *)buf_pos; + base_cmd->masks = __cpu_to_le16(mask); + base_cmd->total_source_count = + __cpu_to_le16(slen & HIFN_BASE_CMD_LENMASK_LO); + base_cmd->total_dest_count = + __cpu_to_le16(dlen & HIFN_BASE_CMD_LENMASK_LO); + + dlen >>= 16; + slen >>= 16; + base_cmd->session_num = __cpu_to_le16(snum | + ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) | + ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M)); + + return sizeof(struct hifn_base_command); +} + +static int hifn_setup_crypto_command(struct hifn_device *dev, + u8 *buf, unsigned dlen, unsigned slen, + u8 *key, int keylen, u8 *iv, int ivsize, u16 mode) +{ + struct 
hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + struct hifn_crypt_command *cry_cmd; + u8 *buf_pos = buf; + u16 cmd_len; + + cry_cmd = (struct hifn_crypt_command *)buf_pos; + + cry_cmd->source_count = __cpu_to_le16(dlen & 0xffff); + dlen >>= 16; + cry_cmd->masks = __cpu_to_le16(mode | + ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & + HIFN_CRYPT_CMD_SRCLEN_M)); + cry_cmd->header_skip = 0; + cry_cmd->reserved = 0; + + buf_pos += sizeof(struct hifn_crypt_command); + + dma->cmdu++; + if (dma->cmdu > 1) { + dev->dmareg |= HIFN_DMAIER_C_WAIT; + hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); + } + + if (keylen) { + memcpy(buf_pos, key, keylen); + buf_pos += keylen; + } + if (ivsize) { + memcpy(buf_pos, iv, ivsize); + buf_pos += ivsize; + } + + cmd_len = buf_pos - buf; + + return cmd_len; +} + +static int hifn_setup_cmd_desc(struct hifn_device *dev, + struct hifn_context *ctx, struct hifn_request_context *rctx, + void *priv, unsigned int nbytes) +{ + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + int cmd_len, sa_idx; + u8 *buf, *buf_pos; + u16 mask; + + sa_idx = dma->cmdi; + buf_pos = buf = dma->command_bufs[dma->cmdi]; + + mask = 0; + switch (rctx->op) { + case ACRYPTO_OP_DECRYPT: + mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE; + break; + case ACRYPTO_OP_ENCRYPT: + mask = HIFN_BASE_CMD_CRYPT; + break; + case ACRYPTO_OP_HMAC: + mask = HIFN_BASE_CMD_MAC; + break; + default: + goto err_out; + } + + buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes, + nbytes, mask, dev->snum); + + if (rctx->op == ACRYPTO_OP_ENCRYPT || rctx->op == ACRYPTO_OP_DECRYPT) { + u16 md = 0; + + if (ctx->keysize) + md |= HIFN_CRYPT_CMD_NEW_KEY; + if (rctx->iv && rctx->mode != ACRYPTO_MODE_ECB) + md |= HIFN_CRYPT_CMD_NEW_IV; + + switch (rctx->mode) { + case ACRYPTO_MODE_ECB: + md |= HIFN_CRYPT_CMD_MODE_ECB; + break; + case ACRYPTO_MODE_CBC: + md |= HIFN_CRYPT_CMD_MODE_CBC; + break; + case ACRYPTO_MODE_CFB: + md |= HIFN_CRYPT_CMD_MODE_CFB; + break; + case ACRYPTO_MODE_OFB: + md |= HIFN_CRYPT_CMD_MODE_OFB; + break; + default: + goto err_out; + } + + switch (rctx->type) { + case ACRYPTO_TYPE_AES_128: + if (ctx->keysize != 16) + goto err_out; + md |= HIFN_CRYPT_CMD_KSZ_128 | + HIFN_CRYPT_CMD_ALG_AES; + break; + case ACRYPTO_TYPE_AES_192: + if (ctx->keysize != 24) + goto err_out; + md |= HIFN_CRYPT_CMD_KSZ_192 | + HIFN_CRYPT_CMD_ALG_AES; + break; + case ACRYPTO_TYPE_AES_256: + if (ctx->keysize != 32) + goto err_out; + md |= HIFN_CRYPT_CMD_KSZ_256 | + HIFN_CRYPT_CMD_ALG_AES; + break; + case ACRYPTO_TYPE_3DES: + if (ctx->keysize != 24) + goto err_out; + md |= HIFN_CRYPT_CMD_ALG_3DES; + break; + case ACRYPTO_TYPE_DES: + if (ctx->keysize != 8) + goto err_out; + md |= HIFN_CRYPT_CMD_ALG_DES; + break; + default: + goto err_out; + } + + buf_pos += hifn_setup_crypto_command(dev, buf_pos, + nbytes, nbytes, ctx->key, ctx->keysize, + rctx->iv, rctx->ivsize, md); + } + + dev->sa[sa_idx] = priv; + dev->started++; + + cmd_len = buf_pos - buf; + dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID | + HIFN_D_LAST | HIFN_D_MASKDONEIRQ); + + if (++dma->cmdi == HIFN_D_CMD_RSIZE) { + dma->cmdr[dma->cmdi].l = __cpu_to_le32( + HIFN_D_VALID | HIFN_D_LAST | + HIFN_D_MASKDONEIRQ | HIFN_D_JUMP); + dma->cmdi = 0; + } else + dma->cmdr[dma->cmdi-1].l |= __cpu_to_le32(HIFN_D_VALID); + + if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) { + hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA); + dev->flags |= HIFN_FLAG_CMD_BUSY; + } + return 0; + +err_out: + return -EINVAL; +} + +static int hifn_setup_src_desc(struct hifn_device *dev, 
struct page *page, + unsigned int offset, unsigned int size, int last) +{ + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + int idx; + dma_addr_t addr; + + addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE); + + idx = dma->srci; + + dma->srcr[idx].p = __cpu_to_le32(addr); + dma->srcr[idx].l = __cpu_to_le32(size | HIFN_D_VALID | + HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0)); + + if (++idx == HIFN_D_SRC_RSIZE) { + dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID | + HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | + (last ? HIFN_D_LAST : 0)); + idx = 0; + } + + dma->srci = idx; + dma->srcu++; + + if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) { + hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA); + dev->flags |= HIFN_FLAG_SRC_BUSY; + } + + return size; +} + +static void hifn_setup_res_desc(struct hifn_device *dev) +{ + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + + dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT | + HIFN_D_VALID | HIFN_D_LAST); + /* + * dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID | + * HIFN_D_LAST); + */ + + if (++dma->resi == HIFN_D_RES_RSIZE) { + dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID | + HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST); + dma->resi = 0; + } + + dma->resu++; + + if (!(dev->flags & HIFN_FLAG_RES_BUSY)) { + hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA); + dev->flags |= HIFN_FLAG_RES_BUSY; + } +} + +static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page, + unsigned offset, unsigned size, int last) +{ + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + int idx; + dma_addr_t addr; + + addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE); + + idx = dma->dsti; + dma->dstr[idx].p = __cpu_to_le32(addr); + dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID | + HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0)); + + if (++idx == HIFN_D_DST_RSIZE) { + dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID | + HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | + (last ? 
HIFN_D_LAST : 0)); + idx = 0; + } + dma->dsti = idx; + dma->dstu++; + + if (!(dev->flags & HIFN_FLAG_DST_BUSY)) { + hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA); + dev->flags |= HIFN_FLAG_DST_BUSY; + } +} + +static int hifn_setup_dma(struct hifn_device *dev, + struct hifn_context *ctx, struct hifn_request_context *rctx, + struct scatterlist *src, struct scatterlist *dst, + unsigned int nbytes, void *priv) +{ + struct scatterlist *t; + struct page *spage, *dpage; + unsigned int soff, doff; + unsigned int n, len; + + n = nbytes; + while (n) { + spage = sg_page(src); + soff = src->offset; + len = min(src->length, n); + + hifn_setup_src_desc(dev, spage, soff, len, n - len == 0); + + src++; + n -= len; + } + + t = &rctx->walk.cache[0]; + n = nbytes; + while (n) { + if (t->length && rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { + BUG_ON(!sg_page(t)); + dpage = sg_page(t); + doff = 0; + len = t->length; + } else { + BUG_ON(!sg_page(dst)); + dpage = sg_page(dst); + doff = dst->offset; + len = dst->length; + } + len = min(len, n); + + hifn_setup_dst_desc(dev, dpage, doff, len, n - len == 0); + + dst++; + t++; + n -= len; + } + + hifn_setup_cmd_desc(dev, ctx, rctx, priv, nbytes); + hifn_setup_res_desc(dev); + return 0; +} + +static int hifn_cipher_walk_init(struct hifn_cipher_walk *w, + int num, gfp_t gfp_flags) +{ + int i; + + num = min(ASYNC_SCATTERLIST_CACHE, num); + sg_init_table(w->cache, num); + + w->num = 0; + for (i=0; i<num; ++i) { + struct page *page = alloc_page(gfp_flags); + struct scatterlist *s; + + if (!page) + break; + + s = &w->cache[i]; + + sg_set_page(s, page, PAGE_SIZE, 0); + w->num++; + } + + return i; +} + +static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w) +{ + int i; + + for (i=0; i<w->num; ++i) { + struct scatterlist *s = &w->cache[i]; + + __free_page(sg_page(s)); + + s->length = 0; + } + + w->num = 0; +} + +static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst, + unsigned int size, unsigned int *nbytesp) +{ + unsigned int copy, drest = *drestp, nbytes = *nbytesp; + int idx = 0; + + if (drest < size || size > nbytes) + return -EINVAL; + + while (size) { + copy = min3(drest, size, dst->length); + + size -= copy; + drest -= copy; + nbytes -= copy; + + dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n", + __func__, copy, size, drest, nbytes); + + dst++; + idx++; + } + + *nbytesp = nbytes; + *drestp = drest; + + return idx; +} + +static int hifn_cipher_walk(struct ablkcipher_request *req, + struct hifn_cipher_walk *w) +{ + struct scatterlist *dst, *t; + unsigned int nbytes = req->nbytes, offset, copy, diff; + int idx, tidx, err; + + tidx = idx = 0; + offset = 0; + while (nbytes) { + if (idx >= w->num && (w->flags & ASYNC_FLAGS_MISALIGNED)) + return -EINVAL; + + dst = &req->dst[idx]; + + dprintk("\n%s: dlen: %u, doff: %u, offset: %u, nbytes: %u.\n", + __func__, dst->length, dst->offset, offset, nbytes); + + if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) || + !IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) || + offset) { + unsigned slen = min(dst->length - offset, nbytes); + unsigned dlen = PAGE_SIZE; + + t = &w->cache[idx]; + + err = ablkcipher_add(&dlen, dst, slen, &nbytes); + if (err < 0) + return err; + + idx += err; + + copy = slen & ~(HIFN_D_DST_DALIGN - 1); + diff = slen & (HIFN_D_DST_DALIGN - 1); + + if (dlen < nbytes) { + /* + * Destination page does not have enough space + * to put there additional blocksized chunk, + * so we mark that page as containing only + * blocksize aligned chunks: + * t->length = (slen & 
~(HIFN_D_DST_DALIGN - 1)); + * and increase number of bytes to be processed + * in next chunk: + * nbytes += diff; + */ + nbytes += diff; + + /* + * Temporary of course... + * Kick author if you will catch this one. + */ + printk(KERN_ERR "%s: dlen: %u, nbytes: %u," + "slen: %u, offset: %u.\n", + __func__, dlen, nbytes, slen, offset); + printk(KERN_ERR "%s: please contact author to fix this " + "issue, generally you should not catch " + "this path under any condition but who " + "knows how did you use crypto code.\n" + "Thank you.\n", __func__); + BUG(); + } else { + copy += diff + nbytes; + + dst = &req->dst[idx]; + + err = ablkcipher_add(&dlen, dst, nbytes, &nbytes); + if (err < 0) + return err; + + idx += err; + } + + t->length = copy; + t->offset = offset; + } else { + nbytes -= min(dst->length, nbytes); + idx++; + } + + tidx++; + } + + return tidx; +} + +static int hifn_setup_session(struct ablkcipher_request *req) +{ + struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); + struct hifn_request_context *rctx = ablkcipher_request_ctx(req); + struct hifn_device *dev = ctx->dev; + unsigned long dlen, flags; + unsigned int nbytes = req->nbytes, idx = 0; + int err = -EINVAL, sg_num; + struct scatterlist *dst; + + if (rctx->iv && !rctx->ivsize && rctx->mode != ACRYPTO_MODE_ECB) + goto err_out_exit; + + rctx->walk.flags = 0; + + while (nbytes) { + dst = &req->dst[idx]; + dlen = min(dst->length, nbytes); + + if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) || + !IS_ALIGNED(dlen, HIFN_D_DST_DALIGN)) + rctx->walk.flags |= ASYNC_FLAGS_MISALIGNED; + + nbytes -= dlen; + idx++; + } + + if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { + err = hifn_cipher_walk_init(&rctx->walk, idx, GFP_ATOMIC); + if (err < 0) + return err; + } + + sg_num = hifn_cipher_walk(req, &rctx->walk); + if (sg_num < 0) { + err = sg_num; + goto err_out_exit; + } + + spin_lock_irqsave(&dev->lock, flags); + if (dev->started + sg_num > HIFN_QUEUE_LENGTH) { + err = -EAGAIN; + goto err_out; + } + + err = hifn_setup_dma(dev, ctx, rctx, req->src, req->dst, req->nbytes, req); + if (err) + goto err_out; + + dev->snum++; + + dev->active = HIFN_DEFAULT_ACTIVE_NUM; + spin_unlock_irqrestore(&dev->lock, flags); + + return 0; + +err_out: + spin_unlock_irqrestore(&dev->lock, flags); +err_out_exit: + if (err) { + printk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, " + "type: %u, err: %d.\n", + dev->name, rctx->iv, rctx->ivsize, + ctx->key, ctx->keysize, + rctx->mode, rctx->op, rctx->type, err); + } + + return err; +} + +static int hifn_test(struct hifn_device *dev, int encdec, u8 snum) +{ + int n, err; + u8 src[16]; + struct hifn_context ctx; + struct hifn_request_context rctx; + u8 fips_aes_ecb_from_zero[16] = { + 0x66, 0xE9, 0x4B, 0xD4, + 0xEF, 0x8A, 0x2C, 0x3B, + 0x88, 0x4C, 0xFA, 0x59, + 0xCA, 0x34, 0x2B, 0x2E}; + struct scatterlist sg; + + memset(src, 0, sizeof(src)); + memset(ctx.key, 0, sizeof(ctx.key)); + + ctx.dev = dev; + ctx.keysize = 16; + rctx.ivsize = 0; + rctx.iv = NULL; + rctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT; + rctx.mode = ACRYPTO_MODE_ECB; + rctx.type = ACRYPTO_TYPE_AES_128; + rctx.walk.cache[0].length = 0; + + sg_init_one(&sg, &src, sizeof(src)); + + err = hifn_setup_dma(dev, &ctx, &rctx, &sg, &sg, sizeof(src), NULL); + if (err) + goto err_out; + + dev->started = 0; + msleep(200); + + dprintk("%s: decoded: ", dev->name); + for (n=0; n<sizeof(src); ++n) + dprintk("%02x ", src[n]); + dprintk("\n"); + dprintk("%s: FIPS : ", dev->name); + for (n=0; n<sizeof(fips_aes_ecb_from_zero); ++n) + dprintk("%02x 
", fips_aes_ecb_from_zero[n]); + dprintk("\n"); + + if (!memcmp(src, fips_aes_ecb_from_zero, sizeof(fips_aes_ecb_from_zero))) { + printk(KERN_INFO "%s: AES 128 ECB test has been successfully " + "passed.\n", dev->name); + return 0; + } + +err_out: + printk(KERN_INFO "%s: AES 128 ECB test has been failed.\n", dev->name); + return -1; +} + +static int hifn_start_device(struct hifn_device *dev) +{ + int err; + + dev->started = dev->active = 0; + hifn_reset_dma(dev, 1); + + err = hifn_enable_crypto(dev); + if (err) + return err; + + hifn_reset_puc(dev); + + hifn_init_dma(dev); + + hifn_init_registers(dev); + + hifn_init_pubrng(dev); + + return 0; +} + +static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset, + struct scatterlist *dst, unsigned int size, unsigned int *nbytesp) +{ + unsigned int srest = *srestp, nbytes = *nbytesp, copy; + void *daddr; + int idx = 0; + + if (srest < size || size > nbytes) + return -EINVAL; + + while (size) { + copy = min3(srest, dst->length, size); + + daddr = kmap_atomic(sg_page(dst)); + memcpy(daddr + dst->offset + offset, saddr, copy); + kunmap_atomic(daddr); + + nbytes -= copy; + size -= copy; + srest -= copy; + saddr += copy; + offset = 0; + + dprintk("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n", + __func__, copy, size, srest, nbytes); + + dst++; + idx++; + } + + *nbytesp = nbytes; + *srestp = srest; + + return idx; +} + +static inline void hifn_complete_sa(struct hifn_device *dev, int i) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + dev->sa[i] = NULL; + dev->started--; + if (dev->started < 0) + printk("%s: started: %d.\n", __func__, dev->started); + spin_unlock_irqrestore(&dev->lock, flags); + BUG_ON(dev->started < 0); +} + +static void hifn_process_ready(struct ablkcipher_request *req, int error) +{ + struct hifn_request_context *rctx = ablkcipher_request_ctx(req); + + if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { + unsigned int nbytes = req->nbytes; + int idx = 0, err; + struct scatterlist *dst, *t; + void *saddr; + + while (nbytes) { + t = &rctx->walk.cache[idx]; + dst = &req->dst[idx]; + + dprintk("\n%s: sg_page(t): %p, t->length: %u, " + "sg_page(dst): %p, dst->length: %u, " + "nbytes: %u.\n", + __func__, sg_page(t), t->length, + sg_page(dst), dst->length, nbytes); + + if (!t->length) { + nbytes -= min(dst->length, nbytes); + idx++; + continue; + } + + saddr = kmap_atomic(sg_page(t)); + + err = ablkcipher_get(saddr, &t->length, t->offset, + dst, nbytes, &nbytes); + if (err < 0) { + kunmap_atomic(saddr); + break; + } + + idx += err; + kunmap_atomic(saddr); + } + + hifn_cipher_walk_exit(&rctx->walk); + } + + req->base.complete(&req->base, error); +} + +static void hifn_clear_rings(struct hifn_device *dev, int error) +{ + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + int i, u; + + dprintk("%s: ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, " + "k: %d.%d.%d.%d.\n", + dev->name, + dma->cmdi, dma->srci, dma->dsti, dma->resi, + dma->cmdu, dma->srcu, dma->dstu, dma->resu, + dma->cmdk, dma->srck, dma->dstk, dma->resk); + + i = dma->resk; u = dma->resu; + while (u != 0) { + if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID)) + break; + + if (dev->sa[i]) { + dev->success++; + dev->reset = 0; + hifn_process_ready(dev->sa[i], error); + hifn_complete_sa(dev, i); + } + + if (++i == HIFN_D_RES_RSIZE) + i = 0; + u--; + } + dma->resk = i; dma->resu = u; + + i = dma->srck; u = dma->srcu; + while (u != 0) { + if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID)) + break; + if (++i == HIFN_D_SRC_RSIZE) + i 
= 0; + u--; + } + dma->srck = i; dma->srcu = u; + + i = dma->cmdk; u = dma->cmdu; + while (u != 0) { + if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID)) + break; + if (++i == HIFN_D_CMD_RSIZE) + i = 0; + u--; + } + dma->cmdk = i; dma->cmdu = u; + + i = dma->dstk; u = dma->dstu; + while (u != 0) { + if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID)) + break; + if (++i == HIFN_D_DST_RSIZE) + i = 0; + u--; + } + dma->dstk = i; dma->dstu = u; + + dprintk("%s: ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, " + "k: %d.%d.%d.%d.\n", + dev->name, + dma->cmdi, dma->srci, dma->dsti, dma->resi, + dma->cmdu, dma->srcu, dma->dstu, dma->resu, + dma->cmdk, dma->srck, dma->dstk, dma->resk); +} + +static void hifn_work(struct work_struct *work) +{ + struct delayed_work *dw = to_delayed_work(work); + struct hifn_device *dev = container_of(dw, struct hifn_device, work); + unsigned long flags; + int reset = 0; + u32 r = 0; + + spin_lock_irqsave(&dev->lock, flags); + if (dev->active == 0) { + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + + if (dma->cmdu == 0 && (dev->flags & HIFN_FLAG_CMD_BUSY)) { + dev->flags &= ~HIFN_FLAG_CMD_BUSY; + r |= HIFN_DMACSR_C_CTRL_DIS; + } + if (dma->srcu == 0 && (dev->flags & HIFN_FLAG_SRC_BUSY)) { + dev->flags &= ~HIFN_FLAG_SRC_BUSY; + r |= HIFN_DMACSR_S_CTRL_DIS; + } + if (dma->dstu == 0 && (dev->flags & HIFN_FLAG_DST_BUSY)) { + dev->flags &= ~HIFN_FLAG_DST_BUSY; + r |= HIFN_DMACSR_D_CTRL_DIS; + } + if (dma->resu == 0 && (dev->flags & HIFN_FLAG_RES_BUSY)) { + dev->flags &= ~HIFN_FLAG_RES_BUSY; + r |= HIFN_DMACSR_R_CTRL_DIS; + } + if (r) + hifn_write_1(dev, HIFN_1_DMA_CSR, r); + } else + dev->active--; + + if ((dev->prev_success == dev->success) && dev->started) + reset = 1; + dev->prev_success = dev->success; + spin_unlock_irqrestore(&dev->lock, flags); + + if (reset) { + if (++dev->reset >= 5) { + int i; + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + + printk("%s: r: %08x, active: %d, started: %d, " + "success: %lu: qlen: %u/%u, reset: %d.\n", + dev->name, r, dev->active, dev->started, + dev->success, dev->queue.qlen, dev->queue.max_qlen, + reset); + + printk("%s: res: ", __func__); + for (i=0; i<HIFN_D_RES_RSIZE; ++i) { + printk("%x.%p ", dma->resr[i].l, dev->sa[i]); + if (dev->sa[i]) { + hifn_process_ready(dev->sa[i], -ENODEV); + hifn_complete_sa(dev, i); + } + } + printk("\n"); + + hifn_reset_dma(dev, 1); + hifn_stop_device(dev); + hifn_start_device(dev); + dev->reset = 0; + } + + tasklet_schedule(&dev->tasklet); + } + + schedule_delayed_work(&dev->work, HZ); +} + +static irqreturn_t hifn_interrupt(int irq, void *data) +{ + struct hifn_device *dev = (struct hifn_device *)data; + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + u32 dmacsr, restart; + + dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR); + + dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], " + "i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n", + dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi, + dma->cmdi, dma->srci, dma->dsti, dma->resi, + dma->cmdu, dma->srcu, dma->dstu, dma->resu); + + if ((dmacsr & dev->dmareg) == 0) + return IRQ_NONE; + + hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & dev->dmareg); + + if (dmacsr & HIFN_DMACSR_ENGINE) + hifn_write_0(dev, HIFN_0_PUISR, hifn_read_0(dev, HIFN_0_PUISR)); + if (dmacsr & HIFN_DMACSR_PUBDONE) + hifn_write_1(dev, HIFN_1_PUB_STATUS, + hifn_read_1(dev, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE); + + restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER); + if (restart) { + u32 puisr = hifn_read_0(dev, HIFN_0_PUISR); + + 
printk(KERN_WARNING "%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n", + dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER), + !!(dmacsr & HIFN_DMACSR_D_OVER), + puisr, !!(puisr & HIFN_PUISR_DSTOVER)); + if (!!(puisr & HIFN_PUISR_DSTOVER)) + hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); + hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & (HIFN_DMACSR_R_OVER | + HIFN_DMACSR_D_OVER)); + } + + restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | + HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); + if (restart) { + printk(KERN_WARNING "%s: abort: c: %d, s: %d, d: %d, r: %d.\n", + dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT), + !!(dmacsr & HIFN_DMACSR_S_ABORT), + !!(dmacsr & HIFN_DMACSR_D_ABORT), + !!(dmacsr & HIFN_DMACSR_R_ABORT)); + hifn_reset_dma(dev, 1); + hifn_init_dma(dev); + hifn_init_registers(dev); + } + + if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) { + dprintk("%s: wait on command.\n", dev->name); + dev->dmareg &= ~(HIFN_DMAIER_C_WAIT); + hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); + } + + tasklet_schedule(&dev->tasklet); + + return IRQ_HANDLED; +} + +static void hifn_flush(struct hifn_device *dev) +{ + unsigned long flags; + struct crypto_async_request *async_req; + struct ablkcipher_request *req; + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + int i; + + for (i=0; i<HIFN_D_RES_RSIZE; ++i) { + struct hifn_desc *d = &dma->resr[i]; + + if (dev->sa[i]) { + hifn_process_ready(dev->sa[i], + (d->l & __cpu_to_le32(HIFN_D_VALID))?-ENODEV:0); + hifn_complete_sa(dev, i); + } + } + + spin_lock_irqsave(&dev->lock, flags); + while ((async_req = crypto_dequeue_request(&dev->queue))) { + req = container_of(async_req, struct ablkcipher_request, base); + spin_unlock_irqrestore(&dev->lock, flags); + + hifn_process_ready(req, -ENODEV); + + spin_lock_irqsave(&dev->lock, flags); + } + spin_unlock_irqrestore(&dev->lock, flags); +} + +static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct hifn_context *ctx = crypto_tfm_ctx(tfm); + struct hifn_device *dev = ctx->dev; + + if (len > HIFN_MAX_CRYPT_KEY_LENGTH) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -1; + } + + if (len == HIFN_DES_KEY_LENGTH) { + u32 tmp[DES_EXPKEY_WORDS]; + int ret = des_ekey(tmp, key); + + if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; + return -EINVAL; + } + } + + dev->flags &= ~HIFN_FLAG_OLD_KEY; + + memcpy(ctx->key, key, len); + ctx->keysize = len; + + return 0; +} + +static int hifn_handle_req(struct ablkcipher_request *req) +{ + struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); + struct hifn_device *dev = ctx->dev; + int err = -EAGAIN; + + if (dev->started + DIV_ROUND_UP(req->nbytes, PAGE_SIZE) <= HIFN_QUEUE_LENGTH) + err = hifn_setup_session(req); + + if (err == -EAGAIN) { + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + err = ablkcipher_enqueue_request(&dev->queue, req); + spin_unlock_irqrestore(&dev->lock, flags); + } + + return err; +} + +static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op, + u8 type, u8 mode) +{ + struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); + struct hifn_request_context *rctx = ablkcipher_request_ctx(req); + unsigned ivsize; + + ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)); + + if (req->info && mode != ACRYPTO_MODE_ECB) { + if (type == ACRYPTO_TYPE_AES_128) + ivsize = HIFN_AES_IV_LENGTH; + else if 
(type == ACRYPTO_TYPE_DES) + ivsize = HIFN_DES_KEY_LENGTH; + else if (type == ACRYPTO_TYPE_3DES) + ivsize = HIFN_3DES_KEY_LENGTH; + } + + if (ctx->keysize != 16 && type == ACRYPTO_TYPE_AES_128) { + if (ctx->keysize == 24) + type = ACRYPTO_TYPE_AES_192; + else if (ctx->keysize == 32) + type = ACRYPTO_TYPE_AES_256; + } + + rctx->op = op; + rctx->mode = mode; + rctx->type = type; + rctx->iv = req->info; + rctx->ivsize = ivsize; + + /* + * HEAVY TODO: needs to kick Herbert XU to write documentation. + * HEAVY TODO: needs to kick Herbert XU to write documentation. + * HEAVY TODO: needs to kick Herbert XU to write documentation. + */ + + return hifn_handle_req(req); +} + +static int hifn_process_queue(struct hifn_device *dev) +{ + struct crypto_async_request *async_req, *backlog; + struct ablkcipher_request *req; + unsigned long flags; + int err = 0; + + while (dev->started < HIFN_QUEUE_LENGTH) { + spin_lock_irqsave(&dev->lock, flags); + backlog = crypto_get_backlog(&dev->queue); + async_req = crypto_dequeue_request(&dev->queue); + spin_unlock_irqrestore(&dev->lock, flags); + + if (!async_req) + break; + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + + req = container_of(async_req, struct ablkcipher_request, base); + + err = hifn_handle_req(req); + if (err) + break; + } + + return err; +} + +static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op, + u8 type, u8 mode) +{ + int err; + struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); + struct hifn_device *dev = ctx->dev; + + err = hifn_setup_crypto_req(req, op, type, mode); + if (err) + return err; + + if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen) + hifn_process_queue(dev); + + return -EINPROGRESS; +} + +/* + * AES encryption functions. + */ +static inline int hifn_encrypt_aes_ecb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB); +} +static inline int hifn_encrypt_aes_cbc(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC); +} +static inline int hifn_encrypt_aes_cfb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB); +} +static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB); +} + +/* + * AES decryption functions. + */ +static inline int hifn_decrypt_aes_ecb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB); +} +static inline int hifn_decrypt_aes_cbc(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC); +} +static inline int hifn_decrypt_aes_cfb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB); +} +static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB); +} + +/* + * DES encryption functions.
+ */ +static inline int hifn_encrypt_des_ecb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB); +} +static inline int hifn_encrypt_des_cbc(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC); +} +static inline int hifn_encrypt_des_cfb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB); +} +static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB); +} + +/* + * DES decryption functions. + */ +static inline int hifn_decrypt_des_ecb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB); +} +static inline int hifn_decrypt_des_cbc(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC); +} +static inline int hifn_decrypt_des_cfb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB); +} +static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB); +} + +/* + * 3DES encryption functions. + */ +static inline int hifn_encrypt_3des_ecb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB); +} +static inline int hifn_encrypt_3des_cbc(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC); +} +static inline int hifn_encrypt_3des_cfb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB); +} +static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB); +} + +/* + * 3DES decryption functions. + */ +static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB); +} +static inline int hifn_decrypt_3des_cbc(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC); +} +static inline int hifn_decrypt_3des_cfb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB); +} +static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB); +} + +struct hifn_alg_template +{ + char name[CRYPTO_MAX_ALG_NAME]; + char drv_name[CRYPTO_MAX_ALG_NAME]; + unsigned int bsize; + struct ablkcipher_alg ablkcipher; +}; + +static struct hifn_alg_template hifn_alg_templates[] = { + /* + * 3DES ECB, CBC, CFB and OFB modes.
+ */ + { + .name = "cfb(des3_ede)", .drv_name = "cfb-3des", .bsize = 8, + .ablkcipher = { + .min_keysize = HIFN_3DES_KEY_LENGTH, + .max_keysize = HIFN_3DES_KEY_LENGTH, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_3des_cfb, + .decrypt = hifn_decrypt_3des_cfb, + }, + }, + { + .name = "ofb(des3_ede)", .drv_name = "ofb-3des", .bsize = 8, + .ablkcipher = { + .min_keysize = HIFN_3DES_KEY_LENGTH, + .max_keysize = HIFN_3DES_KEY_LENGTH, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_3des_ofb, + .decrypt = hifn_decrypt_3des_ofb, + }, + }, + { + .name = "cbc(des3_ede)", .drv_name = "cbc-3des", .bsize = 8, + .ablkcipher = { + .ivsize = HIFN_IV_LENGTH, + .min_keysize = HIFN_3DES_KEY_LENGTH, + .max_keysize = HIFN_3DES_KEY_LENGTH, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_3des_cbc, + .decrypt = hifn_decrypt_3des_cbc, + }, + }, + { + .name = "ecb(des3_ede)", .drv_name = "ecb-3des", .bsize = 8, + .ablkcipher = { + .min_keysize = HIFN_3DES_KEY_LENGTH, + .max_keysize = HIFN_3DES_KEY_LENGTH, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_3des_ecb, + .decrypt = hifn_decrypt_3des_ecb, + }, + }, + + /* + * DES ECB, CBC, CFB and OFB modes. + */ + { + .name = "cfb(des)", .drv_name = "cfb-des", .bsize = 8, + .ablkcipher = { + .min_keysize = HIFN_DES_KEY_LENGTH, + .max_keysize = HIFN_DES_KEY_LENGTH, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_des_cfb, + .decrypt = hifn_decrypt_des_cfb, + }, + }, + { + .name = "ofb(des)", .drv_name = "ofb-des", .bsize = 8, + .ablkcipher = { + .min_keysize = HIFN_DES_KEY_LENGTH, + .max_keysize = HIFN_DES_KEY_LENGTH, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_des_ofb, + .decrypt = hifn_decrypt_des_ofb, + }, + }, + { + .name = "cbc(des)", .drv_name = "cbc-des", .bsize = 8, + .ablkcipher = { + .ivsize = HIFN_IV_LENGTH, + .min_keysize = HIFN_DES_KEY_LENGTH, + .max_keysize = HIFN_DES_KEY_LENGTH, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_des_cbc, + .decrypt = hifn_decrypt_des_cbc, + }, + }, + { + .name = "ecb(des)", .drv_name = "ecb-des", .bsize = 8, + .ablkcipher = { + .min_keysize = HIFN_DES_KEY_LENGTH, + .max_keysize = HIFN_DES_KEY_LENGTH, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_des_ecb, + .decrypt = hifn_decrypt_des_ecb, + }, + }, + + /* + * AES ECB, CBC, CFB and OFB modes. 
+ */ + { + .name = "ecb(aes)", .drv_name = "ecb-aes", .bsize = 16, + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_aes_ecb, + .decrypt = hifn_decrypt_aes_ecb, + }, + }, + { + .name = "cbc(aes)", .drv_name = "cbc-aes", .bsize = 16, + .ablkcipher = { + .ivsize = HIFN_AES_IV_LENGTH, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_aes_cbc, + .decrypt = hifn_decrypt_aes_cbc, + }, + }, + { + .name = "cfb(aes)", .drv_name = "cfb-aes", .bsize = 16, + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_aes_cfb, + .decrypt = hifn_decrypt_aes_cfb, + }, + }, + { + .name = "ofb(aes)", .drv_name = "ofb-aes", .bsize = 16, + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_aes_ofb, + .decrypt = hifn_decrypt_aes_ofb, + }, + }, +}; + +static int hifn_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_alg *alg = tfm->__crt_alg; + struct hifn_crypto_alg *ha = crypto_alg_to_hifn(alg); + struct hifn_context *ctx = crypto_tfm_ctx(tfm); + + ctx->dev = ha->dev; + tfm->crt_ablkcipher.reqsize = sizeof(struct hifn_request_context); + return 0; +} + +static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t) +{ + struct hifn_crypto_alg *alg; + int err; + + alg = kzalloc(sizeof(struct hifn_crypto_alg), GFP_KERNEL); + if (!alg) + return -ENOMEM; + + snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name); + snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s", + t->drv_name, dev->name); + + alg->alg.cra_priority = 300; + alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC; + alg->alg.cra_blocksize = t->bsize; + alg->alg.cra_ctxsize = sizeof(struct hifn_context); + alg->alg.cra_alignmask = 0; + alg->alg.cra_type = &crypto_ablkcipher_type; + alg->alg.cra_module = THIS_MODULE; + alg->alg.cra_u.ablkcipher = t->ablkcipher; + alg->alg.cra_init = hifn_cra_init; + + alg->dev = dev; + + list_add_tail(&alg->entry, &dev->alg_list); + + err = crypto_register_alg(&alg->alg); + if (err) { + list_del(&alg->entry); + kfree(alg); + } + + return err; +} + +static void hifn_unregister_alg(struct hifn_device *dev) +{ + struct hifn_crypto_alg *a, *n; + + list_for_each_entry_safe(a, n, &dev->alg_list, entry) { + list_del(&a->entry); + crypto_unregister_alg(&a->alg); + kfree(a); + } +} + +static int hifn_register_alg(struct hifn_device *dev) +{ + int i, err; + + for (i=0; i<ARRAY_SIZE(hifn_alg_templates); ++i) { + err = hifn_alg_alloc(dev, &hifn_alg_templates[i]); + if (err) + goto err_out_exit; + } + + return 0; + +err_out_exit: + hifn_unregister_alg(dev); + return err; +} + +static void hifn_tasklet_callback(unsigned long data) +{ + struct hifn_device *dev = (struct hifn_device *)data; + + /* + * This is ok to call this without lock being held, + * althogh it modifies some parameters used in parallel, + * (like dev->success), but they are used in process + * context or update is atomic (like setting dev->sa[i] to NULL). 
+ */ + hifn_clear_rings(dev, 0); + + if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen) + hifn_process_queue(dev); +} + +static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int err, i; + struct hifn_device *dev; + char name[8]; + + err = pci_enable_device(pdev); + if (err) + return err; + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) + goto err_out_disable_pci_device; + + snprintf(name, sizeof(name), "hifn%d", + atomic_inc_return(&hifn_dev_number)-1); + + err = pci_request_regions(pdev, name); + if (err) + goto err_out_disable_pci_device; + + if (pci_resource_len(pdev, 0) < HIFN_BAR0_SIZE || + pci_resource_len(pdev, 1) < HIFN_BAR1_SIZE || + pci_resource_len(pdev, 2) < HIFN_BAR2_SIZE) { + dprintk("%s: Broken hardware - I/O regions are too small.\n", + pci_name(pdev)); + err = -ENODEV; + goto err_out_free_regions; + } + + dev = kzalloc(sizeof(struct hifn_device) + sizeof(struct crypto_alg), + GFP_KERNEL); + if (!dev) { + err = -ENOMEM; + goto err_out_free_regions; + } + + INIT_LIST_HEAD(&dev->alg_list); + + snprintf(dev->name, sizeof(dev->name), "%s", name); + spin_lock_init(&dev->lock); + + for (i=0; i<3; ++i) { + unsigned long addr, size; + + addr = pci_resource_start(pdev, i); + size = pci_resource_len(pdev, i); + + dev->bar[i] = ioremap_nocache(addr, size); + if (!dev->bar[i]) { + err = -ENOMEM; + goto err_out_unmap_bars; + } + } + + dev->desc_virt = pci_zalloc_consistent(pdev, sizeof(struct hifn_dma), + &dev->desc_dma); + if (!dev->desc_virt) { + dprintk("Failed to allocate descriptor rings.\n"); + err = -ENOMEM; + goto err_out_unmap_bars; + } + + dev->pdev = pdev; + dev->irq = pdev->irq; + + for (i=0; i<HIFN_D_RES_RSIZE; ++i) + dev->sa[i] = NULL; + + pci_set_drvdata(pdev, dev); + + tasklet_init(&dev->tasklet, hifn_tasklet_callback, (unsigned long)dev); + + crypto_init_queue(&dev->queue, 1); + + err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED, dev->name, dev); + if (err) { + dprintk("Failed to request IRQ%d: err: %d.\n", dev->irq, err); + dev->irq = 0; + goto err_out_free_desc; + } + + err = hifn_start_device(dev); + if (err) + goto err_out_free_irq; + + err = hifn_test(dev, 1, 0); + if (err) + goto err_out_stop_device; + + err = hifn_register_rng(dev); + if (err) + goto err_out_stop_device; + + err = hifn_register_alg(dev); + if (err) + goto err_out_unregister_rng; + + INIT_DELAYED_WORK(&dev->work, hifn_work); + schedule_delayed_work(&dev->work, HZ); + + dprintk("HIFN crypto accelerator card at %s has been " + "successfully registered as %s.\n", + pci_name(pdev), dev->name); + + return 0; + +err_out_unregister_rng: + hifn_unregister_rng(dev); +err_out_stop_device: + hifn_reset_dma(dev, 1); + hifn_stop_device(dev); +err_out_free_irq: + free_irq(dev->irq, dev); + tasklet_kill(&dev->tasklet); +err_out_free_desc: + pci_free_consistent(pdev, sizeof(struct hifn_dma), + dev->desc_virt, dev->desc_dma); + +err_out_unmap_bars: + for (i=0; i<3; ++i) + if (dev->bar[i]) + iounmap(dev->bar[i]); + +err_out_free_regions: + pci_release_regions(pdev); + +err_out_disable_pci_device: + pci_disable_device(pdev); + + return err; +} + +static void hifn_remove(struct pci_dev *pdev) +{ + int i; + struct hifn_device *dev; + + dev = pci_get_drvdata(pdev); + + if (dev) { + cancel_delayed_work_sync(&dev->work); + + hifn_unregister_rng(dev); + hifn_unregister_alg(dev); + hifn_reset_dma(dev, 1); + hifn_stop_device(dev); + + free_irq(dev->irq, dev); + tasklet_kill(&dev->tasklet); + + hifn_flush(dev); + + pci_free_consistent(pdev, 
sizeof(struct hifn_dma), + dev->desc_virt, dev->desc_dma); + for (i=0; i<3; ++i) + if (dev->bar[i]) + iounmap(dev->bar[i]); + + kfree(dev); + } + + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_device_id hifn_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7955) }, + { PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7956) }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, hifn_pci_tbl); + +static struct pci_driver hifn_pci_driver = { + .name = "hifn795x", + .id_table = hifn_pci_tbl, + .probe = hifn_probe, + .remove = hifn_remove, +}; + +static int __init hifn_init(void) +{ + unsigned int freq; + int err; + + /* HIFN supports only 32-bit addresses */ + BUILD_BUG_ON(sizeof(dma_addr_t) != 4); + + if (strncmp(hifn_pll_ref, "ext", 3) && + strncmp(hifn_pll_ref, "pci", 3)) { + printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, " + "must be pci or ext"); + return -EINVAL; + } + + /* + * For the 7955/7956 the reference clock frequency must be in the + * range of 20MHz-100MHz. For the 7954 the upper bound is 66.67MHz, + * but this chip is currently not supported. + */ + if (hifn_pll_ref[3] != '\0') { + freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10); + if (freq < 20 || freq > 100) { + printk(KERN_ERR "hifn795x: invalid hifn_pll_ref " + "frequency, must be in the range " + "of 20-100"); + return -EINVAL; + } + } + + err = pci_register_driver(&hifn_pci_driver); + if (err < 0) { + dprintk("Failed to register PCI driver for %s device.\n", + hifn_pci_driver.name); + return -ENODEV; + } + + printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip " + "has been successfully registered.\n"); + + return 0; +} + +static void __exit hifn_fini(void) +{ + pci_unregister_driver(&hifn_pci_driver); + + printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip " + "has been successfully unregistered.\n"); +} + +module_init(hifn_init); +module_exit(hifn_fini); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); +MODULE_DESCRIPTION("Driver for HIFN 795x crypto accelerator chip."); diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c new file mode 100644 index 000000000..ad47d0d61 --- /dev/null +++ b/drivers/crypto/img-hash.c @@ -0,0 +1,1029 @@ +/* + * Copyright (c) 2014 Imagination Technologies + * Authors: Will Thomas, James Hartley + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Interface structure taken from omap-sham driver + */ + +#include <linux/clk.h> +#include <linux/dmaengine.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> + +#include <crypto/internal/hash.h> +#include <crypto/md5.h> +#include <crypto/sha.h> + +#define CR_RESET 0 +#define CR_RESET_SET 1 +#define CR_RESET_UNSET 0 + +#define CR_MESSAGE_LENGTH_H 0x4 +#define CR_MESSAGE_LENGTH_L 0x8 + +#define CR_CONTROL 0xc +#define CR_CONTROL_BYTE_ORDER_3210 0 +#define CR_CONTROL_BYTE_ORDER_0123 1 +#define CR_CONTROL_BYTE_ORDER_2310 2 +#define CR_CONTROL_BYTE_ORDER_1032 3 +#define CR_CONTROL_BYTE_ORDER_SHIFT 8 +#define CR_CONTROL_ALGO_MD5 0 +#define CR_CONTROL_ALGO_SHA1 1 +#define CR_CONTROL_ALGO_SHA224 2 +#define CR_CONTROL_ALGO_SHA256 3 + +#define CR_INTSTAT 0x10 +#define CR_INTENAB 0x14 +#define CR_INTCLEAR 0x18 +#define CR_INT_RESULTS_AVAILABLE BIT(0) +#define CR_INT_NEW_RESULTS_SET BIT(1) +#define CR_INT_RESULT_READ_ERR BIT(2) +#define CR_INT_MESSAGE_WRITE_ERROR BIT(3) +#define CR_INT_STATUS BIT(8) + +#define CR_RESULT_QUEUE 0x1c +#define CR_RSD0 0x40 +#define CR_CORE_REV 0x50 +#define CR_CORE_DES1 0x60 +#define CR_CORE_DES2 0x70 + +#define DRIVER_FLAGS_BUSY BIT(0) +#define DRIVER_FLAGS_FINAL BIT(1) +#define DRIVER_FLAGS_DMA_ACTIVE BIT(2) +#define DRIVER_FLAGS_OUTPUT_READY BIT(3) +#define DRIVER_FLAGS_INIT BIT(4) +#define DRIVER_FLAGS_CPU BIT(5) +#define DRIVER_FLAGS_DMA_READY BIT(6) +#define DRIVER_FLAGS_ERROR BIT(7) +#define DRIVER_FLAGS_SG BIT(8) +#define DRIVER_FLAGS_SHA1 BIT(18) +#define DRIVER_FLAGS_SHA224 BIT(19) +#define DRIVER_FLAGS_SHA256 BIT(20) +#define DRIVER_FLAGS_MD5 BIT(21) + +#define IMG_HASH_QUEUE_LENGTH 20 +#define IMG_HASH_DMA_THRESHOLD 64 + +#ifdef __LITTLE_ENDIAN +#define IMG_HASH_BYTE_ORDER CR_CONTROL_BYTE_ORDER_3210 +#else +#define IMG_HASH_BYTE_ORDER CR_CONTROL_BYTE_ORDER_0123 +#endif + +struct img_hash_dev; + +struct img_hash_request_ctx { + struct img_hash_dev *hdev; + u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32)); + unsigned long flags; + size_t digsize; + + dma_addr_t dma_addr; + size_t dma_ct; + + /* sg root */ + struct scatterlist *sgfirst; + /* walk state */ + struct scatterlist *sg; + size_t nents; + size_t offset; + unsigned int total; + size_t sent; + + unsigned long op; + + size_t bufcnt; + u8 buffer[0] __aligned(sizeof(u32)); + struct ahash_request fallback_req; +}; + +struct img_hash_ctx { + struct img_hash_dev *hdev; + unsigned long flags; + struct crypto_ahash *fallback; +}; + +struct img_hash_dev { + struct list_head list; + struct device *dev; + struct clk *hash_clk; + struct clk *sys_clk; + void __iomem *io_base; + + phys_addr_t bus_addr; + void __iomem *cpu_addr; + + spinlock_t lock; + int err; + struct tasklet_struct done_task; + struct tasklet_struct dma_task; + + unsigned long flags; + struct crypto_queue queue; + struct ahash_request *req; + + struct dma_chan *dma_lch; +}; + +struct img_hash_drv { + struct list_head dev_list; + spinlock_t lock; +}; + +static struct img_hash_drv img_hash = { + .dev_list = LIST_HEAD_INIT(img_hash.dev_list), + .lock = __SPIN_LOCK_UNLOCKED(img_hash.lock), +}; + +static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset) +{ + return readl_relaxed(hdev->io_base + offset); +} + +static inline void img_hash_write(struct img_hash_dev *hdev, + u32 offset, u32 value) +{ + writel_relaxed(value, hdev->io_base + offset); +} + +static inline u32 
img_hash_read_result_queue(struct img_hash_dev *hdev) +{ + return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE)); +} + +static void img_hash_start(struct img_hash_dev *hdev, bool dma) +{ + struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); + u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT; + + if (ctx->flags & DRIVER_FLAGS_MD5) + cr |= CR_CONTROL_ALGO_MD5; + else if (ctx->flags & DRIVER_FLAGS_SHA1) + cr |= CR_CONTROL_ALGO_SHA1; + else if (ctx->flags & DRIVER_FLAGS_SHA224) + cr |= CR_CONTROL_ALGO_SHA224; + else if (ctx->flags & DRIVER_FLAGS_SHA256) + cr |= CR_CONTROL_ALGO_SHA256; + dev_dbg(hdev->dev, "Starting hash process\n"); + img_hash_write(hdev, CR_CONTROL, cr); + + /* + * The hardware block requires two cycles between writing the control + * register and writing the first word of data in non DMA mode, to + * ensure the first data write is not grouped in burst with the control + * register write a read is issued to 'flush' the bus. + */ + if (!dma) + img_hash_read(hdev, CR_CONTROL); +} + +static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf, + size_t length, int final) +{ + u32 count, len32; + const u32 *buffer = (const u32 *)buf; + + dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length); + + if (final) + hdev->flags |= DRIVER_FLAGS_FINAL; + + len32 = DIV_ROUND_UP(length, sizeof(u32)); + + for (count = 0; count < len32; count++) + writel_relaxed(buffer[count], hdev->cpu_addr); + + return -EINPROGRESS; +} + +static void img_hash_dma_callback(void *data) +{ + struct img_hash_dev *hdev = (struct img_hash_dev *)data; + struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); + + if (ctx->bufcnt) { + img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0); + ctx->bufcnt = 0; + } + if (ctx->sg) + tasklet_schedule(&hdev->dma_task); +} + +static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg) +{ + struct dma_async_tx_descriptor *desc; + struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); + + ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_MEM_TO_DEV); + if (ctx->dma_ct == 0) { + dev_err(hdev->dev, "Invalid DMA sg\n"); + hdev->err = -EINVAL; + return -EINVAL; + } + + desc = dmaengine_prep_slave_sg(hdev->dma_lch, + sg, + ctx->dma_ct, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + dev_err(hdev->dev, "Null DMA descriptor\n"); + hdev->err = -EINVAL; + dma_unmap_sg(hdev->dev, sg, 1, DMA_MEM_TO_DEV); + return -EINVAL; + } + desc->callback = img_hash_dma_callback; + desc->callback_param = hdev; + dmaengine_submit(desc); + dma_async_issue_pending(hdev->dma_lch); + + return 0; +} + +static int img_hash_write_via_cpu(struct img_hash_dev *hdev) +{ + struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); + + ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg), + ctx->buffer, hdev->req->nbytes); + + ctx->total = hdev->req->nbytes; + ctx->bufcnt = 0; + + hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL); + + img_hash_start(hdev, false); + + return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1); +} + +static int img_hash_finish(struct ahash_request *req) +{ + struct img_hash_request_ctx *ctx = ahash_request_ctx(req); + + if (!req->result) + return -EINVAL; + + memcpy(req->result, ctx->digest, ctx->digsize); + + return 0; +} + +static void img_hash_copy_hash(struct ahash_request *req) +{ + struct img_hash_request_ctx *ctx = ahash_request_ctx(req); + u32 *hash = (u32 *)ctx->digest; + int i; + + for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--) + hash[i] 
= img_hash_read_result_queue(ctx->hdev); +} + +static void img_hash_finish_req(struct ahash_request *req, int err) +{ + struct img_hash_request_ctx *ctx = ahash_request_ctx(req); + struct img_hash_dev *hdev = ctx->hdev; + + if (!err) { + img_hash_copy_hash(req); + if (DRIVER_FLAGS_FINAL & hdev->flags) + err = img_hash_finish(req); + } else { + dev_warn(hdev->dev, "Hash failed with error %d\n", err); + ctx->flags |= DRIVER_FLAGS_ERROR; + } + + hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY | + DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL); + + if (req->base.complete) + req->base.complete(&req->base, err); +} + +static int img_hash_write_via_dma(struct img_hash_dev *hdev) +{ + struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); + + img_hash_start(hdev, true); + + dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total); + + if (!ctx->total) + hdev->flags |= DRIVER_FLAGS_FINAL; + + hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL; + + tasklet_schedule(&hdev->dma_task); + + return -EINPROGRESS; +} + +static int img_hash_dma_init(struct img_hash_dev *hdev) +{ + struct dma_slave_config dma_conf; + int err = -EINVAL; + + hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx"); + if (!hdev->dma_lch) { + dev_err(hdev->dev, "Couldn't aquire a slave DMA channel.\n"); + return -EBUSY; + } + dma_conf.direction = DMA_MEM_TO_DEV; + dma_conf.dst_addr = hdev->bus_addr; + dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dma_conf.dst_maxburst = 16; + dma_conf.device_fc = false; + + err = dmaengine_slave_config(hdev->dma_lch, &dma_conf); + if (err) { + dev_err(hdev->dev, "Couldn't configure DMA slave.\n"); + dma_release_channel(hdev->dma_lch); + return err; + } + + return 0; +} + +static void img_hash_dma_task(unsigned long d) +{ + struct img_hash_dev *hdev = (struct img_hash_dev *)d; + struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); + u8 *addr; + size_t nbytes, bleft, wsend, len, tbc; + struct scatterlist tsg; + + if (!ctx->sg) + return; + + addr = sg_virt(ctx->sg); + nbytes = ctx->sg->length - ctx->offset; + + /* + * The hash accelerator does not support a data valid mask. This means + * that if each dma (i.e. per page) is not a multiple of 4 bytes, the + * padding bytes in the last word written by that dma would erroneously + * be included in the hash. To avoid this we round down the transfer, + * and add the excess to the start of the next dma. It does not matter + * that the final dma may not be a multiple of 4 bytes as the hashing + * block is programmed to accept the correct number of bytes. 
+ */ + + bleft = nbytes % 4; + wsend = (nbytes / 4); + + if (wsend) { + sg_init_one(&tsg, addr + ctx->offset, wsend * 4); + if (img_hash_xmit_dma(hdev, &tsg)) { + dev_err(hdev->dev, "DMA failed, falling back to CPU"); + ctx->flags |= DRIVER_FLAGS_CPU; + hdev->err = 0; + img_hash_xmit_cpu(hdev, addr + ctx->offset, + wsend * 4, 0); + ctx->sent += wsend * 4; + wsend = 0; + } else { + ctx->sent += wsend * 4; + } + } + + if (bleft) { + ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents, + ctx->buffer, bleft, ctx->sent); + tbc = 0; + ctx->sg = sg_next(ctx->sg); + while (ctx->sg && (ctx->bufcnt < 4)) { + len = ctx->sg->length; + if (likely(len > (4 - ctx->bufcnt))) + len = 4 - ctx->bufcnt; + tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents, + ctx->buffer + ctx->bufcnt, len, + ctx->sent + ctx->bufcnt); + ctx->bufcnt += tbc; + if (tbc >= ctx->sg->length) { + ctx->sg = sg_next(ctx->sg); + tbc = 0; + } + } + + ctx->sent += ctx->bufcnt; + ctx->offset = tbc; + + if (!wsend) + img_hash_dma_callback(hdev); + } else { + ctx->offset = 0; + ctx->sg = sg_next(ctx->sg); + } +} + +static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev) +{ + struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); + + if (ctx->flags & DRIVER_FLAGS_SG) + dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE); + + return 0; +} + +static int img_hash_process_data(struct img_hash_dev *hdev) +{ + struct ahash_request *req = hdev->req; + struct img_hash_request_ctx *ctx = ahash_request_ctx(req); + int err = 0; + + ctx->bufcnt = 0; + + if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) { + dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n", + req->nbytes); + err = img_hash_write_via_dma(hdev); + } else { + dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n", + req->nbytes); + err = img_hash_write_via_cpu(hdev); + } + return err; +} + +static int img_hash_hw_init(struct img_hash_dev *hdev) +{ + unsigned long long nbits; + u32 u, l; + + img_hash_write(hdev, CR_RESET, CR_RESET_SET); + img_hash_write(hdev, CR_RESET, CR_RESET_UNSET); + img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET); + + nbits = (u64)hdev->req->nbytes << 3; + u = nbits >> 32; + l = nbits; + img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u); + img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l); + + if (!(DRIVER_FLAGS_INIT & hdev->flags)) { + hdev->flags |= DRIVER_FLAGS_INIT; + hdev->err = 0; + } + dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits); + return 0; +} + +static int img_hash_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct img_hash_request_ctx *rctx = ahash_request_ctx(req); + struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); + rctx->fallback_req.base.flags = req->base.flags + & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_init(&rctx->fallback_req); +} + +static int img_hash_handle_queue(struct img_hash_dev *hdev, + struct ahash_request *req) +{ + struct crypto_async_request *async_req, *backlog; + struct img_hash_request_ctx *ctx; + unsigned long flags; + int err = 0, res = 0; + + spin_lock_irqsave(&hdev->lock, flags); + + if (req) + res = ahash_enqueue_request(&hdev->queue, req); + + if (DRIVER_FLAGS_BUSY & hdev->flags) { + spin_unlock_irqrestore(&hdev->lock, flags); + return res; + } + + backlog = crypto_get_backlog(&hdev->queue); + async_req = crypto_dequeue_request(&hdev->queue); + if (async_req) + hdev->flags |= DRIVER_FLAGS_BUSY; + + spin_unlock_irqrestore(&hdev->lock, flags); + + if 
(!async_req) + return res; + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + + req = ahash_request_cast(async_req); + hdev->req = req; + + ctx = ahash_request_ctx(req); + + dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n", + ctx->op, req->nbytes); + + err = img_hash_hw_init(hdev); + + if (!err) + err = img_hash_process_data(hdev); + + if (err != -EINPROGRESS) { + /* done_task will not finish so do it here */ + img_hash_finish_req(req, err); + } + return res; +} + +static int img_hash_update(struct ahash_request *req) +{ + struct img_hash_request_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); + rctx->fallback_req.base.flags = req->base.flags + & CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.nbytes = req->nbytes; + rctx->fallback_req.src = req->src; + + return crypto_ahash_update(&rctx->fallback_req); +} + +static int img_hash_final(struct ahash_request *req) +{ + struct img_hash_request_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); + rctx->fallback_req.base.flags = req->base.flags + & CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.result = req->result; + + return crypto_ahash_final(&rctx->fallback_req); +} + +static int img_hash_finup(struct ahash_request *req) +{ + struct img_hash_request_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); + rctx->fallback_req.base.flags = req->base.flags + & CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.nbytes = req->nbytes; + rctx->fallback_req.src = req->src; + rctx->fallback_req.result = req->result; + + return crypto_ahash_finup(&rctx->fallback_req); +} + +static int img_hash_digest(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm); + struct img_hash_request_ctx *ctx = ahash_request_ctx(req); + struct img_hash_dev *hdev = NULL; + struct img_hash_dev *tmp; + int err; + + spin_lock(&img_hash.lock); + if (!tctx->hdev) { + list_for_each_entry(tmp, &img_hash.dev_list, list) { + hdev = tmp; + break; + } + tctx->hdev = hdev; + + } else { + hdev = tctx->hdev; + } + + spin_unlock(&img_hash.lock); + ctx->hdev = hdev; + ctx->flags = 0; + ctx->digsize = crypto_ahash_digestsize(tfm); + + switch (ctx->digsize) { + case SHA1_DIGEST_SIZE: + ctx->flags |= DRIVER_FLAGS_SHA1; + break; + case SHA256_DIGEST_SIZE: + ctx->flags |= DRIVER_FLAGS_SHA256; + break; + case SHA224_DIGEST_SIZE: + ctx->flags |= DRIVER_FLAGS_SHA224; + break; + case MD5_DIGEST_SIZE: + ctx->flags |= DRIVER_FLAGS_MD5; + break; + default: + return -EINVAL; + } + + ctx->bufcnt = 0; + ctx->offset = 0; + ctx->sent = 0; + ctx->total = req->nbytes; + ctx->sg = req->src; + ctx->sgfirst = req->src; + ctx->nents = sg_nents(ctx->sg); + + err = img_hash_handle_queue(tctx->hdev, req); + + return err; +} + +static int img_hash_cra_init(struct crypto_tfm *tfm) +{ + struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm); + const char *alg_name = crypto_tfm_alg_name(tfm); + int err = -ENOMEM; + + ctx->fallback = crypto_alloc_ahash(alg_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback)) { + pr_err("img_hash: Could not load fallback 
driver.\n"); + err = PTR_ERR(ctx->fallback); + goto err; + } + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct img_hash_request_ctx) + + IMG_HASH_DMA_THRESHOLD); + + return 0; + +err: + return err; +} + +static void img_hash_cra_exit(struct crypto_tfm *tfm) +{ + struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm); + + crypto_free_ahash(tctx->fallback); +} + +static irqreturn_t img_irq_handler(int irq, void *dev_id) +{ + struct img_hash_dev *hdev = dev_id; + u32 reg; + + reg = img_hash_read(hdev, CR_INTSTAT); + img_hash_write(hdev, CR_INTCLEAR, reg); + + if (reg & CR_INT_NEW_RESULTS_SET) { + dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n"); + if (DRIVER_FLAGS_BUSY & hdev->flags) { + hdev->flags |= DRIVER_FLAGS_OUTPUT_READY; + if (!(DRIVER_FLAGS_CPU & hdev->flags)) + hdev->flags |= DRIVER_FLAGS_DMA_READY; + tasklet_schedule(&hdev->done_task); + } else { + dev_warn(hdev->dev, + "HASH interrupt when no active requests.\n"); + } + } else if (reg & CR_INT_RESULTS_AVAILABLE) { + dev_warn(hdev->dev, + "IRQ triggered before the hash had completed\n"); + } else if (reg & CR_INT_RESULT_READ_ERR) { + dev_warn(hdev->dev, + "Attempt to read from an empty result queue\n"); + } else if (reg & CR_INT_MESSAGE_WRITE_ERROR) { + dev_warn(hdev->dev, + "Data written before the hardware was configured\n"); + } + return IRQ_HANDLED; +} + +static struct ahash_alg img_algs[] = { + { + .init = img_hash_init, + .update = img_hash_update, + .final = img_hash_final, + .finup = img_hash_finup, + .digest = img_hash_digest, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "md5", + .cra_driver_name = "img-md5", + .cra_priority = 300, + .cra_flags = + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct img_hash_ctx), + .cra_init = img_hash_cra_init, + .cra_exit = img_hash_cra_exit, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = img_hash_init, + .update = img_hash_update, + .final = img_hash_final, + .finup = img_hash_finup, + .digest = img_hash_digest, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "sha1", + .cra_driver_name = "img-sha1", + .cra_priority = 300, + .cra_flags = + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct img_hash_ctx), + .cra_init = img_hash_cra_init, + .cra_exit = img_hash_cra_exit, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = img_hash_init, + .update = img_hash_update, + .final = img_hash_final, + .finup = img_hash_finup, + .digest = img_hash_digest, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "sha224", + .cra_driver_name = "img-sha224", + .cra_priority = 300, + .cra_flags = + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct img_hash_ctx), + .cra_init = img_hash_cra_init, + .cra_exit = img_hash_cra_exit, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = img_hash_init, + .update = img_hash_update, + .final = img_hash_final, + .finup = img_hash_finup, + .digest = img_hash_digest, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "sha256", + .cra_driver_name = "img-sha256", + .cra_priority = 300, + .cra_flags = + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct img_hash_ctx), + .cra_init = img_hash_cra_init, + .cra_exit = img_hash_cra_exit, + .cra_module = THIS_MODULE, + } + } + } +}; + +static int 
img_register_algs(struct img_hash_dev *hdev) +{ + int i, err; + + for (i = 0; i < ARRAY_SIZE(img_algs); i++) { + err = crypto_register_ahash(&img_algs[i]); + if (err) + goto err_reg; + } + return 0; + +err_reg: + for (; i--; ) + crypto_unregister_ahash(&img_algs[i]); + + return err; +} + +static int img_unregister_algs(struct img_hash_dev *hdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(img_algs); i++) + crypto_unregister_ahash(&img_algs[i]); + return 0; +} + +static void img_hash_done_task(unsigned long data) +{ + struct img_hash_dev *hdev = (struct img_hash_dev *)data; + int err = 0; + + if (hdev->err == -EINVAL) { + err = hdev->err; + goto finish; + } + + if (!(DRIVER_FLAGS_BUSY & hdev->flags)) { + img_hash_handle_queue(hdev, NULL); + return; + } + + if (DRIVER_FLAGS_CPU & hdev->flags) { + if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) { + hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY; + goto finish; + } + } else if (DRIVER_FLAGS_DMA_READY & hdev->flags) { + if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) { + hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE; + img_hash_write_via_dma_stop(hdev); + if (hdev->err) { + err = hdev->err; + goto finish; + } + } + if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) { + hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | + DRIVER_FLAGS_OUTPUT_READY); + goto finish; + } + } + return; + +finish: + img_hash_finish_req(hdev->req, err); +} + +static const struct of_device_id img_hash_match[] = { + { .compatible = "img,hash-accelerator" }, + {} +}; +MODULE_DEVICE_TABLE(of, img_hash_match); + +static int img_hash_probe(struct platform_device *pdev) +{ + struct img_hash_dev *hdev; + struct device *dev = &pdev->dev; + struct resource *hash_res; + int irq; + int err; + + hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL); + if (hdev == NULL) + return -ENOMEM; + + spin_lock_init(&hdev->lock); + + hdev->dev = dev; + + platform_set_drvdata(pdev, hdev); + + INIT_LIST_HEAD(&hdev->list); + + tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev); + tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev); + + crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH); + + /* Register bank */ + hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + hdev->io_base = devm_ioremap_resource(dev, hash_res); + if (IS_ERR(hdev->io_base)) { + err = PTR_ERR(hdev->io_base); + dev_err(dev, "can't ioremap, returned %d\n", err); + + goto res_err; + } + + /* Write port (DMA or CPU) */ + hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + hdev->cpu_addr = devm_ioremap_resource(dev, hash_res); + if (IS_ERR(hdev->cpu_addr)) { + dev_err(dev, "can't ioremap write port\n"); + err = PTR_ERR(hdev->cpu_addr); + goto res_err; + } + hdev->bus_addr = hash_res->start; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "no IRQ resource info\n"); + err = irq; + goto res_err; + } + + err = devm_request_irq(dev, irq, img_irq_handler, 0, + dev_name(dev), hdev); + if (err) { + dev_err(dev, "unable to request irq\n"); + goto res_err; + } + dev_dbg(dev, "using IRQ channel %d\n", irq); + + hdev->hash_clk = devm_clk_get(&pdev->dev, "hash"); + if (IS_ERR(hdev->hash_clk)) { + dev_err(dev, "clock initialization failed.\n"); + err = PTR_ERR(hdev->hash_clk); + goto res_err; + } + + hdev->sys_clk = devm_clk_get(&pdev->dev, "sys"); + if (IS_ERR(hdev->sys_clk)) { + dev_err(dev, "clock initialization failed.\n"); + err = PTR_ERR(hdev->sys_clk); + goto res_err; + } + + err = clk_prepare_enable(hdev->hash_clk); + if (err) + goto res_err; + + err = clk_prepare_enable(hdev->sys_clk); + 
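+ /*
+ * Both clocks must be running before the DMA channel is requested.
+ * If enabling the system clock fails, the clk_err label below
+ * disables the already-enabled hash clock.
+ */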
if (err) + goto clk_err; + + err = img_hash_dma_init(hdev); + if (err) + goto dma_err; + + dev_dbg(dev, "using %s for DMA transfers\n", + dma_chan_name(hdev->dma_lch)); + + spin_lock(&img_hash.lock); + list_add_tail(&hdev->list, &img_hash.dev_list); + spin_unlock(&img_hash.lock); + + err = img_register_algs(hdev); + if (err) + goto err_algs; + dev_dbg(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n"); + + return 0; + +err_algs: + spin_lock(&img_hash.lock); + list_del(&hdev->list); + spin_unlock(&img_hash.lock); + dma_release_channel(hdev->dma_lch); +dma_err: + clk_disable_unprepare(hdev->sys_clk); +clk_err: + clk_disable_unprepare(hdev->hash_clk); +res_err: + tasklet_kill(&hdev->done_task); + tasklet_kill(&hdev->dma_task); + + return err; +} + +static int img_hash_remove(struct platform_device *pdev) +{ + static struct img_hash_dev *hdev; + + hdev = platform_get_drvdata(pdev); + spin_lock(&img_hash.lock); + list_del(&hdev->list); + spin_unlock(&img_hash.lock); + + img_unregister_algs(hdev); + + tasklet_kill(&hdev->done_task); + tasklet_kill(&hdev->dma_task); + + dma_release_channel(hdev->dma_lch); + + clk_disable_unprepare(hdev->hash_clk); + clk_disable_unprepare(hdev->sys_clk); + + return 0; +} + +static struct platform_driver img_hash_driver = { + .probe = img_hash_probe, + .remove = img_hash_remove, + .driver = { + .name = "img-hash-accelerator", + .of_match_table = of_match_ptr(img_hash_match), + } +}; +module_platform_driver(img_hash_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver"); +MODULE_AUTHOR("Will Thomas."); +MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>"); diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c new file mode 100644 index 000000000..48f453555 --- /dev/null +++ b/drivers/crypto/ixp4xx_crypto.c @@ -0,0 +1,1498 @@ +/* + * Intel IXP4xx NPE-C crypto driver + * + * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
+ * + */ + +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/crypto.h> +#include <linux/kernel.h> +#include <linux/rtnetlink.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/gfp.h> +#include <linux/module.h> + +#include <crypto/ctr.h> +#include <crypto/des.h> +#include <crypto/aes.h> +#include <crypto/sha.h> +#include <crypto/algapi.h> +#include <crypto/aead.h> +#include <crypto/authenc.h> +#include <crypto/scatterwalk.h> + +#include <mach/npe.h> +#include <mach/qmgr.h> + +#define MAX_KEYLEN 32 + +/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */ +#define NPE_CTX_LEN 80 +#define AES_BLOCK128 16 + +#define NPE_OP_HASH_VERIFY 0x01 +#define NPE_OP_CCM_ENABLE 0x04 +#define NPE_OP_CRYPT_ENABLE 0x08 +#define NPE_OP_HASH_ENABLE 0x10 +#define NPE_OP_NOT_IN_PLACE 0x20 +#define NPE_OP_HMAC_DISABLE 0x40 +#define NPE_OP_CRYPT_ENCRYPT 0x80 + +#define NPE_OP_CCM_GEN_MIC 0xcc +#define NPE_OP_HASH_GEN_ICV 0x50 +#define NPE_OP_ENC_GEN_KEY 0xc9 + +#define MOD_ECB 0x0000 +#define MOD_CTR 0x1000 +#define MOD_CBC_ENC 0x2000 +#define MOD_CBC_DEC 0x3000 +#define MOD_CCM_ENC 0x4000 +#define MOD_CCM_DEC 0x5000 + +#define KEYLEN_128 4 +#define KEYLEN_192 6 +#define KEYLEN_256 8 + +#define CIPH_DECR 0x0000 +#define CIPH_ENCR 0x0400 + +#define MOD_DES 0x0000 +#define MOD_TDEA2 0x0100 +#define MOD_3DES 0x0200 +#define MOD_AES 0x0800 +#define MOD_AES128 (0x0800 | KEYLEN_128) +#define MOD_AES192 (0x0900 | KEYLEN_192) +#define MOD_AES256 (0x0a00 | KEYLEN_256) + +#define MAX_IVLEN 16 +#define NPE_ID 2 /* NPE C */ +#define NPE_QLEN 16 +/* Space for registering when the first + * NPE_QLEN crypt_ctl are busy */ +#define NPE_QLEN_TOTAL 64 + +#define SEND_QID 29 +#define RECV_QID 30 + +#define CTL_FLAG_UNUSED 0x0000 +#define CTL_FLAG_USED 0x1000 +#define CTL_FLAG_PERFORM_ABLK 0x0001 +#define CTL_FLAG_GEN_ICV 0x0002 +#define CTL_FLAG_GEN_REVAES 0x0004 +#define CTL_FLAG_PERFORM_AEAD 0x0008 +#define CTL_FLAG_MASK 0x000f + +#define HMAC_IPAD_VALUE 0x36 +#define HMAC_OPAD_VALUE 0x5C +#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE + +#define MD5_DIGEST_SIZE 16 + +struct buffer_desc { + u32 phys_next; +#ifdef __ARMEB__ + u16 buf_len; + u16 pkt_len; +#else + u16 pkt_len; + u16 buf_len; +#endif + u32 phys_addr; + u32 __reserved[4]; + struct buffer_desc *next; + enum dma_data_direction dir; +}; + +struct crypt_ctl { +#ifdef __ARMEB__ + u8 mode; /* NPE_OP_* operation mode */ + u8 init_len; + u16 reserved; +#else + u16 reserved; + u8 init_len; + u8 mode; /* NPE_OP_* operation mode */ +#endif + u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */ + u32 icv_rev_aes; /* icv or rev aes */ + u32 src_buf; + u32 dst_buf; +#ifdef __ARMEB__ + u16 auth_offs; /* Authentication start offset */ + u16 auth_len; /* Authentication data length */ + u16 crypt_offs; /* Cryption start offset */ + u16 crypt_len; /* Cryption data length */ +#else + u16 auth_len; /* Authentication data length */ + u16 auth_offs; /* Authentication start offset */ + u16 crypt_len; /* Cryption data length */ + u16 crypt_offs; /* Cryption start offset */ +#endif + u32 aadAddr; /* Additional Auth Data Addr for CCM mode */ + u32 crypto_ctx; /* NPE Crypto Param structure address */ + + /* Used by Host: 4*4 bytes*/ + unsigned ctl_flags; + union { + struct ablkcipher_request *ablk_req; + struct aead_request *aead_req; + struct crypto_tfm *tfm; + } data; + struct buffer_desc *regist_buf; + u8 *regist_ptr; +}; + +struct ablk_ctx { + struct buffer_desc *src; + struct buffer_desc 
*dst; +}; + +struct aead_ctx { + struct buffer_desc *buffer; + struct scatterlist ivlist; + /* used when the hmac is not on one sg entry */ + u8 *hmac_virt; + int encrypt; +}; + +struct ix_hash_algo { + u32 cfgword; + unsigned char *icv; +}; + +struct ix_sa_dir { + unsigned char *npe_ctx; + dma_addr_t npe_ctx_phys; + int npe_ctx_idx; + u8 npe_mode; +}; + +struct ixp_ctx { + struct ix_sa_dir encrypt; + struct ix_sa_dir decrypt; + int authkey_len; + u8 authkey[MAX_KEYLEN]; + int enckey_len; + u8 enckey[MAX_KEYLEN]; + u8 salt[MAX_IVLEN]; + u8 nonce[CTR_RFC3686_NONCE_SIZE]; + unsigned salted; + atomic_t configuring; + struct completion completion; +}; + +struct ixp_alg { + struct crypto_alg crypto; + const struct ix_hash_algo *hash; + u32 cfg_enc; + u32 cfg_dec; + + int registered; +}; + +static const struct ix_hash_algo hash_alg_md5 = { + .cfgword = 0xAA010004, + .icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF" + "\xFE\xDC\xBA\x98\x76\x54\x32\x10", +}; +static const struct ix_hash_algo hash_alg_sha1 = { + .cfgword = 0x00000005, + .icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA" + "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0", +}; + +static struct npe *npe_c; +static struct dma_pool *buffer_pool = NULL; +static struct dma_pool *ctx_pool = NULL; + +static struct crypt_ctl *crypt_virt = NULL; +static dma_addr_t crypt_phys; + +static int support_aes = 1; + +#define DRIVER_NAME "ixp4xx_crypto" + +static struct platform_device *pdev; + +static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt) +{ + return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl); +} + +static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys) +{ + return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl); +} + +static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm) +{ + return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_enc; +} + +static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm) +{ + return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_dec; +} + +static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm) +{ + return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash; +} + +static int setup_crypt_desc(void) +{ + struct device *dev = &pdev->dev; + BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64); + crypt_virt = dma_alloc_coherent(dev, + NPE_QLEN * sizeof(struct crypt_ctl), + &crypt_phys, GFP_ATOMIC); + if (!crypt_virt) + return -ENOMEM; + memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl)); + return 0; +} + +static spinlock_t desc_lock; +static struct crypt_ctl *get_crypt_desc(void) +{ + int i; + static int idx = 0; + unsigned long flags; + + spin_lock_irqsave(&desc_lock, flags); + + if (unlikely(!crypt_virt)) + setup_crypt_desc(); + if (unlikely(!crypt_virt)) { + spin_unlock_irqrestore(&desc_lock, flags); + return NULL; + } + i = idx; + if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) { + if (++idx >= NPE_QLEN) + idx = 0; + crypt_virt[i].ctl_flags = CTL_FLAG_USED; + spin_unlock_irqrestore(&desc_lock, flags); + return crypt_virt +i; + } else { + spin_unlock_irqrestore(&desc_lock, flags); + return NULL; + } +} + +static spinlock_t emerg_lock; +static struct crypt_ctl *get_crypt_desc_emerg(void) +{ + int i; + static int idx = NPE_QLEN; + struct crypt_ctl *desc; + unsigned long flags; + + desc = get_crypt_desc(); + if (desc) + return desc; + if (unlikely(!crypt_virt)) + return NULL; + + spin_lock_irqsave(&emerg_lock, flags); + i = idx; + if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) { + if (++idx >= NPE_QLEN_TOTAL) + idx = NPE_QLEN; + 
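+ /* Mark the reserved emergency descriptor as used before dropping emerg_lock. */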
crypt_virt[i].ctl_flags = CTL_FLAG_USED; + spin_unlock_irqrestore(&emerg_lock, flags); + return crypt_virt +i; + } else { + spin_unlock_irqrestore(&emerg_lock, flags); + return NULL; + } +} + +static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys) +{ + while (buf) { + struct buffer_desc *buf1; + u32 phys1; + + buf1 = buf->next; + phys1 = buf->phys_next; + dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir); + dma_pool_free(buffer_pool, buf, phys); + buf = buf1; + phys = phys1; + } +} + +static struct tasklet_struct crypto_done_tasklet; + +static void finish_scattered_hmac(struct crypt_ctl *crypt) +{ + struct aead_request *req = crypt->data.aead_req; + struct aead_ctx *req_ctx = aead_request_ctx(req); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + int authsize = crypto_aead_authsize(tfm); + int decryptlen = req->cryptlen - authsize; + + if (req_ctx->encrypt) { + scatterwalk_map_and_copy(req_ctx->hmac_virt, + req->src, decryptlen, authsize, 1); + } + dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes); +} + +static void one_packet(dma_addr_t phys) +{ + struct device *dev = &pdev->dev; + struct crypt_ctl *crypt; + struct ixp_ctx *ctx; + int failed; + + failed = phys & 0x1 ? -EBADMSG : 0; + phys &= ~0x3; + crypt = crypt_phys2virt(phys); + + switch (crypt->ctl_flags & CTL_FLAG_MASK) { + case CTL_FLAG_PERFORM_AEAD: { + struct aead_request *req = crypt->data.aead_req; + struct aead_ctx *req_ctx = aead_request_ctx(req); + + free_buf_chain(dev, req_ctx->buffer, crypt->src_buf); + if (req_ctx->hmac_virt) { + finish_scattered_hmac(crypt); + } + req->base.complete(&req->base, failed); + break; + } + case CTL_FLAG_PERFORM_ABLK: { + struct ablkcipher_request *req = crypt->data.ablk_req; + struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req); + + if (req_ctx->dst) { + free_buf_chain(dev, req_ctx->dst, crypt->dst_buf); + } + free_buf_chain(dev, req_ctx->src, crypt->src_buf); + req->base.complete(&req->base, failed); + break; + } + case CTL_FLAG_GEN_ICV: + ctx = crypto_tfm_ctx(crypt->data.tfm); + dma_pool_free(ctx_pool, crypt->regist_ptr, + crypt->regist_buf->phys_addr); + dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf); + if (atomic_dec_and_test(&ctx->configuring)) + complete(&ctx->completion); + break; + case CTL_FLAG_GEN_REVAES: + ctx = crypto_tfm_ctx(crypt->data.tfm); + *(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR); + if (atomic_dec_and_test(&ctx->configuring)) + complete(&ctx->completion); + break; + default: + BUG(); + } + crypt->ctl_flags = CTL_FLAG_UNUSED; +} + +static void irqhandler(void *_unused) +{ + tasklet_schedule(&crypto_done_tasklet); +} + +static void crypto_done_action(unsigned long arg) +{ + int i; + + for(i=0; i<4; i++) { + dma_addr_t phys = qmgr_get_entry(RECV_QID); + if (!phys) + return; + one_packet(phys); + } + tasklet_schedule(&crypto_done_tasklet); +} + +static int init_ixp_crypto(struct device *dev) +{ + int ret = -ENODEV; + u32 msg[2] = { 0, 0 }; + + if (! 
( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH | + IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) { + printk(KERN_ERR "ixp_crypto: No HW crypto available\n"); + return ret; + } + npe_c = npe_request(NPE_ID); + if (!npe_c) + return ret; + + if (!npe_running(npe_c)) { + ret = npe_load_firmware(npe_c, npe_name(npe_c), dev); + if (ret) { + return ret; + } + if (npe_recv_message(npe_c, msg, "STATUS_MSG")) + goto npe_error; + } else { + if (npe_send_message(npe_c, msg, "STATUS_MSG")) + goto npe_error; + + if (npe_recv_message(npe_c, msg, "STATUS_MSG")) + goto npe_error; + } + + switch ((msg[1]>>16) & 0xff) { + case 3: + printk(KERN_WARNING "Firmware of %s lacks AES support\n", + npe_name(npe_c)); + support_aes = 0; + break; + case 4: + case 5: + support_aes = 1; + break; + default: + printk(KERN_ERR "Firmware of %s lacks crypto support\n", + npe_name(npe_c)); + return -ENODEV; + } + /* buffer_pool will also be used to sometimes store the hmac, + * so assure it is large enough + */ + BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc)); + buffer_pool = dma_pool_create("buffer", dev, + sizeof(struct buffer_desc), 32, 0); + ret = -ENOMEM; + if (!buffer_pool) { + goto err; + } + ctx_pool = dma_pool_create("context", dev, + NPE_CTX_LEN, 16, 0); + if (!ctx_pool) { + goto err; + } + ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0, + "ixp_crypto:out", NULL); + if (ret) + goto err; + ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0, + "ixp_crypto:in", NULL); + if (ret) { + qmgr_release_queue(SEND_QID); + goto err; + } + qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL); + tasklet_init(&crypto_done_tasklet, crypto_done_action, 0); + + qmgr_enable_irq(RECV_QID); + return 0; + +npe_error: + printk(KERN_ERR "%s not responding\n", npe_name(npe_c)); + ret = -EIO; +err: + if (ctx_pool) + dma_pool_destroy(ctx_pool); + if (buffer_pool) + dma_pool_destroy(buffer_pool); + npe_release(npe_c); + return ret; +} + +static void release_ixp_crypto(struct device *dev) +{ + qmgr_disable_irq(RECV_QID); + tasklet_kill(&crypto_done_tasklet); + + qmgr_release_queue(SEND_QID); + qmgr_release_queue(RECV_QID); + + dma_pool_destroy(ctx_pool); + dma_pool_destroy(buffer_pool); + + npe_release(npe_c); + + if (crypt_virt) { + dma_free_coherent(dev, + NPE_QLEN_TOTAL * sizeof( struct crypt_ctl), + crypt_virt, crypt_phys); + } + return; +} + +static void reset_sa_dir(struct ix_sa_dir *dir) +{ + memset(dir->npe_ctx, 0, NPE_CTX_LEN); + dir->npe_ctx_idx = 0; + dir->npe_mode = 0; +} + +static int init_sa_dir(struct ix_sa_dir *dir) +{ + dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys); + if (!dir->npe_ctx) { + return -ENOMEM; + } + reset_sa_dir(dir); + return 0; +} + +static void free_sa_dir(struct ix_sa_dir *dir) +{ + memset(dir->npe_ctx, 0, NPE_CTX_LEN); + dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys); +} + +static int init_tfm(struct crypto_tfm *tfm) +{ + struct ixp_ctx *ctx = crypto_tfm_ctx(tfm); + int ret; + + atomic_set(&ctx->configuring, 0); + ret = init_sa_dir(&ctx->encrypt); + if (ret) + return ret; + ret = init_sa_dir(&ctx->decrypt); + if (ret) { + free_sa_dir(&ctx->encrypt); + } + return ret; +} + +static int init_tfm_ablk(struct crypto_tfm *tfm) +{ + tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx); + return init_tfm(tfm); +} + +static int init_tfm_aead(struct crypto_tfm *tfm) +{ + tfm->crt_aead.reqsize = sizeof(struct aead_ctx); + return init_tfm(tfm); +} + +static void exit_tfm(struct crypto_tfm *tfm) +{ + struct ixp_ctx *ctx = crypto_tfm_ctx(tfm); + 
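+	/* release the per-direction NPE context buffers allocated by init_tfm() */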
free_sa_dir(&ctx->encrypt); + free_sa_dir(&ctx->decrypt); +} + +static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target, + int init_len, u32 ctx_addr, const u8 *key, int key_len) +{ + struct ixp_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypt_ctl *crypt; + struct buffer_desc *buf; + int i; + u8 *pad; + u32 pad_phys, buf_phys; + + BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN); + pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys); + if (!pad) + return -ENOMEM; + buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys); + if (!buf) { + dma_pool_free(ctx_pool, pad, pad_phys); + return -ENOMEM; + } + crypt = get_crypt_desc_emerg(); + if (!crypt) { + dma_pool_free(ctx_pool, pad, pad_phys); + dma_pool_free(buffer_pool, buf, buf_phys); + return -EAGAIN; + } + + memcpy(pad, key, key_len); + memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len); + for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) { + pad[i] ^= xpad; + } + + crypt->data.tfm = tfm; + crypt->regist_ptr = pad; + crypt->regist_buf = buf; + + crypt->auth_offs = 0; + crypt->auth_len = HMAC_PAD_BLOCKLEN; + crypt->crypto_ctx = ctx_addr; + crypt->src_buf = buf_phys; + crypt->icv_rev_aes = target; + crypt->mode = NPE_OP_HASH_GEN_ICV; + crypt->init_len = init_len; + crypt->ctl_flags |= CTL_FLAG_GEN_ICV; + + buf->next = 0; + buf->buf_len = HMAC_PAD_BLOCKLEN; + buf->pkt_len = 0; + buf->phys_addr = pad_phys; + + atomic_inc(&ctx->configuring); + qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt)); + BUG_ON(qmgr_stat_overflow(SEND_QID)); + return 0; +} + +static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize, + const u8 *key, int key_len, unsigned digest_len) +{ + u32 itarget, otarget, npe_ctx_addr; + unsigned char *cinfo; + int init_len, ret = 0; + u32 cfgword; + struct ix_sa_dir *dir; + struct ixp_ctx *ctx = crypto_tfm_ctx(tfm); + const struct ix_hash_algo *algo; + + dir = encrypt ? 
&ctx->encrypt : &ctx->decrypt; + cinfo = dir->npe_ctx + dir->npe_ctx_idx; + algo = ix_hash(tfm); + + /* write cfg word to cryptinfo */ + cfgword = algo->cfgword | ( authsize << 6); /* (authsize/4) << 8 */ +#ifndef __ARMEB__ + cfgword ^= 0xAA000000; /* change the "byte swap" flags */ +#endif + *(u32*)cinfo = cpu_to_be32(cfgword); + cinfo += sizeof(cfgword); + + /* write ICV to cryptinfo */ + memcpy(cinfo, algo->icv, digest_len); + cinfo += digest_len; + + itarget = dir->npe_ctx_phys + dir->npe_ctx_idx + + sizeof(algo->cfgword); + otarget = itarget + digest_len; + init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx); + npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx; + + dir->npe_ctx_idx += init_len; + dir->npe_mode |= NPE_OP_HASH_ENABLE; + + if (!encrypt) + dir->npe_mode |= NPE_OP_HASH_VERIFY; + + ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget, + init_len, npe_ctx_addr, key, key_len); + if (ret) + return ret; + return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget, + init_len, npe_ctx_addr, key, key_len); +} + +static int gen_rev_aes_key(struct crypto_tfm *tfm) +{ + struct crypt_ctl *crypt; + struct ixp_ctx *ctx = crypto_tfm_ctx(tfm); + struct ix_sa_dir *dir = &ctx->decrypt; + + crypt = get_crypt_desc_emerg(); + if (!crypt) { + return -EAGAIN; + } + *(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR); + + crypt->data.tfm = tfm; + crypt->crypt_offs = 0; + crypt->crypt_len = AES_BLOCK128; + crypt->src_buf = 0; + crypt->crypto_ctx = dir->npe_ctx_phys; + crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32); + crypt->mode = NPE_OP_ENC_GEN_KEY; + crypt->init_len = dir->npe_ctx_idx; + crypt->ctl_flags |= CTL_FLAG_GEN_REVAES; + + atomic_inc(&ctx->configuring); + qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt)); + BUG_ON(qmgr_stat_overflow(SEND_QID)); + return 0; +} + +static int setup_cipher(struct crypto_tfm *tfm, int encrypt, + const u8 *key, int key_len) +{ + u8 *cinfo; + u32 cipher_cfg; + u32 keylen_cfg = 0; + struct ix_sa_dir *dir; + struct ixp_ctx *ctx = crypto_tfm_ctx(tfm); + u32 *flags = &tfm->crt_flags; + + dir = encrypt ? 
&ctx->encrypt : &ctx->decrypt; + cinfo = dir->npe_ctx; + + if (encrypt) { + cipher_cfg = cipher_cfg_enc(tfm); + dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT; + } else { + cipher_cfg = cipher_cfg_dec(tfm); + } + if (cipher_cfg & MOD_AES) { + switch (key_len) { + case 16: keylen_cfg = MOD_AES128; break; + case 24: keylen_cfg = MOD_AES192; break; + case 32: keylen_cfg = MOD_AES256; break; + default: + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + cipher_cfg |= keylen_cfg; + } else if (cipher_cfg & MOD_3DES) { + const u32 *K = (const u32 *)key; + if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || + !((K[2] ^ K[4]) | (K[3] ^ K[5])))) + { + *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED; + return -EINVAL; + } + } else { + u32 tmp[DES_EXPKEY_WORDS]; + if (des_ekey(tmp, key) == 0) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + } + } + /* write cfg word to cryptinfo */ + *(u32*)cinfo = cpu_to_be32(cipher_cfg); + cinfo += sizeof(cipher_cfg); + + /* write cipher key to cryptinfo */ + memcpy(cinfo, key, key_len); + /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */ + if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) { + memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE -key_len); + key_len = DES3_EDE_KEY_SIZE; + } + dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len; + dir->npe_mode |= NPE_OP_CRYPT_ENABLE; + if ((cipher_cfg & MOD_AES) && !encrypt) { + return gen_rev_aes_key(tfm); + } + return 0; +} + +static struct buffer_desc *chainup_buffers(struct device *dev, + struct scatterlist *sg, unsigned nbytes, + struct buffer_desc *buf, gfp_t flags, + enum dma_data_direction dir) +{ + for (; nbytes > 0; sg = sg_next(sg)) { + unsigned len = min(nbytes, sg->length); + struct buffer_desc *next_buf; + u32 next_buf_phys; + void *ptr; + + nbytes -= len; + ptr = page_address(sg_page(sg)) + sg->offset; + next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys); + if (!next_buf) { + buf = NULL; + break; + } + sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir); + buf->next = next_buf; + buf->phys_next = next_buf_phys; + buf = next_buf; + + buf->phys_addr = sg_dma_address(sg); + buf->buf_len = len; + buf->dir = dir; + } + buf->next = NULL; + buf->phys_next = 0; + return buf; +} + +static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm); + u32 *flags = &tfm->base.crt_flags; + int ret; + + init_completion(&ctx->completion); + atomic_inc(&ctx->configuring); + + reset_sa_dir(&ctx->encrypt); + reset_sa_dir(&ctx->decrypt); + + ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE; + ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE; + + ret = setup_cipher(&tfm->base, 0, key, key_len); + if (ret) + goto out; + ret = setup_cipher(&tfm->base, 1, key, key_len); + if (ret) + goto out; + + if (*flags & CRYPTO_TFM_RES_WEAK_KEY) { + if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) { + ret = -EINVAL; + } else { + *flags &= ~CRYPTO_TFM_RES_WEAK_KEY; + } + } +out: + if (!atomic_dec_and_test(&ctx->configuring)) + wait_for_completion(&ctx->completion); + return ret; +} + +static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm); + + /* the nonce is stored in bytes at end of key */ + if (key_len < CTR_RFC3686_NONCE_SIZE) + return -EINVAL; + + memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE), + CTR_RFC3686_NONCE_SIZE); + + key_len -= CTR_RFC3686_NONCE_SIZE; + return ablk_setkey(tfm, key, key_len); +} + +static int ablk_perform(struct 
ablkcipher_request *req, int encrypt) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm); + unsigned ivsize = crypto_ablkcipher_ivsize(tfm); + struct ix_sa_dir *dir; + struct crypt_ctl *crypt; + unsigned int nbytes = req->nbytes; + enum dma_data_direction src_direction = DMA_BIDIRECTIONAL; + struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req); + struct buffer_desc src_hook; + struct device *dev = &pdev->dev; + gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? + GFP_KERNEL : GFP_ATOMIC; + + if (qmgr_stat_full(SEND_QID)) + return -EAGAIN; + if (atomic_read(&ctx->configuring)) + return -EAGAIN; + + dir = encrypt ? &ctx->encrypt : &ctx->decrypt; + + crypt = get_crypt_desc(); + if (!crypt) + return -ENOMEM; + + crypt->data.ablk_req = req; + crypt->crypto_ctx = dir->npe_ctx_phys; + crypt->mode = dir->npe_mode; + crypt->init_len = dir->npe_ctx_idx; + + crypt->crypt_offs = 0; + crypt->crypt_len = nbytes; + + BUG_ON(ivsize && !req->info); + memcpy(crypt->iv, req->info, ivsize); + if (req->src != req->dst) { + struct buffer_desc dst_hook; + crypt->mode |= NPE_OP_NOT_IN_PLACE; + /* This was never tested by Intel + * for more than one dst buffer, I think. */ + BUG_ON(req->dst->length < nbytes); + req_ctx->dst = NULL; + if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook, + flags, DMA_FROM_DEVICE)) + goto free_buf_dest; + src_direction = DMA_TO_DEVICE; + req_ctx->dst = dst_hook.next; + crypt->dst_buf = dst_hook.phys_next; + } else { + req_ctx->dst = NULL; + } + req_ctx->src = NULL; + if (!chainup_buffers(dev, req->src, nbytes, &src_hook, + flags, src_direction)) + goto free_buf_src; + + req_ctx->src = src_hook.next; + crypt->src_buf = src_hook.phys_next; + crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK; + qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt)); + BUG_ON(qmgr_stat_overflow(SEND_QID)); + return -EINPROGRESS; + +free_buf_src: + free_buf_chain(dev, req_ctx->src, crypt->src_buf); +free_buf_dest: + if (req->src != req->dst) { + free_buf_chain(dev, req_ctx->dst, crypt->dst_buf); + } + crypt->ctl_flags = CTL_FLAG_UNUSED; + return -ENOMEM; +} + +static int ablk_encrypt(struct ablkcipher_request *req) +{ + return ablk_perform(req, 1); +} + +static int ablk_decrypt(struct ablkcipher_request *req) +{ + return ablk_perform(req, 0); +} + +static int ablk_rfc3686_crypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm); + u8 iv[CTR_RFC3686_BLOCK_SIZE]; + u8 *info = req->info; + int ret; + + /* set up counter block */ + memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE); + memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE); + + /* initialize counter portion of counter block */ + *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = + cpu_to_be32(1); + + req->info = iv; + ret = ablk_perform(req, 1); + req->info = info; + return ret; +} + +static int hmac_inconsistent(struct scatterlist *sg, unsigned start, + unsigned int nbytes) +{ + int offset = 0; + + if (!nbytes) + return 0; + + for (;;) { + if (start < offset + sg->length) + break; + + offset += sg->length; + sg = sg_next(sg); + } + return (start + nbytes > offset + sg->length); +} + +static int aead_perform(struct aead_request *req, int encrypt, + int cryptoffset, int eff_cryptlen, u8 *iv) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct ixp_ctx *ctx = crypto_aead_ctx(tfm); + unsigned ivsize = crypto_aead_ivsize(tfm); + unsigned authsize = 
crypto_aead_authsize(tfm); + struct ix_sa_dir *dir; + struct crypt_ctl *crypt; + unsigned int cryptlen; + struct buffer_desc *buf, src_hook; + struct aead_ctx *req_ctx = aead_request_ctx(req); + struct device *dev = &pdev->dev; + gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? + GFP_KERNEL : GFP_ATOMIC; + + if (qmgr_stat_full(SEND_QID)) + return -EAGAIN; + if (atomic_read(&ctx->configuring)) + return -EAGAIN; + + if (encrypt) { + dir = &ctx->encrypt; + cryptlen = req->cryptlen; + } else { + dir = &ctx->decrypt; + /* req->cryptlen includes the authsize when decrypting */ + cryptlen = req->cryptlen -authsize; + eff_cryptlen -= authsize; + } + crypt = get_crypt_desc(); + if (!crypt) + return -ENOMEM; + + crypt->data.aead_req = req; + crypt->crypto_ctx = dir->npe_ctx_phys; + crypt->mode = dir->npe_mode; + crypt->init_len = dir->npe_ctx_idx; + + crypt->crypt_offs = cryptoffset; + crypt->crypt_len = eff_cryptlen; + + crypt->auth_offs = 0; + crypt->auth_len = req->assoclen + ivsize + cryptlen; + BUG_ON(ivsize && !req->iv); + memcpy(crypt->iv, req->iv, ivsize); + + if (req->src != req->dst) { + BUG(); /* -ENOTSUP because of my laziness */ + } + + /* ASSOC data */ + buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook, + flags, DMA_TO_DEVICE); + req_ctx->buffer = src_hook.next; + crypt->src_buf = src_hook.phys_next; + if (!buf) + goto out; + /* IV */ + sg_init_table(&req_ctx->ivlist, 1); + sg_set_buf(&req_ctx->ivlist, iv, ivsize); + buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags, + DMA_BIDIRECTIONAL); + if (!buf) + goto free_chain; + if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) { + /* The 12 hmac bytes are scattered, + * we need to copy them into a safe buffer */ + req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, + &crypt->icv_rev_aes); + if (unlikely(!req_ctx->hmac_virt)) + goto free_chain; + if (!encrypt) { + scatterwalk_map_and_copy(req_ctx->hmac_virt, + req->src, cryptlen, authsize, 0); + } + req_ctx->encrypt = encrypt; + } else { + req_ctx->hmac_virt = NULL; + } + /* Crypt */ + buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags, + DMA_BIDIRECTIONAL); + if (!buf) + goto free_hmac_virt; + if (!req_ctx->hmac_virt) { + crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize; + } + + crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD; + qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt)); + BUG_ON(qmgr_stat_overflow(SEND_QID)); + return -EINPROGRESS; +free_hmac_virt: + if (req_ctx->hmac_virt) { + dma_pool_free(buffer_pool, req_ctx->hmac_virt, + crypt->icv_rev_aes); + } +free_chain: + free_buf_chain(dev, req_ctx->buffer, crypt->src_buf); +out: + crypt->ctl_flags = CTL_FLAG_UNUSED; + return -ENOMEM; +} + +static int aead_setup(struct crypto_aead *tfm, unsigned int authsize) +{ + struct ixp_ctx *ctx = crypto_aead_ctx(tfm); + u32 *flags = &tfm->base.crt_flags; + unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize; + int ret; + + if (!ctx->enckey_len && !ctx->authkey_len) + return 0; + init_completion(&ctx->completion); + atomic_inc(&ctx->configuring); + + reset_sa_dir(&ctx->encrypt); + reset_sa_dir(&ctx->decrypt); + + ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len); + if (ret) + goto out; + ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len); + if (ret) + goto out; + ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey, + ctx->authkey_len, digest_len); + if (ret) + goto out; + ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey, + ctx->authkey_len, digest_len); + if (ret) + goto out; + + if 
(*flags & CRYPTO_TFM_RES_WEAK_KEY) { + if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) { + ret = -EINVAL; + goto out; + } else { + *flags &= ~CRYPTO_TFM_RES_WEAK_KEY; + } + } +out: + if (!atomic_dec_and_test(&ctx->configuring)) + wait_for_completion(&ctx->completion); + return ret; +} + +static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) +{ + int max = crypto_aead_alg(tfm)->maxauthsize >> 2; + + if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3)) + return -EINVAL; + return aead_setup(tfm, authsize); +} + +static int aead_setkey(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen) +{ + struct ixp_ctx *ctx = crypto_aead_ctx(tfm); + struct crypto_authenc_keys keys; + + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) + goto badkey; + + if (keys.authkeylen > sizeof(ctx->authkey)) + goto badkey; + + if (keys.enckeylen > sizeof(ctx->enckey)) + goto badkey; + + memcpy(ctx->authkey, keys.authkey, keys.authkeylen); + memcpy(ctx->enckey, keys.enckey, keys.enckeylen); + ctx->authkey_len = keys.authkeylen; + ctx->enckey_len = keys.enckeylen; + + return aead_setup(tfm, crypto_aead_authsize(tfm)); +badkey: + crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; +} + +static int aead_encrypt(struct aead_request *req) +{ + unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req)); + return aead_perform(req, 1, req->assoclen + ivsize, + req->cryptlen, req->iv); +} + +static int aead_decrypt(struct aead_request *req) +{ + unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req)); + return aead_perform(req, 0, req->assoclen + ivsize, + req->cryptlen, req->iv); +} + +static int aead_givencrypt(struct aead_givcrypt_request *req) +{ + struct crypto_aead *tfm = aead_givcrypt_reqtfm(req); + struct ixp_ctx *ctx = crypto_aead_ctx(tfm); + unsigned len, ivsize = crypto_aead_ivsize(tfm); + __be64 seq; + + /* copied from eseqiv.c */ + if (!ctx->salted) { + get_random_bytes(ctx->salt, ivsize); + ctx->salted = 1; + } + memcpy(req->areq.iv, ctx->salt, ivsize); + len = ivsize; + if (ivsize > sizeof(u64)) { + memset(req->giv, 0, ivsize - sizeof(u64)); + len = sizeof(u64); + } + seq = cpu_to_be64(req->seq); + memcpy(req->giv + ivsize - len, &seq, len); + return aead_perform(&req->areq, 1, req->areq.assoclen, + req->areq.cryptlen +ivsize, req->giv); +} + +static struct ixp_alg ixp4xx_algos[] = { +{ + .crypto = { + .cra_name = "cbc(des)", + .cra_blocksize = DES_BLOCK_SIZE, + .cra_u = { .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .geniv = "eseqiv", + } + } + }, + .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192, + .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192, + +}, { + .crypto = { + .cra_name = "ecb(des)", + .cra_blocksize = DES_BLOCK_SIZE, + .cra_u = { .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + } + } + }, + .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192, + .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192, +}, { + .crypto = { + .cra_name = "cbc(des3_ede)", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_u = { .ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .geniv = "eseqiv", + } + } + }, + .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192, + .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192, +}, { + .crypto = { + .cra_name = "ecb(des3_ede)", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_u = { .ablkcipher = { + .min_keysize = 
DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + } + } + }, + .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192, + .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192, +}, { + .crypto = { + .cra_name = "cbc(aes)", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_u = { .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .geniv = "eseqiv", + } + } + }, + .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC, + .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC, +}, { + .crypto = { + .cra_name = "ecb(aes)", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_u = { .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + } + } + }, + .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB, + .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB, +}, { + .crypto = { + .cra_name = "ctr(aes)", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_u = { .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .geniv = "eseqiv", + } + } + }, + .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR, + .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR, +}, { + .crypto = { + .cra_name = "rfc3686(ctr(aes))", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_u = { .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .geniv = "eseqiv", + .setkey = ablk_rfc3686_setkey, + .encrypt = ablk_rfc3686_crypt, + .decrypt = ablk_rfc3686_crypt } + } + }, + .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR, + .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR, +}, { + .crypto = { + .cra_name = "authenc(hmac(md5),cbc(des))", + .cra_blocksize = DES_BLOCK_SIZE, + .cra_u = { .aead = { + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + } + } + }, + .hash = &hash_alg_md5, + .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192, + .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192, +}, { + .crypto = { + .cra_name = "authenc(hmac(md5),cbc(des3_ede))", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_u = { .aead = { + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + } + } + }, + .hash = &hash_alg_md5, + .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192, + .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192, +}, { + .crypto = { + .cra_name = "authenc(hmac(sha1),cbc(des))", + .cra_blocksize = DES_BLOCK_SIZE, + .cra_u = { .aead = { + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + } + } + }, + .hash = &hash_alg_sha1, + .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192, + .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192, +}, { + .crypto = { + .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_u = { .aead = { + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + } + } + }, + .hash = &hash_alg_sha1, + .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192, + .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192, +}, { + .crypto = { + .cra_name = "authenc(hmac(md5),cbc(aes))", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_u = { .aead = { + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + } + } + }, + .hash = &hash_alg_md5, + .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC, + .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC, +}, { + .crypto = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_u = { .aead = { + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + } + } + }, + .hash = &hash_alg_sha1, + .cfg_enc = 
CIPH_ENCR | MOD_AES | MOD_CBC_ENC, + .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC, +} }; + +#define IXP_POSTFIX "-ixp4xx" + +static const struct platform_device_info ixp_dev_info __initdata = { + .name = DRIVER_NAME, + .id = 0, + .dma_mask = DMA_BIT_MASK(32), +}; + +static int __init ixp_module_init(void) +{ + int num = ARRAY_SIZE(ixp4xx_algos); + int i, err; + + pdev = platform_device_register_full(&ixp_dev_info); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + + spin_lock_init(&desc_lock); + spin_lock_init(&emerg_lock); + + err = init_ixp_crypto(&pdev->dev); + if (err) { + platform_device_unregister(pdev); + return err; + } + for (i=0; i< num; i++) { + struct crypto_alg *cra = &ixp4xx_algos[i].crypto; + + if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME, + "%s"IXP_POSTFIX, cra->cra_name) >= + CRYPTO_MAX_ALG_NAME) + { + continue; + } + if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) { + continue; + } + if (!ixp4xx_algos[i].hash) { + /* block ciphers */ + cra->cra_type = &crypto_ablkcipher_type; + cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC; + if (!cra->cra_ablkcipher.setkey) + cra->cra_ablkcipher.setkey = ablk_setkey; + if (!cra->cra_ablkcipher.encrypt) + cra->cra_ablkcipher.encrypt = ablk_encrypt; + if (!cra->cra_ablkcipher.decrypt) + cra->cra_ablkcipher.decrypt = ablk_decrypt; + cra->cra_init = init_tfm_ablk; + } else { + /* authenc */ + cra->cra_type = &crypto_aead_type; + cra->cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC; + cra->cra_aead.setkey = aead_setkey; + cra->cra_aead.setauthsize = aead_setauthsize; + cra->cra_aead.encrypt = aead_encrypt; + cra->cra_aead.decrypt = aead_decrypt; + cra->cra_aead.givencrypt = aead_givencrypt; + cra->cra_init = init_tfm_aead; + } + cra->cra_ctxsize = sizeof(struct ixp_ctx); + cra->cra_module = THIS_MODULE; + cra->cra_alignmask = 3; + cra->cra_priority = 300; + cra->cra_exit = exit_tfm; + if (crypto_register_alg(cra)) + printk(KERN_ERR "Failed to register '%s'\n", + cra->cra_name); + else + ixp4xx_algos[i].registered = 1; + } + return 0; +} + +static void __exit ixp_module_exit(void) +{ + int num = ARRAY_SIZE(ixp4xx_algos); + int i; + + for (i=0; i< num; i++) { + if (ixp4xx_algos[i].registered) + crypto_unregister_alg(&ixp4xx_algos[i].crypto); + } + release_ixp_crypto(&pdev->dev); + platform_device_unregister(pdev); +} + +module_init(ixp_module_init); +module_exit(ixp_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>"); +MODULE_DESCRIPTION("IXP4xx hardware crypto"); + diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c new file mode 100644 index 000000000..f91f15dde --- /dev/null +++ b/drivers/crypto/mv_cesa.c @@ -0,0 +1,1193 @@ +/* + * Support for Marvell's crypto engine which can be found on some Orion5X + * boards. 
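+ * (data is staged through the controller's on-chip SRAM; the layout used
+ * here is described by the memory maps in mv_cesa.h)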
+ * + * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > + * License: GPLv2 + * + */ +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <linux/crypto.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kthread.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/clk.h> +#include <crypto/internal/hash.h> +#include <crypto/sha.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/of_irq.h> + +#include "mv_cesa.h" + +#define MV_CESA "MV-CESA:" +#define MAX_HW_HASH_SIZE 0xFFFF +#define MV_CESA_EXPIRE 500 /* msec */ + +/* + * STM: + * /---------------------------------------\ + * | | request complete + * \./ | + * IDLE -> new request -> BUSY -> done -> DEQUEUE + * /°\ | + * | | more scatter entries + * \________________/ + */ +enum engine_status { + ENGINE_IDLE, + ENGINE_BUSY, + ENGINE_W_DEQUEUE, +}; + +/** + * struct req_progress - used for every crypt request + * @src_sg_it: sg iterator for src + * @dst_sg_it: sg iterator for dst + * @sg_src_left: bytes left in src to process (scatter list) + * @src_start: offset to add to src start position (scatter list) + * @crypt_len: length of current hw crypt/hash process + * @hw_nbytes: total bytes to process in hw for this request + * @copy_back: whether to copy data back (crypt) or not (hash) + * @sg_dst_left: bytes left dst to process in this scatter list + * @dst_start: offset to add to dst start position (scatter list) + * @hw_processed_bytes: number of bytes processed by hw (request). + * + * sg helper are used to iterate over the scatterlist. Since the size of the + * SRAM may be less than the scatter size, this struct struct is used to keep + * track of progress within current scatterlist. 
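+ * The @complete callback runs once the whole request has been processed;
+ * @process programs the engine for each chunk (is_first is nonzero only
+ * for the first chunk of a request).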
+ */ +struct req_progress { + struct sg_mapping_iter src_sg_it; + struct sg_mapping_iter dst_sg_it; + void (*complete) (void); + void (*process) (int is_first); + + /* src mostly */ + int sg_src_left; + int src_start; + int crypt_len; + int hw_nbytes; + /* dst mostly */ + int copy_back; + int sg_dst_left; + int dst_start; + int hw_processed_bytes; +}; + +struct crypto_priv { + void __iomem *reg; + void __iomem *sram; + int irq; + struct clk *clk; + struct task_struct *queue_th; + + /* the lock protects queue and eng_st */ + spinlock_t lock; + struct crypto_queue queue; + enum engine_status eng_st; + struct timer_list completion_timer; + struct crypto_async_request *cur_req; + struct req_progress p; + int max_req_size; + int sram_size; + int has_sha1; + int has_hmac_sha1; +}; + +static struct crypto_priv *cpg; + +struct mv_ctx { + u8 aes_enc_key[AES_KEY_LEN]; + u32 aes_dec_key[8]; + int key_len; + u32 need_calc_aes_dkey; +}; + +enum crypto_op { + COP_AES_ECB, + COP_AES_CBC, +}; + +struct mv_req_ctx { + enum crypto_op op; + int decrypt; +}; + +enum hash_op { + COP_SHA1, + COP_HMAC_SHA1 +}; + +struct mv_tfm_hash_ctx { + struct crypto_shash *fallback; + struct crypto_shash *base_hash; + u32 ivs[2 * SHA1_DIGEST_SIZE / 4]; + int count_add; + enum hash_op op; +}; + +struct mv_req_hash_ctx { + u64 count; + u32 state[SHA1_DIGEST_SIZE / 4]; + u8 buffer[SHA1_BLOCK_SIZE]; + int first_hash; /* marks that we don't have previous state */ + int last_chunk; /* marks that this is the 'final' request */ + int extra_bytes; /* unprocessed bytes in buffer */ + enum hash_op op; + int count_add; +}; + +static void mv_completion_timer_callback(unsigned long unused) +{ + int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0; + + printk(KERN_ERR MV_CESA + "completion timer expired (CESA %sactive), cleaning up.\n", + active ? 
"" : "in"); + + del_timer(&cpg->completion_timer); + writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD); + while(readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC) + printk(KERN_INFO MV_CESA "%s: waiting for engine finishing\n", __func__); + cpg->eng_st = ENGINE_W_DEQUEUE; + wake_up_process(cpg->queue_th); +} + +static void mv_setup_timer(void) +{ + setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0); + mod_timer(&cpg->completion_timer, + jiffies + msecs_to_jiffies(MV_CESA_EXPIRE)); +} + +static void compute_aes_dec_key(struct mv_ctx *ctx) +{ + struct crypto_aes_ctx gen_aes_key; + int key_pos; + + if (!ctx->need_calc_aes_dkey) + return; + + crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len); + + key_pos = ctx->key_len + 24; + memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4); + switch (ctx->key_len) { + case AES_KEYSIZE_256: + key_pos -= 2; + /* fall */ + case AES_KEYSIZE_192: + key_pos -= 2; + memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos], + 4 * 4); + break; + } + ctx->need_calc_aes_dkey = 0; +} + +static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct mv_ctx *ctx = crypto_tfm_ctx(tfm); + + switch (len) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_192: + case AES_KEYSIZE_256: + break; + default: + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + ctx->key_len = len; + ctx->need_calc_aes_dkey = 1; + + memcpy(ctx->aes_enc_key, key, AES_KEY_LEN); + return 0; +} + +static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len) +{ + int ret; + void *sbuf; + int copy_len; + + while (len) { + if (!p->sg_src_left) { + ret = sg_miter_next(&p->src_sg_it); + BUG_ON(!ret); + p->sg_src_left = p->src_sg_it.length; + p->src_start = 0; + } + + sbuf = p->src_sg_it.addr + p->src_start; + + copy_len = min(p->sg_src_left, len); + memcpy(dbuf, sbuf, copy_len); + + p->src_start += copy_len; + p->sg_src_left -= copy_len; + + len -= copy_len; + dbuf += copy_len; + } +} + +static void setup_data_in(void) +{ + struct req_progress *p = &cpg->p; + int data_in_sram = + min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size); + copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len, + data_in_sram - p->crypt_len); + p->crypt_len = data_in_sram; +} + +static void mv_process_current_q(int first_block) +{ + struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); + struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); + struct sec_accel_config op; + + switch (req_ctx->op) { + case COP_AES_ECB: + op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB; + break; + case COP_AES_CBC: + default: + op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC; + op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) | + ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF); + if (first_block) + memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16); + break; + } + if (req_ctx->decrypt) { + op.config |= CFG_DIR_DEC; + memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key, + AES_KEY_LEN); + } else { + op.config |= CFG_DIR_ENC; + memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key, + AES_KEY_LEN); + } + + switch (ctx->key_len) { + case AES_KEYSIZE_128: + op.config |= CFG_AES_LEN_128; + break; + case AES_KEYSIZE_192: + op.config |= CFG_AES_LEN_192; + break; + case AES_KEYSIZE_256: + op.config |= CFG_AES_LEN_256; + break; + } + op.enc_p = 
ENC_P_SRC(SRAM_DATA_IN_START) | + ENC_P_DST(SRAM_DATA_OUT_START); + op.enc_key_p = SRAM_DATA_KEY_P; + + setup_data_in(); + op.enc_len = cpg->p.crypt_len; + memcpy(cpg->sram + SRAM_CONFIG, &op, + sizeof(struct sec_accel_config)); + + /* GO */ + mv_setup_timer(); + writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); +} + +static void mv_crypto_algo_completion(void) +{ + struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); + struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); + + sg_miter_stop(&cpg->p.src_sg_it); + sg_miter_stop(&cpg->p.dst_sg_it); + + if (req_ctx->op != COP_AES_CBC) + return ; + + memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16); +} + +static void mv_process_hash_current(int first_block) +{ + struct ahash_request *req = ahash_request_cast(cpg->cur_req); + const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); + struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); + struct req_progress *p = &cpg->p; + struct sec_accel_config op = { 0 }; + int is_last; + + switch (req_ctx->op) { + case COP_SHA1: + default: + op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1; + break; + case COP_HMAC_SHA1: + op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1; + memcpy(cpg->sram + SRAM_HMAC_IV_IN, + tfm_ctx->ivs, sizeof(tfm_ctx->ivs)); + break; + } + + op.mac_src_p = + MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32) + req_ctx-> + count); + + setup_data_in(); + + op.mac_digest = + MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len); + op.mac_iv = + MAC_INNER_IV_P(SRAM_HMAC_IV_IN) | + MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT); + + is_last = req_ctx->last_chunk + && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes) + && (req_ctx->count <= MAX_HW_HASH_SIZE); + if (req_ctx->first_hash) { + if (is_last) + op.config |= CFG_NOT_FRAG; + else + op.config |= CFG_FIRST_FRAG; + + req_ctx->first_hash = 0; + } else { + if (is_last) + op.config |= CFG_LAST_FRAG; + else + op.config |= CFG_MID_FRAG; + + if (first_block) { + writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); + writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); + writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); + writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); + writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); + } + } + + memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); + + /* GO */ + mv_setup_timer(); + writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); +} + +static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx, + struct shash_desc *desc) +{ + int i; + struct sha1_state shash_state; + + shash_state.count = ctx->count + ctx->count_add; + for (i = 0; i < 5; i++) + shash_state.state[i] = ctx->state[i]; + memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer)); + return crypto_shash_import(desc, &shash_state); +} + +static int mv_hash_final_fallback(struct ahash_request *req) +{ + const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); + struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); + SHASH_DESC_ON_STACK(shash, tfm_ctx->fallback); + int rc; + + shash->tfm = tfm_ctx->fallback; + shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + if (unlikely(req_ctx->first_hash)) { + crypto_shash_init(shash); + crypto_shash_update(shash, req_ctx->buffer, + req_ctx->extra_bytes); + } else { + /* only SHA1 for now.... 
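+		 * the digest state saved from the hardware registers is turned
+		 * into a struct sha1_state and imported into the fallback shash,
+		 * which then produces the final digest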
+ */ + rc = mv_hash_import_sha1_ctx(req_ctx, shash); + if (rc) + goto out; + } + rc = crypto_shash_final(shash, req->result); +out: + return rc; +} + +static void mv_save_digest_state(struct mv_req_hash_ctx *ctx) +{ + ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A); + ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B); + ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C); + ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D); + ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E); +} + +static void mv_hash_algo_completion(void) +{ + struct ahash_request *req = ahash_request_cast(cpg->cur_req); + struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); + + if (ctx->extra_bytes) + copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes); + sg_miter_stop(&cpg->p.src_sg_it); + + if (likely(ctx->last_chunk)) { + if (likely(ctx->count <= MAX_HW_HASH_SIZE)) { + memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, + crypto_ahash_digestsize(crypto_ahash_reqtfm + (req))); + } else { + mv_save_digest_state(ctx); + mv_hash_final_fallback(req); + } + } else { + mv_save_digest_state(ctx); + } +} + +static void dequeue_complete_req(void) +{ + struct crypto_async_request *req = cpg->cur_req; + void *buf; + int ret; + cpg->p.hw_processed_bytes += cpg->p.crypt_len; + if (cpg->p.copy_back) { + int need_copy_len = cpg->p.crypt_len; + int sram_offset = 0; + do { + int dst_copy; + + if (!cpg->p.sg_dst_left) { + ret = sg_miter_next(&cpg->p.dst_sg_it); + BUG_ON(!ret); + cpg->p.sg_dst_left = cpg->p.dst_sg_it.length; + cpg->p.dst_start = 0; + } + + buf = cpg->p.dst_sg_it.addr; + buf += cpg->p.dst_start; + + dst_copy = min(need_copy_len, cpg->p.sg_dst_left); + + memcpy(buf, + cpg->sram + SRAM_DATA_OUT_START + sram_offset, + dst_copy); + sram_offset += dst_copy; + cpg->p.sg_dst_left -= dst_copy; + need_copy_len -= dst_copy; + cpg->p.dst_start += dst_copy; + } while (need_copy_len > 0); + } + + cpg->p.crypt_len = 0; + + BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE); + if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) { + /* process next scatter list entry */ + cpg->eng_st = ENGINE_BUSY; + cpg->p.process(0); + } else { + cpg->p.complete(); + cpg->eng_st = ENGINE_IDLE; + local_bh_disable(); + req->complete(req, 0); + local_bh_enable(); + } +} + +static int count_sgs(struct scatterlist *sl, unsigned int total_bytes) +{ + int i = 0; + size_t cur_len; + + while (sl) { + cur_len = sl[i].length; + ++i; + if (total_bytes > cur_len) + total_bytes -= cur_len; + else + break; + } + + return i; +} + +static void mv_start_new_crypt_req(struct ablkcipher_request *req) +{ + struct req_progress *p = &cpg->p; + int num_sgs; + + cpg->cur_req = &req->base; + memset(p, 0, sizeof(struct req_progress)); + p->hw_nbytes = req->nbytes; + p->complete = mv_crypto_algo_completion; + p->process = mv_process_current_q; + p->copy_back = 1; + + num_sgs = count_sgs(req->src, req->nbytes); + sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); + + num_sgs = count_sgs(req->dst, req->nbytes); + sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG); + + mv_process_current_q(1); +} + +static void mv_start_new_hash_req(struct ahash_request *req) +{ + struct req_progress *p = &cpg->p; + struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); + int num_sgs, hw_bytes, old_extra_bytes, rc; + cpg->cur_req = &req->base; + memset(p, 0, sizeof(struct req_progress)); + hw_bytes = req->nbytes + ctx->extra_bytes; + old_extra_bytes = ctx->extra_bytes; + + ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE; + if (ctx->extra_bytes != 0 + && 
(!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE)) + hw_bytes -= ctx->extra_bytes; + else + ctx->extra_bytes = 0; + + num_sgs = count_sgs(req->src, req->nbytes); + sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); + + if (hw_bytes) { + p->hw_nbytes = hw_bytes; + p->complete = mv_hash_algo_completion; + p->process = mv_process_hash_current; + + if (unlikely(old_extra_bytes)) { + memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer, + old_extra_bytes); + p->crypt_len = old_extra_bytes; + } + + mv_process_hash_current(1); + } else { + copy_src_to_buf(p, ctx->buffer + old_extra_bytes, + ctx->extra_bytes - old_extra_bytes); + sg_miter_stop(&p->src_sg_it); + if (ctx->last_chunk) + rc = mv_hash_final_fallback(req); + else + rc = 0; + cpg->eng_st = ENGINE_IDLE; + local_bh_disable(); + req->base.complete(&req->base, rc); + local_bh_enable(); + } +} + +static int queue_manag(void *data) +{ + cpg->eng_st = ENGINE_IDLE; + do { + struct crypto_async_request *async_req = NULL; + struct crypto_async_request *backlog; + + __set_current_state(TASK_INTERRUPTIBLE); + + if (cpg->eng_st == ENGINE_W_DEQUEUE) + dequeue_complete_req(); + + spin_lock_irq(&cpg->lock); + if (cpg->eng_st == ENGINE_IDLE) { + backlog = crypto_get_backlog(&cpg->queue); + async_req = crypto_dequeue_request(&cpg->queue); + if (async_req) { + BUG_ON(cpg->eng_st != ENGINE_IDLE); + cpg->eng_st = ENGINE_BUSY; + } + } + spin_unlock_irq(&cpg->lock); + + if (backlog) { + backlog->complete(backlog, -EINPROGRESS); + backlog = NULL; + } + + if (async_req) { + if (crypto_tfm_alg_type(async_req->tfm) != + CRYPTO_ALG_TYPE_AHASH) { + struct ablkcipher_request *req = + ablkcipher_request_cast(async_req); + mv_start_new_crypt_req(req); + } else { + struct ahash_request *req = + ahash_request_cast(async_req); + mv_start_new_hash_req(req); + } + async_req = NULL; + } + + schedule(); + + } while (!kthread_should_stop()); + return 0; +} + +static int mv_handle_req(struct crypto_async_request *req) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&cpg->lock, flags); + ret = crypto_enqueue_request(&cpg->queue, req); + spin_unlock_irqrestore(&cpg->lock, flags); + wake_up_process(cpg->queue_th); + return ret; +} + +static int mv_enc_aes_ecb(struct ablkcipher_request *req) +{ + struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); + + req_ctx->op = COP_AES_ECB; + req_ctx->decrypt = 0; + + return mv_handle_req(&req->base); +} + +static int mv_dec_aes_ecb(struct ablkcipher_request *req) +{ + struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); + + req_ctx->op = COP_AES_ECB; + req_ctx->decrypt = 1; + + compute_aes_dec_key(ctx); + return mv_handle_req(&req->base); +} + +static int mv_enc_aes_cbc(struct ablkcipher_request *req) +{ + struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); + + req_ctx->op = COP_AES_CBC; + req_ctx->decrypt = 0; + + return mv_handle_req(&req->base); +} + +static int mv_dec_aes_cbc(struct ablkcipher_request *req) +{ + struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); + + req_ctx->op = COP_AES_CBC; + req_ctx->decrypt = 1; + + compute_aes_dec_key(ctx); + return mv_handle_req(&req->base); +} + +static int mv_cra_init(struct crypto_tfm *tfm) +{ + tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx); + return 0; +} + +static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op, + int is_last, unsigned int req_len, + int count_add) +{ + memset(ctx, 0, sizeof(*ctx)); + ctx->op = op; 
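+	/* count tracks payload bytes only; count_add (one block for HMAC) is
+	 * folded in when the state is exported to the software fallback
+	 */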
+ ctx->count = req_len; + ctx->first_hash = 1; + ctx->last_chunk = is_last; + ctx->count_add = count_add; +} + +static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last, + unsigned req_len) +{ + ctx->last_chunk = is_last; + ctx->count += req_len; +} + +static int mv_hash_init(struct ahash_request *req) +{ + const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); + mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0, + tfm_ctx->count_add); + return 0; +} + +static int mv_hash_update(struct ahash_request *req) +{ + if (!req->nbytes) + return 0; + + mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes); + return mv_handle_req(&req->base); +} + +static int mv_hash_final(struct ahash_request *req) +{ + struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); + + ahash_request_set_crypt(req, NULL, req->result, 0); + mv_update_hash_req_ctx(ctx, 1, 0); + return mv_handle_req(&req->base); +} + +static int mv_hash_finup(struct ahash_request *req) +{ + mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes); + return mv_handle_req(&req->base); +} + +static int mv_hash_digest(struct ahash_request *req) +{ + const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); + mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1, + req->nbytes, tfm_ctx->count_add); + return mv_handle_req(&req->base); +} + +static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate, + const void *ostate) +{ + const struct sha1_state *isha1_state = istate, *osha1_state = ostate; + int i; + for (i = 0; i < 5; i++) { + ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]); + ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]); + } +} + +static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key, + unsigned int keylen) +{ + int rc; + struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base); + int bs, ds, ss; + + if (!ctx->base_hash) + return 0; + + rc = crypto_shash_setkey(ctx->fallback, key, keylen); + if (rc) + return rc; + + /* Can't see a way to extract the ipad/opad from the fallback tfm + so I'm basically copying code from the hmac module */ + bs = crypto_shash_blocksize(ctx->base_hash); + ds = crypto_shash_digestsize(ctx->base_hash); + ss = crypto_shash_statesize(ctx->base_hash); + + { + SHASH_DESC_ON_STACK(shash, ctx->base_hash); + + unsigned int i; + char ipad[ss]; + char opad[ss]; + + shash->tfm = ctx->base_hash; + shash->flags = crypto_shash_get_flags(ctx->base_hash) & + CRYPTO_TFM_REQ_MAY_SLEEP; + + if (keylen > bs) { + int err; + + err = + crypto_shash_digest(shash, key, keylen, ipad); + if (err) + return err; + + keylen = ds; + } else + memcpy(ipad, key, keylen); + + memset(ipad + keylen, 0, bs - keylen); + memcpy(opad, ipad, bs); + + for (i = 0; i < bs; i++) { + ipad[i] ^= 0x36; + opad[i] ^= 0x5c; + } + + rc = crypto_shash_init(shash) ? : + crypto_shash_update(shash, ipad, bs) ? : + crypto_shash_export(shash, ipad) ? : + crypto_shash_init(shash) ? : + crypto_shash_update(shash, opad, bs) ? 
: + crypto_shash_export(shash, opad); + + if (rc == 0) + mv_hash_init_ivs(ctx, ipad, opad); + + return rc; + } +} + +static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name, + enum hash_op op, int count_add) +{ + const char *fallback_driver_name = crypto_tfm_alg_name(tfm); + struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_shash *fallback_tfm = NULL; + struct crypto_shash *base_hash = NULL; + int err = -ENOMEM; + + ctx->op = op; + ctx->count_add = count_add; + + /* Allocate a fallback and abort if it failed. */ + fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback_tfm)) { + printk(KERN_WARNING MV_CESA + "Fallback driver '%s' could not be loaded!\n", + fallback_driver_name); + err = PTR_ERR(fallback_tfm); + goto out; + } + ctx->fallback = fallback_tfm; + + if (base_hash_name) { + /* Allocate a hash to compute the ipad/opad of hmac. */ + base_hash = crypto_alloc_shash(base_hash_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(base_hash)) { + printk(KERN_WARNING MV_CESA + "Base driver '%s' could not be loaded!\n", + base_hash_name); + err = PTR_ERR(base_hash); + goto err_bad_base; + } + } + ctx->base_hash = base_hash; + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct mv_req_hash_ctx) + + crypto_shash_descsize(ctx->fallback)); + return 0; +err_bad_base: + crypto_free_shash(fallback_tfm); +out: + return err; +} + +static void mv_cra_hash_exit(struct crypto_tfm *tfm) +{ + struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_shash(ctx->fallback); + if (ctx->base_hash) + crypto_free_shash(ctx->base_hash); +} + +static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm) +{ + return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0); +} + +static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm) +{ + return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE); +} + +static irqreturn_t crypto_int(int irq, void *priv) +{ + u32 val; + + val = readl(cpg->reg + SEC_ACCEL_INT_STATUS); + if (!(val & SEC_INT_ACCEL0_DONE)) + return IRQ_NONE; + + if (!del_timer(&cpg->completion_timer)) { + printk(KERN_WARNING MV_CESA + "got an interrupt but no pending timer?\n"); + } + val &= ~SEC_INT_ACCEL0_DONE; + writel(val, cpg->reg + FPGA_INT_STATUS); + writel(val, cpg->reg + SEC_ACCEL_INT_STATUS); + BUG_ON(cpg->eng_st != ENGINE_BUSY); + cpg->eng_st = ENGINE_W_DEQUEUE; + wake_up_process(cpg->queue_th); + return IRQ_HANDLED; +} + +static struct crypto_alg mv_aes_alg_ecb = { + .cra_name = "ecb(aes)", + .cra_driver_name = "mv-ecb-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_blocksize = 16, + .cra_ctxsize = sizeof(struct mv_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = mv_cra_init, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = mv_setkey_aes, + .encrypt = mv_enc_aes_ecb, + .decrypt = mv_dec_aes_ecb, + }, + }, +}; + +static struct crypto_alg mv_aes_alg_cbc = { + .cra_name = "cbc(aes)", + .cra_driver_name = "mv-cbc-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = mv_cra_init, + .cra_u = { + .ablkcipher = { + .ivsize = 
AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = mv_setkey_aes, + .encrypt = mv_enc_aes_cbc, + .decrypt = mv_dec_aes_cbc, + }, + }, +}; + +static struct ahash_alg mv_sha1_alg = { + .init = mv_hash_init, + .update = mv_hash_update, + .final = mv_hash_final, + .finup = mv_hash_finup, + .digest = mv_hash_digest, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "sha1", + .cra_driver_name = "mv-sha1", + .cra_priority = 300, + .cra_flags = + CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx), + .cra_init = mv_cra_hash_sha1_init, + .cra_exit = mv_cra_hash_exit, + .cra_module = THIS_MODULE, + } + } +}; + +static struct ahash_alg mv_hmac_sha1_alg = { + .init = mv_hash_init, + .update = mv_hash_update, + .final = mv_hash_final, + .finup = mv_hash_finup, + .digest = mv_hash_digest, + .setkey = mv_hash_setkey, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "mv-hmac-sha1", + .cra_priority = 300, + .cra_flags = + CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx), + .cra_init = mv_cra_hash_hmac_sha1_init, + .cra_exit = mv_cra_hash_exit, + .cra_module = THIS_MODULE, + } + } +}; + +static int mv_probe(struct platform_device *pdev) +{ + struct crypto_priv *cp; + struct resource *res; + int irq; + int ret; + + if (cpg) { + printk(KERN_ERR MV_CESA "Second crypto dev?\n"); + return -EEXIST; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); + if (!res) + return -ENXIO; + + cp = kzalloc(sizeof(*cp), GFP_KERNEL); + if (!cp) + return -ENOMEM; + + spin_lock_init(&cp->lock); + crypto_init_queue(&cp->queue, 50); + cp->reg = ioremap(res->start, resource_size(res)); + if (!cp->reg) { + ret = -ENOMEM; + goto err; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram"); + if (!res) { + ret = -ENXIO; + goto err_unmap_reg; + } + cp->sram_size = resource_size(res); + cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; + cp->sram = ioremap(res->start, cp->sram_size); + if (!cp->sram) { + ret = -ENOMEM; + goto err_unmap_reg; + } + + if (pdev->dev.of_node) + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + else + irq = platform_get_irq(pdev, 0); + if (irq < 0 || irq == NO_IRQ) { + ret = irq; + goto err_unmap_sram; + } + cp->irq = irq; + + platform_set_drvdata(pdev, cp); + cpg = cp; + + cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto"); + if (IS_ERR(cp->queue_th)) { + ret = PTR_ERR(cp->queue_th); + goto err_unmap_sram; + } + + ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev), + cp); + if (ret) + goto err_thread; + + /* Not all platforms can gate the clock, so it is not + an error if the clock does not exists. 
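+	   (clk_get() may then return an error pointer; the IS_ERR() checks
+	   below and in mv_remove() simply skip clock handling in that case)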
*/ + cp->clk = clk_get(&pdev->dev, NULL); + if (!IS_ERR(cp->clk)) + clk_prepare_enable(cp->clk); + + writel(0, cpg->reg + SEC_ACCEL_INT_STATUS); + writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); + writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); + writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0); + + ret = crypto_register_alg(&mv_aes_alg_ecb); + if (ret) { + printk(KERN_WARNING MV_CESA + "Could not register aes-ecb driver\n"); + goto err_irq; + } + + ret = crypto_register_alg(&mv_aes_alg_cbc); + if (ret) { + printk(KERN_WARNING MV_CESA + "Could not register aes-cbc driver\n"); + goto err_unreg_ecb; + } + + ret = crypto_register_ahash(&mv_sha1_alg); + if (ret == 0) + cpg->has_sha1 = 1; + else + printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n"); + + ret = crypto_register_ahash(&mv_hmac_sha1_alg); + if (ret == 0) { + cpg->has_hmac_sha1 = 1; + } else { + printk(KERN_WARNING MV_CESA + "Could not register hmac-sha1 driver\n"); + } + + return 0; +err_unreg_ecb: + crypto_unregister_alg(&mv_aes_alg_ecb); +err_irq: + free_irq(irq, cp); + if (!IS_ERR(cp->clk)) { + clk_disable_unprepare(cp->clk); + clk_put(cp->clk); + } +err_thread: + kthread_stop(cp->queue_th); +err_unmap_sram: + iounmap(cp->sram); +err_unmap_reg: + iounmap(cp->reg); +err: + kfree(cp); + cpg = NULL; + return ret; +} + +static int mv_remove(struct platform_device *pdev) +{ + struct crypto_priv *cp = platform_get_drvdata(pdev); + + crypto_unregister_alg(&mv_aes_alg_ecb); + crypto_unregister_alg(&mv_aes_alg_cbc); + if (cp->has_sha1) + crypto_unregister_ahash(&mv_sha1_alg); + if (cp->has_hmac_sha1) + crypto_unregister_ahash(&mv_hmac_sha1_alg); + kthread_stop(cp->queue_th); + free_irq(cp->irq, cp); + memset(cp->sram, 0, cp->sram_size); + iounmap(cp->sram); + iounmap(cp->reg); + + if (!IS_ERR(cp->clk)) { + clk_disable_unprepare(cp->clk); + clk_put(cp->clk); + } + + kfree(cp); + cpg = NULL; + return 0; +} + +static const struct of_device_id mv_cesa_of_match_table[] = { + { .compatible = "marvell,orion-crypto", }, + {} +}; +MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table); + +static struct platform_driver marvell_crypto = { + .probe = mv_probe, + .remove = mv_remove, + .driver = { + .name = "mv_crypto", + .of_match_table = mv_cesa_of_match_table, + }, +}; +MODULE_ALIAS("platform:mv_crypto"); + +module_platform_driver(marvell_crypto); + +MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>"); +MODULE_DESCRIPTION("Support for Marvell's cryptographic engine"); +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h new file mode 100644 index 000000000..9249d3ed1 --- /dev/null +++ b/drivers/crypto/mv_cesa.h @@ -0,0 +1,150 @@ +#ifndef __MV_CRYPTO_H__ +#define __MV_CRYPTO_H__ + +#define DIGEST_INITIAL_VAL_A 0xdd00 +#define DIGEST_INITIAL_VAL_B 0xdd04 +#define DIGEST_INITIAL_VAL_C 0xdd08 +#define DIGEST_INITIAL_VAL_D 0xdd0c +#define DIGEST_INITIAL_VAL_E 0xdd10 +#define DES_CMD_REG 0xdd58 + +#define SEC_ACCEL_CMD 0xde00 +#define SEC_CMD_EN_SEC_ACCL0 (1 << 0) +#define SEC_CMD_EN_SEC_ACCL1 (1 << 1) +#define SEC_CMD_DISABLE_SEC (1 << 2) + +#define SEC_ACCEL_DESC_P0 0xde04 +#define SEC_DESC_P0_PTR(x) (x) + +#define SEC_ACCEL_DESC_P1 0xde14 +#define SEC_DESC_P1_PTR(x) (x) + +#define SEC_ACCEL_CFG 0xde08 +#define SEC_CFG_STOP_DIG_ERR (1 << 0) +#define SEC_CFG_CH0_W_IDMA (1 << 7) +#define SEC_CFG_CH1_W_IDMA (1 << 8) +#define SEC_CFG_ACT_CH0_IDMA (1 << 9) +#define SEC_CFG_ACT_CH1_IDMA (1 << 10) + +#define SEC_ACCEL_STATUS 0xde0c +#define SEC_ST_ACT_0 (1 << 0) +#define 
SEC_ST_ACT_1 (1 << 1) + +/* + * FPGA_INT_STATUS looks like a FPGA leftover and is documented only in Errata + * 4.12. It looks like that it was part of an IRQ-controller in FPGA and + * someone forgot to remove it while switching to the core and moving to + * SEC_ACCEL_INT_STATUS. + */ +#define FPGA_INT_STATUS 0xdd68 +#define SEC_ACCEL_INT_STATUS 0xde20 +#define SEC_INT_AUTH_DONE (1 << 0) +#define SEC_INT_DES_E_DONE (1 << 1) +#define SEC_INT_AES_E_DONE (1 << 2) +#define SEC_INT_AES_D_DONE (1 << 3) +#define SEC_INT_ENC_DONE (1 << 4) +#define SEC_INT_ACCEL0_DONE (1 << 5) +#define SEC_INT_ACCEL1_DONE (1 << 6) +#define SEC_INT_ACC0_IDMA_DONE (1 << 7) +#define SEC_INT_ACC1_IDMA_DONE (1 << 8) + +#define SEC_ACCEL_INT_MASK 0xde24 + +#define AES_KEY_LEN (8 * 4) + +struct sec_accel_config { + + u32 config; +#define CFG_OP_MAC_ONLY 0 +#define CFG_OP_CRYPT_ONLY 1 +#define CFG_OP_MAC_CRYPT 2 +#define CFG_OP_CRYPT_MAC 3 +#define CFG_MACM_MD5 (4 << 4) +#define CFG_MACM_SHA1 (5 << 4) +#define CFG_MACM_HMAC_MD5 (6 << 4) +#define CFG_MACM_HMAC_SHA1 (7 << 4) +#define CFG_ENCM_DES (1 << 8) +#define CFG_ENCM_3DES (2 << 8) +#define CFG_ENCM_AES (3 << 8) +#define CFG_DIR_ENC (0 << 12) +#define CFG_DIR_DEC (1 << 12) +#define CFG_ENC_MODE_ECB (0 << 16) +#define CFG_ENC_MODE_CBC (1 << 16) +#define CFG_3DES_EEE (0 << 20) +#define CFG_3DES_EDE (1 << 20) +#define CFG_AES_LEN_128 (0 << 24) +#define CFG_AES_LEN_192 (1 << 24) +#define CFG_AES_LEN_256 (2 << 24) +#define CFG_NOT_FRAG (0 << 30) +#define CFG_FIRST_FRAG (1 << 30) +#define CFG_LAST_FRAG (2 << 30) +#define CFG_MID_FRAG (3 << 30) + + u32 enc_p; +#define ENC_P_SRC(x) (x) +#define ENC_P_DST(x) ((x) << 16) + + u32 enc_len; +#define ENC_LEN(x) (x) + + u32 enc_key_p; +#define ENC_KEY_P(x) (x) + + u32 enc_iv; +#define ENC_IV_POINT(x) ((x) << 0) +#define ENC_IV_BUF_POINT(x) ((x) << 16) + + u32 mac_src_p; +#define MAC_SRC_DATA_P(x) (x) +#define MAC_SRC_TOTAL_LEN(x) ((x) << 16) + + u32 mac_digest; +#define MAC_DIGEST_P(x) (x) +#define MAC_FRAG_LEN(x) ((x) << 16) + u32 mac_iv; +#define MAC_INNER_IV_P(x) (x) +#define MAC_OUTER_IV_P(x) ((x) << 16) +}__attribute__ ((packed)); + /* + * /-----------\ 0 + * | ACCEL CFG | 4 * 8 + * |-----------| 0x20 + * | CRYPT KEY | 8 * 4 + * |-----------| 0x40 + * | IV IN | 4 * 4 + * |-----------| 0x40 (inplace) + * | IV BUF | 4 * 4 + * |-----------| 0x80 + * | DATA IN | 16 * x (max ->max_req_size) + * |-----------| 0x80 (inplace operation) + * | DATA OUT | 16 * x (max ->max_req_size) + * \-----------/ SRAM size + */ + + /* Hashing memory map: + * /-----------\ 0 + * | ACCEL CFG | 4 * 8 + * |-----------| 0x20 + * | Inner IV | 5 * 4 + * |-----------| 0x34 + * | Outer IV | 5 * 4 + * |-----------| 0x48 + * | Output BUF| 5 * 4 + * |-----------| 0x80 + * | DATA IN | 64 * x (max ->max_req_size) + * \-----------/ SRAM size + */ +#define SRAM_CONFIG 0x00 +#define SRAM_DATA_KEY_P 0x20 +#define SRAM_DATA_IV 0x40 +#define SRAM_DATA_IV_BUF 0x40 +#define SRAM_DATA_IN_START 0x80 +#define SRAM_DATA_OUT_START 0x80 + +#define SRAM_HMAC_IV_IN 0x20 +#define SRAM_HMAC_IV_OUT 0x34 +#define SRAM_DIGEST_BUF 0x48 + +#define SRAM_CFG_SPACE 0x80 + +#endif diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c new file mode 100644 index 000000000..59ed54e46 --- /dev/null +++ b/drivers/crypto/mxs-dcp.c @@ -0,0 +1,1102 @@ +/* + * Freescale i.MX23/i.MX28 Data Co-Processor driver + * + * Copyright (C) 2013 Marek Vasut <marex@denx.de> + * + * The code contained herein is licensed under the GNU General Public + * License. 
You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#include <linux/crypto.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/stmp_device.h> + +#include <crypto/aes.h> +#include <crypto/sha.h> +#include <crypto/internal/hash.h> + +#define DCP_MAX_CHANS 4 +#define DCP_BUF_SZ PAGE_SIZE + +#define DCP_ALIGNMENT 64 + +/* DCP DMA descriptor. */ +struct dcp_dma_desc { + uint32_t next_cmd_addr; + uint32_t control0; + uint32_t control1; + uint32_t source; + uint32_t destination; + uint32_t size; + uint32_t payload; + uint32_t status; +}; + +/* Coherent aligned block for bounce buffering. */ +struct dcp_coherent_block { + uint8_t aes_in_buf[DCP_BUF_SZ]; + uint8_t aes_out_buf[DCP_BUF_SZ]; + uint8_t sha_in_buf[DCP_BUF_SZ]; + + uint8_t aes_key[2 * AES_KEYSIZE_128]; + + struct dcp_dma_desc desc[DCP_MAX_CHANS]; +}; + +struct dcp { + struct device *dev; + void __iomem *base; + + uint32_t caps; + + struct dcp_coherent_block *coh; + + struct completion completion[DCP_MAX_CHANS]; + struct mutex mutex[DCP_MAX_CHANS]; + struct task_struct *thread[DCP_MAX_CHANS]; + struct crypto_queue queue[DCP_MAX_CHANS]; +}; + +enum dcp_chan { + DCP_CHAN_HASH_SHA = 0, + DCP_CHAN_CRYPTO = 2, +}; + +struct dcp_async_ctx { + /* Common context */ + enum dcp_chan chan; + uint32_t fill; + + /* SHA Hash-specific context */ + struct mutex mutex; + uint32_t alg; + unsigned int hot:1; + + /* Crypto-specific context */ + struct crypto_ablkcipher *fallback; + unsigned int key_len; + uint8_t key[AES_KEYSIZE_128]; +}; + +struct dcp_aes_req_ctx { + unsigned int enc:1; + unsigned int ecb:1; +}; + +struct dcp_sha_req_ctx { + unsigned int init:1; + unsigned int fini:1; +}; + +/* + * There can even be only one instance of the MXS DCP due to the + * design of Linux Crypto API. + */ +static struct dcp *global_sdcp; + +/* DCP register layout. */ +#define MXS_DCP_CTRL 0x00 +#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES (1 << 23) +#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING (1 << 22) + +#define MXS_DCP_STAT 0x10 +#define MXS_DCP_STAT_CLR 0x18 +#define MXS_DCP_STAT_IRQ_MASK 0xf + +#define MXS_DCP_CHANNELCTRL 0x20 +#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK 0xff + +#define MXS_DCP_CAPABILITY1 0x40 +#define MXS_DCP_CAPABILITY1_SHA256 (4 << 16) +#define MXS_DCP_CAPABILITY1_SHA1 (1 << 16) +#define MXS_DCP_CAPABILITY1_AES128 (1 << 0) + +#define MXS_DCP_CONTEXT 0x50 + +#define MXS_DCP_CH_N_CMDPTR(n) (0x100 + ((n) * 0x40)) + +#define MXS_DCP_CH_N_SEMA(n) (0x110 + ((n) * 0x40)) + +#define MXS_DCP_CH_N_STAT(n) (0x120 + ((n) * 0x40)) +#define MXS_DCP_CH_N_STAT_CLR(n) (0x128 + ((n) * 0x40)) + +/* DMA descriptor bits. 
*/ +#define MXS_DCP_CONTROL0_HASH_TERM (1 << 13) +#define MXS_DCP_CONTROL0_HASH_INIT (1 << 12) +#define MXS_DCP_CONTROL0_PAYLOAD_KEY (1 << 11) +#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT (1 << 8) +#define MXS_DCP_CONTROL0_CIPHER_INIT (1 << 9) +#define MXS_DCP_CONTROL0_ENABLE_HASH (1 << 6) +#define MXS_DCP_CONTROL0_ENABLE_CIPHER (1 << 5) +#define MXS_DCP_CONTROL0_DECR_SEMAPHORE (1 << 1) +#define MXS_DCP_CONTROL0_INTERRUPT (1 << 0) + +#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256 (2 << 16) +#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1 (0 << 16) +#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC (1 << 4) +#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB (0 << 4) +#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 (0 << 0) + +static int mxs_dcp_start_dma(struct dcp_async_ctx *actx) +{ + struct dcp *sdcp = global_sdcp; + const int chan = actx->chan; + uint32_t stat; + unsigned long ret; + struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; + + dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc), + DMA_TO_DEVICE); + + reinit_completion(&sdcp->completion[chan]); + + /* Clear status register. */ + writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan)); + + /* Load the DMA descriptor. */ + writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan)); + + /* Increment the semaphore to start the DMA transfer. */ + writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan)); + + ret = wait_for_completion_timeout(&sdcp->completion[chan], + msecs_to_jiffies(1000)); + if (!ret) { + dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n", + chan, readl(sdcp->base + MXS_DCP_STAT)); + return -ETIMEDOUT; + } + + stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan)); + if (stat & 0xff) { + dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n", + chan, stat); + return -EINVAL; + } + + dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE); + + return 0; +} + +/* + * Encryption (AES128) + */ +static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, + struct ablkcipher_request *req, int init) +{ + struct dcp *sdcp = global_sdcp; + struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; + struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); + int ret; + + dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key, + 2 * AES_KEYSIZE_128, + DMA_TO_DEVICE); + dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf, + DCP_BUF_SZ, DMA_TO_DEVICE); + dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf, + DCP_BUF_SZ, DMA_FROM_DEVICE); + + /* Fill in the DMA descriptor. */ + desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE | + MXS_DCP_CONTROL0_INTERRUPT | + MXS_DCP_CONTROL0_ENABLE_CIPHER; + + /* Payload contains the key. 
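(For CBC the payload carries the IV as well: mxs_dcp_aes_block_crypt(), further down, lays out the 32-byte sdcp->coh->aes_key buffer as bytes 0..15 = AES-128 key copied from actx->key and bytes 16..31 = CBC IV copied from req->info, zeroed for ECB, which is why key_phys above is mapped with a length of 2 * AES_KEYSIZE_128.)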
*/ + desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY; + + if (rctx->enc) + desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT; + if (init) + desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT; + + desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128; + + if (rctx->ecb) + desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB; + else + desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC; + + desc->next_cmd_addr = 0; + desc->source = src_phys; + desc->destination = dst_phys; + desc->size = actx->fill; + desc->payload = key_phys; + desc->status = 0; + + ret = mxs_dcp_start_dma(actx); + + dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128, + DMA_TO_DEVICE); + dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE); + dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE); + + return ret; +} + +static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) +{ + struct dcp *sdcp = global_sdcp; + + struct ablkcipher_request *req = ablkcipher_request_cast(arq); + struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm); + struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); + + struct scatterlist *dst = req->dst; + struct scatterlist *src = req->src; + const int nents = sg_nents(req->src); + + const int out_off = DCP_BUF_SZ; + uint8_t *in_buf = sdcp->coh->aes_in_buf; + uint8_t *out_buf = sdcp->coh->aes_out_buf; + + uint8_t *out_tmp, *src_buf, *dst_buf = NULL; + uint32_t dst_off = 0; + + uint8_t *key = sdcp->coh->aes_key; + + int ret = 0; + int split = 0; + unsigned int i, len, clen, rem = 0; + int init = 0; + + actx->fill = 0; + + /* Copy the key from the temporary location. */ + memcpy(key, actx->key, actx->key_len); + + if (!rctx->ecb) { + /* Copy the CBC IV just past the key. */ + memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128); + /* CBC needs the INIT set. */ + init = 1; + } else { + memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128); + } + + for_each_sg(req->src, src, nents, i) { + src_buf = sg_virt(src); + len = sg_dma_len(src); + + do { + if (actx->fill + len > out_off) + clen = out_off - actx->fill; + else + clen = len; + + memcpy(in_buf + actx->fill, src_buf, clen); + len -= clen; + src_buf += clen; + actx->fill += clen; + + /* + * If we filled the buffer or this is the last SG, + * submit the buffer. 
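As a worked example of the bouncing, assuming 4 KiB pages (DCP_BUF_SZ == PAGE_SIZE): a 10 000-byte CBC request is copied through aes_in_buf and issued as three hardware operations of 4096, 4096 and 1808 bytes. Only the first operation carries MXS_DCP_CONTROL0_CIPHER_INIT (init is cleared right after the first mxs_dcp_run_aes() call), so the engine is relied upon to carry the CBC chaining value forward across the later operations instead of reloading the IV.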
+ */ + if (actx->fill == out_off || sg_is_last(src)) { + ret = mxs_dcp_run_aes(actx, req, init); + if (ret) + return ret; + init = 0; + + out_tmp = out_buf; + while (dst && actx->fill) { + if (!split) { + dst_buf = sg_virt(dst); + dst_off = 0; + } + rem = min(sg_dma_len(dst) - dst_off, + actx->fill); + + memcpy(dst_buf + dst_off, out_tmp, rem); + out_tmp += rem; + dst_off += rem; + actx->fill -= rem; + + if (dst_off == sg_dma_len(dst)) { + dst = sg_next(dst); + split = 0; + } else { + split = 1; + } + } + } + } while (len); + } + + return ret; +} + +static int dcp_chan_thread_aes(void *data) +{ + struct dcp *sdcp = global_sdcp; + const int chan = DCP_CHAN_CRYPTO; + + struct crypto_async_request *backlog; + struct crypto_async_request *arq; + + int ret; + + do { + __set_current_state(TASK_INTERRUPTIBLE); + + mutex_lock(&sdcp->mutex[chan]); + backlog = crypto_get_backlog(&sdcp->queue[chan]); + arq = crypto_dequeue_request(&sdcp->queue[chan]); + mutex_unlock(&sdcp->mutex[chan]); + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + + if (arq) { + ret = mxs_dcp_aes_block_crypt(arq); + arq->complete(arq, ret); + continue; + } + + schedule(); + } while (!kthread_should_stop()); + + return 0; +} + +static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc) +{ + struct crypto_tfm *tfm = + crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); + struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx( + crypto_ablkcipher_reqtfm(req)); + int ret; + + ablkcipher_request_set_tfm(req, ctx->fallback); + + if (enc) + ret = crypto_ablkcipher_encrypt(req); + else + ret = crypto_ablkcipher_decrypt(req); + + ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); + + return ret; +} + +static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb) +{ + struct dcp *sdcp = global_sdcp; + struct crypto_async_request *arq = &req->base; + struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm); + struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); + int ret; + + if (unlikely(actx->key_len != AES_KEYSIZE_128)) + return mxs_dcp_block_fallback(req, enc); + + rctx->enc = enc; + rctx->ecb = ecb; + actx->chan = DCP_CHAN_CRYPTO; + + mutex_lock(&sdcp->mutex[actx->chan]); + ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base); + mutex_unlock(&sdcp->mutex[actx->chan]); + + wake_up_process(sdcp->thread[actx->chan]); + + return -EINPROGRESS; +} + +static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req) +{ + return mxs_dcp_aes_enqueue(req, 0, 1); +} + +static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req) +{ + return mxs_dcp_aes_enqueue(req, 1, 1); +} + +static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req) +{ + return mxs_dcp_aes_enqueue(req, 0, 0); +} + +static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req) +{ + return mxs_dcp_aes_enqueue(req, 1, 0); +} + +static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int len) +{ + struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm); + unsigned int ret; + + /* + * AES 128 is supposed by the hardware, store key into temporary + * buffer and exit. We must use the temporary buffer here, since + * there can still be an operation in progress. + */ + actx->key_len = len; + if (len == AES_KEYSIZE_128) { + memcpy(actx->key, key, len); + return 0; + } + + /* Check if the key size is supported by kernel at all. 
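Summarising the three outcomes of this setkey:

    len == AES_KEYSIZE_128              handled by the DCP itself, key cached in actx->key
    len == AES_KEYSIZE_192 or _256      handed to actx->fallback, which
                                        mxs_dcp_aes_fallback_init() allocated with a mask
                                        that excludes CRYPTO_ALG_NEED_FALLBACK
                                        implementations, i.e. a software cipher
    any other length                    rejected with -EINVAL and
                                        CRYPTO_TFM_RES_BAD_KEY_LEN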
*/ + if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) { + tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + /* + * If the requested AES key size is not supported by the hardware, + * but is supported by in-kernel software implementation, we use + * software fallback. + */ + actx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; + actx->fallback->base.crt_flags |= + tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK; + + ret = crypto_ablkcipher_setkey(actx->fallback, key, len); + if (!ret) + return 0; + + tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK; + tfm->base.crt_flags |= + actx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK; + + return ret; +} + +static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm) +{ + const char *name = crypto_tfm_alg_name(tfm); + const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK; + struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm); + struct crypto_ablkcipher *blk; + + blk = crypto_alloc_ablkcipher(name, 0, flags); + if (IS_ERR(blk)) + return PTR_ERR(blk); + + actx->fallback = blk; + tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx); + return 0; +} + +static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm) +{ + struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm); + + crypto_free_ablkcipher(actx->fallback); + actx->fallback = NULL; +} + +/* + * Hashing (SHA1/SHA256) + */ +static int mxs_dcp_run_sha(struct ahash_request *req) +{ + struct dcp *sdcp = global_sdcp; + int ret; + + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); + struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); + struct hash_alg_common *halg = crypto_hash_alg_common(tfm); + + struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; + + dma_addr_t digest_phys = 0; + dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf, + DCP_BUF_SZ, DMA_TO_DEVICE); + + /* Fill in the DMA descriptor. */ + desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE | + MXS_DCP_CONTROL0_INTERRUPT | + MXS_DCP_CONTROL0_ENABLE_HASH; + if (rctx->init) + desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT; + + desc->control1 = actx->alg; + desc->next_cmd_addr = 0; + desc->source = buf_phys; + desc->destination = 0; + desc->size = actx->fill; + desc->payload = 0; + desc->status = 0; + + /* Set HASH_TERM bit for last transfer block. 
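Put differently: an intermediate block is described only by DECR_SEMAPHORE | INTERRUPT | ENABLE_HASH (plus HASH_INIT on the very first block), with payload and destination left at zero, while the final block additionally sets MXS_DCP_CONTROL0_HASH_TERM and points payload at the DMA-mapped req->result so the engine deposits the digest there.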
*/ + if (rctx->fini) { + digest_phys = dma_map_single(sdcp->dev, req->result, + halg->digestsize, DMA_FROM_DEVICE); + desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM; + desc->payload = digest_phys; + } + + ret = mxs_dcp_start_dma(actx); + + if (rctx->fini) + dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize, + DMA_FROM_DEVICE); + + dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE); + + return ret; +} + +static int dcp_sha_req_to_buf(struct crypto_async_request *arq) +{ + struct dcp *sdcp = global_sdcp; + + struct ahash_request *req = ahash_request_cast(arq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); + struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); + struct hash_alg_common *halg = crypto_hash_alg_common(tfm); + const int nents = sg_nents(req->src); + + uint8_t *in_buf = sdcp->coh->sha_in_buf; + + uint8_t *src_buf; + + struct scatterlist *src; + + unsigned int i, len, clen; + int ret; + + int fin = rctx->fini; + if (fin) + rctx->fini = 0; + + for_each_sg(req->src, src, nents, i) { + src_buf = sg_virt(src); + len = sg_dma_len(src); + + do { + if (actx->fill + len > DCP_BUF_SZ) + clen = DCP_BUF_SZ - actx->fill; + else + clen = len; + + memcpy(in_buf + actx->fill, src_buf, clen); + len -= clen; + src_buf += clen; + actx->fill += clen; + + /* + * If we filled the buffer and still have some + * more data, submit the buffer. + */ + if (len && actx->fill == DCP_BUF_SZ) { + ret = mxs_dcp_run_sha(req); + if (ret) + return ret; + actx->fill = 0; + rctx->init = 0; + } + } while (len); + } + + if (fin) { + rctx->fini = 1; + + /* Submit whatever is left. */ + if (!req->result) + return -EINVAL; + + ret = mxs_dcp_run_sha(req); + if (ret) + return ret; + + actx->fill = 0; + + /* For some reason, the result is flipped. */ + for (i = 0; i < halg->digestsize / 2; i++) { + swap(req->result[i], + req->result[halg->digestsize - i - 1]); + } + } + + return 0; +} + +static int dcp_chan_thread_sha(void *data) +{ + struct dcp *sdcp = global_sdcp; + const int chan = DCP_CHAN_HASH_SHA; + + struct crypto_async_request *backlog; + struct crypto_async_request *arq; + + struct dcp_sha_req_ctx *rctx; + + struct ahash_request *req; + int ret, fini; + + do { + __set_current_state(TASK_INTERRUPTIBLE); + + mutex_lock(&sdcp->mutex[chan]); + backlog = crypto_get_backlog(&sdcp->queue[chan]); + arq = crypto_dequeue_request(&sdcp->queue[chan]); + mutex_unlock(&sdcp->mutex[chan]); + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + + if (arq) { + req = ahash_request_cast(arq); + rctx = ahash_request_ctx(req); + + ret = dcp_sha_req_to_buf(arq); + fini = rctx->fini; + arq->complete(arq, ret); + if (!fini) + continue; + } + + schedule(); + } while (!kthread_should_stop()); + + return 0; +} + +static int dcp_sha_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); + + struct hash_alg_common *halg = crypto_hash_alg_common(tfm); + + /* + * Start hashing session. The code below only inits the + * hashing session context, nothing more. 
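From the consumer's side the whole init/update/final dance above is hidden behind the generic ahash API: requests return -EINPROGRESS and are finished by the per-channel kthread. A minimal one-shot sketch follows; the data buffer is assumed to be kmalloc'd and the completion helper is an illustrative assumption, not code from this patch:

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>

struct demo_hash_wait {
	struct completion done;
	int err;
};

static void demo_hash_complete(struct crypto_async_request *areq, int err)
{
	struct demo_hash_wait *w = areq->data;

	if (err == -EINPROGRESS)
		return;
	w->err = err;
	complete(&w->done);
}

/* One-shot SHA-256; "sha256" typically resolves to "sha256-dcp" (priority
 * 400) once the DCP has advertised MXS_DCP_CAPABILITY1_SHA256. */
static int demo_sha256(const void *data, unsigned int len,
		       u8 digest[SHA256_DIGEST_SIZE])
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct demo_hash_wait w;
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&w.done);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   demo_hash_complete, &w);
	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, digest, len);

	/* ->digest lands in dcp_sha_digest(): dcp_sha_init() + finup. */
	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&w.done);
		err = w.err;
	}

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}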
+ */ + memset(actx, 0, sizeof(*actx)); + + if (strcmp(halg->base.cra_name, "sha1") == 0) + actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1; + else + actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256; + + actx->fill = 0; + actx->hot = 0; + actx->chan = DCP_CHAN_HASH_SHA; + + mutex_init(&actx->mutex); + + return 0; +} + +static int dcp_sha_update_fx(struct ahash_request *req, int fini) +{ + struct dcp *sdcp = global_sdcp; + + struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); + + int ret; + + /* + * Ignore requests that have no data in them and are not + * the trailing requests in the stream of requests. + */ + if (!req->nbytes && !fini) + return 0; + + mutex_lock(&actx->mutex); + + rctx->fini = fini; + + if (!actx->hot) { + actx->hot = 1; + rctx->init = 1; + } + + mutex_lock(&sdcp->mutex[actx->chan]); + ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base); + mutex_unlock(&sdcp->mutex[actx->chan]); + + wake_up_process(sdcp->thread[actx->chan]); + mutex_unlock(&actx->mutex); + + return -EINPROGRESS; +} + +static int dcp_sha_update(struct ahash_request *req) +{ + return dcp_sha_update_fx(req, 0); +} + +static int dcp_sha_final(struct ahash_request *req) +{ + ahash_request_set_crypt(req, NULL, req->result, 0); + req->nbytes = 0; + return dcp_sha_update_fx(req, 1); +} + +static int dcp_sha_finup(struct ahash_request *req) +{ + return dcp_sha_update_fx(req, 1); +} + +static int dcp_sha_digest(struct ahash_request *req) +{ + int ret; + + ret = dcp_sha_init(req); + if (ret) + return ret; + + return dcp_sha_finup(req); +} + +static int dcp_sha_cra_init(struct crypto_tfm *tfm) +{ + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct dcp_sha_req_ctx)); + return 0; +} + +static void dcp_sha_cra_exit(struct crypto_tfm *tfm) +{ +} + +/* AES 128 ECB and AES 128 CBC */ +static struct crypto_alg dcp_aes_algs[] = { + { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-dcp", + .cra_priority = 400, + .cra_alignmask = 15, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_init = mxs_dcp_aes_fallback_init, + .cra_exit = mxs_dcp_aes_fallback_exit, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct dcp_async_ctx), + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = mxs_dcp_aes_setkey, + .encrypt = mxs_dcp_aes_ecb_encrypt, + .decrypt = mxs_dcp_aes_ecb_decrypt + }, + }, + }, { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-dcp", + .cra_priority = 400, + .cra_alignmask = 15, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_init = mxs_dcp_aes_fallback_init, + .cra_exit = mxs_dcp_aes_fallback_exit, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct dcp_async_ctx), + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = mxs_dcp_aes_setkey, + .encrypt = mxs_dcp_aes_cbc_encrypt, + .decrypt = mxs_dcp_aes_cbc_decrypt, + .ivsize = AES_BLOCK_SIZE, + }, + }, + }, +}; + +/* SHA1 */ +static struct ahash_alg dcp_sha1_alg = { + .init = dcp_sha_init, + .update = dcp_sha_update, + .final = dcp_sha_final, + .finup = dcp_sha_finup, + .digest = dcp_sha_digest, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + 
.base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-dcp", + .cra_priority = 400, + .cra_alignmask = 63, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct dcp_async_ctx), + .cra_module = THIS_MODULE, + .cra_init = dcp_sha_cra_init, + .cra_exit = dcp_sha_cra_exit, + }, + }, +}; + +/* SHA256 */ +static struct ahash_alg dcp_sha256_alg = { + .init = dcp_sha_init, + .update = dcp_sha_update, + .final = dcp_sha_final, + .finup = dcp_sha_finup, + .digest = dcp_sha_digest, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-dcp", + .cra_priority = 400, + .cra_alignmask = 63, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct dcp_async_ctx), + .cra_module = THIS_MODULE, + .cra_init = dcp_sha_cra_init, + .cra_exit = dcp_sha_cra_exit, + }, + }, +}; + +static irqreturn_t mxs_dcp_irq(int irq, void *context) +{ + struct dcp *sdcp = context; + uint32_t stat; + int i; + + stat = readl(sdcp->base + MXS_DCP_STAT); + stat &= MXS_DCP_STAT_IRQ_MASK; + if (!stat) + return IRQ_NONE; + + /* Clear the interrupts. */ + writel(stat, sdcp->base + MXS_DCP_STAT_CLR); + + /* Complete the DMA requests that finished. */ + for (i = 0; i < DCP_MAX_CHANS; i++) + if (stat & (1 << i)) + complete(&sdcp->completion[i]); + + return IRQ_HANDLED; +} + +static int mxs_dcp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dcp *sdcp = NULL; + int i, ret; + + struct resource *iores; + int dcp_vmi_irq, dcp_irq; + + if (global_sdcp) { + dev_err(dev, "Only one DCP instance allowed!\n"); + return -ENODEV; + } + + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dcp_vmi_irq = platform_get_irq(pdev, 0); + if (dcp_vmi_irq < 0) + return dcp_vmi_irq; + + dcp_irq = platform_get_irq(pdev, 1); + if (dcp_irq < 0) + return dcp_irq; + + sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL); + if (!sdcp) + return -ENOMEM; + + sdcp->dev = dev; + sdcp->base = devm_ioremap_resource(dev, iores); + if (IS_ERR(sdcp->base)) + return PTR_ERR(sdcp->base); + + + ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0, + "dcp-vmi-irq", sdcp); + if (ret) { + dev_err(dev, "Failed to claim DCP VMI IRQ!\n"); + return ret; + } + + ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0, + "dcp-irq", sdcp); + if (ret) { + dev_err(dev, "Failed to claim DCP IRQ!\n"); + return ret; + } + + /* Allocate coherent helper block. */ + sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT, + GFP_KERNEL); + if (!sdcp->coh) + return -ENOMEM; + + /* Re-align the structure so it fits the DCP constraints. */ + sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT); + + /* Restart the DCP block. */ + ret = stmp_reset_block(sdcp->base); + if (ret) + return ret; + + /* Initialize control register. */ + writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES | + MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf, + sdcp->base + MXS_DCP_CTRL); + + /* Enable all DCP DMA channels. */ + writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK, + sdcp->base + MXS_DCP_CHANNELCTRL); + + /* + * We do not enable context switching. Give the context buffer a + * pointer to an illegal address so if context switching is + * inadvertantly enabled, the DCP will return an error instead of + * trashing good memory. The DCP DMA cannot access ROM, so any ROM + * address will do. 
+ */ + writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT); + for (i = 0; i < DCP_MAX_CHANS; i++) + writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i)); + writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR); + + global_sdcp = sdcp; + + platform_set_drvdata(pdev, sdcp); + + for (i = 0; i < DCP_MAX_CHANS; i++) { + mutex_init(&sdcp->mutex[i]); + init_completion(&sdcp->completion[i]); + crypto_init_queue(&sdcp->queue[i], 50); + } + + /* Create the SHA and AES handler threads. */ + sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha, + NULL, "mxs_dcp_chan/sha"); + if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) { + dev_err(dev, "Error starting SHA thread!\n"); + return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]); + } + + sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes, + NULL, "mxs_dcp_chan/aes"); + if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) { + dev_err(dev, "Error starting SHA thread!\n"); + ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]); + goto err_destroy_sha_thread; + } + + /* Register the various crypto algorithms. */ + sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1); + + if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) { + ret = crypto_register_algs(dcp_aes_algs, + ARRAY_SIZE(dcp_aes_algs)); + if (ret) { + /* Failed to register algorithm. */ + dev_err(dev, "Failed to register AES crypto!\n"); + goto err_destroy_aes_thread; + } + } + + if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) { + ret = crypto_register_ahash(&dcp_sha1_alg); + if (ret) { + dev_err(dev, "Failed to register %s hash!\n", + dcp_sha1_alg.halg.base.cra_name); + goto err_unregister_aes; + } + } + + if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) { + ret = crypto_register_ahash(&dcp_sha256_alg); + if (ret) { + dev_err(dev, "Failed to register %s hash!\n", + dcp_sha256_alg.halg.base.cra_name); + goto err_unregister_sha1; + } + } + + return 0; + +err_unregister_sha1: + if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) + crypto_unregister_ahash(&dcp_sha1_alg); + +err_unregister_aes: + if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) + crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs)); + +err_destroy_aes_thread: + kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]); + +err_destroy_sha_thread: + kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]); + return ret; +} + +static int mxs_dcp_remove(struct platform_device *pdev) +{ + struct dcp *sdcp = platform_get_drvdata(pdev); + + if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) + crypto_unregister_ahash(&dcp_sha256_alg); + + if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) + crypto_unregister_ahash(&dcp_sha1_alg); + + if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) + crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs)); + + kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]); + kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]); + + platform_set_drvdata(pdev, NULL); + + global_sdcp = NULL; + + return 0; +} + +static const struct of_device_id mxs_dcp_dt_ids[] = { + { .compatible = "fsl,imx23-dcp", .data = NULL, }, + { .compatible = "fsl,imx28-dcp", .data = NULL, }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids); + +static struct platform_driver mxs_dcp_driver = { + .probe = mxs_dcp_probe, + .remove = mxs_dcp_remove, + .driver = { + .name = "mxs-dcp", + .of_match_table = mxs_dcp_dt_ids, + }, +}; + +module_platform_driver(mxs_dcp_driver); + +MODULE_AUTHOR("Marek Vasut <marex@denx.de>"); +MODULE_DESCRIPTION("Freescale MXS DCP Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:mxs-dcp"); diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S new file mode 100644 
index 000000000..f7c793745 --- /dev/null +++ b/drivers/crypto/n2_asm.S @@ -0,0 +1,95 @@ +/* n2_asm.S: Hypervisor calls for NCS support. + * + * Copyright (C) 2009 David S. Miller <davem@davemloft.net> + */ + +#include <linux/linkage.h> +#include <asm/hypervisor.h> +#include "n2_core.h" + + /* o0: queue type + * o1: RA of queue + * o2: num entries in queue + * o3: address of queue handle return + */ +ENTRY(sun4v_ncs_qconf) + mov HV_FAST_NCS_QCONF, %o5 + ta HV_FAST_TRAP + stx %o1, [%o3] + retl + nop +ENDPROC(sun4v_ncs_qconf) + + /* %o0: queue handle + * %o1: address of queue type return + * %o2: address of queue base address return + * %o3: address of queue num entries return + */ +ENTRY(sun4v_ncs_qinfo) + mov %o1, %g1 + mov %o2, %g2 + mov %o3, %g3 + mov HV_FAST_NCS_QINFO, %o5 + ta HV_FAST_TRAP + stx %o1, [%g1] + stx %o2, [%g2] + stx %o3, [%g3] + retl + nop +ENDPROC(sun4v_ncs_qinfo) + + /* %o0: queue handle + * %o1: address of head offset return + */ +ENTRY(sun4v_ncs_gethead) + mov %o1, %o2 + mov HV_FAST_NCS_GETHEAD, %o5 + ta HV_FAST_TRAP + stx %o1, [%o2] + retl + nop +ENDPROC(sun4v_ncs_gethead) + + /* %o0: queue handle + * %o1: address of tail offset return + */ +ENTRY(sun4v_ncs_gettail) + mov %o1, %o2 + mov HV_FAST_NCS_GETTAIL, %o5 + ta HV_FAST_TRAP + stx %o1, [%o2] + retl + nop +ENDPROC(sun4v_ncs_gettail) + + /* %o0: queue handle + * %o1: new tail offset + */ +ENTRY(sun4v_ncs_settail) + mov HV_FAST_NCS_SETTAIL, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_ncs_settail) + + /* %o0: queue handle + * %o1: address of devino return + */ +ENTRY(sun4v_ncs_qhandle_to_devino) + mov %o1, %o2 + mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5 + ta HV_FAST_TRAP + stx %o1, [%o2] + retl + nop +ENDPROC(sun4v_ncs_qhandle_to_devino) + + /* %o0: queue handle + * %o1: new head offset + */ +ENTRY(sun4v_ncs_sethead_marker) + mov HV_FAST_NCS_SETHEAD_MARKER, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_ncs_sethead_marker) diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c new file mode 100644 index 000000000..10a9aeff1 --- /dev/null +++ b/drivers/crypto/n2_core.c @@ -0,0 +1,2265 @@ +/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support. + * + * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/cpumask.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/crypto.h> +#include <crypto/md5.h> +#include <crypto/sha.h> +#include <crypto/aes.h> +#include <crypto/des.h> +#include <linux/mutex.h> +#include <linux/delay.h> +#include <linux/sched.h> + +#include <crypto/internal/hash.h> +#include <crypto/scatterwalk.h> +#include <crypto/algapi.h> + +#include <asm/hypervisor.h> +#include <asm/mdesc.h> + +#include "n2_core.h" + +#define DRV_MODULE_NAME "n2_crypto" +#define DRV_MODULE_VERSION "0.2" +#define DRV_MODULE_RELDATE "July 28, 2011" + +static char version[] = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("David S. 
Miller (davem@davemloft.net)"); +MODULE_DESCRIPTION("Niagara2 Crypto driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +#define N2_CRA_PRIORITY 200 + +static DEFINE_MUTEX(spu_lock); + +struct spu_queue { + cpumask_t sharing; + unsigned long qhandle; + + spinlock_t lock; + u8 q_type; + void *q; + unsigned long head; + unsigned long tail; + struct list_head jobs; + + unsigned long devino; + + char irq_name[32]; + unsigned int irq; + + struct list_head list; +}; + +static struct spu_queue **cpu_to_cwq; +static struct spu_queue **cpu_to_mau; + +static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off) +{ + if (q->q_type == HV_NCS_QTYPE_MAU) { + off += MAU_ENTRY_SIZE; + if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES)) + off = 0; + } else { + off += CWQ_ENTRY_SIZE; + if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES)) + off = 0; + } + return off; +} + +struct n2_request_common { + struct list_head entry; + unsigned int offset; +}; +#define OFFSET_NOT_RUNNING (~(unsigned int)0) + +/* An async job request records the final tail value it used in + * n2_request_common->offset, test to see if that offset is in + * the range old_head, new_head, inclusive. + */ +static inline bool job_finished(struct spu_queue *q, unsigned int offset, + unsigned long old_head, unsigned long new_head) +{ + if (old_head <= new_head) { + if (offset > old_head && offset <= new_head) + return true; + } else { + if (offset > old_head || offset <= new_head) + return true; + } + return false; +} + +/* When the HEAD marker is unequal to the actual HEAD, we get + * a virtual device INO interrupt. We should process the + * completed CWQ entries and adjust the HEAD marker to clear + * the IRQ. + */ +static irqreturn_t cwq_intr(int irq, void *dev_id) +{ + unsigned long off, new_head, hv_ret; + struct spu_queue *q = dev_id; + + pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n", + smp_processor_id(), q->qhandle); + + spin_lock(&q->lock); + + hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head); + + pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n", + smp_processor_id(), new_head, hv_ret); + + for (off = q->head; off != new_head; off = spu_next_offset(q, off)) { + /* XXX ... 
XXX */ + } + + hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head); + if (hv_ret == HV_EOK) + q->head = new_head; + + spin_unlock(&q->lock); + + return IRQ_HANDLED; +} + +static irqreturn_t mau_intr(int irq, void *dev_id) +{ + struct spu_queue *q = dev_id; + unsigned long head, hv_ret; + + spin_lock(&q->lock); + + pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n", + smp_processor_id(), q->qhandle); + + hv_ret = sun4v_ncs_gethead(q->qhandle, &head); + + pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n", + smp_processor_id(), head, hv_ret); + + sun4v_ncs_sethead_marker(q->qhandle, head); + + spin_unlock(&q->lock); + + return IRQ_HANDLED; +} + +static void *spu_queue_next(struct spu_queue *q, void *cur) +{ + return q->q + spu_next_offset(q, cur - q->q); +} + +static int spu_queue_num_free(struct spu_queue *q) +{ + unsigned long head = q->head; + unsigned long tail = q->tail; + unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES); + unsigned long diff; + + if (head > tail) + diff = head - tail; + else + diff = (end - tail) + head; + + return (diff / CWQ_ENTRY_SIZE) - 1; +} + +static void *spu_queue_alloc(struct spu_queue *q, int num_entries) +{ + int avail = spu_queue_num_free(q); + + if (avail >= num_entries) + return q->q + q->tail; + + return NULL; +} + +static unsigned long spu_queue_submit(struct spu_queue *q, void *last) +{ + unsigned long hv_ret, new_tail; + + new_tail = spu_next_offset(q, last - q->q); + + hv_ret = sun4v_ncs_settail(q->qhandle, new_tail); + if (hv_ret == HV_EOK) + q->tail = new_tail; + return hv_ret; +} + +static u64 control_word_base(unsigned int len, unsigned int hmac_key_len, + int enc_type, int auth_type, + unsigned int hash_len, + bool sfas, bool sob, bool eob, bool encrypt, + int opcode) +{ + u64 word = (len - 1) & CONTROL_LEN; + + word |= ((u64) opcode << CONTROL_OPCODE_SHIFT); + word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT); + word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT); + if (sfas) + word |= CONTROL_STORE_FINAL_AUTH_STATE; + if (sob) + word |= CONTROL_START_OF_BLOCK; + if (eob) + word |= CONTROL_END_OF_BLOCK; + if (encrypt) + word |= CONTROL_ENCRYPT; + if (hmac_key_len) + word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT; + if (hash_len) + word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT; + + return word; +} + +#if 0 +static inline bool n2_should_run_async(struct spu_queue *qp, int this_len) +{ + if (this_len >= 64 || + qp->head != qp->tail) + return true; + return false; +} +#endif + +struct n2_ahash_alg { + struct list_head entry; + const char *hash_zero; + const u32 *hash_init; + u8 hw_op_hashsz; + u8 digest_size; + u8 auth_type; + u8 hmac_type; + struct ahash_alg alg; +}; + +static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm) +{ + struct crypto_alg *alg = tfm->__crt_alg; + struct ahash_alg *ahash_alg; + + ahash_alg = container_of(alg, struct ahash_alg, halg.base); + + return container_of(ahash_alg, struct n2_ahash_alg, alg); +} + +struct n2_hmac_alg { + const char *child_alg; + struct n2_ahash_alg derived; +}; + +static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm) +{ + struct crypto_alg *alg = tfm->__crt_alg; + struct ahash_alg *ahash_alg; + + ahash_alg = container_of(alg, struct ahash_alg, halg.base); + + return container_of(ahash_alg, struct n2_hmac_alg, derived.alg); +} + +struct n2_hash_ctx { + struct crypto_ahash *fallback_tfm; +}; + +#define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */ + +struct n2_hmac_ctx { + struct n2_hash_ctx base; + + struct crypto_shash 
*child_shash; + + int hash_key_len; + unsigned char hash_key[N2_HASH_KEY_MAX]; +}; + +struct n2_hash_req_ctx { + union { + struct md5_state md5; + struct sha1_state sha1; + struct sha256_state sha256; + } u; + + struct ahash_request fallback_req; +}; + +static int n2_hash_async_init(struct ahash_request *req) +{ + struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_init(&rctx->fallback_req); +} + +static int n2_hash_async_update(struct ahash_request *req) +{ + struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.nbytes = req->nbytes; + rctx->fallback_req.src = req->src; + + return crypto_ahash_update(&rctx->fallback_req); +} + +static int n2_hash_async_final(struct ahash_request *req) +{ + struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.result = req->result; + + return crypto_ahash_final(&rctx->fallback_req); +} + +static int n2_hash_async_finup(struct ahash_request *req) +{ + struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.nbytes = req->nbytes; + rctx->fallback_req.src = req->src; + rctx->fallback_req.result = req->result; + + return crypto_ahash_finup(&rctx->fallback_req); +} + +static int n2_hash_cra_init(struct crypto_tfm *tfm) +{ + const char *fallback_driver_name = crypto_tfm_alg_name(tfm); + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct crypto_ahash *fallback_tfm; + int err; + + fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback_tfm)) { + pr_warning("Fallback driver '%s' could not be loaded!\n", + fallback_driver_name); + err = PTR_ERR(fallback_tfm); + goto out; + } + + crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) + + crypto_ahash_reqsize(fallback_tfm))); + + ctx->fallback_tfm = fallback_tfm; + return 0; + +out: + return err; +} + +static void n2_hash_cra_exit(struct crypto_tfm *tfm) +{ + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); + + crypto_free_ahash(ctx->fallback_tfm); +} + +static int n2_hmac_cra_init(struct crypto_tfm *tfm) +{ + const char *fallback_driver_name = crypto_tfm_alg_name(tfm); + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash); + struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm); + struct crypto_ahash *fallback_tfm; + struct crypto_shash *child_shash; + int err; + + fallback_tfm = 
crypto_alloc_ahash(fallback_driver_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback_tfm)) { + pr_warning("Fallback driver '%s' could not be loaded!\n", + fallback_driver_name); + err = PTR_ERR(fallback_tfm); + goto out; + } + + child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0); + if (IS_ERR(child_shash)) { + pr_warning("Child shash '%s' could not be loaded!\n", + n2alg->child_alg); + err = PTR_ERR(child_shash); + goto out_free_fallback; + } + + crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) + + crypto_ahash_reqsize(fallback_tfm))); + + ctx->child_shash = child_shash; + ctx->base.fallback_tfm = fallback_tfm; + return 0; + +out_free_fallback: + crypto_free_ahash(fallback_tfm); + +out: + return err; +} + +static void n2_hmac_cra_exit(struct crypto_tfm *tfm) +{ + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash); + + crypto_free_ahash(ctx->base.fallback_tfm); + crypto_free_shash(ctx->child_shash); +} + +static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct crypto_shash *child_shash = ctx->child_shash; + struct crypto_ahash *fallback_tfm; + SHASH_DESC_ON_STACK(shash, child_shash); + int err, bs, ds; + + fallback_tfm = ctx->base.fallback_tfm; + err = crypto_ahash_setkey(fallback_tfm, key, keylen); + if (err) + return err; + + shash->tfm = child_shash; + shash->flags = crypto_ahash_get_flags(tfm) & + CRYPTO_TFM_REQ_MAY_SLEEP; + + bs = crypto_shash_blocksize(child_shash); + ds = crypto_shash_digestsize(child_shash); + BUG_ON(ds > N2_HASH_KEY_MAX); + if (keylen > bs) { + err = crypto_shash_digest(shash, key, keylen, + ctx->hash_key); + if (err) + return err; + keylen = ds; + } else if (keylen <= N2_HASH_KEY_MAX) + memcpy(ctx->hash_key, key, keylen); + + ctx->hash_key_len = keylen; + + return err; +} + +static unsigned long wait_for_tail(struct spu_queue *qp) +{ + unsigned long head, hv_ret; + + do { + hv_ret = sun4v_ncs_gethead(qp->qhandle, &head); + if (hv_ret != HV_EOK) { + pr_err("Hypervisor error on gethead\n"); + break; + } + if (head == qp->tail) { + qp->head = head; + break; + } + } while (1); + return hv_ret; +} + +static unsigned long submit_and_wait_for_tail(struct spu_queue *qp, + struct cwq_initial_entry *ent) +{ + unsigned long hv_ret = spu_queue_submit(qp, ent); + + if (hv_ret == HV_EOK) + hv_ret = wait_for_tail(qp); + + return hv_ret; +} + +static int n2_do_async_digest(struct ahash_request *req, + unsigned int auth_type, unsigned int digest_size, + unsigned int result_size, void *hash_loc, + unsigned long auth_key, unsigned int auth_key_len) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct cwq_initial_entry *ent; + struct crypto_hash_walk walk; + struct spu_queue *qp; + unsigned long flags; + int err = -ENODEV; + int nbytes, cpu; + + /* The total effective length of the operation may not + * exceed 2^16. 
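The 2^16 limit falls out of the descriptor format: control_word_base() stores (len - 1) & CONTROL_LEN, and the continuation entries below likewise encode nbytes - 1; CONTROL_LEN, defined in n2_core.h rather than in this hunk, is evidently a 16-bit mask, so the largest encodable operation is 65536 bytes, since (65536 - 1) == 0xffff. Anything larger cannot be expressed in a single control word, which is why oversized requests are simply redirected to the software fallback tfm just below.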
+ */ + if (unlikely(req->nbytes > (1 << 16))) { + struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); + struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.nbytes = req->nbytes; + rctx->fallback_req.src = req->src; + rctx->fallback_req.result = req->result; + + return crypto_ahash_digest(&rctx->fallback_req); + } + + nbytes = crypto_hash_walk_first(req, &walk); + + cpu = get_cpu(); + qp = cpu_to_cwq[cpu]; + if (!qp) + goto out; + + spin_lock_irqsave(&qp->lock, flags); + + /* XXX can do better, improve this later by doing a by-hand scatterlist + * XXX walk, etc. + */ + ent = qp->q + qp->tail; + + ent->control = control_word_base(nbytes, auth_key_len, 0, + auth_type, digest_size, + false, true, false, false, + OPCODE_INPLACE_BIT | + OPCODE_AUTH_MAC); + ent->src_addr = __pa(walk.data); + ent->auth_key_addr = auth_key; + ent->auth_iv_addr = __pa(hash_loc); + ent->final_auth_state_addr = 0UL; + ent->enc_key_addr = 0UL; + ent->enc_iv_addr = 0UL; + ent->dest_addr = __pa(hash_loc); + + nbytes = crypto_hash_walk_done(&walk, 0); + while (nbytes > 0) { + ent = spu_queue_next(qp, ent); + + ent->control = (nbytes - 1); + ent->src_addr = __pa(walk.data); + ent->auth_key_addr = 0UL; + ent->auth_iv_addr = 0UL; + ent->final_auth_state_addr = 0UL; + ent->enc_key_addr = 0UL; + ent->enc_iv_addr = 0UL; + ent->dest_addr = 0UL; + + nbytes = crypto_hash_walk_done(&walk, 0); + } + ent->control |= CONTROL_END_OF_BLOCK; + + if (submit_and_wait_for_tail(qp, ent) != HV_EOK) + err = -EINVAL; + else + err = 0; + + spin_unlock_irqrestore(&qp->lock, flags); + + if (!err) + memcpy(req->result, hash_loc, result_size); +out: + put_cpu(); + + return err; +} + +static int n2_hash_async_digest(struct ahash_request *req) +{ + struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm); + struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); + int ds; + + ds = n2alg->digest_size; + if (unlikely(req->nbytes == 0)) { + memcpy(req->result, n2alg->hash_zero, ds); + return 0; + } + memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz); + + return n2_do_async_digest(req, n2alg->auth_type, + n2alg->hw_op_hashsz, ds, + &rctx->u, 0UL, 0); +} + +static int n2_hmac_async_digest(struct ahash_request *req) +{ + struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm); + struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); + int ds; + + ds = n2alg->derived.digest_size; + if (unlikely(req->nbytes == 0) || + unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) { + struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); + struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.nbytes = req->nbytes; + rctx->fallback_req.src = req->src; + rctx->fallback_req.result = req->result; + + return crypto_ahash_digest(&rctx->fallback_req); + } + memcpy(&rctx->u, n2alg->derived.hash_init, + n2alg->derived.hw_op_hashsz); + + return n2_do_async_digest(req, n2alg->derived.hmac_type, + n2alg->derived.hw_op_hashsz, ds, + &rctx->u, + __pa(&ctx->hash_key), + ctx->hash_key_len); +} + +struct n2_cipher_context { + int key_len; + int enc_type; + union { + u8 aes[AES_MAX_KEY_SIZE]; + u8 des[DES_KEY_SIZE]; + u8 des3[3 * DES_KEY_SIZE]; + u8 arc4[258]; 
/* S-box, X, Y */ + } key; +}; + +#define N2_CHUNK_ARR_LEN 16 + +struct n2_crypto_chunk { + struct list_head entry; + unsigned long iv_paddr : 44; + unsigned long arr_len : 20; + unsigned long dest_paddr; + unsigned long dest_final; + struct { + unsigned long src_paddr : 44; + unsigned long src_len : 20; + } arr[N2_CHUNK_ARR_LEN]; +}; + +struct n2_request_context { + struct ablkcipher_walk walk; + struct list_head chunk_list; + struct n2_crypto_chunk chunk; + u8 temp_iv[16]; +}; + +/* The SPU allows some level of flexibility for partial cipher blocks + * being specified in a descriptor. + * + * It merely requires that every descriptor's length field is at least + * as large as the cipher block size. This means that a cipher block + * can span at most 2 descriptors. However, this does not allow a + * partial block to span into the final descriptor as that would + * violate the rule (since every descriptor's length must be at lest + * the block size). So, for example, assuming an 8 byte block size: + * + * 0xe --> 0xa --> 0x8 + * + * is a valid length sequence, whereas: + * + * 0xe --> 0xb --> 0x7 + * + * is not a valid sequence. + */ + +struct n2_cipher_alg { + struct list_head entry; + u8 enc_type; + struct crypto_alg alg; +}; + +static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm) +{ + struct crypto_alg *alg = tfm->__crt_alg; + + return container_of(alg, struct n2_cipher_alg, alg); +} + +struct n2_cipher_request_context { + struct ablkcipher_walk walk; +}; + +static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, + unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); + struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); + + ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK); + + switch (keylen) { + case AES_KEYSIZE_128: + ctx->enc_type |= ENC_TYPE_ALG_AES128; + break; + case AES_KEYSIZE_192: + ctx->enc_type |= ENC_TYPE_ALG_AES192; + break; + case AES_KEYSIZE_256: + ctx->enc_type |= ENC_TYPE_ALG_AES256; + break; + default: + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + ctx->key_len = keylen; + memcpy(ctx->key.aes, key, keylen); + return 0; +} + +static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, + unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); + struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); + u32 tmp[DES_EXPKEY_WORDS]; + int err; + + ctx->enc_type = n2alg->enc_type; + + if (keylen != DES_KEY_SIZE) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + err = des_ekey(tmp, key); + if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; + return -EINVAL; + } + + ctx->key_len = keylen; + memcpy(ctx->key.des, key, keylen); + return 0; +} + +static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, + unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); + struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); + + ctx->enc_type = n2alg->enc_type; + + if (keylen != (3 * DES_KEY_SIZE)) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + ctx->key_len = keylen; + memcpy(ctx->key.des3, key, keylen); + return 0; +} + +static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 
+ unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); + struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); + u8 *s = ctx->key.arc4; + u8 *x = s + 256; + u8 *y = x + 1; + int i, j, k; + + ctx->enc_type = n2alg->enc_type; + + j = k = 0; + *x = 0; + *y = 0; + for (i = 0; i < 256; i++) + s[i] = i; + for (i = 0; i < 256; i++) { + u8 a = s[i]; + j = (j + key[k] + a) & 0xff; + s[i] = s[j]; + s[j] = a; + if (++k >= keylen) + k = 0; + } + + return 0; +} + +static inline int cipher_descriptor_len(int nbytes, unsigned int block_size) +{ + int this_len = nbytes; + + this_len -= (nbytes & (block_size - 1)); + return this_len > (1 << 16) ? (1 << 16) : this_len; +} + +static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp, + struct spu_queue *qp, bool encrypt) +{ + struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); + struct cwq_initial_entry *ent; + bool in_place; + int i; + + ent = spu_queue_alloc(qp, cp->arr_len); + if (!ent) { + pr_info("queue_alloc() of %d fails\n", + cp->arr_len); + return -EBUSY; + } + + in_place = (cp->dest_paddr == cp->arr[0].src_paddr); + + ent->control = control_word_base(cp->arr[0].src_len, + 0, ctx->enc_type, 0, 0, + false, true, false, encrypt, + OPCODE_ENCRYPT | + (in_place ? OPCODE_INPLACE_BIT : 0)); + ent->src_addr = cp->arr[0].src_paddr; + ent->auth_key_addr = 0UL; + ent->auth_iv_addr = 0UL; + ent->final_auth_state_addr = 0UL; + ent->enc_key_addr = __pa(&ctx->key); + ent->enc_iv_addr = cp->iv_paddr; + ent->dest_addr = (in_place ? 0UL : cp->dest_paddr); + + for (i = 1; i < cp->arr_len; i++) { + ent = spu_queue_next(qp, ent); + + ent->control = cp->arr[i].src_len - 1; + ent->src_addr = cp->arr[i].src_paddr; + ent->auth_key_addr = 0UL; + ent->auth_iv_addr = 0UL; + ent->final_auth_state_addr = 0UL; + ent->enc_key_addr = 0UL; + ent->enc_iv_addr = 0UL; + ent->dest_addr = 0UL; + } + ent->control |= CONTROL_END_OF_BLOCK; + + return (spu_queue_submit(qp, ent) != HV_EOK) ? 
-EINVAL : 0; +} + +static int n2_compute_chunks(struct ablkcipher_request *req) +{ + struct n2_request_context *rctx = ablkcipher_request_ctx(req); + struct ablkcipher_walk *walk = &rctx->walk; + struct n2_crypto_chunk *chunk; + unsigned long dest_prev; + unsigned int tot_len; + bool prev_in_place; + int err, nbytes; + + ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes); + err = ablkcipher_walk_phys(req, walk); + if (err) + return err; + + INIT_LIST_HEAD(&rctx->chunk_list); + + chunk = &rctx->chunk; + INIT_LIST_HEAD(&chunk->entry); + + chunk->iv_paddr = 0UL; + chunk->arr_len = 0; + chunk->dest_paddr = 0UL; + + prev_in_place = false; + dest_prev = ~0UL; + tot_len = 0; + + while ((nbytes = walk->nbytes) != 0) { + unsigned long dest_paddr, src_paddr; + bool in_place; + int this_len; + + src_paddr = (page_to_phys(walk->src.page) + + walk->src.offset); + dest_paddr = (page_to_phys(walk->dst.page) + + walk->dst.offset); + in_place = (src_paddr == dest_paddr); + this_len = cipher_descriptor_len(nbytes, walk->blocksize); + + if (chunk->arr_len != 0) { + if (in_place != prev_in_place || + (!prev_in_place && + dest_paddr != dest_prev) || + chunk->arr_len == N2_CHUNK_ARR_LEN || + tot_len + this_len > (1 << 16)) { + chunk->dest_final = dest_prev; + list_add_tail(&chunk->entry, + &rctx->chunk_list); + chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC); + if (!chunk) { + err = -ENOMEM; + break; + } + INIT_LIST_HEAD(&chunk->entry); + } + } + if (chunk->arr_len == 0) { + chunk->dest_paddr = dest_paddr; + tot_len = 0; + } + chunk->arr[chunk->arr_len].src_paddr = src_paddr; + chunk->arr[chunk->arr_len].src_len = this_len; + chunk->arr_len++; + + dest_prev = dest_paddr + this_len; + prev_in_place = in_place; + tot_len += this_len; + + err = ablkcipher_walk_done(req, walk, nbytes - this_len); + if (err) + break; + } + if (!err && chunk->arr_len != 0) { + chunk->dest_final = dest_prev; + list_add_tail(&chunk->entry, &rctx->chunk_list); + } + + return err; +} + +static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv) +{ + struct n2_request_context *rctx = ablkcipher_request_ctx(req); + struct n2_crypto_chunk *c, *tmp; + + if (final_iv) + memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize); + + ablkcipher_walk_complete(&rctx->walk); + list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { + list_del(&c->entry); + if (unlikely(c != &rctx->chunk)) + kfree(c); + } + +} + +static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt) +{ + struct n2_request_context *rctx = ablkcipher_request_ctx(req); + struct crypto_tfm *tfm = req->base.tfm; + int err = n2_compute_chunks(req); + struct n2_crypto_chunk *c, *tmp; + unsigned long flags, hv_ret; + struct spu_queue *qp; + + if (err) + return err; + + qp = cpu_to_cwq[get_cpu()]; + err = -ENODEV; + if (!qp) + goto out; + + spin_lock_irqsave(&qp->lock, flags); + + list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { + err = __n2_crypt_chunk(tfm, c, qp, encrypt); + if (err) + break; + list_del(&c->entry); + if (unlikely(c != &rctx->chunk)) + kfree(c); + } + if (!err) { + hv_ret = wait_for_tail(qp); + if (hv_ret != HV_EOK) + err = -EINVAL; + } + + spin_unlock_irqrestore(&qp->lock, flags); + +out: + put_cpu(); + + n2_chunk_complete(req, NULL); + return err; +} + +static int n2_encrypt_ecb(struct ablkcipher_request *req) +{ + return n2_do_ecb(req, true); +} + +static int n2_decrypt_ecb(struct ablkcipher_request *req) +{ + return n2_do_ecb(req, false); +} + +static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt) +{ + 
struct n2_request_context *rctx = ablkcipher_request_ctx(req); + struct crypto_tfm *tfm = req->base.tfm; + unsigned long flags, hv_ret, iv_paddr; + int err = n2_compute_chunks(req); + struct n2_crypto_chunk *c, *tmp; + struct spu_queue *qp; + void *final_iv_addr; + + final_iv_addr = NULL; + + if (err) + return err; + + qp = cpu_to_cwq[get_cpu()]; + err = -ENODEV; + if (!qp) + goto out; + + spin_lock_irqsave(&qp->lock, flags); + + if (encrypt) { + iv_paddr = __pa(rctx->walk.iv); + list_for_each_entry_safe(c, tmp, &rctx->chunk_list, + entry) { + c->iv_paddr = iv_paddr; + err = __n2_crypt_chunk(tfm, c, qp, true); + if (err) + break; + iv_paddr = c->dest_final - rctx->walk.blocksize; + list_del(&c->entry); + if (unlikely(c != &rctx->chunk)) + kfree(c); + } + final_iv_addr = __va(iv_paddr); + } else { + list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list, + entry) { + if (c == &rctx->chunk) { + iv_paddr = __pa(rctx->walk.iv); + } else { + iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr + + tmp->arr[tmp->arr_len-1].src_len - + rctx->walk.blocksize); + } + if (!final_iv_addr) { + unsigned long pa; + + pa = (c->arr[c->arr_len-1].src_paddr + + c->arr[c->arr_len-1].src_len - + rctx->walk.blocksize); + final_iv_addr = rctx->temp_iv; + memcpy(rctx->temp_iv, __va(pa), + rctx->walk.blocksize); + } + c->iv_paddr = iv_paddr; + err = __n2_crypt_chunk(tfm, c, qp, false); + if (err) + break; + list_del(&c->entry); + if (unlikely(c != &rctx->chunk)) + kfree(c); + } + } + if (!err) { + hv_ret = wait_for_tail(qp); + if (hv_ret != HV_EOK) + err = -EINVAL; + } + + spin_unlock_irqrestore(&qp->lock, flags); + +out: + put_cpu(); + + n2_chunk_complete(req, err ? NULL : final_iv_addr); + return err; +} + +static int n2_encrypt_chaining(struct ablkcipher_request *req) +{ + return n2_do_chaining(req, true); +} + +static int n2_decrypt_chaining(struct ablkcipher_request *req) +{ + return n2_do_chaining(req, false); +} + +struct n2_cipher_tmpl { + const char *name; + const char *drv_name; + u8 block_size; + u8 enc_type; + struct ablkcipher_alg ablkcipher; +}; + +static const struct n2_cipher_tmpl cipher_tmpls[] = { + /* ARC4: only ECB is supported (chaining bits ignored) */ + { .name = "ecb(arc4)", + .drv_name = "ecb-arc4", + .block_size = 1, + .enc_type = (ENC_TYPE_ALG_RC4_STREAM | + ENC_TYPE_CHAINING_ECB), + .ablkcipher = { + .min_keysize = 1, + .max_keysize = 256, + .setkey = n2_arc4_setkey, + .encrypt = n2_encrypt_ecb, + .decrypt = n2_decrypt_ecb, + }, + }, + + /* DES: ECB CBC and CFB are supported */ + { .name = "ecb(des)", + .drv_name = "ecb-des", + .block_size = DES_BLOCK_SIZE, + .enc_type = (ENC_TYPE_ALG_DES | + ENC_TYPE_CHAINING_ECB), + .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = n2_des_setkey, + .encrypt = n2_encrypt_ecb, + .decrypt = n2_decrypt_ecb, + }, + }, + { .name = "cbc(des)", + .drv_name = "cbc-des", + .block_size = DES_BLOCK_SIZE, + .enc_type = (ENC_TYPE_ALG_DES | + ENC_TYPE_CHAINING_CBC), + .ablkcipher = { + .ivsize = DES_BLOCK_SIZE, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = n2_des_setkey, + .encrypt = n2_encrypt_chaining, + .decrypt = n2_decrypt_chaining, + }, + }, + { .name = "cfb(des)", + .drv_name = "cfb-des", + .block_size = DES_BLOCK_SIZE, + .enc_type = (ENC_TYPE_ALG_DES | + ENC_TYPE_CHAINING_CFB), + .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = n2_des_setkey, + .encrypt = n2_encrypt_chaining, + .decrypt = n2_decrypt_chaining, + }, + }, + + /* 3DES: ECB CBC and CFB are 
supported */ + { .name = "ecb(des3_ede)", + .drv_name = "ecb-3des", + .block_size = DES_BLOCK_SIZE, + .enc_type = (ENC_TYPE_ALG_3DES | + ENC_TYPE_CHAINING_ECB), + .ablkcipher = { + .min_keysize = 3 * DES_KEY_SIZE, + .max_keysize = 3 * DES_KEY_SIZE, + .setkey = n2_3des_setkey, + .encrypt = n2_encrypt_ecb, + .decrypt = n2_decrypt_ecb, + }, + }, + { .name = "cbc(des3_ede)", + .drv_name = "cbc-3des", + .block_size = DES_BLOCK_SIZE, + .enc_type = (ENC_TYPE_ALG_3DES | + ENC_TYPE_CHAINING_CBC), + .ablkcipher = { + .ivsize = DES_BLOCK_SIZE, + .min_keysize = 3 * DES_KEY_SIZE, + .max_keysize = 3 * DES_KEY_SIZE, + .setkey = n2_3des_setkey, + .encrypt = n2_encrypt_chaining, + .decrypt = n2_decrypt_chaining, + }, + }, + { .name = "cfb(des3_ede)", + .drv_name = "cfb-3des", + .block_size = DES_BLOCK_SIZE, + .enc_type = (ENC_TYPE_ALG_3DES | + ENC_TYPE_CHAINING_CFB), + .ablkcipher = { + .min_keysize = 3 * DES_KEY_SIZE, + .max_keysize = 3 * DES_KEY_SIZE, + .setkey = n2_3des_setkey, + .encrypt = n2_encrypt_chaining, + .decrypt = n2_decrypt_chaining, + }, + }, + /* AES: ECB CBC and CTR are supported */ + { .name = "ecb(aes)", + .drv_name = "ecb-aes", + .block_size = AES_BLOCK_SIZE, + .enc_type = (ENC_TYPE_ALG_AES128 | + ENC_TYPE_CHAINING_ECB), + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = n2_aes_setkey, + .encrypt = n2_encrypt_ecb, + .decrypt = n2_decrypt_ecb, + }, + }, + { .name = "cbc(aes)", + .drv_name = "cbc-aes", + .block_size = AES_BLOCK_SIZE, + .enc_type = (ENC_TYPE_ALG_AES128 | + ENC_TYPE_CHAINING_CBC), + .ablkcipher = { + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = n2_aes_setkey, + .encrypt = n2_encrypt_chaining, + .decrypt = n2_decrypt_chaining, + }, + }, + { .name = "ctr(aes)", + .drv_name = "ctr-aes", + .block_size = AES_BLOCK_SIZE, + .enc_type = (ENC_TYPE_ALG_AES128 | + ENC_TYPE_CHAINING_COUNTER), + .ablkcipher = { + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = n2_aes_setkey, + .encrypt = n2_encrypt_chaining, + .decrypt = n2_encrypt_chaining, + }, + }, + +}; +#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls) + +static LIST_HEAD(cipher_algs); + +struct n2_hash_tmpl { + const char *name; + const char *hash_zero; + const u32 *hash_init; + u8 hw_op_hashsz; + u8 digest_size; + u8 block_size; + u8 auth_type; + u8 hmac_type; +}; + +static const char md5_zero[MD5_DIGEST_SIZE] = { + 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, + 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, +}; +static const u32 md5_init[MD5_HASH_WORDS] = { + cpu_to_le32(0x67452301), + cpu_to_le32(0xefcdab89), + cpu_to_le32(0x98badcfe), + cpu_to_le32(0x10325476), +}; +static const char sha1_zero[SHA1_DIGEST_SIZE] = { + 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, + 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, + 0x07, 0x09 +}; +static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { + SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, +}; +static const char sha256_zero[SHA256_DIGEST_SIZE] = { + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, + 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, + 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, + 0x1b, 0x78, 0x52, 0xb8, 0x55 +}; +static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { + SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, + SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, +}; +static const char sha224_zero[SHA224_DIGEST_SIZE] = { + 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 
0x47, + 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, + 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, + 0x2f +}; +static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { + SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, + SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, +}; + +static const struct n2_hash_tmpl hash_tmpls[] = { + { .name = "md5", + .hash_zero = md5_zero, + .hash_init = md5_init, + .auth_type = AUTH_TYPE_MD5, + .hmac_type = AUTH_TYPE_HMAC_MD5, + .hw_op_hashsz = MD5_DIGEST_SIZE, + .digest_size = MD5_DIGEST_SIZE, + .block_size = MD5_HMAC_BLOCK_SIZE }, + { .name = "sha1", + .hash_zero = sha1_zero, + .hash_init = sha1_init, + .auth_type = AUTH_TYPE_SHA1, + .hmac_type = AUTH_TYPE_HMAC_SHA1, + .hw_op_hashsz = SHA1_DIGEST_SIZE, + .digest_size = SHA1_DIGEST_SIZE, + .block_size = SHA1_BLOCK_SIZE }, + { .name = "sha256", + .hash_zero = sha256_zero, + .hash_init = sha256_init, + .auth_type = AUTH_TYPE_SHA256, + .hmac_type = AUTH_TYPE_HMAC_SHA256, + .hw_op_hashsz = SHA256_DIGEST_SIZE, + .digest_size = SHA256_DIGEST_SIZE, + .block_size = SHA256_BLOCK_SIZE }, + { .name = "sha224", + .hash_zero = sha224_zero, + .hash_init = sha224_init, + .auth_type = AUTH_TYPE_SHA256, + .hmac_type = AUTH_TYPE_RESERVED, + .hw_op_hashsz = SHA256_DIGEST_SIZE, + .digest_size = SHA224_DIGEST_SIZE, + .block_size = SHA224_BLOCK_SIZE }, +}; +#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) + +static LIST_HEAD(ahash_algs); +static LIST_HEAD(hmac_algs); + +static int algs_registered; + +static void __n2_unregister_algs(void) +{ + struct n2_cipher_alg *cipher, *cipher_tmp; + struct n2_ahash_alg *alg, *alg_tmp; + struct n2_hmac_alg *hmac, *hmac_tmp; + + list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) { + crypto_unregister_alg(&cipher->alg); + list_del(&cipher->entry); + kfree(cipher); + } + list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) { + crypto_unregister_ahash(&hmac->derived.alg); + list_del(&hmac->derived.entry); + kfree(hmac); + } + list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) { + crypto_unregister_ahash(&alg->alg); + list_del(&alg->entry); + kfree(alg); + } +} + +static int n2_cipher_cra_init(struct crypto_tfm *tfm) +{ + tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context); + return 0; +} + +static int __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl) +{ + struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); + struct crypto_alg *alg; + int err; + + if (!p) + return -ENOMEM; + + alg = &p->alg; + + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name); + alg->cra_priority = N2_CRA_PRIORITY; + alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC; + alg->cra_blocksize = tmpl->block_size; + p->enc_type = tmpl->enc_type; + alg->cra_ctxsize = sizeof(struct n2_cipher_context); + alg->cra_type = &crypto_ablkcipher_type; + alg->cra_u.ablkcipher = tmpl->ablkcipher; + alg->cra_init = n2_cipher_cra_init; + alg->cra_module = THIS_MODULE; + + list_add(&p->entry, &cipher_algs); + err = crypto_register_alg(alg); + if (err) { + pr_err("%s alg registration failed\n", alg->cra_name); + list_del(&p->entry); + kfree(p); + } else { + pr_info("%s alg registered\n", alg->cra_name); + } + return err; +} + +static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash) +{ + struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); + struct ahash_alg *ahash; + struct crypto_alg *base; + int err; + + if (!p) + return -ENOMEM; + 
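A side note on the *_zero arrays above: they are the well-known digests of the empty message for MD5, SHA-1, SHA-256 and SHA-224, kept in each template presumably so that a zero-length digest request can be answered straight from the table instead of being round-tripped through the hardware (the async digest path earlier in this file is where such a shortcut would live). A minimal, purely illustrative sketch of the idea, with hypothetical ex_ names:

#include <linux/errno.h>
#include <linux/string.h>

/* Illustration only: a template's precomputed empty-message digest can
 * satisfy a zero-length request without touching the SPU. */
static int ex_digest(const struct n2_hash_tmpl *tmpl, const void *data,
		     unsigned int len, void *out)
{
	if (len == 0) {
		/* Digest of "" is a per-algorithm constant. */
		memcpy(out, tmpl->hash_zero, tmpl->digest_size);
		return 0;
	}
	return -EOPNOTSUPP;	/* real requests are queued to the hardware */
}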
+ p->child_alg = n2ahash->alg.halg.base.cra_name; + memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg)); + INIT_LIST_HEAD(&p->derived.entry); + + ahash = &p->derived.alg; + ahash->digest = n2_hmac_async_digest; + ahash->setkey = n2_hmac_async_setkey; + + base = &ahash->halg.base; + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg); + + base->cra_ctxsize = sizeof(struct n2_hmac_ctx); + base->cra_init = n2_hmac_cra_init; + base->cra_exit = n2_hmac_cra_exit; + + list_add(&p->derived.entry, &hmac_algs); + err = crypto_register_ahash(ahash); + if (err) { + pr_err("%s alg registration failed\n", base->cra_name); + list_del(&p->derived.entry); + kfree(p); + } else { + pr_info("%s alg registered\n", base->cra_name); + } + return err; +} + +static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) +{ + struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); + struct hash_alg_common *halg; + struct crypto_alg *base; + struct ahash_alg *ahash; + int err; + + if (!p) + return -ENOMEM; + + p->hash_zero = tmpl->hash_zero; + p->hash_init = tmpl->hash_init; + p->auth_type = tmpl->auth_type; + p->hmac_type = tmpl->hmac_type; + p->hw_op_hashsz = tmpl->hw_op_hashsz; + p->digest_size = tmpl->digest_size; + + ahash = &p->alg; + ahash->init = n2_hash_async_init; + ahash->update = n2_hash_async_update; + ahash->final = n2_hash_async_final; + ahash->finup = n2_hash_async_finup; + ahash->digest = n2_hash_async_digest; + + halg = &ahash->halg; + halg->digestsize = tmpl->digest_size; + + base = &halg->base; + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name); + base->cra_priority = N2_CRA_PRIORITY; + base->cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK; + base->cra_blocksize = tmpl->block_size; + base->cra_ctxsize = sizeof(struct n2_hash_ctx); + base->cra_module = THIS_MODULE; + base->cra_init = n2_hash_cra_init; + base->cra_exit = n2_hash_cra_exit; + + list_add(&p->entry, &ahash_algs); + err = crypto_register_ahash(ahash); + if (err) { + pr_err("%s alg registration failed\n", base->cra_name); + list_del(&p->entry); + kfree(p); + } else { + pr_info("%s alg registered\n", base->cra_name); + } + if (!err && p->hmac_type != AUTH_TYPE_RESERVED) + err = __n2_register_one_hmac(p); + return err; +} + +static int n2_register_algs(void) +{ + int i, err = 0; + + mutex_lock(&spu_lock); + if (algs_registered++) + goto out; + + for (i = 0; i < NUM_HASH_TMPLS; i++) { + err = __n2_register_one_ahash(&hash_tmpls[i]); + if (err) { + __n2_unregister_algs(); + goto out; + } + } + for (i = 0; i < NUM_CIPHER_TMPLS; i++) { + err = __n2_register_one_cipher(&cipher_tmpls[i]); + if (err) { + __n2_unregister_algs(); + goto out; + } + } + +out: + mutex_unlock(&spu_lock); + return err; +} + +static void n2_unregister_algs(void) +{ + mutex_lock(&spu_lock); + if (!--algs_registered) + __n2_unregister_algs(); + mutex_unlock(&spu_lock); +} + +/* To map CWQ queues to interrupt sources, the hypervisor API provides + * a devino. This isn't very useful to us because all of the + * interrupts listed in the device_node have been translated to + * Linux virtual IRQ cookie numbers. 
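Once n2_register_algs() above succeeds, the implementations are reachable through the regular kernel crypto API, either by algorithm name ("cbc(aes)", "sha256", "hmac(sha1)", ...) or pinned by driver name ("cbc-aes-n2", "sha256-n2", ...); which implementation wins for a generic name depends on priority. The following is a hedged, self-contained sketch of a caller using the asynchronous ablkcipher interface of this kernel generation; the ex_ names are illustrative and error handling is kept minimal.

#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct ex_wait {
	struct completion done;
	int err;
};

static void ex_crypt_done(struct crypto_async_request *req, int err)
{
	struct ex_wait *w = req->data;

	if (err == -EINPROGRESS)
		return;		/* moved off the backlog, keep waiting */
	w->err = err;
	complete(&w->done);
}

static int ex_cbc_aes_one_block(void)
{
	u8 key[16] = { 0 }, iv[16] = { 0 };
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req = NULL;
	struct scatterlist sg;
	struct ex_wait w;
	void *buf = NULL;
	int err;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(16, GFP_KERNEL);	/* one AES block, DMA-able memory */
	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!buf || !req) {
		err = -ENOMEM;
		goto out;
	}

	init_completion(&w.done);
	err = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	sg_init_one(&sg, buf, 16);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					ex_crypt_done, &w);
	ablkcipher_request_set_crypt(req, &sg, &sg, 16, iv);

	err = crypto_ablkcipher_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&w.done);
		err = w.err;
	}
out:
	ablkcipher_request_free(req);
	kfree(buf);
	crypto_free_ablkcipher(tfm);
	return err;
}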
+ * + * So we have to back-translate, going through the 'intr' and 'ino' + * property tables of the n2cp MDESC node, matching it with the OF + * 'interrupts' property entries, in order to to figure out which + * devino goes to which already-translated IRQ. + */ +static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip, + unsigned long dev_ino) +{ + const unsigned int *dev_intrs; + unsigned int intr; + int i; + + for (i = 0; i < ip->num_intrs; i++) { + if (ip->ino_table[i].ino == dev_ino) + break; + } + if (i == ip->num_intrs) + return -ENODEV; + + intr = ip->ino_table[i].intr; + + dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL); + if (!dev_intrs) + return -ENODEV; + + for (i = 0; i < dev->archdata.num_irqs; i++) { + if (dev_intrs[i] == intr) + return i; + } + + return -ENODEV; +} + +static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip, + const char *irq_name, struct spu_queue *p, + irq_handler_t handler) +{ + unsigned long herr; + int index; + + herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino); + if (herr) + return -EINVAL; + + index = find_devino_index(dev, ip, p->devino); + if (index < 0) + return index; + + p->irq = dev->archdata.irqs[index]; + + sprintf(p->irq_name, "%s-%d", irq_name, index); + + return request_irq(p->irq, handler, 0, p->irq_name, p); +} + +static struct kmem_cache *queue_cache[2]; + +static void *new_queue(unsigned long q_type) +{ + return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL); +} + +static void free_queue(void *p, unsigned long q_type) +{ + return kmem_cache_free(queue_cache[q_type - 1], p); +} + +static int queue_cache_init(void) +{ + if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) + queue_cache[HV_NCS_QTYPE_MAU - 1] = + kmem_cache_create("mau_queue", + (MAU_NUM_ENTRIES * + MAU_ENTRY_SIZE), + MAU_ENTRY_SIZE, 0, NULL); + if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) + return -ENOMEM; + + if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) + queue_cache[HV_NCS_QTYPE_CWQ - 1] = + kmem_cache_create("cwq_queue", + (CWQ_NUM_ENTRIES * + CWQ_ENTRY_SIZE), + CWQ_ENTRY_SIZE, 0, NULL); + if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { + kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); + return -ENOMEM; + } + return 0; +} + +static void queue_cache_destroy(void) +{ + kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); + kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); +} + +static int spu_queue_register(struct spu_queue *p, unsigned long q_type) +{ + cpumask_var_t old_allowed; + unsigned long hv_ret; + + if (cpumask_empty(&p->sharing)) + return -EINVAL; + + if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL)) + return -ENOMEM; + + cpumask_copy(old_allowed, ¤t->cpus_allowed); + + set_cpus_allowed_ptr(current, &p->sharing); + + hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q), + CWQ_NUM_ENTRIES, &p->qhandle); + if (!hv_ret) + sun4v_ncs_sethead_marker(p->qhandle, 0); + + set_cpus_allowed_ptr(current, old_allowed); + + free_cpumask_var(old_allowed); + + return (hv_ret ? 
-EINVAL : 0); +} + +static int spu_queue_setup(struct spu_queue *p) +{ + int err; + + p->q = new_queue(p->q_type); + if (!p->q) + return -ENOMEM; + + err = spu_queue_register(p, p->q_type); + if (err) { + free_queue(p->q, p->q_type); + p->q = NULL; + } + + return err; +} + +static void spu_queue_destroy(struct spu_queue *p) +{ + unsigned long hv_ret; + + if (!p->q) + return; + + hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle); + + if (!hv_ret) + free_queue(p->q, p->q_type); +} + +static void spu_list_destroy(struct list_head *list) +{ + struct spu_queue *p, *n; + + list_for_each_entry_safe(p, n, list, list) { + int i; + + for (i = 0; i < NR_CPUS; i++) { + if (cpu_to_cwq[i] == p) + cpu_to_cwq[i] = NULL; + } + + if (p->irq) { + free_irq(p->irq, p); + p->irq = 0; + } + spu_queue_destroy(p); + list_del(&p->list); + kfree(p); + } +} + +/* Walk the backward arcs of a CWQ 'exec-unit' node, + * gathering cpu membership information. + */ +static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc, + struct platform_device *dev, + u64 node, struct spu_queue *p, + struct spu_queue **table) +{ + u64 arc; + + mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) { + u64 tgt = mdesc_arc_target(mdesc, arc); + const char *name = mdesc_node_name(mdesc, tgt); + const u64 *id; + + if (strcmp(name, "cpu")) + continue; + id = mdesc_get_property(mdesc, tgt, "id", NULL); + if (table[*id] != NULL) { + dev_err(&dev->dev, "%s: SPU cpu slot already set.\n", + dev->dev.of_node->full_name); + return -EINVAL; + } + cpumask_set_cpu(*id, &p->sharing); + table[*id] = p; + } + return 0; +} + +/* Process an 'exec-unit' MDESC node of type 'cwq'. */ +static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list, + struct platform_device *dev, struct mdesc_handle *mdesc, + u64 node, const char *iname, unsigned long q_type, + irq_handler_t handler, struct spu_queue **table) +{ + struct spu_queue *p; + int err; + + p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL); + if (!p) { + dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n", + dev->dev.of_node->full_name); + return -ENOMEM; + } + + cpumask_clear(&p->sharing); + spin_lock_init(&p->lock); + p->q_type = q_type; + INIT_LIST_HEAD(&p->jobs); + list_add(&p->list, list); + + err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table); + if (err) + return err; + + err = spu_queue_setup(p); + if (err) + return err; + + return spu_map_ino(dev, ip, iname, p, handler); +} + +static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev, + struct spu_mdesc_info *ip, struct list_head *list, + const char *exec_name, unsigned long q_type, + irq_handler_t handler, struct spu_queue **table) +{ + int err = 0; + u64 node; + + mdesc_for_each_node_by_name(mdesc, node, "exec-unit") { + const char *type; + + type = mdesc_get_property(mdesc, node, "type", NULL); + if (!type || strcmp(type, exec_name)) + continue; + + err = handle_exec_unit(ip, list, dev, mdesc, node, + exec_name, q_type, handler, table); + if (err) { + spu_list_destroy(list); + break; + } + } + + return err; +} + +static int get_irq_props(struct mdesc_handle *mdesc, u64 node, + struct spu_mdesc_info *ip) +{ + const u64 *ino; + int ino_len; + int i; + + ino = mdesc_get_property(mdesc, node, "ino", &ino_len); + if (!ino) { + printk("NO 'ino'\n"); + return -ENODEV; + } + + ip->num_intrs = ino_len / sizeof(u64); + ip->ino_table = kzalloc((sizeof(struct ino_blob) * + ip->num_intrs), + GFP_KERNEL); + if (!ip->ino_table) + return -ENOMEM; + + for (i = 0; i < ip->num_intrs; i++) { + 
struct ino_blob *b = &ip->ino_table[i]; + b->intr = i + 1; + b->ino = ino[i]; + } + + return 0; +} + +static int grab_mdesc_irq_props(struct mdesc_handle *mdesc, + struct platform_device *dev, + struct spu_mdesc_info *ip, + const char *node_name) +{ + const unsigned int *reg; + u64 node; + + reg = of_get_property(dev->dev.of_node, "reg", NULL); + if (!reg) + return -ENODEV; + + mdesc_for_each_node_by_name(mdesc, node, "virtual-device") { + const char *name; + const u64 *chdl; + + name = mdesc_get_property(mdesc, node, "name", NULL); + if (!name || strcmp(name, node_name)) + continue; + chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL); + if (!chdl || (*chdl != *reg)) + continue; + ip->cfg_handle = *chdl; + return get_irq_props(mdesc, node, ip); + } + + return -ENODEV; +} + +static unsigned long n2_spu_hvapi_major; +static unsigned long n2_spu_hvapi_minor; + +static int n2_spu_hvapi_register(void) +{ + int err; + + n2_spu_hvapi_major = 2; + n2_spu_hvapi_minor = 0; + + err = sun4v_hvapi_register(HV_GRP_NCS, + n2_spu_hvapi_major, + &n2_spu_hvapi_minor); + + if (!err) + pr_info("Registered NCS HVAPI version %lu.%lu\n", + n2_spu_hvapi_major, + n2_spu_hvapi_minor); + + return err; +} + +static void n2_spu_hvapi_unregister(void) +{ + sun4v_hvapi_unregister(HV_GRP_NCS); +} + +static int global_ref; + +static int grab_global_resources(void) +{ + int err = 0; + + mutex_lock(&spu_lock); + + if (global_ref++) + goto out; + + err = n2_spu_hvapi_register(); + if (err) + goto out; + + err = queue_cache_init(); + if (err) + goto out_hvapi_release; + + err = -ENOMEM; + cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS, + GFP_KERNEL); + if (!cpu_to_cwq) + goto out_queue_cache_destroy; + + cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS, + GFP_KERNEL); + if (!cpu_to_mau) + goto out_free_cwq_table; + + err = 0; + +out: + if (err) + global_ref--; + mutex_unlock(&spu_lock); + return err; + +out_free_cwq_table: + kfree(cpu_to_cwq); + cpu_to_cwq = NULL; + +out_queue_cache_destroy: + queue_cache_destroy(); + +out_hvapi_release: + n2_spu_hvapi_unregister(); + goto out; +} + +static void release_global_resources(void) +{ + mutex_lock(&spu_lock); + if (!--global_ref) { + kfree(cpu_to_cwq); + cpu_to_cwq = NULL; + + kfree(cpu_to_mau); + cpu_to_mau = NULL; + + queue_cache_destroy(); + n2_spu_hvapi_unregister(); + } + mutex_unlock(&spu_lock); +} + +static struct n2_crypto *alloc_n2cp(void) +{ + struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL); + + if (np) + INIT_LIST_HEAD(&np->cwq_list); + + return np; +} + +static void free_n2cp(struct n2_crypto *np) +{ + if (np->cwq_info.ino_table) { + kfree(np->cwq_info.ino_table); + np->cwq_info.ino_table = NULL; + } + + kfree(np); +} + +static void n2_spu_driver_version(void) +{ + static int n2_spu_version_printed; + + if (n2_spu_version_printed++ == 0) + pr_info("%s", version); +} + +static int n2_crypto_probe(struct platform_device *dev) +{ + struct mdesc_handle *mdesc; + const char *full_name; + struct n2_crypto *np; + int err; + + n2_spu_driver_version(); + + full_name = dev->dev.of_node->full_name; + pr_info("Found N2CP at %s\n", full_name); + + np = alloc_n2cp(); + if (!np) { + dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n", + full_name); + return -ENOMEM; + } + + err = grab_global_resources(); + if (err) { + dev_err(&dev->dev, "%s: Unable to grab " + "global resources.\n", full_name); + goto out_free_n2cp; + } + + mdesc = mdesc_grab(); + + if (!mdesc) { + dev_err(&dev->dev, "%s: Unable to grab MDESC.\n", + full_name); + err 
= -ENODEV; + goto out_free_global; + } + err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp"); + if (err) { + dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n", + full_name); + mdesc_release(mdesc); + goto out_free_global; + } + + err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list, + "cwq", HV_NCS_QTYPE_CWQ, cwq_intr, + cpu_to_cwq); + mdesc_release(mdesc); + + if (err) { + dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n", + full_name); + goto out_free_global; + } + + err = n2_register_algs(); + if (err) { + dev_err(&dev->dev, "%s: Unable to register algorithms.\n", + full_name); + goto out_free_spu_list; + } + + dev_set_drvdata(&dev->dev, np); + + return 0; + +out_free_spu_list: + spu_list_destroy(&np->cwq_list); + +out_free_global: + release_global_resources(); + +out_free_n2cp: + free_n2cp(np); + + return err; +} + +static int n2_crypto_remove(struct platform_device *dev) +{ + struct n2_crypto *np = dev_get_drvdata(&dev->dev); + + n2_unregister_algs(); + + spu_list_destroy(&np->cwq_list); + + release_global_resources(); + + free_n2cp(np); + + return 0; +} + +static struct n2_mau *alloc_ncp(void) +{ + struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL); + + if (mp) + INIT_LIST_HEAD(&mp->mau_list); + + return mp; +} + +static void free_ncp(struct n2_mau *mp) +{ + if (mp->mau_info.ino_table) { + kfree(mp->mau_info.ino_table); + mp->mau_info.ino_table = NULL; + } + + kfree(mp); +} + +static int n2_mau_probe(struct platform_device *dev) +{ + struct mdesc_handle *mdesc; + const char *full_name; + struct n2_mau *mp; + int err; + + n2_spu_driver_version(); + + full_name = dev->dev.of_node->full_name; + pr_info("Found NCP at %s\n", full_name); + + mp = alloc_ncp(); + if (!mp) { + dev_err(&dev->dev, "%s: Unable to allocate ncp.\n", + full_name); + return -ENOMEM; + } + + err = grab_global_resources(); + if (err) { + dev_err(&dev->dev, "%s: Unable to grab " + "global resources.\n", full_name); + goto out_free_ncp; + } + + mdesc = mdesc_grab(); + + if (!mdesc) { + dev_err(&dev->dev, "%s: Unable to grab MDESC.\n", + full_name); + err = -ENODEV; + goto out_free_global; + } + + err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp"); + if (err) { + dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n", + full_name); + mdesc_release(mdesc); + goto out_free_global; + } + + err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list, + "mau", HV_NCS_QTYPE_MAU, mau_intr, + cpu_to_mau); + mdesc_release(mdesc); + + if (err) { + dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n", + full_name); + goto out_free_global; + } + + dev_set_drvdata(&dev->dev, mp); + + return 0; + +out_free_global: + release_global_resources(); + +out_free_ncp: + free_ncp(mp); + + return err; +} + +static int n2_mau_remove(struct platform_device *dev) +{ + struct n2_mau *mp = dev_get_drvdata(&dev->dev); + + spu_list_destroy(&mp->mau_list); + + release_global_resources(); + + free_ncp(mp); + + return 0; +} + +static struct of_device_id n2_crypto_match[] = { + { + .name = "n2cp", + .compatible = "SUNW,n2-cwq", + }, + { + .name = "n2cp", + .compatible = "SUNW,vf-cwq", + }, + { + .name = "n2cp", + .compatible = "SUNW,kt-cwq", + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, n2_crypto_match); + +static struct platform_driver n2_crypto_driver = { + .driver = { + .name = "n2cp", + .of_match_table = n2_crypto_match, + }, + .probe = n2_crypto_probe, + .remove = n2_crypto_remove, +}; + +static struct of_device_id n2_mau_match[] = { + { + .name = "ncp", + .compatible = "SUNW,n2-mau", + }, + { + .name = "ncp", + 
.compatible = "SUNW,vf-mau", + }, + { + .name = "ncp", + .compatible = "SUNW,kt-mau", + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, n2_mau_match); + +static struct platform_driver n2_mau_driver = { + .driver = { + .name = "ncp", + .of_match_table = n2_mau_match, + }, + .probe = n2_mau_probe, + .remove = n2_mau_remove, +}; + +static int __init n2_init(void) +{ + int err = platform_driver_register(&n2_crypto_driver); + + if (!err) { + err = platform_driver_register(&n2_mau_driver); + if (err) + platform_driver_unregister(&n2_crypto_driver); + } + return err; +} + +static void __exit n2_exit(void) +{ + platform_driver_unregister(&n2_mau_driver); + platform_driver_unregister(&n2_crypto_driver); +} + +module_init(n2_init); +module_exit(n2_exit); diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h new file mode 100644 index 000000000..4bcbbeae9 --- /dev/null +++ b/drivers/crypto/n2_core.h @@ -0,0 +1,231 @@ +#ifndef _N2_CORE_H +#define _N2_CORE_H + +#ifndef __ASSEMBLY__ + +struct ino_blob { + u64 intr; + u64 ino; +}; + +struct spu_mdesc_info { + u64 cfg_handle; + struct ino_blob *ino_table; + int num_intrs; +}; + +struct n2_crypto { + struct spu_mdesc_info cwq_info; + struct list_head cwq_list; +}; + +struct n2_mau { + struct spu_mdesc_info mau_info; + struct list_head mau_list; +}; + +#define CWQ_ENTRY_SIZE 64 +#define CWQ_NUM_ENTRIES 64 + +#define MAU_ENTRY_SIZE 64 +#define MAU_NUM_ENTRIES 64 + +struct cwq_initial_entry { + u64 control; + u64 src_addr; + u64 auth_key_addr; + u64 auth_iv_addr; + u64 final_auth_state_addr; + u64 enc_key_addr; + u64 enc_iv_addr; + u64 dest_addr; +}; + +struct cwq_ext_entry { + u64 len; + u64 src_addr; + u64 resv1; + u64 resv2; + u64 resv3; + u64 resv4; + u64 resv5; + u64 resv6; +}; + +struct cwq_final_entry { + u64 control; + u64 src_addr; + u64 resv1; + u64 resv2; + u64 resv3; + u64 resv4; + u64 resv5; + u64 resv6; +}; + +#define CONTROL_LEN 0x000000000000ffffULL +#define CONTROL_LEN_SHIFT 0 +#define CONTROL_HMAC_KEY_LEN 0x0000000000ff0000ULL +#define CONTROL_HMAC_KEY_LEN_SHIFT 16 +#define CONTROL_ENC_TYPE 0x00000000ff000000ULL +#define CONTROL_ENC_TYPE_SHIFT 24 +#define ENC_TYPE_ALG_RC4_STREAM 0x00ULL +#define ENC_TYPE_ALG_RC4_NOSTREAM 0x04ULL +#define ENC_TYPE_ALG_DES 0x08ULL +#define ENC_TYPE_ALG_3DES 0x0cULL +#define ENC_TYPE_ALG_AES128 0x10ULL +#define ENC_TYPE_ALG_AES192 0x14ULL +#define ENC_TYPE_ALG_AES256 0x18ULL +#define ENC_TYPE_ALG_RESERVED 0x1cULL +#define ENC_TYPE_ALG_MASK 0x1cULL +#define ENC_TYPE_CHAINING_ECB 0x00ULL +#define ENC_TYPE_CHAINING_CBC 0x01ULL +#define ENC_TYPE_CHAINING_CFB 0x02ULL +#define ENC_TYPE_CHAINING_COUNTER 0x03ULL +#define ENC_TYPE_CHAINING_MASK 0x03ULL +#define CONTROL_AUTH_TYPE 0x0000001f00000000ULL +#define CONTROL_AUTH_TYPE_SHIFT 32 +#define AUTH_TYPE_RESERVED 0x00ULL +#define AUTH_TYPE_MD5 0x01ULL +#define AUTH_TYPE_SHA1 0x02ULL +#define AUTH_TYPE_SHA256 0x03ULL +#define AUTH_TYPE_CRC32 0x04ULL +#define AUTH_TYPE_HMAC_MD5 0x05ULL +#define AUTH_TYPE_HMAC_SHA1 0x06ULL +#define AUTH_TYPE_HMAC_SHA256 0x07ULL +#define AUTH_TYPE_TCP_CHECKSUM 0x08ULL +#define AUTH_TYPE_SSL_HMAC_MD5 0x09ULL +#define AUTH_TYPE_SSL_HMAC_SHA1 0x0aULL +#define AUTH_TYPE_SSL_HMAC_SHA256 0x0bULL +#define CONTROL_STRAND 0x000000e000000000ULL +#define CONTROL_STRAND_SHIFT 37 +#define CONTROL_HASH_LEN 0x0000ff0000000000ULL +#define CONTROL_HASH_LEN_SHIFT 40 +#define CONTROL_INTERRUPT 0x0001000000000000ULL +#define CONTROL_STORE_FINAL_AUTH_STATE 0x0002000000000000ULL +#define CONTROL_RESERVED 0x001c000000000000ULL +#define CONTROL_HV_DONE 
0x0004000000000000ULL +#define CONTROL_HV_PROTOCOL_ERROR 0x0008000000000000ULL +#define CONTROL_HV_HARDWARE_ERROR 0x0010000000000000ULL +#define CONTROL_END_OF_BLOCK 0x0020000000000000ULL +#define CONTROL_START_OF_BLOCK 0x0040000000000000ULL +#define CONTROL_ENCRYPT 0x0080000000000000ULL +#define CONTROL_OPCODE 0xff00000000000000ULL +#define CONTROL_OPCODE_SHIFT 56 +#define OPCODE_INPLACE_BIT 0x80ULL +#define OPCODE_SSL_KEYBLOCK 0x10ULL +#define OPCODE_COPY 0x20ULL +#define OPCODE_ENCRYPT 0x40ULL +#define OPCODE_AUTH_MAC 0x41ULL + +#endif /* !(__ASSEMBLY__) */ + +/* NCS v2.0 hypervisor interfaces */ +#define HV_NCS_QTYPE_MAU 0x01 +#define HV_NCS_QTYPE_CWQ 0x02 + +/* ncs_qconf() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_NCS_QCONF + * ARG0: Queue type (HV_NCS_QTYPE_{MAU,CWQ}) + * ARG1: Real address of queue, or handle for unconfigure + * ARG2: Number of entries in queue, zero for unconfigure + * RET0: status + * RET1: queue handle + * + * Configure a queue in the stream processing unit. + * + * The real address given as the base must be 64-byte + * aligned. + * + * The queue size can range from a minimum of 2 to a maximum + * of 64. The queue size must be a power of two. + * + * To unconfigure a queue, specify a length of zero and place + * the queue handle into ARG1. + * + * On configure success the hypervisor will set the FIRST, HEAD, + * and TAIL registers to the address of the first entry in the + * queue. The LAST register will be set to point to the last + * entry in the queue. + */ +#define HV_FAST_NCS_QCONF 0x111 + +/* ncs_qinfo() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_NCS_QINFO + * ARG0: Queue handle + * RET0: status + * RET1: Queue type (HV_NCS_QTYPE_{MAU,CWQ}) + * RET2: Queue base address + * RET3: Number of entries + */ +#define HV_FAST_NCS_QINFO 0x112 + +/* ncs_gethead() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_NCS_GETHEAD + * ARG0: Queue handle + * RET0: status + * RET1: queue head offset + */ +#define HV_FAST_NCS_GETHEAD 0x113 + +/* ncs_gettail() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_NCS_GETTAIL + * ARG0: Queue handle + * RET0: status + * RET1: queue tail offset + */ +#define HV_FAST_NCS_GETTAIL 0x114 + +/* ncs_settail() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_NCS_SETTAIL + * ARG0: Queue handle + * ARG1: New tail offset + * RET0: status + */ +#define HV_FAST_NCS_SETTAIL 0x115 + +/* ncs_qhandle_to_devino() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_NCS_QHANDLE_TO_DEVINO + * ARG0: Queue handle + * RET0: status + * RET1: devino + */ +#define HV_FAST_NCS_QHANDLE_TO_DEVINO 0x116 + +/* ncs_sethead_marker() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_NCS_SETHEAD_MARKER + * ARG0: Queue handle + * ARG1: New head offset + * RET0: status + */ +#define HV_FAST_NCS_SETHEAD_MARKER 0x117 + +#ifndef __ASSEMBLY__ +extern unsigned long sun4v_ncs_qconf(unsigned long queue_type, + unsigned long queue_ra, + unsigned long num_entries, + unsigned long *qhandle); +extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle, + unsigned long *queue_type, + unsigned long *queue_ra, + unsigned long *num_entries); +extern unsigned long sun4v_ncs_gethead(unsigned long qhandle, + unsigned long *head); +extern unsigned long sun4v_ncs_gettail(unsigned long qhandle, + unsigned long *tail); +extern unsigned long sun4v_ncs_settail(unsigned long qhandle, + unsigned long tail); +extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle, + unsigned long *devino); +extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle, + unsigned long head); +#endif /* 
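To make the CONTROL_* field definitions above concrete: they describe the first 64-bit word of a cwq_initial_entry. The driver assembles that word in a helper earlier in the file; the sketch below only shows how the documented masks and shifts combine for an AES-128 CBC encrypt submission, and the exact length encoding (len versus len - 1) should be taken from that helper rather than from this illustration.

#include <linux/types.h>

/* Illustration only: pack a CWQ control word for an AES-128/CBC encrypt,
 * using the masks and shifts defined above in this header. */
static u64 ex_cwq_control(unsigned int len, bool in_place)
{
	u64 op = OPCODE_ENCRYPT | (in_place ? OPCODE_INPLACE_BIT : 0);
	u64 word = 0;

	word |= op << CONTROL_OPCODE_SHIFT;		/* what to do        */
	word |= CONTROL_ENCRYPT;			/* direction         */
	word |= CONTROL_START_OF_BLOCK | CONTROL_END_OF_BLOCK;
	word |= (ENC_TYPE_ALG_AES128 | ENC_TYPE_CHAINING_CBC)
			<< CONTROL_ENC_TYPE_SHIFT;	/* cipher + chaining */
	word |= (u64)(len - 1) & CONTROL_LEN;		/* payload length; the
							 * driver's helper is
							 * authoritative here */
	return word;
}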
!(__ASSEMBLY__) */ + +#endif /* _N2_CORE_H */ diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig new file mode 100644 index 000000000..f82616621 --- /dev/null +++ b/drivers/crypto/nx/Kconfig @@ -0,0 +1,26 @@ +config CRYPTO_DEV_NX_ENCRYPT + tristate "Encryption acceleration support" + depends on PPC64 && IBMVIO + default y + select CRYPTO_AES + select CRYPTO_CBC + select CRYPTO_ECB + select CRYPTO_CCM + select CRYPTO_GCM + select CRYPTO_AUTHENC + select CRYPTO_XCBC + select CRYPTO_SHA256 + select CRYPTO_SHA512 + help + Support for Power7+ in-Nest encryption acceleration. This + module supports acceleration for AES and SHA2 algorithms. If you + choose 'M' here, this module will be called nx_crypto. + +config CRYPTO_DEV_NX_COMPRESS + tristate "Compression acceleration support" + depends on PPC64 && IBMVIO + default y + help + Support for Power7+ in-Nest compression acceleration. This + module supports acceleration for the 842 compression algorithm. If you + choose 'M' here, this module will be called nx_compress. diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile new file mode 100644 index 000000000..bb770ea45 --- /dev/null +++ b/drivers/crypto/nx/Makefile @@ -0,0 +1,14 @@ +obj-$(CONFIG_CRYPTO_DEV_NX_ENCRYPT) += nx-crypto.o +nx-crypto-objs := nx.o \ + nx_debugfs.o \ + nx-aes-cbc.o \ + nx-aes-ecb.o \ + nx-aes-gcm.o \ + nx-aes-ccm.o \ + nx-aes-ctr.o \ + nx-aes-xcbc.o \ + nx-sha256.o \ + nx-sha512.o + +obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o +nx-compress-objs := nx-842.o diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c new file mode 100644 index 000000000..887196e9b --- /dev/null +++ b/drivers/crypto/nx/nx-842.c @@ -0,0 +1,1603 @@ +/* + * Driver for IBM Power 842 compression accelerator + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + * Copyright (C) IBM Corporation, 2012 + * + * Authors: Robert Jennings <rcj@linux.vnet.ibm.com> + * Seth Jennings <sjenning@linux.vnet.ibm.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/nx842.h> +#include <linux/of.h> +#include <linux/slab.h> + +#include <asm/page.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" /* struct nx_csbcpb */ + +#define MODULE_NAME "nx-compress" +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>"); +MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); + +#define SHIFT_4K 12 +#define SHIFT_64K 16 +#define SIZE_4K (1UL << SHIFT_4K) +#define SIZE_64K (1UL << SHIFT_64K) + +/* IO buffer must be 128 byte aligned */ +#define IO_BUFFER_ALIGN 128 + +struct nx842_header { + int blocks_nr; /* number of compressed blocks */ + int offset; /* offset of the first block (from beginning of header) */ + int sizes[0]; /* size of compressed blocks */ +}; + +static inline int nx842_header_size(const struct nx842_header *hdr) +{ + return sizeof(struct nx842_header) + + hdr->blocks_nr * sizeof(hdr->sizes[0]); +} + +/* Macros for fields within nx_csbcpb */ +/* Check the valid bit within the csbcpb valid field */ +#define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7)) + +/* CE macros operate on the completion_extension field bits in the csbcpb. + * CE0 0=full completion, 1=partial completion + * CE1 0=CE0 indicates completion, 1=termination (output may be modified) + * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */ +#define NX842_CSBCPB_CE0(x) (x & BIT_MASK(7)) +#define NX842_CSBCPB_CE1(x) (x & BIT_MASK(6)) +#define NX842_CSBCPB_CE2(x) (x & BIT_MASK(5)) + +/* The NX unit accepts data only on 4K page boundaries */ +#define NX842_HW_PAGE_SHIFT SHIFT_4K +#define NX842_HW_PAGE_SIZE (ASM_CONST(1) << NX842_HW_PAGE_SHIFT) +#define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1)) + +enum nx842_status { + UNAVAILABLE, + AVAILABLE +}; + +struct ibm_nx842_counters { + atomic64_t comp_complete; + atomic64_t comp_failed; + atomic64_t decomp_complete; + atomic64_t decomp_failed; + atomic64_t swdecomp; + atomic64_t comp_times[32]; + atomic64_t decomp_times[32]; +}; + +static struct nx842_devdata { + struct vio_dev *vdev; + struct device *dev; + struct ibm_nx842_counters *counters; + unsigned int max_sg_len; + unsigned int max_sync_size; + unsigned int max_sync_sg; + enum nx842_status status; +} __rcu *devdata; +static DEFINE_SPINLOCK(devdata_mutex); + +#define NX842_COUNTER_INC(_x) \ +static inline void nx842_inc_##_x( \ + const struct nx842_devdata *dev) { \ + if (dev) \ + atomic64_inc(&dev->counters->_x); \ +} +NX842_COUNTER_INC(comp_complete); +NX842_COUNTER_INC(comp_failed); +NX842_COUNTER_INC(decomp_complete); +NX842_COUNTER_INC(decomp_failed); +NX842_COUNTER_INC(swdecomp); + +#define NX842_HIST_SLOTS 16 + +static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time) +{ + int bucket = fls(time); + + if (bucket) + bucket = min((NX842_HIST_SLOTS - 1), bucket - 1); + + atomic64_inc(×[bucket]); +} + +/* NX unit operation flags */ +#define NX842_OP_COMPRESS 0x0 +#define NX842_OP_CRC 0x1 +#define NX842_OP_DECOMPRESS 0x2 +#define NX842_OP_COMPRESS_CRC (NX842_OP_COMPRESS | NX842_OP_CRC) +#define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC) +#define NX842_OP_ASYNC (1<<23) +#define NX842_OP_NOTIFY (1<<22) +#define NX842_OP_NOTIFY_INT(x) ((x & 0xff)<<8) + +static unsigned long nx842_get_desired_dma(struct vio_dev *viodev) +{ + /* No use of DMA mappings within the 
driver. */ + return 0; +} + +struct nx842_slentry { + unsigned long ptr; /* Real address (use __pa()) */ + unsigned long len; +}; + +/* pHyp scatterlist entry */ +struct nx842_scatterlist { + int entry_nr; /* number of slentries */ + struct nx842_slentry *entries; /* ptr to array of slentries */ +}; + +/* Does not include sizeof(entry_nr) in the size */ +static inline unsigned long nx842_get_scatterlist_size( + struct nx842_scatterlist *sl) +{ + return sl->entry_nr * sizeof(struct nx842_slentry); +} + +static inline unsigned long nx842_get_pa(void *addr) +{ + if (is_vmalloc_addr(addr)) + return page_to_phys(vmalloc_to_page(addr)) + + offset_in_page(addr); + else + return __pa(addr); +} + +static int nx842_build_scatterlist(unsigned long buf, int len, + struct nx842_scatterlist *sl) +{ + unsigned long nextpage; + struct nx842_slentry *entry; + + sl->entry_nr = 0; + + entry = sl->entries; + while (len) { + entry->ptr = nx842_get_pa((void *)buf); + nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE); + if (nextpage < buf + len) { + /* we aren't at the end yet */ + if (IS_ALIGNED(buf, NX842_HW_PAGE_SIZE)) + /* we are in the middle (or beginning) */ + entry->len = NX842_HW_PAGE_SIZE; + else + /* we are at the beginning */ + entry->len = nextpage - buf; + } else { + /* at the end */ + entry->len = len; + } + + len -= entry->len; + buf += entry->len; + sl->entry_nr++; + entry++; + } + + return 0; +} + +/* + * Working memory for software decompression + */ +struct sw842_fifo { + union { + char f8[256][8]; + char f4[512][4]; + }; + char f2[256][2]; + unsigned char f84_full; + unsigned char f2_full; + unsigned char f8_count; + unsigned char f2_count; + unsigned int f4_count; +}; + +/* + * Working memory for crypto API + */ +struct nx842_workmem { + char bounce[PAGE_SIZE]; /* bounce buffer for decompression input */ + union { + /* hardware working memory */ + struct { + /* scatterlist */ + char slin[SIZE_4K]; + char slout[SIZE_4K]; + /* coprocessor status/parameter block */ + struct nx_csbcpb csbcpb; + }; + /* software working memory */ + struct sw842_fifo swfifo; /* software decompression fifo */ + }; +}; + +int nx842_get_workmem_size(void) +{ + return sizeof(struct nx842_workmem) + NX842_HW_PAGE_SIZE; +} +EXPORT_SYMBOL_GPL(nx842_get_workmem_size); + +int nx842_get_workmem_size_aligned(void) +{ + return sizeof(struct nx842_workmem); +} +EXPORT_SYMBOL_GPL(nx842_get_workmem_size_aligned); + +static int nx842_validate_result(struct device *dev, + struct cop_status_block *csb) +{ + /* The csb must be valid after returning from vio_h_cop_sync */ + if (!NX842_CSBCBP_VALID_CHK(csb->valid)) { + dev_err(dev, "%s: cspcbp not valid upon completion.\n", + __func__); + dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n", + csb->valid, + csb->crb_seq_number, + csb->completion_code, + csb->completion_extension); + dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n", + csb->processed_byte_count, + (unsigned long)csb->address); + return -EIO; + } + + /* Check return values from the hardware in the CSB */ + switch (csb->completion_code) { + case 0: /* Completed without error */ + break; + case 64: /* Target bytes > Source bytes during compression */ + case 13: /* Output buffer too small */ + dev_dbg(dev, "%s: Compression output larger than input\n", + __func__); + return -ENOSPC; + case 66: /* Input data contains an illegal template field */ + case 67: /* Template indicates data past the end of the input stream */ + dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n", + __func__, csb->completion_code); + 
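The output of nx842_compress() (defined below) is laid out as the nx842_header from the previous hunk followed by 128-byte-aligned blocks, with one signed sizes[] entry per block and a negative size marking a block stored uncompressed. A small worked example of the header arithmetic, assuming a 64 KB source page, the 4 KB max_sync_size default mentioned in the NOTE comments further down, and an output buffer that is itself 128-byte aligned:

#include <linux/kernel.h>

/* Illustration only: header bookkeeping for PAGE_SIZE = 64 KB and
 * max_sync_size = 4 KB. */
static void ex_header_math(void)
{
	unsigned int page_size = SIZE_64K;		/* 65536 */
	unsigned int max_sync_size = SIZE_4K;		/*  4096 */
	unsigned int blocks = page_size / max_sync_size;	/* 16 blocks */

	/* sizeof(struct nx842_header) covers blocks_nr and offset (8 bytes);
	 * each block adds one int to sizes[]:  8 + 16 * 4 = 72 bytes. */
	unsigned int hdrsize = sizeof(struct nx842_header) +
			       blocks * sizeof(int);

	/* With a 128-byte-aligned output buffer, the first block then starts
	 * at the next IO_BUFFER_ALIGN boundary past the header, which is the
	 * value recorded in hdr->offset by the compress loop below. */
	unsigned int first_block = ALIGN(hdrsize, IO_BUFFER_ALIGN);	/* 128 */

	(void)first_block;
}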
return -EINVAL; + default: + dev_dbg(dev, "%s: Unspecified error (code:%d)\n", + __func__, csb->completion_code); + return -EIO; + } + + /* Hardware sanity check */ + if (!NX842_CSBCPB_CE2(csb->completion_extension)) { + dev_err(dev, "%s: No error returned by hardware, but " + "data returned is unusable, contact support.\n" + "(Additional info: csbcbp->processed bytes " + "does not specify processed bytes for the " + "target buffer.)\n", __func__); + return -EIO; + } + + return 0; +} + +/** + * nx842_compress - Compress data using the 842 algorithm + * + * Compression provide by the NX842 coprocessor on IBM Power systems. + * The input buffer is compressed and the result is stored in the + * provided output buffer. + * + * Upon return from this function @outlen contains the length of the + * compressed data. If there is an error then @outlen will be 0 and an + * error will be specified by the return code from this function. + * + * @in: Pointer to input buffer, must be page aligned + * @inlen: Length of input buffer, must be PAGE_SIZE + * @out: Pointer to output buffer + * @outlen: Length of output buffer + * @wrkmem: ptr to buffer for working memory, size determined by + * nx842_get_workmem_size() + * + * Returns: + * 0 Success, output of length @outlen stored in the buffer at @out + * -ENOMEM Unable to allocate internal buffers + * -ENOSPC Output buffer is to small + * -EMSGSIZE XXX Difficult to describe this limitation + * -EIO Internal error + * -ENODEV Hardware unavailable + */ +int nx842_compress(const unsigned char *in, unsigned int inlen, + unsigned char *out, unsigned int *outlen, void *wmem) +{ + struct nx842_header *hdr; + struct nx842_devdata *local_devdata; + struct device *dev = NULL; + struct nx842_workmem *workmem; + struct nx842_scatterlist slin, slout; + struct nx_csbcpb *csbcpb; + int ret = 0, max_sync_size, i, bytesleft, size, hdrsize; + unsigned long inbuf, outbuf, padding; + struct vio_pfo_op op = { + .done = NULL, + .handle = 0, + .timeout = 0, + }; + unsigned long start_time = get_tb(); + + /* + * Make sure input buffer is 64k page aligned. This is assumed since + * this driver is designed for page compression only (for now). This + * is very nice since we can now use direct DDE(s) for the input and + * the alignment is guaranteed. + */ + inbuf = (unsigned long)in; + if (!IS_ALIGNED(inbuf, PAGE_SIZE) || inlen != PAGE_SIZE) + return -EINVAL; + + rcu_read_lock(); + local_devdata = rcu_dereference(devdata); + if (!local_devdata || !local_devdata->dev) { + rcu_read_unlock(); + return -ENODEV; + } + max_sync_size = local_devdata->max_sync_size; + dev = local_devdata->dev; + + /* Create the header */ + hdr = (struct nx842_header *)out; + hdr->blocks_nr = PAGE_SIZE / max_sync_size; + hdrsize = nx842_header_size(hdr); + outbuf = (unsigned long)out + hdrsize; + bytesleft = *outlen - hdrsize; + + /* Init scatterlist */ + workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem, + NX842_HW_PAGE_SIZE); + slin.entries = (struct nx842_slentry *)workmem->slin; + slout.entries = (struct nx842_slentry *)workmem->slout; + + /* Init operation */ + op.flags = NX842_OP_COMPRESS; + csbcpb = &workmem->csbcpb; + memset(csbcpb, 0, sizeof(*csbcpb)); + op.csbcpb = nx842_get_pa(csbcpb); + op.out = nx842_get_pa(slout.entries); + + for (i = 0; i < hdr->blocks_nr; i++) { + /* + * Aligning the output blocks to 128 bytes does waste space, + * but it prevents the need for bounce buffers and memory + * copies. It also simplifies the code a lot. 
In the worst + * case (64k page, 4k max_sync_size), you lose up to + * (128*16)/64k = ~3% the compression factor. For 64k + * max_sync_size, the loss would be at most 128/64k = ~0.2%. + */ + padding = ALIGN(outbuf, IO_BUFFER_ALIGN) - outbuf; + outbuf += padding; + bytesleft -= padding; + if (i == 0) + /* save offset into first block in header */ + hdr->offset = padding + hdrsize; + + if (bytesleft <= 0) { + ret = -ENOSPC; + goto unlock; + } + + /* + * NOTE: If the default max_sync_size is changed from 4k + * to 64k, remove the "likely" case below, since a + * scatterlist will always be needed. + */ + if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { + /* Create direct DDE */ + op.in = nx842_get_pa((void *)inbuf); + op.inlen = max_sync_size; + + } else { + /* Create indirect DDE (scatterlist) */ + nx842_build_scatterlist(inbuf, max_sync_size, &slin); + op.in = nx842_get_pa(slin.entries); + op.inlen = -nx842_get_scatterlist_size(&slin); + } + + /* + * If max_sync_size != NX842_HW_PAGE_SIZE, an indirect + * DDE is required for the outbuf. + * If max_sync_size == NX842_HW_PAGE_SIZE, outbuf must + * also be page aligned (1 in 128/4k=32 chance) in order + * to use a direct DDE. + * This is unlikely, just use an indirect DDE always. + */ + nx842_build_scatterlist(outbuf, + min(bytesleft, max_sync_size), &slout); + /* op.out set before loop */ + op.outlen = -nx842_get_scatterlist_size(&slout); + + /* Send request to pHyp */ + ret = vio_h_cop_sync(local_devdata->vdev, &op); + + /* Check for pHyp error */ + if (ret) { + dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", + __func__, ret, op.hcall_err); + ret = -EIO; + goto unlock; + } + + /* Check for hardware error */ + ret = nx842_validate_result(dev, &csbcpb->csb); + if (ret && ret != -ENOSPC) + goto unlock; + + /* Handle incompressible data */ + if (unlikely(ret == -ENOSPC)) { + if (bytesleft < max_sync_size) { + /* + * Not enough space left in the output buffer + * to store uncompressed block + */ + goto unlock; + } else { + /* Store incompressible block */ + memcpy((void *)outbuf, (void *)inbuf, + max_sync_size); + hdr->sizes[i] = -max_sync_size; + outbuf += max_sync_size; + bytesleft -= max_sync_size; + /* Reset ret, incompressible data handled */ + ret = 0; + } + } else { + /* Normal case, compression was successful */ + size = csbcpb->csb.processed_byte_count; + dev_dbg(dev, "%s: processed_bytes=%d\n", + __func__, size); + hdr->sizes[i] = size; + outbuf += size; + bytesleft -= size; + } + + inbuf += max_sync_size; + } + + *outlen = (unsigned int)(outbuf - (unsigned long)out); + +unlock: + if (ret) + nx842_inc_comp_failed(local_devdata); + else { + nx842_inc_comp_complete(local_devdata); + ibm_nx842_incr_hist(local_devdata->counters->comp_times, + (get_tb() - start_time) / tb_ticks_per_usec); + } + rcu_read_unlock(); + return ret; +} +EXPORT_SYMBOL_GPL(nx842_compress); + +static int sw842_decompress(const unsigned char *, int, unsigned char *, int *, + const void *); + +/** + * nx842_decompress - Decompress data using the 842 algorithm + * + * Decompression provide by the NX842 coprocessor on IBM Power systems. + * The input buffer is decompressed and the result is stored in the + * provided output buffer. The size allocated to the output buffer is + * provided by the caller of this function in @outlen. Upon return from + * this function @outlen contains the length of the decompressed data. + * If there is an error then @outlen will be 0 and an error will be + * specified by the return code from this function. 
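Taken together, the kerneldoc above and below gives the calling convention: the source of nx842_compress() and the destination of nx842_decompress() must be one full, page-aligned page, and both calls need scratch memory sized by nx842_get_workmem_size(). A hedged caller sketch follows, assuming these symbols are the ones exported through <linux/nx842.h> (included at the top of this file); doubling the compressed-output buffer is merely a conservative allowance for the header plus incompressible blocks, not a documented requirement.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/nx842.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Illustration only: round-trip one page through the 842 accelerator. */
static int ex_842_roundtrip(void)
{
	unsigned int clen, dlen = PAGE_SIZE;
	unsigned long src, dst;
	void *comp, *wmem;
	int err = -ENOMEM;

	src = __get_free_page(GFP_KERNEL);		/* page-aligned input  */
	dst = __get_free_page(GFP_KERNEL);		/* page-aligned output */
	comp = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);	/* compressed image    */
	wmem = kmalloc(nx842_get_workmem_size(), GFP_KERNEL);
	if (!src || !dst || !comp || !wmem)
		goto out;

	memset((void *)src, 0x42, PAGE_SIZE);

	clen = 2 * PAGE_SIZE;
	err = nx842_compress((void *)src, PAGE_SIZE, comp, &clen, wmem);
	if (err)
		goto out;

	err = nx842_decompress(comp, clen, (void *)dst, &dlen, wmem);
	if (!err && (dlen != PAGE_SIZE ||
		     memcmp((void *)src, (void *)dst, PAGE_SIZE)))
		err = -EIO;
out:
	kfree(wmem);
	kfree(comp);
	free_page(dst);
	free_page(src);
	return err;
}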
+ * + * @in: Pointer to input buffer, will use bounce buffer if not 128 byte + * aligned + * @inlen: Length of input buffer + * @out: Pointer to output buffer, must be page aligned + * @outlen: Length of output buffer, must be PAGE_SIZE + * @wrkmem: ptr to buffer for working memory, size determined by + * nx842_get_workmem_size() + * + * Returns: + * 0 Success, output of length @outlen stored in the buffer at @out + * -ENODEV Hardware decompression device is unavailable + * -ENOMEM Unable to allocate internal buffers + * -ENOSPC Output buffer is to small + * -EINVAL Bad input data encountered when attempting decompress + * -EIO Internal error + */ +int nx842_decompress(const unsigned char *in, unsigned int inlen, + unsigned char *out, unsigned int *outlen, void *wmem) +{ + struct nx842_header *hdr; + struct nx842_devdata *local_devdata; + struct device *dev = NULL; + struct nx842_workmem *workmem; + struct nx842_scatterlist slin, slout; + struct nx_csbcpb *csbcpb; + int ret = 0, i, size, max_sync_size; + unsigned long inbuf, outbuf; + struct vio_pfo_op op = { + .done = NULL, + .handle = 0, + .timeout = 0, + }; + unsigned long start_time = get_tb(); + + /* Ensure page alignment and size */ + outbuf = (unsigned long)out; + if (!IS_ALIGNED(outbuf, PAGE_SIZE) || *outlen != PAGE_SIZE) + return -EINVAL; + + rcu_read_lock(); + local_devdata = rcu_dereference(devdata); + if (local_devdata) + dev = local_devdata->dev; + + /* Get header */ + hdr = (struct nx842_header *)in; + + workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem, + NX842_HW_PAGE_SIZE); + + inbuf = (unsigned long)in + hdr->offset; + if (likely(!IS_ALIGNED(inbuf, IO_BUFFER_ALIGN))) { + /* Copy block(s) into bounce buffer for alignment */ + memcpy(workmem->bounce, in + hdr->offset, inlen - hdr->offset); + inbuf = (unsigned long)workmem->bounce; + } + + /* Init scatterlist */ + slin.entries = (struct nx842_slentry *)workmem->slin; + slout.entries = (struct nx842_slentry *)workmem->slout; + + /* Init operation */ + op.flags = NX842_OP_DECOMPRESS; + csbcpb = &workmem->csbcpb; + memset(csbcpb, 0, sizeof(*csbcpb)); + op.csbcpb = nx842_get_pa(csbcpb); + + /* + * max_sync_size may have changed since compression, + * so we can't read it from the device info. We need + * to derive it from hdr->blocks_nr. + */ + max_sync_size = PAGE_SIZE / hdr->blocks_nr; + + for (i = 0; i < hdr->blocks_nr; i++) { + /* Skip padding */ + inbuf = ALIGN(inbuf, IO_BUFFER_ALIGN); + + if (hdr->sizes[i] < 0) { + /* Negative sizes indicate uncompressed data blocks */ + size = abs(hdr->sizes[i]); + memcpy((void *)outbuf, (void *)inbuf, size); + outbuf += size; + inbuf += size; + continue; + } + + if (!dev) + goto sw; + + /* + * The better the compression, the more likely the "likely" + * case becomes. + */ + if (likely((inbuf & NX842_HW_PAGE_MASK) == + ((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) { + /* Create direct DDE */ + op.in = nx842_get_pa((void *)inbuf); + op.inlen = hdr->sizes[i]; + } else { + /* Create indirect DDE (scatterlist) */ + nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin); + op.in = nx842_get_pa(slin.entries); + op.inlen = -nx842_get_scatterlist_size(&slin); + } + + /* + * NOTE: If the default max_sync_size is changed from 4k + * to 64k, remove the "likely" case below, since a + * scatterlist will always be needed. 
+ */ + if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { + /* Create direct DDE */ + op.out = nx842_get_pa((void *)outbuf); + op.outlen = max_sync_size; + } else { + /* Create indirect DDE (scatterlist) */ + nx842_build_scatterlist(outbuf, max_sync_size, &slout); + op.out = nx842_get_pa(slout.entries); + op.outlen = -nx842_get_scatterlist_size(&slout); + } + + /* Send request to pHyp */ + ret = vio_h_cop_sync(local_devdata->vdev, &op); + + /* Check for pHyp error */ + if (ret) { + dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", + __func__, ret, op.hcall_err); + dev = NULL; + goto sw; + } + + /* Check for hardware error */ + ret = nx842_validate_result(dev, &csbcpb->csb); + if (ret) { + dev = NULL; + goto sw; + } + + /* HW decompression success */ + inbuf += hdr->sizes[i]; + outbuf += csbcpb->csb.processed_byte_count; + continue; + +sw: + /* software decompression */ + size = max_sync_size; + ret = sw842_decompress( + (unsigned char *)inbuf, hdr->sizes[i], + (unsigned char *)outbuf, &size, wmem); + if (ret) + pr_debug("%s: sw842_decompress failed with %d\n", + __func__, ret); + + if (ret) { + if (ret != -ENOSPC && ret != -EINVAL && + ret != -EMSGSIZE) + ret = -EIO; + goto unlock; + } + + /* SW decompression success */ + inbuf += hdr->sizes[i]; + outbuf += size; + } + + *outlen = (unsigned int)(outbuf - (unsigned long)out); + +unlock: + if (ret) + /* decompress fail */ + nx842_inc_decomp_failed(local_devdata); + else { + if (!dev) + /* software decompress */ + nx842_inc_swdecomp(local_devdata); + nx842_inc_decomp_complete(local_devdata); + ibm_nx842_incr_hist(local_devdata->counters->decomp_times, + (get_tb() - start_time) / tb_ticks_per_usec); + } + + rcu_read_unlock(); + return ret; +} +EXPORT_SYMBOL_GPL(nx842_decompress); + +/** + * nx842_OF_set_defaults -- Set default (disabled) values for devdata + * + * @devdata - struct nx842_devdata to update + * + * Returns: + * 0 on success + * -ENOENT if @devdata ptr is NULL + */ +static int nx842_OF_set_defaults(struct nx842_devdata *devdata) +{ + if (devdata) { + devdata->max_sync_size = 0; + devdata->max_sync_sg = 0; + devdata->max_sg_len = 0; + devdata->status = UNAVAILABLE; + return 0; + } else + return -ENOENT; +} + +/** + * nx842_OF_upd_status -- Update the device info from OF status prop + * + * The status property indicates if the accelerator is enabled. If the + * device is in the OF tree it indicates that the hardware is present. + * The status field indicates if the device is enabled when the status + * is 'okay'. Otherwise the device driver will be disabled. + * + * @devdata - struct nx842_devdata to update + * @prop - struct property point containing the maxsyncop for the update + * + * Returns: + * 0 - Device is available + * -EINVAL - Device is not available + */ +static int nx842_OF_upd_status(struct nx842_devdata *devdata, + struct property *prop) { + int ret = 0; + const char *status = (const char *)prop->value; + + if (!strncmp(status, "okay", (size_t)prop->length)) { + devdata->status = AVAILABLE; + } else { + dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n", + __func__, status); + devdata->status = UNAVAILABLE; + } + + return ret; +} + +/** + * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop + * + * Definition of the 'ibm,max-sg-len' OF property: + * This field indicates the maximum byte length of a scatter list + * for the platform facility. It is a single cell encoded as with encode-int. 
+ * + * Example: + * # od -x ibm,max-sg-len + * 0000000 0000 0ff0 + * + * In this example, the maximum byte length of a scatter list is + * 0x0ff0 (4,080). + * + * @devdata - struct nx842_devdata to update + * @prop - struct property point containing the maxsyncop for the update + * + * Returns: + * 0 on success + * -EINVAL on failure + */ +static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata, + struct property *prop) { + int ret = 0; + const int *maxsglen = prop->value; + + if (prop->length != sizeof(*maxsglen)) { + dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__); + dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__, + prop->length, sizeof(*maxsglen)); + ret = -EINVAL; + } else { + devdata->max_sg_len = (unsigned int)min(*maxsglen, + (int)NX842_HW_PAGE_SIZE); + } + + return ret; +} + +/** + * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop + * + * Definition of the 'ibm,max-sync-cop' OF property: + * Two series of cells. The first series of cells represents the maximums + * that can be synchronously compressed. The second series of cells + * represents the maximums that can be synchronously decompressed. + * 1. The first cell in each series contains the count of the number of + * data length, scatter list elements pairs that follow – each being + * of the form + * a. One cell data byte length + * b. One cell total number of scatter list elements + * + * Example: + * # od -x ibm,max-sync-cop + * 0000000 0000 0001 0000 1000 0000 01fe 0000 0001 + * 0000020 0000 1000 0000 01fe + * + * In this example, compression supports 0x1000 (4,096) data byte length + * and 0x1fe (510) total scatter list elements. Decompression supports + * 0x1000 (4,096) data byte length and 0x1f3 (510) total scatter list + * elements. + * + * @devdata - struct nx842_devdata to update + * @prop - struct property point containing the maxsyncop for the update + * + * Returns: + * 0 on success + * -EINVAL on failure + */ +static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata, + struct property *prop) { + int ret = 0; + const struct maxsynccop_t { + int comp_elements; + int comp_data_limit; + int comp_sg_limit; + int decomp_elements; + int decomp_data_limit; + int decomp_sg_limit; + } *maxsynccop; + + if (prop->length != sizeof(*maxsynccop)) { + dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__); + dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length, + sizeof(*maxsynccop)); + ret = -EINVAL; + goto out; + } + + maxsynccop = (const struct maxsynccop_t *)prop->value; + + /* Use one limit rather than separate limits for compression and + * decompression. 
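+ * (the smaller of the two data limits reported in ibm,max-sync-cop,
+ * so a single buffer size is safe in both directions; with the
+ * example property above that is 0x1000 (4,096) bytes and 0x1fe
+ * (510) sg elements).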
Set a maximum for this so as not to exceed the + * size that the header can support and round the value down to + * the hardware page size (4K) */ + devdata->max_sync_size = + (unsigned int)min(maxsynccop->comp_data_limit, + maxsynccop->decomp_data_limit); + + devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size, + SIZE_64K); + + if (devdata->max_sync_size < SIZE_4K) { + dev_err(devdata->dev, "%s: hardware max data size (%u) is " + "less than the driver minimum, unable to use " + "the hardware device\n", + __func__, devdata->max_sync_size); + ret = -EINVAL; + goto out; + } + + devdata->max_sync_sg = (unsigned int)min(maxsynccop->comp_sg_limit, + maxsynccop->decomp_sg_limit); + if (devdata->max_sync_sg < 1) { + dev_err(devdata->dev, "%s: hardware max sg size (%u) is " + "less than the driver minimum, unable to use " + "the hardware device\n", + __func__, devdata->max_sync_sg); + ret = -EINVAL; + goto out; + } + +out: + return ret; +} + +/** + * + * nx842_OF_upd -- Handle OF properties updates for the device. + * + * Set all properties from the OF tree. Optionally, a new property + * can be provided by the @new_prop pointer to overwrite an existing value. + * The device will remain disabled until all values are valid, this function + * will return an error for updates unless all values are valid. + * + * @new_prop: If not NULL, this property is being updated. If NULL, update + * all properties from the current values in the OF tree. + * + * Returns: + * 0 - Success + * -ENOMEM - Could not allocate memory for new devdata structure + * -EINVAL - property value not found, new_prop is not a recognized + * property for the device or property value is not valid. + * -ENODEV - Device is not available + */ +static int nx842_OF_upd(struct property *new_prop) +{ + struct nx842_devdata *old_devdata = NULL; + struct nx842_devdata *new_devdata = NULL; + struct device_node *of_node = NULL; + struct property *status = NULL; + struct property *maxsglen = NULL; + struct property *maxsyncop = NULL; + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&devdata_mutex, flags); + old_devdata = rcu_dereference_check(devdata, + lockdep_is_held(&devdata_mutex)); + if (old_devdata) + of_node = old_devdata->dev->of_node; + + if (!old_devdata || !of_node) { + pr_err("%s: device is not available\n", __func__); + spin_unlock_irqrestore(&devdata_mutex, flags); + return -ENODEV; + } + + new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); + if (!new_devdata) { + dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__); + ret = -ENOMEM; + goto error_out; + } + + memcpy(new_devdata, old_devdata, sizeof(*old_devdata)); + new_devdata->counters = old_devdata->counters; + + /* Set ptrs for existing properties */ + status = of_find_property(of_node, "status", NULL); + maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL); + maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL); + if (!status || !maxsglen || !maxsyncop) { + dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__); + ret = -EINVAL; + goto error_out; + } + + /* + * If this is a property update, there are only certain properties that + * we care about. 
Bail if it isn't in the below list + */ + if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) || + strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) || + strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length))) + goto out; + + /* Perform property updates */ + ret = nx842_OF_upd_status(new_devdata, status); + if (ret) + goto error_out; + + ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen); + if (ret) + goto error_out; + + ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop); + if (ret) + goto error_out; + +out: + dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n", + __func__, new_devdata->max_sync_size, + old_devdata->max_sync_size); + dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n", + __func__, new_devdata->max_sync_sg, + old_devdata->max_sync_sg); + dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n", + __func__, new_devdata->max_sg_len, + old_devdata->max_sg_len); + + rcu_assign_pointer(devdata, new_devdata); + spin_unlock_irqrestore(&devdata_mutex, flags); + synchronize_rcu(); + dev_set_drvdata(new_devdata->dev, new_devdata); + kfree(old_devdata); + return 0; + +error_out: + if (new_devdata) { + dev_info(old_devdata->dev, "%s: device disabled\n", __func__); + nx842_OF_set_defaults(new_devdata); + rcu_assign_pointer(devdata, new_devdata); + spin_unlock_irqrestore(&devdata_mutex, flags); + synchronize_rcu(); + dev_set_drvdata(new_devdata->dev, new_devdata); + kfree(old_devdata); + } else { + dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__); + spin_unlock_irqrestore(&devdata_mutex, flags); + } + + if (!ret) + ret = -EINVAL; + return ret; +} + +/** + * nx842_OF_notifier - Process updates to OF properties for the device + * + * @np: notifier block + * @action: notifier action + * @update: struct pSeries_reconfig_prop_update pointer if action is + * PSERIES_UPDATE_PROPERTY + * + * Returns: + * NOTIFY_OK on success + * NOTIFY_BAD encoded with error number on failure, use + * notifier_to_errno() to decode this value + */ +static int nx842_OF_notifier(struct notifier_block *np, unsigned long action, + void *data) +{ + struct of_reconfig_data *upd = data; + struct nx842_devdata *local_devdata; + struct device_node *node = NULL; + + rcu_read_lock(); + local_devdata = rcu_dereference(devdata); + if (local_devdata) + node = local_devdata->dev->of_node; + + if (local_devdata && + action == OF_RECONFIG_UPDATE_PROPERTY && + !strcmp(upd->dn->name, node->name)) { + rcu_read_unlock(); + nx842_OF_upd(upd->prop); + } else + rcu_read_unlock(); + + return NOTIFY_OK; +} + +static struct notifier_block nx842_of_nb = { + .notifier_call = nx842_OF_notifier, +}; + +#define nx842_counter_read(_name) \ +static ssize_t nx842_##_name##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) { \ + struct nx842_devdata *local_devdata; \ + int p = 0; \ + rcu_read_lock(); \ + local_devdata = rcu_dereference(devdata); \ + if (local_devdata) \ + p = snprintf(buf, PAGE_SIZE, "%ld\n", \ + atomic64_read(&local_devdata->counters->_name)); \ + rcu_read_unlock(); \ + return p; \ +} + +#define NX842DEV_COUNTER_ATTR_RO(_name) \ + nx842_counter_read(_name); \ + static struct device_attribute dev_attr_##_name = __ATTR(_name, \ + 0444, \ + nx842_##_name##_show,\ + NULL); + +NX842DEV_COUNTER_ATTR_RO(comp_complete); +NX842DEV_COUNTER_ATTR_RO(comp_failed); +NX842DEV_COUNTER_ATTR_RO(decomp_complete); +NX842DEV_COUNTER_ATTR_RO(decomp_failed); +NX842DEV_COUNTER_ATTR_RO(swdecomp); + +static ssize_t 
nx842_timehist_show(struct device *, + struct device_attribute *, char *); + +static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444, + nx842_timehist_show, NULL); +static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times, + 0444, nx842_timehist_show, NULL); + +static ssize_t nx842_timehist_show(struct device *dev, + struct device_attribute *attr, char *buf) { + char *p = buf; + struct nx842_devdata *local_devdata; + atomic64_t *times; + int bytes_remain = PAGE_SIZE; + int bytes; + int i; + + rcu_read_lock(); + local_devdata = rcu_dereference(devdata); + if (!local_devdata) { + rcu_read_unlock(); + return 0; + } + + if (attr == &dev_attr_comp_times) + times = local_devdata->counters->comp_times; + else if (attr == &dev_attr_decomp_times) + times = local_devdata->counters->decomp_times; + else { + rcu_read_unlock(); + return 0; + } + + for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) { + bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n", + i ? (2<<(i-1)) : 0, (2<<i)-1, + atomic64_read(×[i])); + bytes_remain -= bytes; + p += bytes; + } + /* The last bucket holds everything over + * 2<<(NX842_HIST_SLOTS - 2) us */ + bytes = snprintf(p, bytes_remain, "%uus - :\t%ld\n", + 2<<(NX842_HIST_SLOTS - 2), + atomic64_read(×[(NX842_HIST_SLOTS - 1)])); + p += bytes; + + rcu_read_unlock(); + return p - buf; +} + +static struct attribute *nx842_sysfs_entries[] = { + &dev_attr_comp_complete.attr, + &dev_attr_comp_failed.attr, + &dev_attr_decomp_complete.attr, + &dev_attr_decomp_failed.attr, + &dev_attr_swdecomp.attr, + &dev_attr_comp_times.attr, + &dev_attr_decomp_times.attr, + NULL, +}; + +static struct attribute_group nx842_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = nx842_sysfs_entries, +}; + +static int __init nx842_probe(struct vio_dev *viodev, + const struct vio_device_id *id) +{ + struct nx842_devdata *old_devdata, *new_devdata = NULL; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&devdata_mutex, flags); + old_devdata = rcu_dereference_check(devdata, + lockdep_is_held(&devdata_mutex)); + + if (old_devdata && old_devdata->vdev != NULL) { + dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__); + ret = -1; + goto error_unlock; + } + + dev_set_drvdata(&viodev->dev, NULL); + + new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); + if (!new_devdata) { + dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__); + ret = -ENOMEM; + goto error_unlock; + } + + new_devdata->counters = kzalloc(sizeof(*new_devdata->counters), + GFP_NOFS); + if (!new_devdata->counters) { + dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__); + ret = -ENOMEM; + goto error_unlock; + } + + new_devdata->vdev = viodev; + new_devdata->dev = &viodev->dev; + nx842_OF_set_defaults(new_devdata); + + rcu_assign_pointer(devdata, new_devdata); + spin_unlock_irqrestore(&devdata_mutex, flags); + synchronize_rcu(); + kfree(old_devdata); + + of_reconfig_notifier_register(&nx842_of_nb); + + ret = nx842_OF_upd(NULL); + if (ret && ret != -ENODEV) { + dev_err(&viodev->dev, "could not parse device tree. 
%d\n", ret); + ret = -1; + goto error; + } + + rcu_read_lock(); + dev_set_drvdata(&viodev->dev, rcu_dereference(devdata)); + rcu_read_unlock(); + + if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) { + dev_err(&viodev->dev, "could not create sysfs device attributes\n"); + ret = -1; + goto error; + } + + return 0; + +error_unlock: + spin_unlock_irqrestore(&devdata_mutex, flags); + if (new_devdata) + kfree(new_devdata->counters); + kfree(new_devdata); +error: + return ret; +} + +static int __exit nx842_remove(struct vio_dev *viodev) +{ + struct nx842_devdata *old_devdata; + unsigned long flags; + + pr_info("Removing IBM Power 842 compression device\n"); + sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group); + + spin_lock_irqsave(&devdata_mutex, flags); + old_devdata = rcu_dereference_check(devdata, + lockdep_is_held(&devdata_mutex)); + of_reconfig_notifier_unregister(&nx842_of_nb); + RCU_INIT_POINTER(devdata, NULL); + spin_unlock_irqrestore(&devdata_mutex, flags); + synchronize_rcu(); + dev_set_drvdata(&viodev->dev, NULL); + if (old_devdata) + kfree(old_devdata->counters); + kfree(old_devdata); + return 0; +} + +static struct vio_device_id nx842_driver_ids[] = { + {"ibm,compression-v1", "ibm,compression"}, + {"", ""}, +}; + +static struct vio_driver nx842_driver = { + .name = MODULE_NAME, + .probe = nx842_probe, + .remove = __exit_p(nx842_remove), + .get_desired_dma = nx842_get_desired_dma, + .id_table = nx842_driver_ids, +}; + +static int __init nx842_init(void) +{ + struct nx842_devdata *new_devdata; + pr_info("Registering IBM Power 842 compression driver\n"); + + RCU_INIT_POINTER(devdata, NULL); + new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL); + if (!new_devdata) { + pr_err("Could not allocate memory for device data\n"); + return -ENOMEM; + } + new_devdata->status = UNAVAILABLE; + RCU_INIT_POINTER(devdata, new_devdata); + + return vio_register_driver(&nx842_driver); +} + +module_init(nx842_init); + +static void __exit nx842_exit(void) +{ + struct nx842_devdata *old_devdata; + unsigned long flags; + + pr_info("Exiting IBM Power 842 compression driver\n"); + spin_lock_irqsave(&devdata_mutex, flags); + old_devdata = rcu_dereference_check(devdata, + lockdep_is_held(&devdata_mutex)); + RCU_INIT_POINTER(devdata, NULL); + spin_unlock_irqrestore(&devdata_mutex, flags); + synchronize_rcu(); + if (old_devdata) + dev_set_drvdata(old_devdata->dev, NULL); + kfree(old_devdata); + vio_unregister_driver(&nx842_driver); +} + +module_exit(nx842_exit); + +/********************************* + * 842 software decompressor +*********************************/ +typedef int (*sw842_template_op)(const char **, int *, unsigned char **, + struct sw842_fifo *); + +static int sw842_data8(const char **, int *, unsigned char **, + struct sw842_fifo *); +static int sw842_data4(const char **, int *, unsigned char **, + struct sw842_fifo *); +static int sw842_data2(const char **, int *, unsigned char **, + struct sw842_fifo *); +static int sw842_ptr8(const char **, int *, unsigned char **, + struct sw842_fifo *); +static int sw842_ptr4(const char **, int *, unsigned char **, + struct sw842_fifo *); +static int sw842_ptr2(const char **, int *, unsigned char **, + struct sw842_fifo *); + +/* special templates */ +#define SW842_TMPL_REPEAT 0x1B +#define SW842_TMPL_ZEROS 0x1C +#define SW842_TMPL_EOF 0x1E + +static sw842_template_op sw842_tmpl_ops[26][4] = { + { sw842_data8, NULL}, /* 0 (00000) */ + { sw842_data4, sw842_data2, sw842_ptr2, NULL}, + { sw842_data4, sw842_ptr2, sw842_data2, 
NULL}, + { sw842_data4, sw842_ptr2, sw842_ptr2, NULL}, + { sw842_data4, sw842_ptr4, NULL}, + { sw842_data2, sw842_ptr2, sw842_data4, NULL}, + { sw842_data2, sw842_ptr2, sw842_data2, sw842_ptr2}, + { sw842_data2, sw842_ptr2, sw842_ptr2, sw842_data2}, + { sw842_data2, sw842_ptr2, sw842_ptr2, sw842_ptr2,}, + { sw842_data2, sw842_ptr2, sw842_ptr4, NULL}, + { sw842_ptr2, sw842_data2, sw842_data4, NULL}, /* 10 (01010) */ + { sw842_ptr2, sw842_data4, sw842_ptr2, NULL}, + { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_data2}, + { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_ptr2}, + { sw842_ptr2, sw842_data2, sw842_ptr4, NULL}, + { sw842_ptr2, sw842_ptr2, sw842_data4, NULL}, + { sw842_ptr2, sw842_ptr2, sw842_data2, sw842_ptr2}, + { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_data2}, + { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_ptr2}, + { sw842_ptr2, sw842_ptr2, sw842_ptr4, NULL}, + { sw842_ptr4, sw842_data4, NULL}, /* 20 (10100) */ + { sw842_ptr4, sw842_data2, sw842_ptr2, NULL}, + { sw842_ptr4, sw842_ptr2, sw842_data2, NULL}, + { sw842_ptr4, sw842_ptr2, sw842_ptr2, NULL}, + { sw842_ptr4, sw842_ptr4, NULL}, + { sw842_ptr8, NULL} +}; + +/* Software decompress helpers */ + +static uint8_t sw842_get_byte(const char *buf, int bit) +{ + uint8_t tmpl; + uint16_t tmp; + tmp = htons(*(uint16_t *)(buf)); + tmp = (uint16_t)(tmp << bit); + tmp = ntohs(tmp); + memcpy(&tmpl, &tmp, 1); + return tmpl; +} + +static uint8_t sw842_get_template(const char **buf, int *bit) +{ + uint8_t byte; + byte = sw842_get_byte(*buf, *bit); + byte = byte >> 3; + byte &= 0x1F; + *buf += (*bit + 5) / 8; + *bit = (*bit + 5) % 8; + return byte; +} + +/* repeat_count happens to be 5-bit too (like the template) */ +static uint8_t sw842_get_repeat_count(const char **buf, int *bit) +{ + uint8_t byte; + byte = sw842_get_byte(*buf, *bit); + byte = byte >> 2; + byte &= 0x3F; + *buf += (*bit + 6) / 8; + *bit = (*bit + 6) % 8; + return byte; +} + +static uint8_t sw842_get_ptr2(const char **buf, int *bit) +{ + uint8_t ptr; + ptr = sw842_get_byte(*buf, *bit); + (*buf)++; + return ptr; +} + +static uint16_t sw842_get_ptr4(const char **buf, int *bit, + struct sw842_fifo *fifo) +{ + uint16_t ptr; + ptr = htons(*(uint16_t *)(*buf)); + ptr = (uint16_t)(ptr << *bit); + ptr = ptr >> 7; + ptr &= 0x01FF; + *buf += (*bit + 9) / 8; + *bit = (*bit + 9) % 8; + return ptr; +} + +static uint8_t sw842_get_ptr8(const char **buf, int *bit, + struct sw842_fifo *fifo) +{ + return sw842_get_ptr2(buf, bit); +} + +/* Software decompress template ops */ + +static int sw842_data8(const char **inbuf, int *inbit, + unsigned char **outbuf, struct sw842_fifo *fifo) +{ + int ret; + + ret = sw842_data4(inbuf, inbit, outbuf, fifo); + if (ret) + return ret; + ret = sw842_data4(inbuf, inbit, outbuf, fifo); + return ret; +} + +static int sw842_data4(const char **inbuf, int *inbit, + unsigned char **outbuf, struct sw842_fifo *fifo) +{ + int ret; + + ret = sw842_data2(inbuf, inbit, outbuf, fifo); + if (ret) + return ret; + ret = sw842_data2(inbuf, inbit, outbuf, fifo); + return ret; +} + +static int sw842_data2(const char **inbuf, int *inbit, + unsigned char **outbuf, struct sw842_fifo *fifo) +{ + **outbuf = sw842_get_byte(*inbuf, *inbit); + (*inbuf)++; + (*outbuf)++; + **outbuf = sw842_get_byte(*inbuf, *inbit); + (*inbuf)++; + (*outbuf)++; + return 0; +} + +static int sw842_ptr8(const char **inbuf, int *inbit, + unsigned char **outbuf, struct sw842_fifo *fifo) +{ + uint8_t ptr; + ptr = sw842_get_ptr8(inbuf, inbit, fifo); + if (!fifo->f84_full && (ptr >= fifo->f8_count)) + return 1; + 
memcpy(*outbuf, fifo->f8[ptr], 8); + *outbuf += 8; + return 0; +} + +static int sw842_ptr4(const char **inbuf, int *inbit, + unsigned char **outbuf, struct sw842_fifo *fifo) +{ + uint16_t ptr; + ptr = sw842_get_ptr4(inbuf, inbit, fifo); + if (!fifo->f84_full && (ptr >= fifo->f4_count)) + return 1; + memcpy(*outbuf, fifo->f4[ptr], 4); + *outbuf += 4; + return 0; +} + +static int sw842_ptr2(const char **inbuf, int *inbit, + unsigned char **outbuf, struct sw842_fifo *fifo) +{ + uint8_t ptr; + ptr = sw842_get_ptr2(inbuf, inbit); + if (!fifo->f2_full && (ptr >= fifo->f2_count)) + return 1; + memcpy(*outbuf, fifo->f2[ptr], 2); + *outbuf += 2; + return 0; +} + +static void sw842_copy_to_fifo(const char *buf, struct sw842_fifo *fifo) +{ + unsigned char initial_f2count = fifo->f2_count; + + memcpy(fifo->f8[fifo->f8_count], buf, 8); + fifo->f4_count += 2; + fifo->f8_count += 1; + + if (!fifo->f84_full && fifo->f4_count >= 512) { + fifo->f84_full = 1; + fifo->f4_count /= 512; + } + + memcpy(fifo->f2[fifo->f2_count++], buf, 2); + memcpy(fifo->f2[fifo->f2_count++], buf + 2, 2); + memcpy(fifo->f2[fifo->f2_count++], buf + 4, 2); + memcpy(fifo->f2[fifo->f2_count++], buf + 6, 2); + if (fifo->f2_count < initial_f2count) + fifo->f2_full = 1; +} + +static int sw842_decompress(const unsigned char *src, int srclen, + unsigned char *dst, int *destlen, + const void *wrkmem) +{ + uint8_t tmpl; + const char *inbuf; + int inbit = 0; + unsigned char *outbuf, *outbuf_end, *origbuf, *prevbuf; + const char *inbuf_end; + sw842_template_op op; + int opindex; + int i, repeat_count; + struct sw842_fifo *fifo; + int ret = 0; + + fifo = &((struct nx842_workmem *)(wrkmem))->swfifo; + memset(fifo, 0, sizeof(*fifo)); + + origbuf = NULL; + inbuf = src; + inbuf_end = src + srclen; + outbuf = dst; + outbuf_end = dst + *destlen; + + while ((tmpl = sw842_get_template(&inbuf, &inbit)) != SW842_TMPL_EOF) { + if (inbuf >= inbuf_end) { + ret = -EINVAL; + goto out; + } + + opindex = 0; + prevbuf = origbuf; + origbuf = outbuf; + switch (tmpl) { + case SW842_TMPL_REPEAT: + if (prevbuf == NULL) { + ret = -EINVAL; + goto out; + } + + repeat_count = sw842_get_repeat_count(&inbuf, + &inbit) + 1; + + /* Did the repeat count advance past the end of input */ + if (inbuf > inbuf_end) { + ret = -EINVAL; + goto out; + } + + for (i = 0; i < repeat_count; i++) { + /* Would this overflow the output buffer */ + if ((outbuf + 8) > outbuf_end) { + ret = -ENOSPC; + goto out; + } + + memcpy(outbuf, prevbuf, 8); + sw842_copy_to_fifo(outbuf, fifo); + outbuf += 8; + } + break; + + case SW842_TMPL_ZEROS: + /* Would this overflow the output buffer */ + if ((outbuf + 8) > outbuf_end) { + ret = -ENOSPC; + goto out; + } + + memset(outbuf, 0, 8); + sw842_copy_to_fifo(outbuf, fifo); + outbuf += 8; + break; + + default: + if (tmpl > 25) { + ret = -EINVAL; + goto out; + } + + /* Does this go past the end of the input buffer */ + if ((inbuf + 2) > inbuf_end) { + ret = -EINVAL; + goto out; + } + + /* Would this overflow the output buffer */ + if ((outbuf + 8) > outbuf_end) { + ret = -ENOSPC; + goto out; + } + + while (opindex < 4 && + (op = sw842_tmpl_ops[tmpl][opindex++]) + != NULL) { + ret = (*op)(&inbuf, &inbit, &outbuf, fifo); + if (ret) { + ret = -EINVAL; + goto out; + } + sw842_copy_to_fifo(origbuf, fifo); + } + } + } + +out: + if (!ret) + *destlen = (unsigned int)(outbuf - dst); + else + *destlen = 0; + + return ret; +} diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c new file mode 100644 index 000000000..a066cc345 --- /dev/null +++ 
b/drivers/crypto/nx/nx-aes-cbc.c @@ -0,0 +1,150 @@ +/** + * AES CBC routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2011-2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/crypto.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +static int cbc_aes_nx_set_key(struct crypto_tfm *tfm, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + + nx_ctx_init(nx_ctx, HCOP_FC_AES); + + switch (key_len) { + case AES_KEYSIZE_128: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; + break; + case AES_KEYSIZE_192: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; + break; + case AES_KEYSIZE_256: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; + break; + default: + return -EINVAL; + } + + csbcpb->cpb.hdr.mode = NX_MODE_AES_CBC; + memcpy(csbcpb->cpb.aes_cbc.key, in_key, key_len); + + return 0; +} + +static int cbc_aes_nx_crypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes, + int enc) +{ + struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + unsigned long irq_flags; + unsigned int processed = 0, to_process; + int rc; + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + if (enc) + NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; + else + NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; + + do { + to_process = nbytes - processed; + + rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process, + processed, csbcpb->cpb.aes_cbc.iv); + if (rc) + goto out; + + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE); + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); + + processed += to_process; + } while (processed < nbytes); +out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); + return rc; +} + +static int cbc_aes_nx_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return cbc_aes_nx_crypt(desc, dst, src, nbytes, 1); +} + +static int cbc_aes_nx_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return cbc_aes_nx_crypt(desc, dst, src, nbytes, 0); +} + +struct crypto_alg nx_cbc_aes_alg = { + .cra_name = "cbc(aes)", + 
.cra_driver_name = "cbc-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_type = &crypto_blkcipher_type, + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + .cra_init = nx_crypto_ctx_aes_cbc_init, + .cra_exit = nx_crypto_ctx_exit, + .cra_blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = cbc_aes_nx_set_key, + .encrypt = cbc_aes_nx_encrypt, + .decrypt = cbc_aes_nx_decrypt, + } +}; diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c new file mode 100644 index 000000000..67f80813a --- /dev/null +++ b/drivers/crypto/nx/nx-aes-ccm.c @@ -0,0 +1,604 @@ +/** + * AES CCM routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/internal/aead.h> +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <crypto/scatterwalk.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/crypto.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +static int ccm_aes_nx_set_key(struct crypto_aead *tfm, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; + + nx_ctx_init(nx_ctx, HCOP_FC_AES); + + switch (key_len) { + case AES_KEYSIZE_128: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); + NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; + break; + default: + return -EINVAL; + } + + csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM; + memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len); + + csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA; + memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len); + + return 0; + +} + +static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base); + + if (key_len < 3) + return -EINVAL; + + key_len -= 3; + + memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3); + + return ccm_aes_nx_set_key(tfm, in_key, key_len); +} + +static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + switch (authsize) { + case 4: + case 6: + case 8: + case 10: + case 12: + case 14: + case 16: + break; + default: + return -EINVAL; + } + + crypto_aead_crt(tfm)->authsize = authsize; + + return 0; +} + +static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + switch (authsize) { + case 8: + case 12: + case 16: + break; + default: + return -EINVAL; + } + + crypto_aead_crt(tfm)->authsize = authsize; + + return 0; +} + +/* taken from 
crypto/ccm.c */ +static int set_msg_len(u8 *block, unsigned int msglen, int csize) +{ + __be32 data; + + memset(block, 0, csize); + block += csize; + + if (csize >= 4) + csize = 4; + else if (msglen > (unsigned int)(1 << (8 * csize))) + return -EOVERFLOW; + + data = cpu_to_be32(msglen); + memcpy(block - csize, (u8 *)&data + 4 - csize, csize); + + return 0; +} + +/* taken from crypto/ccm.c */ +static inline int crypto_ccm_check_iv(const u8 *iv) +{ + /* 2 <= L <= 8, so 1 <= L' <= 7. */ + if (1 > iv[0] || iv[0] > 7) + return -EINVAL; + + return 0; +} + +/* based on code from crypto/ccm.c */ +static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize, + unsigned int cryptlen, u8 *b0) +{ + unsigned int l, lp, m = authsize; + int rc; + + memcpy(b0, iv, 16); + + lp = b0[0]; + l = lp + 1; + + /* set m, bits 3-5 */ + *b0 |= (8 * ((m - 2) / 2)); + + /* set adata, bit 6, if associated data is used */ + if (assoclen) + *b0 |= 64; + + rc = set_msg_len(b0 + 16 - l, cryptlen, l); + + return rc; +} + +static int generate_pat(u8 *iv, + struct aead_request *req, + struct nx_crypto_ctx *nx_ctx, + unsigned int authsize, + unsigned int nbytes, + u8 *out) +{ + struct nx_sg *nx_insg = nx_ctx->in_sg; + struct nx_sg *nx_outsg = nx_ctx->out_sg; + unsigned int iauth_len = 0; + u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL; + int rc; + unsigned int max_sg_len; + + /* zero the ctr value */ + memset(iv + 15 - iv[0], 0, iv[0] + 1); + + /* page 78 of nx_wb.pdf has, + * Note: RFC3610 allows the AAD data to be up to 2^64 -1 bytes + * in length. If a full message is used, the AES CCA implementation + * restricts the maximum AAD length to 2^32 -1 bytes. + * If partial messages are used, the implementation supports + * 2^64 -1 bytes maximum AAD length. + * + * However, in the cryptoapi's aead_request structure, + * assoclen is an unsigned int, thus it cannot hold a length + * value greater than 2^32 - 1. + * Thus the AAD is further constrained by this and is never + * greater than 2^32. + */ + + if (!req->assoclen) { + b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; + } else if (req->assoclen <= 14) { + /* if associated data is 14 bytes or less, we do 1 GCM + * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1, + * which is fed in through the source buffers here */ + b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; + b1 = nx_ctx->priv.ccm.iauth_tag; + iauth_len = req->assoclen; + } else if (req->assoclen <= 65280) { + /* if associated data is less than (2^16 - 2^8), we construct + * B1 differently and feed in the associated data to a CCA + * operation */ + b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0; + b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; + iauth_len = 14; + } else { + b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0; + b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; + iauth_len = 10; + } + + /* generate B0 */ + rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0); + if (rc) + return rc; + + /* generate B1: + * add control info for associated data + * RFC 3610 and NIST Special Publication 800-38C + */ + if (b1) { + memset(b1, 0, 16); + if (req->assoclen <= 65280) { + *(u16 *)b1 = (u16)req->assoclen; + scatterwalk_map_and_copy(b1 + 2, req->assoc, 0, + iauth_len, SCATTERWALK_FROM_SG); + } else { + *(u16 *)b1 = (u16)(0xfffe); + *(u32 *)&b1[2] = (u32)req->assoclen; + scatterwalk_map_and_copy(b1 + 6, req->assoc, 0, + iauth_len, SCATTERWALK_FROM_SG); + } + } + + /* now copy any remaining AAD to scatterlist and call nx... 
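+ * (for assoclen <= 14 the B1 block built above, together with the B0
+ * already in the csbcpb, is pushed through one intermediate pass of
+ * the CCM op; larger AAD is streamed through the separate CCA op in
+ * databytelen-sized chunks, with out_pat_or_b0 copied back into b0
+ * between iterations as the running PAT.)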
*/ + if (!req->assoclen) { + return rc; + } else if (req->assoclen <= 14) { + unsigned int len = 16; + + nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen); + + if (len != 16) + return -EINVAL; + + nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len, + nx_ctx->ap->sglen); + + if (len != 16) + return -EINVAL; + + /* inlen should be negative, indicating to phyp that its a + * pointer to an sg list */ + nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * + sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * + sizeof(struct nx_sg); + + NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT; + NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE; + + result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac; + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + return rc; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); + + } else { + unsigned int processed = 0, to_process; + + processed += iauth_len; + + /* page_limit: number of sg entries that fit on one page */ + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + + do { + to_process = min_t(u32, req->assoclen - processed, + nx_ctx->ap->databytelen); + + nx_insg = nx_walk_and_build(nx_ctx->in_sg, + nx_ctx->ap->sglen, + req->assoc, processed, + &to_process); + + if ((to_process + processed) < req->assoclen) { + NX_CPB_FDM(nx_ctx->csbcpb_aead) |= + NX_FDM_INTERMEDIATE; + } else { + NX_CPB_FDM(nx_ctx->csbcpb_aead) &= + ~NX_FDM_INTERMEDIATE; + } + + + nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) * + sizeof(struct nx_sg); + + result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0; + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + return rc; + + memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0, + nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0, + AES_BLOCK_SIZE); + + NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(req->assoclen, + &(nx_ctx->stats->aes_bytes)); + + processed += to_process; + } while (processed < req->assoclen); + + result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0; + } + + memcpy(out, result, AES_BLOCK_SIZE); + + return rc; +} + +static int ccm_nx_decrypt(struct aead_request *req, + struct blkcipher_desc *desc) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + unsigned int nbytes = req->cryptlen; + unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); + struct nx_ccm_priv *priv = &nx_ctx->priv.ccm; + unsigned long irq_flags; + unsigned int processed = 0, to_process; + int rc = -1; + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + nbytes -= authsize; + + /* copy out the auth tag to compare with later */ + scatterwalk_map_and_copy(priv->oauth_tag, + req->src, nbytes, authsize, + SCATTERWALK_FROM_SG); + + rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, + csbcpb->cpb.aes_ccm.in_pat_or_b0); + if (rc) + goto out; + + do { + + /* to_process: the AES_BLOCK_SIZE data chunk to process in this + * update. This value is bound by sg list limits. 
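+ * (nx_build_sg_lists() may trim to_process down to what the sg
+ * lists and coprocessor limits allow, which is why the loop keeps
+ * going until 'processed' reaches nbytes.)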
+ */ + to_process = nbytes - processed; + + if ((to_process + processed) < nbytes) + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + else + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + + NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; + + rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, + &to_process, processed, + csbcpb->cpb.aes_ccm.iv_or_ctr); + if (rc) + goto out; + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + /* for partial completion, copy following for next + * entry into loop... + */ + memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE); + memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0, + csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE); + memcpy(csbcpb->cpb.aes_ccm.in_s0, + csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE); + + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + + /* update stats */ + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); + + processed += to_process; + } while (processed < nbytes); + + rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, + authsize) ? -EBADMSG : 0; +out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); + return rc; +} + +static int ccm_nx_encrypt(struct aead_request *req, + struct blkcipher_desc *desc) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + unsigned int nbytes = req->cryptlen; + unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); + unsigned long irq_flags; + unsigned int processed = 0, to_process; + int rc = -1; + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, + csbcpb->cpb.aes_ccm.in_pat_or_b0); + if (rc) + goto out; + + do { + /* to process: the AES_BLOCK_SIZE data chunk to process in this + * update. This value is bound by sg list limits. + */ + to_process = nbytes - processed; + + if ((to_process + processed) < nbytes) + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + else + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + + NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; + + rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, + &to_process, processed, + csbcpb->cpb.aes_ccm.iv_or_ctr); + if (rc) + goto out; + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + /* for partial completion, copy following for next + * entry into loop... 
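+ * (the next pass runs with NX_FDM_CONTINUATION set, so the counter,
+ * MAC and S0 produced by this call become the inputs of the next
+ * one).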
+ */ + memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE); + memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0, + csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE); + memcpy(csbcpb->cpb.aes_ccm.in_s0, + csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE); + + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + + /* update stats */ + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); + + processed += to_process; + + } while (processed < nbytes); + + /* copy out the auth tag */ + scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac, + req->dst, nbytes, authsize, + SCATTERWALK_TO_SG); + +out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); + return rc; +} + +static int ccm4309_aes_nx_encrypt(struct aead_request *req) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct blkcipher_desc desc; + u8 *iv = nx_ctx->priv.ccm.iv; + + iv[0] = 3; + memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); + memcpy(iv + 4, req->iv, 8); + + desc.info = iv; + desc.tfm = (struct crypto_blkcipher *)req->base.tfm; + + return ccm_nx_encrypt(req, &desc); +} + +static int ccm_aes_nx_encrypt(struct aead_request *req) +{ + struct blkcipher_desc desc; + int rc; + + desc.info = req->iv; + desc.tfm = (struct crypto_blkcipher *)req->base.tfm; + + rc = crypto_ccm_check_iv(desc.info); + if (rc) + return rc; + + return ccm_nx_encrypt(req, &desc); +} + +static int ccm4309_aes_nx_decrypt(struct aead_request *req) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct blkcipher_desc desc; + u8 *iv = nx_ctx->priv.ccm.iv; + + iv[0] = 3; + memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); + memcpy(iv + 4, req->iv, 8); + + desc.info = iv; + desc.tfm = (struct crypto_blkcipher *)req->base.tfm; + + return ccm_nx_decrypt(req, &desc); +} + +static int ccm_aes_nx_decrypt(struct aead_request *req) +{ + struct blkcipher_desc desc; + int rc; + + desc.info = req->iv; + desc.tfm = (struct crypto_blkcipher *)req->base.tfm; + + rc = crypto_ccm_check_iv(desc.info); + if (rc) + return rc; + + return ccm_nx_decrypt(req, &desc); +} + +/* tell the block cipher walk routines that this is a stream cipher by + * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block + * during encrypt/decrypt doesn't solve this problem, because it calls + * blkcipher_walk_done under the covers, which doesn't use walk->blocksize, + * but instead uses this tfm->blocksize. 
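+ *
+ * (CCM is a CTR-based mode, so the encrypted payload is the same
+ * length as the plaintext and a request need not be a multiple of
+ * AES_BLOCK_SIZE; a blocksize of 1 lets the walk code accept such
+ * requests as-is.)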
*/ +struct crypto_alg nx_ccm_aes_alg = { + .cra_name = "ccm(aes)", + .cra_driver_name = "ccm-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_init = nx_crypto_ctx_aes_ccm_init, + .cra_exit = nx_crypto_ctx_exit, + .cra_aead = { + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + .setkey = ccm_aes_nx_set_key, + .setauthsize = ccm_aes_nx_setauthsize, + .encrypt = ccm_aes_nx_encrypt, + .decrypt = ccm_aes_nx_decrypt, + } +}; + +struct crypto_alg nx_ccm4309_aes_alg = { + .cra_name = "rfc4309(ccm(aes))", + .cra_driver_name = "rfc4309-ccm-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_type = &crypto_nivaead_type, + .cra_module = THIS_MODULE, + .cra_init = nx_crypto_ctx_aes_ccm_init, + .cra_exit = nx_crypto_ctx_exit, + .cra_aead = { + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + .setkey = ccm4309_aes_nx_set_key, + .setauthsize = ccm4309_aes_nx_setauthsize, + .encrypt = ccm4309_aes_nx_encrypt, + .decrypt = ccm4309_aes_nx_decrypt, + .geniv = "seqiv", + } +}; diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c new file mode 100644 index 000000000..2617cd4d5 --- /dev/null +++ b/drivers/crypto/nx/nx-aes-ctr.c @@ -0,0 +1,187 @@ +/** + * AES CTR routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2011-2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/aes.h> +#include <crypto/ctr.h> +#include <crypto/algapi.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/crypto.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +static int ctr_aes_nx_set_key(struct crypto_tfm *tfm, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + + nx_ctx_init(nx_ctx, HCOP_FC_AES); + + switch (key_len) { + case AES_KEYSIZE_128: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; + break; + case AES_KEYSIZE_192: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; + break; + case AES_KEYSIZE_256: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; + break; + default: + return -EINVAL; + } + + csbcpb->cpb.hdr.mode = NX_MODE_AES_CTR; + memcpy(csbcpb->cpb.aes_ctr.key, in_key, key_len); + + return 0; +} + +static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + + if (key_len < CTR_RFC3686_NONCE_SIZE) + return -EINVAL; + + memcpy(nx_ctx->priv.ctr.iv, + in_key + key_len - CTR_RFC3686_NONCE_SIZE, + CTR_RFC3686_NONCE_SIZE); + + key_len -= CTR_RFC3686_NONCE_SIZE; + + return ctr_aes_nx_set_key(tfm, in_key, key_len); +} + +static int ctr_aes_nx_crypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + unsigned long irq_flags; + unsigned int processed = 0, to_process; + int rc; + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + do { + to_process = nbytes - processed; + + rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process, + processed, csbcpb->cpb.aes_ctr.iv); + if (rc) + goto out; + + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE); + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); + + processed += to_process; + } while (processed < nbytes); +out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); + return rc; +} + +static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); + u8 *iv = nx_ctx->priv.ctr.iv; + + memcpy(iv + CTR_RFC3686_NONCE_SIZE, + desc->info, CTR_RFC3686_IV_SIZE); + iv[12] = iv[13] = iv[14] = 0; + iv[15] = 1; + + desc->info = nx_ctx->priv.ctr.iv; + + return ctr_aes_nx_crypt(desc, dst, src, nbytes); +} + +struct crypto_alg nx_ctr_aes_alg = { + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = nx_crypto_ctx_aes_ctr_init, + .cra_exit = nx_crypto_ctx_exit, + .cra_blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = ctr_aes_nx_set_key, + .encrypt 
= ctr_aes_nx_crypt, + .decrypt = ctr_aes_nx_crypt, + } +}; + +struct crypto_alg nx_ctr3686_aes_alg = { + .cra_name = "rfc3686(ctr(aes))", + .cra_driver_name = "rfc3686-ctr-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = nx_crypto_ctx_aes_ctr_init, + .cra_exit = nx_crypto_ctx_exit, + .cra_blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .ivsize = CTR_RFC3686_IV_SIZE, + .geniv = "seqiv", + .setkey = ctr3686_aes_nx_set_key, + .encrypt = ctr3686_aes_nx_crypt, + .decrypt = ctr3686_aes_nx_crypt, + } +}; diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c new file mode 100644 index 000000000..cfdde8b8b --- /dev/null +++ b/drivers/crypto/nx/nx-aes-ecb.c @@ -0,0 +1,149 @@ +/** + * AES ECB routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2011-2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/crypto.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +static int ecb_aes_nx_set_key(struct crypto_tfm *tfm, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + + nx_ctx_init(nx_ctx, HCOP_FC_AES); + + switch (key_len) { + case AES_KEYSIZE_128: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; + break; + case AES_KEYSIZE_192: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; + break; + case AES_KEYSIZE_256: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; + break; + default: + return -EINVAL; + } + + csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB; + memcpy(csbcpb->cpb.aes_ecb.key, in_key, key_len); + + return 0; +} + +static int ecb_aes_nx_crypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes, + int enc) +{ + struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + unsigned long irq_flags; + unsigned int processed = 0, to_process; + int rc; + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + if (enc) + NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; + else + NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; + + do { + to_process = nbytes - processed; + + rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process, + processed, NULL); + if (rc) + goto out; + + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; 
+ } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); + + processed += to_process; + } while (processed < nbytes); + +out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); + return rc; +} + +static int ecb_aes_nx_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return ecb_aes_nx_crypt(desc, dst, src, nbytes, 1); +} + +static int ecb_aes_nx_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return ecb_aes_nx_crypt(desc, dst, src, nbytes, 0); +} + +struct crypto_alg nx_ecb_aes_alg = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_alignmask = 0xf, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = nx_crypto_ctx_aes_ecb_init, + .cra_exit = nx_crypto_ctx_exit, + .cra_blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = ecb_aes_nx_set_key, + .encrypt = ecb_aes_nx_encrypt, + .decrypt = ecb_aes_nx_decrypt, + } +}; diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c new file mode 100644 index 000000000..88c562434 --- /dev/null +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -0,0 +1,525 @@ +/** + * AES GCM routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/internal/aead.h> +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <crypto/scatterwalk.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/crypto.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +static int gcm_aes_nx_set_key(struct crypto_aead *tfm, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; + + nx_ctx_init(nx_ctx, HCOP_FC_AES); + + switch (key_len) { + case AES_KEYSIZE_128: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); + NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; + break; + case AES_KEYSIZE_192: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192); + NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; + break; + case AES_KEYSIZE_256: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256); + NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; + break; + default: + return -EINVAL; + } + + csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM; + memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len); + + csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA; + memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len); + + return 0; +} + +static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base); + char *nonce = nx_ctx->priv.gcm.nonce; + int rc; + + if (key_len < 4) + return -EINVAL; + + key_len -= 4; + + rc = gcm_aes_nx_set_key(tfm, in_key, key_len); + if (rc) + goto out; + + memcpy(nonce, in_key + key_len, 4); +out: + return rc; +} + +static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + if (authsize > crypto_aead_alg(tfm)->maxauthsize) + return -EINVAL; + + crypto_aead_crt(tfm)->authsize = authsize; + + return 0; +} + +static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + switch (authsize) { + case 8: + case 12: + case 16: + break; + default: + return -EINVAL; + } + + crypto_aead_crt(tfm)->authsize = authsize; + + return 0; +} + +static int nx_gca(struct nx_crypto_ctx *nx_ctx, + struct aead_request *req, + u8 *out) +{ + int rc; + struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; + struct scatter_walk walk; + struct nx_sg *nx_sg = nx_ctx->in_sg; + unsigned int nbytes = req->assoclen; + unsigned int processed = 0, to_process; + unsigned int max_sg_len; + + if (nbytes <= AES_BLOCK_SIZE) { + scatterwalk_start(&walk, req->assoc); + scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG); + scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); + return 0; + } + + NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION; + + /* page_limit: number of sg entries that fit on one page */ + max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg), + nx_ctx->ap->sglen); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + + do { + /* + * to_process: the data chunk to process in this update. + * This value is bound by sg list limits. 
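+ * (bounded by the coprocessor's per-call databytelen limit and by
+ * NX_PAGE_SIZE * (max_sg_len - 1), since an unaligned buffer of that
+ * size can still span max_sg_len pages and so needs up to max_sg_len
+ * sg entries.)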
+ */ + to_process = min_t(u64, nbytes - processed, + nx_ctx->ap->databytelen); + to_process = min_t(u64, to_process, + NX_PAGE_SIZE * (max_sg_len - 1)); + + nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len, + req->assoc, processed, &to_process); + + if ((to_process + processed) < nbytes) + NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE; + else + NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE; + + nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) + * sizeof(struct nx_sg); + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + return rc; + + memcpy(csbcpb_aead->cpb.aes_gca.in_pat, + csbcpb_aead->cpb.aes_gca.out_pat, + AES_BLOCK_SIZE); + NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); + + processed += to_process; + } while (processed < nbytes); + + memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE); + + return rc; +} + +static int gmac(struct aead_request *req, struct blkcipher_desc *desc) +{ + int rc; + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + struct nx_sg *nx_sg; + unsigned int nbytes = req->assoclen; + unsigned int processed = 0, to_process; + unsigned int max_sg_len; + + /* Set GMAC mode */ + csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC; + + NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; + + /* page_limit: number of sg entries that fit on one page */ + max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg), + nx_ctx->ap->sglen); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + + /* Copy IV */ + memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE); + + do { + /* + * to_process: the data chunk to process in this update. + * This value is bound by sg list limits. + */ + to_process = min_t(u64, nbytes - processed, + nx_ctx->ap->databytelen); + to_process = min_t(u64, to_process, + NX_PAGE_SIZE * (max_sg_len - 1)); + + nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len, + req->assoc, processed, &to_process); + + if ((to_process + processed) < nbytes) + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + else + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + + nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg) + * sizeof(struct nx_sg); + + csbcpb->cpb.aes_gcm.bit_length_data = 0; + csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes; + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad, + csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE); + memcpy(csbcpb->cpb.aes_gcm.in_s0, + csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE); + + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); + + processed += to_process; + } while (processed < nbytes); + +out: + /* Restore GCM mode */ + csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM; + return rc; +} + +static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc, + int enc) +{ + int rc; + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + char out[AES_BLOCK_SIZE]; + struct nx_sg *in_sg, *out_sg; + int len; + + /* For scenarios where the input message is zero length, AES CTR mode + * may be used. Set the source data to be a single block (16B) of all + * zeros, and set the input IV value to be the same as the GMAC IV + * value. 
- nx_wb 4.8.1.3 */ + + /* Change to ECB mode */ + csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB; + memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key, + sizeof(csbcpb->cpb.aes_ecb.key)); + if (enc) + NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; + else + NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; + + len = AES_BLOCK_SIZE; + + /* Encrypt the counter/IV */ + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info, + &len, nx_ctx->ap->sglen); + + if (len != AES_BLOCK_SIZE) + return -EINVAL; + + len = sizeof(out); + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len, + nx_ctx->ap->sglen); + + if (len != sizeof(out)) + return -EINVAL; + + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + atomic_inc(&(nx_ctx->stats->aes_ops)); + + /* Copy out the auth tag */ + memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out, + crypto_aead_authsize(crypto_aead_reqtfm(req))); +out: + /* Restore XCBC mode */ + csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM; + + /* + * ECB key uses the same region that GCM AAD and counter, so it's safe + * to just fill it with zeroes. + */ + memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key)); + + return rc; +} + +static int gcm_aes_nx_crypt(struct aead_request *req, int enc) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + struct blkcipher_desc desc; + unsigned int nbytes = req->cryptlen; + unsigned int processed = 0, to_process; + unsigned long irq_flags; + int rc = -EINVAL; + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + desc.info = nx_ctx->priv.gcm.iv; + /* initialize the counter */ + *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; + + if (nbytes == 0) { + if (req->assoclen == 0) + rc = gcm_empty(req, &desc, enc); + else + rc = gmac(req, &desc); + if (rc) + goto out; + else + goto mac; + } + + /* Process associated data */ + csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8; + if (req->assoclen) { + rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad); + if (rc) + goto out; + } + + /* Set flags for encryption */ + NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; + if (enc) { + NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; + } else { + NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; + nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req)); + } + + do { + to_process = nbytes - processed; + + csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; + desc.tfm = (struct crypto_blkcipher *) req->base.tfm; + rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, + req->src, &to_process, processed, + csbcpb->cpb.aes_gcm.iv_or_cnt); + + if (rc) + goto out; + + if ((to_process + processed) < nbytes) + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + else + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE); + memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad, + csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE); + memcpy(csbcpb->cpb.aes_gcm.in_s0, + csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE); + + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); + + processed += to_process; + } while (processed < nbytes); + +mac: + if (enc) { + /* copy out the auth 
tag */ + scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac, + req->dst, nbytes, + crypto_aead_authsize(crypto_aead_reqtfm(req)), + SCATTERWALK_TO_SG); + } else { + u8 *itag = nx_ctx->priv.gcm.iauth_tag; + u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac; + + scatterwalk_map_and_copy(itag, req->src, nbytes, + crypto_aead_authsize(crypto_aead_reqtfm(req)), + SCATTERWALK_FROM_SG); + rc = memcmp(itag, otag, + crypto_aead_authsize(crypto_aead_reqtfm(req))) ? + -EBADMSG : 0; + } +out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); + return rc; +} + +static int gcm_aes_nx_encrypt(struct aead_request *req) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + char *iv = nx_ctx->priv.gcm.iv; + + memcpy(iv, req->iv, 12); + + return gcm_aes_nx_crypt(req, 1); +} + +static int gcm_aes_nx_decrypt(struct aead_request *req) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + char *iv = nx_ctx->priv.gcm.iv; + + memcpy(iv, req->iv, 12); + + return gcm_aes_nx_crypt(req, 0); +} + +static int gcm4106_aes_nx_encrypt(struct aead_request *req) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + char *iv = nx_ctx->priv.gcm.iv; + char *nonce = nx_ctx->priv.gcm.nonce; + + memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); + memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); + + return gcm_aes_nx_crypt(req, 1); +} + +static int gcm4106_aes_nx_decrypt(struct aead_request *req) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + char *iv = nx_ctx->priv.gcm.iv; + char *nonce = nx_ctx->priv.gcm.nonce; + + memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); + memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); + + return gcm_aes_nx_crypt(req, 0); +} + +/* tell the block cipher walk routines that this is a stream cipher by + * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block + * during encrypt/decrypt doesn't solve this problem, because it calls + * blkcipher_walk_done under the covers, which doesn't use walk->blocksize, + * but instead uses this tfm->blocksize. 
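+ * In short: GCM is a stream mode, so partial blocks are legal and the
+ * generic walk code must not round request lengths down to AES_BLOCK_SIZE.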
*/ +struct crypto_alg nx_gcm_aes_alg = { + .cra_name = "gcm(aes)", + .cra_driver_name = "gcm-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AEAD, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_init = nx_crypto_ctx_aes_gcm_init, + .cra_exit = nx_crypto_ctx_exit, + .cra_aead = { + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + .setkey = gcm_aes_nx_set_key, + .setauthsize = gcm_aes_nx_setauthsize, + .encrypt = gcm_aes_nx_encrypt, + .decrypt = gcm_aes_nx_decrypt, + } +}; + +struct crypto_alg nx_gcm4106_aes_alg = { + .cra_name = "rfc4106(gcm(aes))", + .cra_driver_name = "rfc4106-gcm-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AEAD, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_type = &crypto_nivaead_type, + .cra_module = THIS_MODULE, + .cra_init = nx_crypto_ctx_aes_gcm_init, + .cra_exit = nx_crypto_ctx_exit, + .cra_aead = { + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + .geniv = "seqiv", + .setkey = gcm4106_aes_nx_set_key, + .setauthsize = gcm4106_aes_nx_setauthsize, + .encrypt = gcm4106_aes_nx_encrypt, + .decrypt = gcm4106_aes_nx_decrypt, + } +}; diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c new file mode 100644 index 000000000..8c2faffab --- /dev/null +++ b/drivers/crypto/nx/nx-aes-xcbc.c @@ -0,0 +1,378 @@ +/** + * AES XCBC routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2011-2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/internal/hash.h> +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/crypto.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +struct xcbc_state { + u8 state[AES_BLOCK_SIZE]; + unsigned int count; + u8 buffer[AES_BLOCK_SIZE]; +}; + +static int nx_xcbc_set_key(struct crypto_shash *desc, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc); + + switch (key_len) { + case AES_KEYSIZE_128: + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; + break; + default: + return -EINVAL; + } + + memcpy(nx_ctx->priv.xcbc.key, in_key, key_len); + + return 0; +} + +/* + * Based on RFC 3566, for a zero-length message: + * + * n = 1 + * K1 = E(K, 0x01010101010101010101010101010101) + * K3 = E(K, 0x03030303030303030303030303030303) + * E[0] = 0x00000000000000000000000000000000 + * M[1] = 0x80000000000000000000000000000000 (0 length message with padding) + * E[1] = (K1, M[1] ^ E[0] ^ K3) + * Tag = M[1] + */ +static int nx_xcbc_empty(struct shash_desc *desc, u8 *out) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + struct nx_sg *in_sg, *out_sg; + u8 keys[2][AES_BLOCK_SIZE]; + u8 key[32]; + int rc = 0; + int len; + + /* Change to ECB mode */ + csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB; + memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE); + memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE); + NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; + + /* K1 and K3 base patterns */ + memset(keys[0], 0x01, sizeof(keys[0])); + memset(keys[1], 0x03, sizeof(keys[1])); + + len = sizeof(keys); + /* Generate K1 and K3 encrypting the patterns */ + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len, + nx_ctx->ap->sglen); + + if (len != sizeof(keys)) + return -EINVAL; + + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len, + nx_ctx->ap->sglen); + + if (len != sizeof(keys)) + return -EINVAL; + + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + atomic_inc(&(nx_ctx->stats->aes_ops)); + + /* XOr K3 with the padding for a 0 length message */ + keys[1][0] ^= 0x80; + + len = sizeof(keys[1]); + + /* Encrypt the final result */ + memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE); + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len, + nx_ctx->ap->sglen); + + if (len != sizeof(keys[1])) + return -EINVAL; + + len = AES_BLOCK_SIZE; + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, + nx_ctx->ap->sglen); + + if (len != AES_BLOCK_SIZE) + return -EINVAL; + + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + atomic_inc(&(nx_ctx->stats->aes_ops)); + +out: + /* Restore XCBC mode */ + csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC; + memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE); + NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; + + return rc; +} + +static int nx_xcbc_init(struct shash_desc *desc) +{ + struct xcbc_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = 
nx_ctx->csbcpb; + struct nx_sg *out_sg; + int len; + + nx_ctx_init(nx_ctx, HCOP_FC_AES); + + memset(sctx, 0, sizeof *sctx); + + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); + csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC; + + memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE); + memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key); + + len = AES_BLOCK_SIZE; + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, + &len, nx_ctx->ap->sglen); + + if (len != AES_BLOCK_SIZE) + return -EINVAL; + + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + + return 0; +} + +static int nx_xcbc_update(struct shash_desc *desc, + const u8 *data, + unsigned int len) +{ + struct xcbc_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + struct nx_sg *in_sg; + u32 to_process = 0, leftover, total; + unsigned int max_sg_len; + unsigned long irq_flags; + int rc = 0; + int data_len; + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + + total = sctx->count + len; + + /* 2 cases for total data len: + * 1: <= AES_BLOCK_SIZE: copy into state, return 0 + * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover + */ + if (total <= AES_BLOCK_SIZE) { + memcpy(sctx->buffer + sctx->count, data, len); + sctx->count += len; + goto out; + } + + in_sg = nx_ctx->in_sg; + max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg), + nx_ctx->ap->sglen); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + + do { + to_process = total - to_process; + to_process = to_process & ~(AES_BLOCK_SIZE - 1); + + leftover = total - to_process; + + /* the hardware will not accept a 0 byte operation for this + * algorithm and the operation MUST be finalized to be correct. + * So if we happen to get an update that falls on a block sized + * boundary, we must save off the last block to finalize with + * later. 
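+ * For example, a 32 byte update on a fresh state only processes the
+ * first 16 byte block here and keeps the final block in sctx->buffer
+ * so that nx_xcbc_final() has data to work with.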
*/ + if (!leftover) { + to_process -= AES_BLOCK_SIZE; + leftover = AES_BLOCK_SIZE; + } + + if (sctx->count) { + data_len = sctx->count; + in_sg = nx_build_sg_list(nx_ctx->in_sg, + (u8 *) sctx->buffer, + &data_len, + max_sg_len); + if (data_len != sctx->count) + return -EINVAL; + } + + data_len = to_process - sctx->count; + in_sg = nx_build_sg_list(in_sg, + (u8 *) data, + &data_len, + max_sg_len); + + if (data_len != to_process - sctx->count) + return -EINVAL; + + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * + sizeof(struct nx_sg); + + /* we've hit the nx chip previously and we're updating again, + * so copy over the partial digest */ + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { + memcpy(csbcpb->cpb.aes_xcbc.cv, + csbcpb->cpb.aes_xcbc.out_cv_mac, + AES_BLOCK_SIZE); + } + + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + + /* everything after the first update is continuation */ + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + + total -= to_process; + data += to_process - sctx->count; + sctx->count = 0; + in_sg = nx_ctx->in_sg; + } while (leftover > AES_BLOCK_SIZE); + + /* copy the leftover back into the state struct */ + memcpy(sctx->buffer, data, leftover); + sctx->count = leftover; + +out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); + return rc; +} + +static int nx_xcbc_final(struct shash_desc *desc, u8 *out) +{ + struct xcbc_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + struct nx_sg *in_sg, *out_sg; + unsigned long irq_flags; + int rc = 0; + int len; + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { + /* we've hit the nx chip previously, now we're finalizing, + * so copy over the partial digest */ + memcpy(csbcpb->cpb.aes_xcbc.cv, + csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); + } else if (sctx->count == 0) { + /* + * we've never seen an update, so this is a 0 byte op. The + * hardware cannot handle a 0 byte op, so just ECB to + * generate the hash. 
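+ * nx_xcbc_empty() implements the RFC 3566 zero-length construction
+ * described above it, using two ECB operations on the hardware.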
+ */ + rc = nx_xcbc_empty(desc, out); + goto out; + } + + /* final is represented by continuing the operation and indicating that + * this is not an intermediate operation */ + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + + len = sctx->count; + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer, + &len, nx_ctx->ap->sglen); + + if (len != sctx->count) + return -EINVAL; + + len = AES_BLOCK_SIZE; + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, + nx_ctx->ap->sglen); + + if (len != AES_BLOCK_SIZE) + return -EINVAL; + + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + + if (!nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + + memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); +out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); + return rc; +} + +struct shash_alg nx_shash_aes_xcbc_alg = { + .digestsize = AES_BLOCK_SIZE, + .init = nx_xcbc_init, + .update = nx_xcbc_update, + .final = nx_xcbc_final, + .setkey = nx_xcbc_set_key, + .descsize = sizeof(struct xcbc_state), + .statesize = sizeof(struct xcbc_state), + .base = { + .cra_name = "xcbc(aes)", + .cra_driver_name = "xcbc-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_init = nx_crypto_ctx_aes_xcbc_init, + .cra_exit = nx_crypto_ctx_exit, + } +}; diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c new file mode 100644 index 000000000..23621da62 --- /dev/null +++ b/drivers/crypto/nx/nx-sha256.c @@ -0,0 +1,274 @@ +/** + * SHA-256 routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2011-2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/internal/hash.h> +#include <crypto/sha.h> +#include <linux/module.h> +#include <asm/vio.h> +#include <asm/byteorder.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +static int nx_sha256_init(struct shash_desc *desc) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + int len; + int rc; + + nx_ctx_init(nx_ctx, HCOP_FC_SHA); + + memset(sctx, 0, sizeof *sctx); + + nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256]; + + NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256); + + len = SHA256_DIGEST_SIZE; + rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, + &nx_ctx->op.outlen, + &len, + (u8 *) sctx->state, + NX_DS_SHA256); + + if (rc) + goto out; + + sctx->state[0] = __cpu_to_be32(SHA256_H0); + sctx->state[1] = __cpu_to_be32(SHA256_H1); + sctx->state[2] = __cpu_to_be32(SHA256_H2); + sctx->state[3] = __cpu_to_be32(SHA256_H3); + sctx->state[4] = __cpu_to_be32(SHA256_H4); + sctx->state[5] = __cpu_to_be32(SHA256_H5); + sctx->state[6] = __cpu_to_be32(SHA256_H6); + sctx->state[7] = __cpu_to_be32(SHA256_H7); + sctx->count = 0; + +out: + return 0; +} + +static int nx_sha256_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + u64 to_process = 0, leftover, total; + unsigned long irq_flags; + int rc = 0; + int data_len; + u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE); + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + /* 2 cases for total data len: + * 1: < SHA256_BLOCK_SIZE: copy into state, return 0 + * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover + */ + total = (sctx->count % SHA256_BLOCK_SIZE) + len; + if (total < SHA256_BLOCK_SIZE) { + memcpy(sctx->buf + buf_len, data, len); + sctx->count += len; + goto out; + } + + memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE); + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + + do { + /* + * to_process: the SHA256_BLOCK_SIZE data chunk to process in + * this update. This value is also restricted by the sg list + * limits. + */ + to_process = total - to_process; + to_process = to_process & ~(SHA256_BLOCK_SIZE - 1); + + if (buf_len) { + data_len = buf_len; + rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, + &nx_ctx->op.inlen, + &data_len, + (u8 *) sctx->buf, + NX_DS_SHA256); + + if (rc || data_len != buf_len) + goto out; + } + + data_len = to_process - buf_len; + rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, + &nx_ctx->op.inlen, + &data_len, + (u8 *) data, + NX_DS_SHA256); + + if (rc) + goto out; + + to_process = (data_len + buf_len); + leftover = total - to_process; + + /* + * we've hit the nx chip previously and we're updating + * again, so copy over the partial digest. 
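+ * On the very first pass this is simply the initial H0..H7 value that
+ * nx_sha256_init() stored in sctx->state, so the operation can always
+ * be issued with NX_FDM_CONTINUATION set.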
+ */ + memcpy(csbcpb->cpb.sha256.input_partial_digest, + csbcpb->cpb.sha256.message_digest, + SHA256_DIGEST_SIZE); + + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->sha256_ops)); + + total -= to_process; + data += to_process - buf_len; + buf_len = 0; + + } while (leftover >= SHA256_BLOCK_SIZE); + + /* copy the leftover back into the state struct */ + if (leftover) + memcpy(sctx->buf, data, leftover); + + sctx->count += len; + memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); +out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); + return rc; +} + +static int nx_sha256_final(struct shash_desc *desc, u8 *out) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + unsigned long irq_flags; + int rc; + int len; + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + /* final is represented by continuing the operation and indicating that + * this is not an intermediate operation */ + if (sctx->count >= SHA256_BLOCK_SIZE) { + /* we've hit the nx chip previously, now we're finalizing, + * so copy over the partial digest */ + memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE); + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + } else { + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; + } + + csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8); + + len = sctx->count & (SHA256_BLOCK_SIZE - 1); + rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, + &nx_ctx->op.inlen, + &len, + (u8 *) sctx->buf, + NX_DS_SHA256); + + if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) + goto out; + + len = SHA256_DIGEST_SIZE; + rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, + &nx_ctx->op.outlen, + &len, + out, + NX_DS_SHA256); + + if (rc || len != SHA256_DIGEST_SIZE) + goto out; + + if (!nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->sha256_ops)); + + atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes)); + memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); +out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); + return rc; +} + +static int nx_sha256_export(struct shash_desc *desc, void *out) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + memcpy(out, sctx, sizeof(*sctx)); + + return 0; +} + +static int nx_sha256_import(struct shash_desc *desc, const void *in) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + memcpy(sctx, in, sizeof(*sctx)); + + return 0; +} + +struct shash_alg nx_shash_sha256_alg = { + .digestsize = SHA256_DIGEST_SIZE, + .init = nx_sha256_init, + .update = nx_sha256_update, + .final = nx_sha256_final, + .export = nx_sha256_export, + .import = nx_sha256_import, + .descsize = sizeof(struct sha256_state), + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_init = nx_crypto_ctx_sha_init, + .cra_exit = nx_crypto_ctx_exit, 
+ } +}; diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c new file mode 100644 index 000000000..b3adf1022 --- /dev/null +++ b/drivers/crypto/nx/nx-sha512.c @@ -0,0 +1,279 @@ +/** + * SHA-512 routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2011-2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/internal/hash.h> +#include <crypto/sha.h> +#include <linux/module.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +static int nx_sha512_init(struct shash_desc *desc) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + int len; + int rc; + + nx_ctx_init(nx_ctx, HCOP_FC_SHA); + + memset(sctx, 0, sizeof *sctx); + + nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512]; + + NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512); + + len = SHA512_DIGEST_SIZE; + rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, + &nx_ctx->op.outlen, + &len, + (u8 *)sctx->state, + NX_DS_SHA512); + + if (rc || len != SHA512_DIGEST_SIZE) + goto out; + + sctx->state[0] = __cpu_to_be64(SHA512_H0); + sctx->state[1] = __cpu_to_be64(SHA512_H1); + sctx->state[2] = __cpu_to_be64(SHA512_H2); + sctx->state[3] = __cpu_to_be64(SHA512_H3); + sctx->state[4] = __cpu_to_be64(SHA512_H4); + sctx->state[5] = __cpu_to_be64(SHA512_H5); + sctx->state[6] = __cpu_to_be64(SHA512_H6); + sctx->state[7] = __cpu_to_be64(SHA512_H7); + sctx->count[0] = 0; + +out: + return 0; +} + +static int nx_sha512_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + u64 to_process, leftover = 0, total; + unsigned long irq_flags; + int rc = 0; + int data_len; + u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE); + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + /* 2 cases for total data len: + * 1: < SHA512_BLOCK_SIZE: copy into state, return 0 + * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover + */ + total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len; + if (total < SHA512_BLOCK_SIZE) { + memcpy(sctx->buf + buf_len, data, len); + sctx->count[0] += len; + goto out; + } + + memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE); + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + + do { + /* + * to_process: the SHA512_BLOCK_SIZE data chunk to process in + * this update. This value is also restricted by the sg list + * limits. 
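+ * (Same scheme as the SHA-256 path, but with 128 byte
+ * SHA512_BLOCK_SIZE blocks.)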
+ */ + to_process = total - leftover; + to_process = to_process & ~(SHA512_BLOCK_SIZE - 1); + leftover = total - to_process; + + if (buf_len) { + data_len = buf_len; + rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, + &nx_ctx->op.inlen, + &data_len, + (u8 *) sctx->buf, + NX_DS_SHA512); + + if (rc || data_len != buf_len) + goto out; + } + + data_len = to_process - buf_len; + rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, + &nx_ctx->op.inlen, + &data_len, + (u8 *) data, + NX_DS_SHA512); + + if (rc || data_len != (to_process - buf_len)) + goto out; + + to_process = (data_len + buf_len); + leftover = total - to_process; + + /* + * we've hit the nx chip previously and we're updating + * again, so copy over the partial digest. + */ + memcpy(csbcpb->cpb.sha512.input_partial_digest, + csbcpb->cpb.sha512.message_digest, + SHA512_DIGEST_SIZE); + + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->sha512_ops)); + + total -= to_process; + data += to_process - buf_len; + buf_len = 0; + + } while (leftover >= SHA512_BLOCK_SIZE); + + /* copy the leftover back into the state struct */ + if (leftover) + memcpy(sctx->buf, data, leftover); + sctx->count[0] += len; + memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); +out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); + return rc; +} + +static int nx_sha512_final(struct shash_desc *desc, u8 *out) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + u64 count0; + unsigned long irq_flags; + int rc; + int len; + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + /* final is represented by continuing the operation and indicating that + * this is not an intermediate operation */ + if (sctx->count[0] >= SHA512_BLOCK_SIZE) { + /* we've hit the nx chip previously, now we're finalizing, + * so copy over the partial digest */ + memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state, + SHA512_DIGEST_SIZE); + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + } else { + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; + } + + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + + count0 = sctx->count[0] * 8; + + csbcpb->cpb.sha512.message_bit_length_lo = count0; + + len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1); + rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, + &nx_ctx->op.inlen, + &len, + (u8 *)sctx->buf, + NX_DS_SHA512); + + if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) + goto out; + + len = SHA512_DIGEST_SIZE; + rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, + &nx_ctx->op.outlen, + &len, + out, + NX_DS_SHA512); + + if (rc) + goto out; + + if (!nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->sha512_ops)); + atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes)); + + memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); +out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); + return rc; +} + +static int nx_sha512_export(struct shash_desc *desc, void *out) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + + memcpy(out, sctx, sizeof(*sctx)); + + return 0; +} + 
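+/*
+ * export/import copy the whole struct sha512_state, so a partially
+ * hashed state can be saved from one request and resumed by another.
+ */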
+static int nx_sha512_import(struct shash_desc *desc, const void *in) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + + memcpy(sctx, in, sizeof(*sctx)); + + return 0; +} + +struct shash_alg nx_shash_sha512_alg = { + .digestsize = SHA512_DIGEST_SIZE, + .init = nx_sha512_init, + .update = nx_sha512_update, + .final = nx_sha512_final, + .export = nx_sha512_export, + .import = nx_sha512_import, + .descsize = sizeof(struct sha512_state), + .statesize = sizeof(struct sha512_state), + .base = { + .cra_name = "sha512", + .cra_driver_name = "sha512-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_init = nx_crypto_ctx_sha_init, + .cra_exit = nx_crypto_ctx_exit, + } +}; diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c new file mode 100644 index 000000000..1da6dc59d --- /dev/null +++ b/drivers/crypto/nx/nx.c @@ -0,0 +1,799 @@ +/** + * Routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2011-2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/internal/hash.h> +#include <crypto/hash.h> +#include <crypto/aes.h> +#include <crypto/sha.h> +#include <crypto/algapi.h> +#include <crypto/scatterwalk.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/crypto.h> +#include <linux/scatterlist.h> +#include <linux/device.h> +#include <linux/of.h> +#include <asm/hvcall.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +/** + * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure + * + * @nx_ctx: the crypto context handle + * @op: PFO operation struct to pass in + * @may_sleep: flag indicating the request can sleep + * + * Make the hcall, retrying while the hardware is busy. If we cannot yield + * the thread, limit the number of retries to 10 here. 
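+ * Returns 0 on success, or the error returned by vio_h_cop_sync(); failures
+ * are also recorded in the driver's error statistics.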
+ */ +int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx, + struct vio_pfo_op *op, + u32 may_sleep) +{ + int rc, retries = 10; + struct vio_dev *viodev = nx_driver.viodev; + + atomic_inc(&(nx_ctx->stats->sync_ops)); + + do { + rc = vio_h_cop_sync(viodev, op); + } while (rc == -EBUSY && !may_sleep && retries--); + + if (rc) { + dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d " + "hcall rc: %ld\n", rc, op->hcall_err); + atomic_inc(&(nx_ctx->stats->errors)); + atomic_set(&(nx_ctx->stats->last_error), op->hcall_err); + atomic_set(&(nx_ctx->stats->last_error_pid), current->pid); + } + + return rc; +} + +/** + * nx_build_sg_list - build an NX scatter list describing a single buffer + * + * @sg_head: pointer to the first scatter list element to build + * @start_addr: pointer to the linear buffer + * @len: length of the data at @start_addr + * @sgmax: the largest number of scatter list elements we're allowed to create + * + * This function will start writing nx_sg elements at @sg_head and keep + * writing them until all of the data from @start_addr is described or + * until sgmax elements have been written. Scatter list elements will be + * created such that none of the elements describes a buffer that crosses a 4K + * boundary. + */ +struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head, + u8 *start_addr, + unsigned int *len, + u32 sgmax) +{ + unsigned int sg_len = 0; + struct nx_sg *sg; + u64 sg_addr = (u64)start_addr; + u64 end_addr; + + /* determine the start and end for this address range - slightly + * different if this is in VMALLOC_REGION */ + if (is_vmalloc_addr(start_addr)) + sg_addr = page_to_phys(vmalloc_to_page(start_addr)) + + offset_in_page(sg_addr); + else + sg_addr = __pa(sg_addr); + + end_addr = sg_addr + *len; + + /* each iteration will write one struct nx_sg element and add the + * length of data described by that element to sg_len. Once @len bytes + * have been described (or @sgmax elements have been written), the + * loop ends. min_t is used to ensure @end_addr falls on the same page + * as sg_addr, if not, we need to create another nx_sg element for the + * data on the next page. + * + * Also when using vmalloc'ed data, every time that a system page + * boundary is crossed the physical address needs to be re-calculated. 
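+ * Returns a pointer to the nx_sg element following the last one written,
+ * and updates *len to the number of bytes actually described.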
+ */ + for (sg = sg_head; sg_len < *len; sg++) { + u64 next_page; + + sg->addr = sg_addr; + sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE), + end_addr); + + next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE; + sg->len = min_t(u64, sg_addr, next_page) - sg->addr; + sg_len += sg->len; + + if (sg_addr >= next_page && + is_vmalloc_addr(start_addr + sg_len)) { + sg_addr = page_to_phys(vmalloc_to_page( + start_addr + sg_len)); + end_addr = sg_addr + *len - sg_len; + } + + if ((sg - sg_head) == sgmax) { + pr_err("nx: scatter/gather list overflow, pid: %d\n", + current->pid); + sg++; + break; + } + } + *len = sg_len; + + /* return the moved sg_head pointer */ + return sg; +} + +/** + * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist + * + * @nx_dst: pointer to the first nx_sg element to write + * @sglen: max number of nx_sg entries we're allowed to write + * @sg_src: pointer to the source linux scatterlist to walk + * @start: number of bytes to fast-forward past at the beginning of @sg_src + * @src_len: number of bytes to walk in @sg_src + */ +struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst, + unsigned int sglen, + struct scatterlist *sg_src, + unsigned int start, + unsigned int *src_len) +{ + struct scatter_walk walk; + struct nx_sg *nx_sg = nx_dst; + unsigned int n, offset = 0, len = *src_len; + char *dst; + + /* we need to fast forward through @start bytes first */ + for (;;) { + scatterwalk_start(&walk, sg_src); + + if (start < offset + sg_src->length) + break; + + offset += sg_src->length; + sg_src = sg_next(sg_src); + } + + /* start - offset is the number of bytes to advance in the scatterlist + * element we're currently looking at */ + scatterwalk_advance(&walk, start - offset); + + while (len && (nx_sg - nx_dst) < sglen) { + n = scatterwalk_clamp(&walk, len); + if (!n) { + /* In cases where we have scatterlist chain sg_next + * handles with it properly */ + scatterwalk_start(&walk, sg_next(walk.sg)); + n = scatterwalk_clamp(&walk, len); + } + dst = scatterwalk_map(&walk); + + nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst)); + len -= n; + + scatterwalk_unmap(dst); + scatterwalk_advance(&walk, n); + scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len); + } + /* update to_process */ + *src_len -= len; + + /* return the moved destination pointer */ + return nx_sg; +} + +/** + * trim_sg_list - ensures the bound in sg list. + * @sg: sg list head + * @end: sg lisg end + * @delta: is the amount we need to crop in order to bound the list. + * + */ +static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta) +{ + while (delta && end > sg) { + struct nx_sg *last = end - 1; + + if (last->len > delta) { + last->len -= delta; + delta = 0; + } else { + end--; + delta -= last->len; + } + } + return (sg - end) * sizeof(struct nx_sg); +} + +/** + * nx_sha_build_sg_list - walk and build sg list to sha modes + * using right bounds and limits. 
+ * @nx_ctx: NX crypto context for the lists we're building + * @nx_sg: current sg list in or out list + * @op_len: current op_len to be used in order to build a sg list + * @nbytes: number or bytes to be processed + * @offset: buf offset + * @mode: SHA256 or SHA512 + */ +int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx, + struct nx_sg *nx_in_outsg, + s64 *op_len, + unsigned int *nbytes, + u8 *offset, + u32 mode) +{ + unsigned int delta = 0; + unsigned int total = *nbytes; + struct nx_sg *nx_insg = nx_in_outsg; + unsigned int max_sg_len; + + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + + *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen); + nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len); + + switch (mode) { + case NX_DS_SHA256: + if (*nbytes < total) + delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1)); + break; + case NX_DS_SHA512: + if (*nbytes < total) + delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1)); + break; + default: + return -EINVAL; + } + *op_len = trim_sg_list(nx_in_outsg, nx_insg, delta); + + return 0; +} + +/** + * nx_build_sg_lists - walk the input scatterlists and build arrays of NX + * scatterlists based on them. + * + * @nx_ctx: NX crypto context for the lists we're building + * @desc: the block cipher descriptor for the operation + * @dst: destination scatterlist + * @src: source scatterlist + * @nbytes: length of data described in the scatterlists + * @offset: number of bytes to fast-forward past at the beginning of + * scatterlists. + * @iv: destination for the iv data, if the algorithm requires it + * + * This is common code shared by all the AES algorithms. It uses the block + * cipher walk routines to traverse input and output scatterlists, building + * corresponding NX scatterlists + */ +int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, + struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int *nbytes, + unsigned int offset, + u8 *iv) +{ + unsigned int delta = 0; + unsigned int total = *nbytes; + struct nx_sg *nx_insg = nx_ctx->in_sg; + struct nx_sg *nx_outsg = nx_ctx->out_sg; + unsigned int max_sg_len; + + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + + if (iv) + memcpy(iv, desc->info, AES_BLOCK_SIZE); + + *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen); + + nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst, + offset, nbytes); + nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src, + offset, nbytes); + + if (*nbytes < total) + delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1)); + + /* these lengths should be negative, which will indicate to phyp that + * the input and output parameters are scatterlists, not linear + * buffers */ + nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta); + nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta); + + return 0; +} + +/** + * nx_ctx_init - initialize an nx_ctx's vio_pfo_op struct + * + * @nx_ctx: the nx context to initialize + * @function: the function code for the op + */ +void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function) +{ + spin_lock_init(&nx_ctx->lock); + memset(nx_ctx->kmem, 0, nx_ctx->kmem_len); + nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT; + + nx_ctx->op.flags = function; + nx_ctx->op.csbcpb = __pa(nx_ctx->csbcpb); + 
nx_ctx->op.in = __pa(nx_ctx->in_sg); + nx_ctx->op.out = __pa(nx_ctx->out_sg); + + if (nx_ctx->csbcpb_aead) { + nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT; + + nx_ctx->op_aead.flags = function; + nx_ctx->op_aead.csbcpb = __pa(nx_ctx->csbcpb_aead); + nx_ctx->op_aead.in = __pa(nx_ctx->in_sg); + nx_ctx->op_aead.out = __pa(nx_ctx->out_sg); + } +} + +static void nx_of_update_status(struct device *dev, + struct property *p, + struct nx_of *props) +{ + if (!strncmp(p->value, "okay", p->length)) { + props->status = NX_WAITING; + props->flags |= NX_OF_FLAG_STATUS_SET; + } else { + dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__, + (char *)p->value); + } +} + +static void nx_of_update_sglen(struct device *dev, + struct property *p, + struct nx_of *props) +{ + if (p->length != sizeof(props->max_sg_len)) { + dev_err(dev, "%s: unexpected format for " + "ibm,max-sg-len property\n", __func__); + dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes " + "long, expected %zd bytes\n", __func__, + p->length, sizeof(props->max_sg_len)); + return; + } + + props->max_sg_len = *(u32 *)p->value; + props->flags |= NX_OF_FLAG_MAXSGLEN_SET; +} + +static void nx_of_update_msc(struct device *dev, + struct property *p, + struct nx_of *props) +{ + struct msc_triplet *trip; + struct max_sync_cop *msc; + unsigned int bytes_so_far, i, lenp; + + msc = (struct max_sync_cop *)p->value; + lenp = p->length; + + /* You can't tell if the data read in for this property is sane by its + * size alone. This is because there are sizes embedded in the data + * structure. The best we can do is check lengths as we parse and bail + * as soon as a length error is detected. */ + bytes_so_far = 0; + + while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) { + bytes_so_far += sizeof(struct max_sync_cop); + + trip = msc->trip; + + for (i = 0; + ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) && + i < msc->triplets; + i++) { + if (msc->fc > NX_MAX_FC || msc->mode > NX_MAX_MODE) { + dev_err(dev, "unknown function code/mode " + "combo: %d/%d (ignored)\n", msc->fc, + msc->mode); + goto next_loop; + } + + switch (trip->keybitlen) { + case 128: + case 160: + props->ap[msc->fc][msc->mode][0].databytelen = + trip->databytelen; + props->ap[msc->fc][msc->mode][0].sglen = + trip->sglen; + break; + case 192: + props->ap[msc->fc][msc->mode][1].databytelen = + trip->databytelen; + props->ap[msc->fc][msc->mode][1].sglen = + trip->sglen; + break; + case 256: + if (msc->fc == NX_FC_AES) { + props->ap[msc->fc][msc->mode][2]. + databytelen = trip->databytelen; + props->ap[msc->fc][msc->mode][2].sglen = + trip->sglen; + } else if (msc->fc == NX_FC_AES_HMAC || + msc->fc == NX_FC_SHA) { + props->ap[msc->fc][msc->mode][1]. 
+ databytelen = trip->databytelen; + props->ap[msc->fc][msc->mode][1].sglen = + trip->sglen; + } else { + dev_warn(dev, "unknown function " + "code/key bit len combo" + ": (%u/256)\n", msc->fc); + } + break; + case 512: + props->ap[msc->fc][msc->mode][2].databytelen = + trip->databytelen; + props->ap[msc->fc][msc->mode][2].sglen = + trip->sglen; + break; + default: + dev_warn(dev, "unknown function code/key bit " + "len combo: (%u/%u)\n", msc->fc, + trip->keybitlen); + break; + } +next_loop: + bytes_so_far += sizeof(struct msc_triplet); + trip++; + } + + msc = (struct max_sync_cop *)trip; + } + + props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET; +} + +/** + * nx_of_init - read openFirmware values from the device tree + * + * @dev: device handle + * @props: pointer to struct to hold the properties values + * + * Called once at driver probe time, this function will read out the + * openFirmware properties we use at runtime. If all the OF properties are + * acceptable, when we exit this function props->flags will indicate that + * we're ready to register our crypto algorithms. + */ +static void nx_of_init(struct device *dev, struct nx_of *props) +{ + struct device_node *base_node = dev->of_node; + struct property *p; + + p = of_find_property(base_node, "status", NULL); + if (!p) + dev_info(dev, "%s: property 'status' not found\n", __func__); + else + nx_of_update_status(dev, p, props); + + p = of_find_property(base_node, "ibm,max-sg-len", NULL); + if (!p) + dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n", + __func__); + else + nx_of_update_sglen(dev, p, props); + + p = of_find_property(base_node, "ibm,max-sync-cop", NULL); + if (!p) + dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n", + __func__); + else + nx_of_update_msc(dev, p, props); +} + +/** + * nx_register_algs - register algorithms with the crypto API + * + * Called from nx_probe() + * + * If all OF properties are in an acceptable state, the driver flags will + * indicate that we're ready and we'll create our debugfs files and register + * out crypto algorithms. 
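+ *
+ * On any registration failure, the algorithms registered so far are
+ * unregistered before the error is returned.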
+ */ +static int nx_register_algs(void) +{ + int rc = -1; + + if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY) + goto out; + + memset(&nx_driver.stats, 0, sizeof(struct nx_stats)); + + rc = NX_DEBUGFS_INIT(&nx_driver); + if (rc) + goto out; + + nx_driver.of.status = NX_OKAY; + + rc = crypto_register_alg(&nx_ecb_aes_alg); + if (rc) + goto out; + + rc = crypto_register_alg(&nx_cbc_aes_alg); + if (rc) + goto out_unreg_ecb; + + rc = crypto_register_alg(&nx_ctr_aes_alg); + if (rc) + goto out_unreg_cbc; + + rc = crypto_register_alg(&nx_ctr3686_aes_alg); + if (rc) + goto out_unreg_ctr; + + rc = crypto_register_alg(&nx_gcm_aes_alg); + if (rc) + goto out_unreg_ctr3686; + + rc = crypto_register_alg(&nx_gcm4106_aes_alg); + if (rc) + goto out_unreg_gcm; + + rc = crypto_register_alg(&nx_ccm_aes_alg); + if (rc) + goto out_unreg_gcm4106; + + rc = crypto_register_alg(&nx_ccm4309_aes_alg); + if (rc) + goto out_unreg_ccm; + + rc = crypto_register_shash(&nx_shash_sha256_alg); + if (rc) + goto out_unreg_ccm4309; + + rc = crypto_register_shash(&nx_shash_sha512_alg); + if (rc) + goto out_unreg_s256; + + rc = crypto_register_shash(&nx_shash_aes_xcbc_alg); + if (rc) + goto out_unreg_s512; + + goto out; + +out_unreg_s512: + crypto_unregister_shash(&nx_shash_sha512_alg); +out_unreg_s256: + crypto_unregister_shash(&nx_shash_sha256_alg); +out_unreg_ccm4309: + crypto_unregister_alg(&nx_ccm4309_aes_alg); +out_unreg_ccm: + crypto_unregister_alg(&nx_ccm_aes_alg); +out_unreg_gcm4106: + crypto_unregister_alg(&nx_gcm4106_aes_alg); +out_unreg_gcm: + crypto_unregister_alg(&nx_gcm_aes_alg); +out_unreg_ctr3686: + crypto_unregister_alg(&nx_ctr3686_aes_alg); +out_unreg_ctr: + crypto_unregister_alg(&nx_ctr_aes_alg); +out_unreg_cbc: + crypto_unregister_alg(&nx_cbc_aes_alg); +out_unreg_ecb: + crypto_unregister_alg(&nx_ecb_aes_alg); +out: + return rc; +} + +/** + * nx_crypto_ctx_init - create and initialize a crypto api context + * + * @nx_ctx: the crypto api context + * @fc: function code for the context + * @mode: the function code specific mode for this context + */ +static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode) +{ + if (nx_driver.of.status != NX_OKAY) { + pr_err("Attempt to initialize NX crypto context while device " + "is not available!\n"); + return -ENODEV; + } + + /* we need an extra page for csbcpb_aead for these modes */ + if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM) + nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) + + sizeof(struct nx_csbcpb); + else + nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) + + sizeof(struct nx_csbcpb); + + nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL); + if (!nx_ctx->kmem) + return -ENOMEM; + + /* the csbcpb and scatterlists must be 4K aligned pages */ + nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem, + (u64)NX_PAGE_SIZE)); + nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE); + nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE); + + if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM) + nx_ctx->csbcpb_aead = + (struct nx_csbcpb *)((u8 *)nx_ctx->out_sg + + NX_PAGE_SIZE); + + /* give each context a pointer to global stats and their OF + * properties */ + nx_ctx->stats = &nx_driver.stats; + memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode], + sizeof(struct alg_props) * 3); + + return 0; +} + +/* entry points from the crypto tfm initializers */ +int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm) +{ + return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, + NX_MODE_AES_CCM); +} + +int 
nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm) +{ + return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, + NX_MODE_AES_GCM); +} + +int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm) +{ + return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, + NX_MODE_AES_CTR); +} + +int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm) +{ + return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, + NX_MODE_AES_CBC); +} + +int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm) +{ + return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, + NX_MODE_AES_ECB); +} + +int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm) +{ + return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA); +} + +int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm) +{ + return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, + NX_MODE_AES_XCBC_MAC); +} + +/** + * nx_crypto_ctx_exit - destroy a crypto api context + * + * @tfm: the crypto transform pointer for the context + * + * As crypto API contexts are destroyed, this exit hook is called to free the + * memory associated with it. + */ +void nx_crypto_ctx_exit(struct crypto_tfm *tfm) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + + kzfree(nx_ctx->kmem); + nx_ctx->csbcpb = NULL; + nx_ctx->csbcpb_aead = NULL; + nx_ctx->in_sg = NULL; + nx_ctx->out_sg = NULL; +} + +static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id) +{ + dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n", + viodev->name, viodev->resource_id); + + if (nx_driver.viodev) { + dev_err(&viodev->dev, "%s: Attempt to register more than one " + "instance of the hardware\n", __func__); + return -EINVAL; + } + + nx_driver.viodev = viodev; + + nx_of_init(&viodev->dev, &nx_driver.of); + + return nx_register_algs(); +} + +static int nx_remove(struct vio_dev *viodev) +{ + dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n", + viodev->unit_address); + + if (nx_driver.of.status == NX_OKAY) { + NX_DEBUGFS_FINI(&nx_driver); + + crypto_unregister_alg(&nx_ccm_aes_alg); + crypto_unregister_alg(&nx_ccm4309_aes_alg); + crypto_unregister_alg(&nx_gcm_aes_alg); + crypto_unregister_alg(&nx_gcm4106_aes_alg); + crypto_unregister_alg(&nx_ctr_aes_alg); + crypto_unregister_alg(&nx_ctr3686_aes_alg); + crypto_unregister_alg(&nx_cbc_aes_alg); + crypto_unregister_alg(&nx_ecb_aes_alg); + crypto_unregister_shash(&nx_shash_sha256_alg); + crypto_unregister_shash(&nx_shash_sha512_alg); + crypto_unregister_shash(&nx_shash_aes_xcbc_alg); + } + + return 0; +} + + +/* module wide initialization/cleanup */ +static int __init nx_init(void) +{ + return vio_register_driver(&nx_driver.viodriver); +} + +static void __exit nx_fini(void) +{ + vio_unregister_driver(&nx_driver.viodriver); +} + +static struct vio_device_id nx_crypto_driver_ids[] = { + { "ibm,sym-encryption-v1", "ibm,sym-encryption" }, + { "", "" } +}; +MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids); + +/* driver state structure */ +struct nx_crypto_driver nx_driver = { + .viodriver = { + .id_table = nx_crypto_driver_ids, + .probe = nx_probe, + .remove = nx_remove, + .name = NX_NAME, + }, +}; + +module_init(nx_init); +module_exit(nx_fini); + +MODULE_AUTHOR("Kent Yoder <yoder1@us.ibm.com>"); +MODULE_DESCRIPTION(NX_STRING); +MODULE_LICENSE("GPL"); +MODULE_VERSION(NX_VERSION); diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h new file mode 100644 index 000000000..6c9ecaaea --- /dev/null +++ b/drivers/crypto/nx/nx.h @@ -0,0 +1,196 @@ + +#ifndef __NX_H__ +#define __NX_H__ + +#define NX_NAME "nx-crypto" +#define NX_STRING 
"IBM Power7+ Nest Accelerator Crypto Driver" +#define NX_VERSION "1.0" + +static const char nx_driver_string[] = NX_STRING; +static const char nx_driver_version[] = NX_VERSION; + +/* a scatterlist in the format PHYP is expecting */ +struct nx_sg { + u64 addr; + u32 rsvd; + u32 len; +} __attribute((packed)); + +#define NX_PAGE_SIZE (4096) +#define NX_MAX_SG_ENTRIES (NX_PAGE_SIZE/(sizeof(struct nx_sg))) + +enum nx_status { + NX_DISABLED, + NX_WAITING, + NX_OKAY +}; + +/* msc_triplet and max_sync_cop are used only to assist in parsing the + * openFirmware property */ +struct msc_triplet { + u32 keybitlen; + u32 databytelen; + u32 sglen; +} __packed; + +struct max_sync_cop { + u32 fc; + u32 mode; + u32 triplets; + struct msc_triplet trip[0]; +} __packed; + +struct alg_props { + u32 databytelen; + u32 sglen; +}; + +#define NX_OF_FLAG_MAXSGLEN_SET (1) +#define NX_OF_FLAG_STATUS_SET (2) +#define NX_OF_FLAG_MAXSYNCCOP_SET (4) +#define NX_OF_FLAG_MASK_READY (NX_OF_FLAG_MAXSGLEN_SET | \ + NX_OF_FLAG_STATUS_SET | \ + NX_OF_FLAG_MAXSYNCCOP_SET) +struct nx_of { + u32 flags; + u32 max_sg_len; + enum nx_status status; + struct alg_props ap[NX_MAX_FC][NX_MAX_MODE][3]; +}; + +struct nx_stats { + atomic_t aes_ops; + atomic64_t aes_bytes; + atomic_t sha256_ops; + atomic64_t sha256_bytes; + atomic_t sha512_ops; + atomic64_t sha512_bytes; + + atomic_t sync_ops; + + atomic_t errors; + atomic_t last_error; + atomic_t last_error_pid; +}; + +struct nx_debugfs { + struct dentry *dfs_root; + struct dentry *dfs_aes_ops, *dfs_aes_bytes; + struct dentry *dfs_sha256_ops, *dfs_sha256_bytes; + struct dentry *dfs_sha512_ops, *dfs_sha512_bytes; + struct dentry *dfs_errors, *dfs_last_error, *dfs_last_error_pid; +}; + +struct nx_crypto_driver { + struct nx_stats stats; + struct nx_of of; + struct vio_dev *viodev; + struct vio_driver viodriver; + struct nx_debugfs dfs; +}; + +#define NX_GCM4106_NONCE_LEN (4) +#define NX_GCM_CTR_OFFSET (12) +struct nx_gcm_priv { + u8 iv[16]; + u8 iauth_tag[16]; + u8 nonce[NX_GCM4106_NONCE_LEN]; +}; + +#define NX_CCM_AES_KEY_LEN (16) +#define NX_CCM4309_AES_KEY_LEN (19) +#define NX_CCM4309_NONCE_LEN (3) +struct nx_ccm_priv { + u8 iv[16]; + u8 b0[16]; + u8 iauth_tag[16]; + u8 oauth_tag[16]; + u8 nonce[NX_CCM4309_NONCE_LEN]; +}; + +struct nx_xcbc_priv { + u8 key[16]; +}; + +struct nx_ctr_priv { + u8 iv[16]; +}; + +struct nx_crypto_ctx { + spinlock_t lock; /* synchronize access to the context */ + void *kmem; /* unaligned, kmalloc'd buffer */ + size_t kmem_len; /* length of kmem */ + struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */ + struct vio_pfo_op op; /* operation struct with hcall parameters */ + struct nx_csbcpb *csbcpb_aead; /* secondary csbcpb used by AEAD algs */ + struct vio_pfo_op op_aead;/* operation struct for csbcpb_aead */ + + struct nx_sg *in_sg; /* aligned pointer into kmem to an sg list */ + struct nx_sg *out_sg; /* aligned pointer into kmem to an sg list */ + + struct alg_props *ap; /* pointer into props based on our key size */ + struct alg_props props[3];/* openFirmware properties for requests */ + struct nx_stats *stats; /* pointer into an nx_crypto_driver for stats + reporting */ + + union { + struct nx_gcm_priv gcm; + struct nx_ccm_priv ccm; + struct nx_xcbc_priv xcbc; + struct nx_ctr_priv ctr; + } priv; +}; + +/* prototypes */ +int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm); +int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm); +int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm); +int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm); 
+int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm); +int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm); +int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm); +void nx_crypto_ctx_exit(struct crypto_tfm *tfm); +void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function); +int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op, + u32 may_sleep); +int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *, + s64 *, unsigned int *, u8 *, u32); +struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32); +int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *, + struct scatterlist *, struct scatterlist *, unsigned int *, + unsigned int, u8 *); +struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int, + struct scatterlist *, unsigned int, + unsigned int *); + +#ifdef CONFIG_DEBUG_FS +#define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv) +#define NX_DEBUGFS_FINI(drv) nx_debugfs_fini(drv) + +int nx_debugfs_init(struct nx_crypto_driver *); +void nx_debugfs_fini(struct nx_crypto_driver *); +#else +#define NX_DEBUGFS_INIT(drv) (0) +#define NX_DEBUGFS_FINI(drv) (0) +#endif + +#define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL) + +extern struct crypto_alg nx_cbc_aes_alg; +extern struct crypto_alg nx_ecb_aes_alg; +extern struct crypto_alg nx_gcm_aes_alg; +extern struct crypto_alg nx_gcm4106_aes_alg; +extern struct crypto_alg nx_ctr_aes_alg; +extern struct crypto_alg nx_ctr3686_aes_alg; +extern struct crypto_alg nx_ccm_aes_alg; +extern struct crypto_alg nx_ccm4309_aes_alg; +extern struct shash_alg nx_shash_aes_xcbc_alg; +extern struct shash_alg nx_shash_sha512_alg; +extern struct shash_alg nx_shash_sha256_alg; + +extern struct nx_crypto_driver nx_driver; + +#define SCATTERWALK_TO_SG 1 +#define SCATTERWALK_FROM_SG 0 + +#endif diff --git a/drivers/crypto/nx/nx_csbcpb.h b/drivers/crypto/nx/nx_csbcpb.h new file mode 100644 index 000000000..a304f956d --- /dev/null +++ b/drivers/crypto/nx/nx_csbcpb.h @@ -0,0 +1,205 @@ + +#ifndef __NX_CSBCPB_H__ +#define __NX_CSBCPB_H__ + +struct cop_symcpb_aes_ecb { + u8 key[32]; + u8 __rsvd[80]; +} __packed; + +struct cop_symcpb_aes_cbc { + u8 iv[16]; + u8 key[32]; + u8 cv[16]; + u32 spbc; + u8 __rsvd[44]; +} __packed; + +struct cop_symcpb_aes_gca { + u8 in_pat[16]; + u8 key[32]; + u8 out_pat[16]; + u32 spbc; + u8 __rsvd[44]; +} __packed; + +struct cop_symcpb_aes_gcm { + u8 in_pat_or_aad[16]; + u8 iv_or_cnt[16]; + u64 bit_length_aad; + u64 bit_length_data; + u8 in_s0[16]; + u8 key[32]; + u8 __rsvd1[16]; + u8 out_pat_or_mac[16]; + u8 out_s0[16]; + u8 out_cnt[16]; + u32 spbc; + u8 __rsvd2[12]; +} __packed; + +struct cop_symcpb_aes_ctr { + u8 iv[16]; + u8 key[32]; + u8 cv[16]; + u32 spbc; + u8 __rsvd2[44]; +} __packed; + +struct cop_symcpb_aes_cca { + u8 b0[16]; + u8 b1[16]; + u8 key[16]; + u8 out_pat_or_b0[16]; + u32 spbc; + u8 __rsvd[44]; +} __packed; + +struct cop_symcpb_aes_ccm { + u8 in_pat_or_b0[16]; + u8 iv_or_ctr[16]; + u8 in_s0[16]; + u8 key[16]; + u8 __rsvd1[48]; + u8 out_pat_or_mac[16]; + u8 out_s0[16]; + u8 out_ctr[16]; + u32 spbc; + u8 __rsvd2[12]; +} __packed; + +struct cop_symcpb_aes_xcbc { + u8 cv[16]; + u8 key[16]; + u8 __rsvd1[16]; + u8 out_cv_mac[16]; + u32 spbc; + u8 __rsvd2[44]; +} __packed; + +struct cop_symcpb_sha256 { + u64 message_bit_length; + u64 __rsvd1; + u8 input_partial_digest[32]; + u8 message_digest[32]; + u32 spbc; + u8 __rsvd2[44]; +} __packed; + +struct cop_symcpb_sha512 { + u64 message_bit_length_hi; + u64 message_bit_length_lo; + u8 input_partial_digest[64]; + u8 
__rsvd1[32]; + u8 message_digest[64]; + u32 spbc; + u8 __rsvd2[76]; +} __packed; + +#define NX_FDM_INTERMEDIATE 0x01 +#define NX_FDM_CONTINUATION 0x02 +#define NX_FDM_ENDE_ENCRYPT 0x80 + +#define NX_CPB_FDM(c) ((c)->cpb.hdr.fdm) +#define NX_CPB_KS_DS(c) ((c)->cpb.hdr.ks_ds) + +#define NX_CPB_KEY_SIZE(c) (NX_CPB_KS_DS(c) >> 4) +#define NX_CPB_SET_KEY_SIZE(c, x) NX_CPB_KS_DS(c) |= ((x) << 4) +#define NX_CPB_SET_DIGEST_SIZE(c, x) NX_CPB_KS_DS(c) |= (x) + +struct cop_symcpb_header { + u8 mode; + u8 fdm; + u8 ks_ds; + u8 pad_byte; + u8 __rsvd[12]; +} __packed; + +struct cop_parameter_block { + struct cop_symcpb_header hdr; + union { + struct cop_symcpb_aes_ecb aes_ecb; + struct cop_symcpb_aes_cbc aes_cbc; + struct cop_symcpb_aes_gca aes_gca; + struct cop_symcpb_aes_gcm aes_gcm; + struct cop_symcpb_aes_cca aes_cca; + struct cop_symcpb_aes_ccm aes_ccm; + struct cop_symcpb_aes_ctr aes_ctr; + struct cop_symcpb_aes_xcbc aes_xcbc; + struct cop_symcpb_sha256 sha256; + struct cop_symcpb_sha512 sha512; + }; +} __packed; + +#define NX_CSB_VALID_BIT 0x80 + +/* co-processor status block */ +struct cop_status_block { + u8 valid; + u8 crb_seq_number; + u8 completion_code; + u8 completion_extension; + u32 processed_byte_count; + u64 address; +} __packed; + +/* Nest accelerator workbook section 4.4 */ +struct nx_csbcpb { + unsigned char __rsvd[112]; + struct cop_status_block csb; + struct cop_parameter_block cpb; +} __packed; + +/* nx_csbcpb related definitions */ +#define NX_MODE_AES_ECB 0 +#define NX_MODE_AES_CBC 1 +#define NX_MODE_AES_GMAC 2 +#define NX_MODE_AES_GCA 3 +#define NX_MODE_AES_GCM 4 +#define NX_MODE_AES_CCA 5 +#define NX_MODE_AES_CCM 6 +#define NX_MODE_AES_CTR 7 +#define NX_MODE_AES_XCBC_MAC 20 +#define NX_MODE_SHA 0 +#define NX_MODE_SHA_HMAC 1 +#define NX_MODE_AES_CBC_HMAC_ETA 8 +#define NX_MODE_AES_CBC_HMAC_ATE 9 +#define NX_MODE_AES_CBC_HMAC_EAA 10 +#define NX_MODE_AES_CTR_HMAC_ETA 12 +#define NX_MODE_AES_CTR_HMAC_ATE 13 +#define NX_MODE_AES_CTR_HMAC_EAA 14 + +#define NX_FDM_CI_FULL 0 +#define NX_FDM_CI_FIRST 1 +#define NX_FDM_CI_LAST 2 +#define NX_FDM_CI_MIDDLE 3 + +#define NX_FDM_PR_NONE 0 +#define NX_FDM_PR_PAD 1 + +#define NX_KS_AES_128 1 +#define NX_KS_AES_192 2 +#define NX_KS_AES_256 3 + +#define NX_DS_SHA256 2 +#define NX_DS_SHA512 3 + +#define NX_FC_AES 0 +#define NX_FC_SHA 2 +#define NX_FC_AES_HMAC 6 + +#define NX_MAX_FC (NX_FC_AES_HMAC + 1) +#define NX_MAX_MODE (NX_MODE_AES_XCBC_MAC + 1) + +#define HCOP_FC_AES NX_FC_AES +#define HCOP_FC_SHA NX_FC_SHA +#define HCOP_FC_AES_HMAC NX_FC_AES_HMAC + +/* indices into the array of algorithm properties */ +#define NX_PROPS_AES_128 0 +#define NX_PROPS_AES_192 1 +#define NX_PROPS_AES_256 2 +#define NX_PROPS_SHA256 1 +#define NX_PROPS_SHA512 2 + +#endif diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c new file mode 100644 index 000000000..7ab2e8dcd --- /dev/null +++ b/drivers/crypto/nx/nx_debugfs.c @@ -0,0 +1,103 @@ +/** + * debugfs routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2011-2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <linux/device.h> +#include <linux/kobject.h> +#include <linux/string.h> +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/crypto.h> +#include <crypto/hash.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + +#ifdef CONFIG_DEBUG_FS + +/* + * debugfs + * + * For documentation on these attributes, please see: + * + * Documentation/ABI/testing/debugfs-pfo-nx-crypto + */ + +int nx_debugfs_init(struct nx_crypto_driver *drv) +{ + struct nx_debugfs *dfs = &drv->dfs; + + dfs->dfs_root = debugfs_create_dir(NX_NAME, NULL); + + dfs->dfs_aes_ops = + debugfs_create_u32("aes_ops", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, (u32 *)&drv->stats.aes_ops); + dfs->dfs_sha256_ops = + debugfs_create_u32("sha256_ops", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, + (u32 *)&drv->stats.sha256_ops); + dfs->dfs_sha512_ops = + debugfs_create_u32("sha512_ops", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, + (u32 *)&drv->stats.sha512_ops); + dfs->dfs_aes_bytes = + debugfs_create_u64("aes_bytes", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, + (u64 *)&drv->stats.aes_bytes); + dfs->dfs_sha256_bytes = + debugfs_create_u64("sha256_bytes", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, + (u64 *)&drv->stats.sha256_bytes); + dfs->dfs_sha512_bytes = + debugfs_create_u64("sha512_bytes", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, + (u64 *)&drv->stats.sha512_bytes); + dfs->dfs_errors = + debugfs_create_u32("errors", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, (u32 *)&drv->stats.errors); + dfs->dfs_last_error = + debugfs_create_u32("last_error", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, + (u32 *)&drv->stats.last_error); + dfs->dfs_last_error_pid = + debugfs_create_u32("last_error_pid", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, + (u32 *)&drv->stats.last_error_pid); + return 0; +} + +void +nx_debugfs_fini(struct nx_crypto_driver *drv) +{ + debugfs_remove_recursive(drv->dfs.dfs_root); +} + +#endif diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c new file mode 100644 index 000000000..9a28b7e07 --- /dev/null +++ b/drivers/crypto/omap-aes.c @@ -0,0 +1,1335 @@ +/* + * Cryptographic API. + * + * Support for OMAP AES HW acceleration. + * + * Copyright (c) 2010 Nokia Corporation + * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com> + * Copyright (c) 2011 Texas Instruments Incorporated + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + */ + +#define pr_fmt(fmt) "%20s: " fmt, __func__ +#define prn(num) pr_debug(#num "=%d\n", num) +#define prx(num) pr_debug(#num "=%x\n", num) + +#include <linux/err.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/omap-dma.h> +#include <linux/pm_runtime.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_address.h> +#include <linux/io.h> +#include <linux/crypto.h> +#include <linux/interrupt.h> +#include <crypto/scatterwalk.h> +#include <crypto/aes.h> + +#define DST_MAXBURST 4 +#define DMA_MIN (DST_MAXBURST * sizeof(u32)) + +#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset) + +/* OMAP TRM gives bitfields as start:end, where start is the higher bit + number. For example 7:0 */ +#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) +#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) + +#define AES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \ + ((x ^ 0x01) * 0x04)) +#define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04)) + +#define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs) +#define AES_REG_CTRL_CTR_WIDTH_MASK (3 << 7) +#define AES_REG_CTRL_CTR_WIDTH_32 (0 << 7) +#define AES_REG_CTRL_CTR_WIDTH_64 (1 << 7) +#define AES_REG_CTRL_CTR_WIDTH_96 (2 << 7) +#define AES_REG_CTRL_CTR_WIDTH_128 (3 << 7) +#define AES_REG_CTRL_CTR (1 << 6) +#define AES_REG_CTRL_CBC (1 << 5) +#define AES_REG_CTRL_KEY_SIZE (3 << 3) +#define AES_REG_CTRL_DIRECTION (1 << 2) +#define AES_REG_CTRL_INPUT_READY (1 << 1) +#define AES_REG_CTRL_OUTPUT_READY (1 << 0) + +#define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04)) + +#define AES_REG_REV(dd) ((dd)->pdata->rev_ofs) + +#define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs) +#define AES_REG_MASK_SIDLE (1 << 6) +#define AES_REG_MASK_START (1 << 5) +#define AES_REG_MASK_DMA_OUT_EN (1 << 3) +#define AES_REG_MASK_DMA_IN_EN (1 << 2) +#define AES_REG_MASK_SOFTRESET (1 << 1) +#define AES_REG_AUTOIDLE (1 << 0) + +#define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04)) + +#define AES_REG_IRQ_STATUS(dd) ((dd)->pdata->irq_status_ofs) +#define AES_REG_IRQ_ENABLE(dd) ((dd)->pdata->irq_enable_ofs) +#define AES_REG_IRQ_DATA_IN BIT(1) +#define AES_REG_IRQ_DATA_OUT BIT(2) +#define DEFAULT_TIMEOUT (5*HZ) + +#define FLAGS_MODE_MASK 0x000f +#define FLAGS_ENCRYPT BIT(0) +#define FLAGS_CBC BIT(1) +#define FLAGS_GIV BIT(2) +#define FLAGS_CTR BIT(3) + +#define FLAGS_INIT BIT(4) +#define FLAGS_FAST BIT(5) +#define FLAGS_BUSY BIT(6) + +#define AES_BLOCK_WORDS (AES_BLOCK_SIZE >> 2) + +struct omap_aes_ctx { + struct omap_aes_dev *dd; + + int keylen; + u32 key[AES_KEYSIZE_256 / sizeof(u32)]; + unsigned long flags; +}; + +struct omap_aes_reqctx { + unsigned long mode; +}; + +#define OMAP_AES_QUEUE_LENGTH 1 +#define OMAP_AES_CACHE_SIZE 0 + +struct omap_aes_algs_info { + struct crypto_alg *algs_list; + unsigned int size; + unsigned int registered; +}; + +struct omap_aes_pdata { + struct omap_aes_algs_info *algs_info; + unsigned int algs_info_size; + + void (*trigger)(struct omap_aes_dev *dd, int length); + + u32 key_ofs; + u32 iv_ofs; + u32 ctrl_ofs; + u32 data_ofs; + u32 rev_ofs; + u32 mask_ofs; + u32 irq_enable_ofs; + u32 irq_status_ofs; + + u32 dma_enable_in; + u32 dma_enable_out; + u32 dma_start; + + u32 major_mask; + u32 major_shift; + u32 minor_mask; + u32 minor_shift; +}; + +struct omap_aes_dev { 
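+	/*
+	 * Per-controller state: MMIO base, the request currently being
+	 * processed together with its scatterlists, the software request
+	 * queue, and the DMA channels feeding the AES data FIFO.
+	 */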
+ struct list_head list; + unsigned long phys_base; + void __iomem *io_base; + struct omap_aes_ctx *ctx; + struct device *dev; + unsigned long flags; + int err; + + spinlock_t lock; + struct crypto_queue queue; + + struct tasklet_struct done_task; + struct tasklet_struct queue_task; + + struct ablkcipher_request *req; + + /* + * total is used by PIO mode for book keeping so introduce + * variable total_save as need it to calc page_order + */ + size_t total; + size_t total_save; + + struct scatterlist *in_sg; + struct scatterlist *out_sg; + + /* Buffers for copying for unaligned cases */ + struct scatterlist in_sgl; + struct scatterlist out_sgl; + struct scatterlist *orig_out; + int sgs_copied; + + struct scatter_walk in_walk; + struct scatter_walk out_walk; + int dma_in; + struct dma_chan *dma_lch_in; + int dma_out; + struct dma_chan *dma_lch_out; + int in_sg_len; + int out_sg_len; + int pio_only; + const struct omap_aes_pdata *pdata; +}; + +/* keep registered devices data here */ +static LIST_HEAD(dev_list); +static DEFINE_SPINLOCK(list_lock); + +#ifdef DEBUG +#define omap_aes_read(dd, offset) \ +({ \ + int _read_ret; \ + _read_ret = __raw_readl(dd->io_base + offset); \ + pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n", \ + offset, _read_ret); \ + _read_ret; \ +}) +#else +static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset) +{ + return __raw_readl(dd->io_base + offset); +} +#endif + +#ifdef DEBUG +#define omap_aes_write(dd, offset, value) \ + do { \ + pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n", \ + offset, value); \ + __raw_writel(value, dd->io_base + offset); \ + } while (0) +#else +static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset, + u32 value) +{ + __raw_writel(value, dd->io_base + offset); +} +#endif + +static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset, + u32 value, u32 mask) +{ + u32 val; + + val = omap_aes_read(dd, offset); + val &= ~mask; + val |= value; + omap_aes_write(dd, offset, val); +} + +static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset, + u32 *value, int count) +{ + for (; count--; value++, offset += 4) + omap_aes_write(dd, offset, *value); +} + +static int omap_aes_hw_init(struct omap_aes_dev *dd) +{ + if (!(dd->flags & FLAGS_INIT)) { + dd->flags |= FLAGS_INIT; + dd->err = 0; + } + + return 0; +} + +static int omap_aes_write_ctrl(struct omap_aes_dev *dd) +{ + unsigned int key32; + int i, err; + u32 val, mask = 0; + + err = omap_aes_hw_init(dd); + if (err) + return err; + + key32 = dd->ctx->keylen / sizeof(u32); + + /* it seems a key should always be set even if it has not changed */ + for (i = 0; i < key32; i++) { + omap_aes_write(dd, AES_REG_KEY(dd, i), + __le32_to_cpu(dd->ctx->key[i])); + } + + if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info) + omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4); + + val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3); + if (dd->flags & FLAGS_CBC) + val |= AES_REG_CTRL_CBC; + if (dd->flags & FLAGS_CTR) { + val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128; + mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK; + } + if (dd->flags & FLAGS_ENCRYPT) + val |= AES_REG_CTRL_DIRECTION; + + mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION | + AES_REG_CTRL_KEY_SIZE; + + omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask); + + return 0; +} + +static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length) +{ + u32 mask, val; + + val = dd->pdata->dma_start; + + if (dd->dma_lch_out != NULL) + val |= dd->pdata->dma_enable_out; + 
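+	/* the input channel enable goes into the same register write below */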
if (dd->dma_lch_in != NULL) + val |= dd->pdata->dma_enable_in; + + mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in | + dd->pdata->dma_start; + + omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask); + +} + +static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length) +{ + omap_aes_write(dd, AES_REG_LENGTH_N(0), length); + omap_aes_write(dd, AES_REG_LENGTH_N(1), 0); + + omap_aes_dma_trigger_omap2(dd, length); +} + +static void omap_aes_dma_stop(struct omap_aes_dev *dd) +{ + u32 mask; + + mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in | + dd->pdata->dma_start; + + omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask); +} + +static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx) +{ + struct omap_aes_dev *dd = NULL, *tmp; + + spin_lock_bh(&list_lock); + if (!ctx->dd) { + list_for_each_entry(tmp, &dev_list, list) { + /* FIXME: take fist available aes core */ + dd = tmp; + break; + } + ctx->dd = dd; + } else { + /* already found before */ + dd = ctx->dd; + } + spin_unlock_bh(&list_lock); + + return dd; +} + +static void omap_aes_dma_out_callback(void *data) +{ + struct omap_aes_dev *dd = data; + + /* dma_lch_out - completed */ + tasklet_schedule(&dd->done_task); +} + +static int omap_aes_dma_init(struct omap_aes_dev *dd) +{ + int err = -ENOMEM; + dma_cap_mask_t mask; + + dd->dma_lch_out = NULL; + dd->dma_lch_in = NULL; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + dd->dma_lch_in = dma_request_slave_channel_compat(mask, + omap_dma_filter_fn, + &dd->dma_in, + dd->dev, "rx"); + if (!dd->dma_lch_in) { + dev_err(dd->dev, "Unable to request in DMA channel\n"); + goto err_dma_in; + } + + dd->dma_lch_out = dma_request_slave_channel_compat(mask, + omap_dma_filter_fn, + &dd->dma_out, + dd->dev, "tx"); + if (!dd->dma_lch_out) { + dev_err(dd->dev, "Unable to request out DMA channel\n"); + goto err_dma_out; + } + + return 0; + +err_dma_out: + dma_release_channel(dd->dma_lch_in); +err_dma_in: + if (err) + pr_err("error: %d\n", err); + return err; +} + +static void omap_aes_dma_cleanup(struct omap_aes_dev *dd) +{ + dma_release_channel(dd->dma_lch_out); + dma_release_channel(dd->dma_lch_in); +} + +static void sg_copy_buf(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes, int out) +{ + struct scatter_walk walk; + + if (!nbytes) + return; + + scatterwalk_start(&walk, sg); + scatterwalk_advance(&walk, start); + scatterwalk_copychunks(buf, &walk, nbytes, out); + scatterwalk_done(&walk, out, 0); +} + +static int omap_aes_crypt_dma(struct crypto_tfm *tfm, + struct scatterlist *in_sg, struct scatterlist *out_sg, + int in_sg_len, int out_sg_len) +{ + struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); + struct omap_aes_dev *dd = ctx->dd; + struct dma_async_tx_descriptor *tx_in, *tx_out; + struct dma_slave_config cfg; + int ret; + + if (dd->pio_only) { + scatterwalk_start(&dd->in_walk, dd->in_sg); + scatterwalk_start(&dd->out_walk, dd->out_sg); + + /* Enable DATAIN interrupt and let it take + care of the rest */ + omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2); + return 0; + } + + dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE); + + memset(&cfg, 0, sizeof(cfg)); + + cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0); + cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0); + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = DST_MAXBURST; + cfg.dst_maxburst = DST_MAXBURST; + + /* IN */ + ret = dmaengine_slave_config(dd->dma_lch_in, &cfg); 
+ if (ret) { + dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n", + ret); + return ret; + } + + tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx_in) { + dev_err(dd->dev, "IN prep_slave_sg() failed\n"); + return -EINVAL; + } + + /* No callback necessary */ + tx_in->callback_param = dd; + + /* OUT */ + ret = dmaengine_slave_config(dd->dma_lch_out, &cfg); + if (ret) { + dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n", + ret); + return ret; + } + + tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx_out) { + dev_err(dd->dev, "OUT prep_slave_sg() failed\n"); + return -EINVAL; + } + + tx_out->callback = omap_aes_dma_out_callback; + tx_out->callback_param = dd; + + dmaengine_submit(tx_in); + dmaengine_submit(tx_out); + + dma_async_issue_pending(dd->dma_lch_in); + dma_async_issue_pending(dd->dma_lch_out); + + /* start DMA */ + dd->pdata->trigger(dd, dd->total); + + return 0; +} + +static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm( + crypto_ablkcipher_reqtfm(dd->req)); + int err; + + pr_debug("total: %d\n", dd->total); + + if (!dd->pio_only) { + err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len, + DMA_TO_DEVICE); + if (!err) { + dev_err(dd->dev, "dma_map_sg() error\n"); + return -EINVAL; + } + + err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len, + DMA_FROM_DEVICE); + if (!err) { + dev_err(dd->dev, "dma_map_sg() error\n"); + return -EINVAL; + } + } + + err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len, + dd->out_sg_len); + if (err && !dd->pio_only) { + dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); + dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, + DMA_FROM_DEVICE); + } + + return err; +} + +static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) +{ + struct ablkcipher_request *req = dd->req; + + pr_debug("err: %d\n", err); + + dd->flags &= ~FLAGS_BUSY; + + req->base.complete(&req->base, err); +} + +static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) +{ + int err = 0; + + pr_debug("total: %d\n", dd->total); + + omap_aes_dma_stop(dd); + + dmaengine_terminate_all(dd->dma_lch_in); + dmaengine_terminate_all(dd->dma_lch_out); + + return err; +} + +static int omap_aes_check_aligned(struct scatterlist *sg, int total) +{ + int len = 0; + + while (sg) { + if (!IS_ALIGNED(sg->offset, 4)) + return -1; + if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE)) + return -1; + + len += sg->length; + sg = sg_next(sg); + } + + if (len != total) + return -1; + + return 0; +} + +static int omap_aes_copy_sgs(struct omap_aes_dev *dd) +{ + void *buf_in, *buf_out; + int pages; + + pages = get_order(dd->total); + + buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages); + buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages); + + if (!buf_in || !buf_out) { + pr_err("Couldn't allocated pages for unaligned cases.\n"); + return -1; + } + + dd->orig_out = dd->out_sg; + + sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0); + + sg_init_table(&dd->in_sgl, 1); + sg_set_buf(&dd->in_sgl, buf_in, dd->total); + dd->in_sg = &dd->in_sgl; + + sg_init_table(&dd->out_sgl, 1); + sg_set_buf(&dd->out_sgl, buf_out, dd->total); + dd->out_sg = &dd->out_sgl; + + return 0; +} + +static int omap_aes_handle_queue(struct omap_aes_dev *dd, + struct ablkcipher_request *req) +{ + struct crypto_async_request *async_req, *backlog; + struct omap_aes_ctx *ctx; + 
struct omap_aes_reqctx *rctx; + unsigned long flags; + int err, ret = 0; + + spin_lock_irqsave(&dd->lock, flags); + if (req) + ret = ablkcipher_enqueue_request(&dd->queue, req); + if (dd->flags & FLAGS_BUSY) { + spin_unlock_irqrestore(&dd->lock, flags); + return ret; + } + backlog = crypto_get_backlog(&dd->queue); + async_req = crypto_dequeue_request(&dd->queue); + if (async_req) + dd->flags |= FLAGS_BUSY; + spin_unlock_irqrestore(&dd->lock, flags); + + if (!async_req) + return ret; + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + + req = ablkcipher_request_cast(async_req); + + /* assign new request to device */ + dd->req = req; + dd->total = req->nbytes; + dd->total_save = req->nbytes; + dd->in_sg = req->src; + dd->out_sg = req->dst; + + if (omap_aes_check_aligned(dd->in_sg, dd->total) || + omap_aes_check_aligned(dd->out_sg, dd->total)) { + if (omap_aes_copy_sgs(dd)) + pr_err("Failed to copy SGs for unaligned cases\n"); + dd->sgs_copied = 1; + } else { + dd->sgs_copied = 0; + } + + dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total); + dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total); + BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0); + + rctx = ablkcipher_request_ctx(req); + ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); + rctx->mode &= FLAGS_MODE_MASK; + dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode; + + dd->ctx = ctx; + ctx->dd = dd; + + err = omap_aes_write_ctrl(dd); + if (!err) + err = omap_aes_crypt_dma_start(dd); + if (err) { + /* aes_task will not finish it, so do it here */ + omap_aes_finish_req(dd, err); + tasklet_schedule(&dd->queue_task); + } + + return ret; /* return ret, which is enqueue return value */ +} + +static void omap_aes_done_task(unsigned long data) +{ + struct omap_aes_dev *dd = (struct omap_aes_dev *)data; + void *buf_in, *buf_out; + int pages; + + pr_debug("enter done_task\n"); + + if (!dd->pio_only) { + dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len, + DMA_FROM_DEVICE); + dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); + dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, + DMA_FROM_DEVICE); + omap_aes_crypt_dma_stop(dd); + } + + if (dd->sgs_copied) { + buf_in = sg_virt(&dd->in_sgl); + buf_out = sg_virt(&dd->out_sgl); + + sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1); + + pages = get_order(dd->total_save); + free_pages((unsigned long)buf_in, pages); + free_pages((unsigned long)buf_out, pages); + } + + omap_aes_finish_req(dd, 0); + omap_aes_handle_queue(dd, NULL); + + pr_debug("exit\n"); +} + +static void omap_aes_queue_task(unsigned long data) +{ + struct omap_aes_dev *dd = (struct omap_aes_dev *)data; + + omap_aes_handle_queue(dd, NULL); +} + +static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode) +{ + struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( + crypto_ablkcipher_reqtfm(req)); + struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req); + struct omap_aes_dev *dd; + + pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes, + !!(mode & FLAGS_ENCRYPT), + !!(mode & FLAGS_CBC)); + + if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { + pr_err("request size is not exact amount of AES blocks\n"); + return -EINVAL; + } + + dd = omap_aes_find_dev(ctx); + if (!dd) + return -ENODEV; + + rctx->mode = mode; + + return omap_aes_handle_queue(dd, req); +} + +/* ********************** ALG API ************************************ */ + +static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct 
omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + + if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_256) + return -EINVAL; + + pr_debug("enter, keylen: %d\n", keylen); + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + return 0; +} + +static int omap_aes_ecb_encrypt(struct ablkcipher_request *req) +{ + return omap_aes_crypt(req, FLAGS_ENCRYPT); +} + +static int omap_aes_ecb_decrypt(struct ablkcipher_request *req) +{ + return omap_aes_crypt(req, 0); +} + +static int omap_aes_cbc_encrypt(struct ablkcipher_request *req) +{ + return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC); +} + +static int omap_aes_cbc_decrypt(struct ablkcipher_request *req) +{ + return omap_aes_crypt(req, FLAGS_CBC); +} + +static int omap_aes_ctr_encrypt(struct ablkcipher_request *req) +{ + return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR); +} + +static int omap_aes_ctr_decrypt(struct ablkcipher_request *req) +{ + return omap_aes_crypt(req, FLAGS_CTR); +} + +static int omap_aes_cra_init(struct crypto_tfm *tfm) +{ + struct omap_aes_dev *dd = NULL; + int err; + + /* Find AES device, currently picks the first device */ + spin_lock_bh(&list_lock); + list_for_each_entry(dd, &dev_list, list) { + break; + } + spin_unlock_bh(&list_lock); + + err = pm_runtime_get_sync(dd->dev); + if (err < 0) { + dev_err(dd->dev, "%s: failed to get_sync(%d)\n", + __func__, err); + return err; + } + + tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx); + + return 0; +} + +static void omap_aes_cra_exit(struct crypto_tfm *tfm) +{ + struct omap_aes_dev *dd = NULL; + + /* Find AES device, currently picks the first device */ + spin_lock_bh(&list_lock); + list_for_each_entry(dd, &dev_list, list) { + break; + } + spin_unlock_bh(&list_lock); + + pm_runtime_put_sync(dd->dev); +} + +/* ********************** ALGS ************************************ */ + +static struct crypto_alg algs_ecb_cbc[] = { +{ + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-omap", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_aes_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = omap_aes_cra_init, + .cra_exit = omap_aes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = omap_aes_setkey, + .encrypt = omap_aes_ecb_encrypt, + .decrypt = omap_aes_ecb_decrypt, + } +}, +{ + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-omap", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_aes_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = omap_aes_cra_init, + .cra_exit = omap_aes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = omap_aes_setkey, + .encrypt = omap_aes_cbc_encrypt, + .decrypt = omap_aes_cbc_decrypt, + } +} +}; + +static struct crypto_alg algs_ctr[] = { +{ + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr-aes-omap", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_aes_ctx), + .cra_alignmask = 0, + .cra_type = 
&crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = omap_aes_cra_init, + .cra_exit = omap_aes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .geniv = "eseqiv", + .ivsize = AES_BLOCK_SIZE, + .setkey = omap_aes_setkey, + .encrypt = omap_aes_ctr_encrypt, + .decrypt = omap_aes_ctr_decrypt, + } +} , +}; + +static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = { + { + .algs_list = algs_ecb_cbc, + .size = ARRAY_SIZE(algs_ecb_cbc), + }, +}; + +static const struct omap_aes_pdata omap_aes_pdata_omap2 = { + .algs_info = omap_aes_algs_info_ecb_cbc, + .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc), + .trigger = omap_aes_dma_trigger_omap2, + .key_ofs = 0x1c, + .iv_ofs = 0x20, + .ctrl_ofs = 0x30, + .data_ofs = 0x34, + .rev_ofs = 0x44, + .mask_ofs = 0x48, + .dma_enable_in = BIT(2), + .dma_enable_out = BIT(3), + .dma_start = BIT(5), + .major_mask = 0xf0, + .major_shift = 4, + .minor_mask = 0x0f, + .minor_shift = 0, +}; + +#ifdef CONFIG_OF +static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = { + { + .algs_list = algs_ecb_cbc, + .size = ARRAY_SIZE(algs_ecb_cbc), + }, + { + .algs_list = algs_ctr, + .size = ARRAY_SIZE(algs_ctr), + }, +}; + +static const struct omap_aes_pdata omap_aes_pdata_omap3 = { + .algs_info = omap_aes_algs_info_ecb_cbc_ctr, + .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr), + .trigger = omap_aes_dma_trigger_omap2, + .key_ofs = 0x1c, + .iv_ofs = 0x20, + .ctrl_ofs = 0x30, + .data_ofs = 0x34, + .rev_ofs = 0x44, + .mask_ofs = 0x48, + .dma_enable_in = BIT(2), + .dma_enable_out = BIT(3), + .dma_start = BIT(5), + .major_mask = 0xf0, + .major_shift = 4, + .minor_mask = 0x0f, + .minor_shift = 0, +}; + +static const struct omap_aes_pdata omap_aes_pdata_omap4 = { + .algs_info = omap_aes_algs_info_ecb_cbc_ctr, + .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr), + .trigger = omap_aes_dma_trigger_omap4, + .key_ofs = 0x3c, + .iv_ofs = 0x40, + .ctrl_ofs = 0x50, + .data_ofs = 0x60, + .rev_ofs = 0x80, + .mask_ofs = 0x84, + .irq_status_ofs = 0x8c, + .irq_enable_ofs = 0x90, + .dma_enable_in = BIT(5), + .dma_enable_out = BIT(6), + .major_mask = 0x0700, + .major_shift = 8, + .minor_mask = 0x003f, + .minor_shift = 0, +}; + +static irqreturn_t omap_aes_irq(int irq, void *dev_id) +{ + struct omap_aes_dev *dd = dev_id; + u32 status, i; + u32 *src, *dst; + + status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd)); + if (status & AES_REG_IRQ_DATA_IN) { + omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0); + + BUG_ON(!dd->in_sg); + + BUG_ON(_calc_walked(in) > dd->in_sg->length); + + src = sg_virt(dd->in_sg) + _calc_walked(in); + + for (i = 0; i < AES_BLOCK_WORDS; i++) { + omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src); + + scatterwalk_advance(&dd->in_walk, 4); + if (dd->in_sg->length == _calc_walked(in)) { + dd->in_sg = sg_next(dd->in_sg); + if (dd->in_sg) { + scatterwalk_start(&dd->in_walk, + dd->in_sg); + src = sg_virt(dd->in_sg) + + _calc_walked(in); + } + } else { + src++; + } + } + + /* Clear IRQ status */ + status &= ~AES_REG_IRQ_DATA_IN; + omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status); + + /* Enable DATA_OUT interrupt */ + omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4); + + } else if (status & AES_REG_IRQ_DATA_OUT) { + omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0); + + BUG_ON(!dd->out_sg); + + BUG_ON(_calc_walked(out) > dd->out_sg->length); + + dst = sg_virt(dd->out_sg) + _calc_walked(out); + + for (i = 0; i < AES_BLOCK_WORDS; i++) { + *dst = omap_aes_read(dd, 
AES_REG_DATA_N(dd, i)); + scatterwalk_advance(&dd->out_walk, 4); + if (dd->out_sg->length == _calc_walked(out)) { + dd->out_sg = sg_next(dd->out_sg); + if (dd->out_sg) { + scatterwalk_start(&dd->out_walk, + dd->out_sg); + dst = sg_virt(dd->out_sg) + + _calc_walked(out); + } + } else { + dst++; + } + } + + dd->total -= AES_BLOCK_SIZE; + + BUG_ON(dd->total < 0); + + /* Clear IRQ status */ + status &= ~AES_REG_IRQ_DATA_OUT; + omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status); + + if (!dd->total) + /* All bytes read! */ + tasklet_schedule(&dd->done_task); + else + /* Enable DATA_IN interrupt for next block */ + omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2); + } + + return IRQ_HANDLED; +} + +static const struct of_device_id omap_aes_of_match[] = { + { + .compatible = "ti,omap2-aes", + .data = &omap_aes_pdata_omap2, + }, + { + .compatible = "ti,omap3-aes", + .data = &omap_aes_pdata_omap3, + }, + { + .compatible = "ti,omap4-aes", + .data = &omap_aes_pdata_omap4, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_aes_of_match); + +static int omap_aes_get_res_of(struct omap_aes_dev *dd, + struct device *dev, struct resource *res) +{ + struct device_node *node = dev->of_node; + const struct of_device_id *match; + int err = 0; + + match = of_match_device(of_match_ptr(omap_aes_of_match), dev); + if (!match) { + dev_err(dev, "no compatible OF match\n"); + err = -EINVAL; + goto err; + } + + err = of_address_to_resource(node, 0, res); + if (err < 0) { + dev_err(dev, "can't translate OF node address\n"); + err = -EINVAL; + goto err; + } + + dd->dma_out = -1; /* Dummy value that's unused */ + dd->dma_in = -1; /* Dummy value that's unused */ + + dd->pdata = match->data; + +err: + return err; +} +#else +static const struct of_device_id omap_aes_of_match[] = { + {}, +}; + +static int omap_aes_get_res_of(struct omap_aes_dev *dd, + struct device *dev, struct resource *res) +{ + return -EINVAL; +} +#endif + +static int omap_aes_get_res_pdev(struct omap_aes_dev *dd, + struct platform_device *pdev, struct resource *res) +{ + struct device *dev = &pdev->dev; + struct resource *r; + int err = 0; + + /* Get the base address */ + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(dev, "no MEM resource info\n"); + err = -ENODEV; + goto err; + } + memcpy(res, r, sizeof(*res)); + + /* Get the DMA out channel */ + r = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (!r) { + dev_err(dev, "no DMA out resource info\n"); + err = -ENODEV; + goto err; + } + dd->dma_out = r->start; + + /* Get the DMA in channel */ + r = platform_get_resource(pdev, IORESOURCE_DMA, 1); + if (!r) { + dev_err(dev, "no DMA in resource info\n"); + err = -ENODEV; + goto err; + } + dd->dma_in = r->start; + + /* Only OMAP2/3 can be non-DT */ + dd->pdata = &omap_aes_pdata_omap2; + +err: + return err; +} + +static int omap_aes_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct omap_aes_dev *dd; + struct crypto_alg *algp; + struct resource res; + int err = -ENOMEM, i, j, irq = -1; + u32 reg; + + dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL); + if (dd == NULL) { + dev_err(dev, "unable to alloc data struct.\n"); + goto err_data; + } + dd->dev = dev; + platform_set_drvdata(pdev, dd); + + spin_lock_init(&dd->lock); + crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH); + + err = (dev->of_node) ? 
omap_aes_get_res_of(dd, dev, &res) : + omap_aes_get_res_pdev(dd, pdev, &res); + if (err) + goto err_res; + + dd->io_base = devm_ioremap_resource(dev, &res); + if (IS_ERR(dd->io_base)) { + err = PTR_ERR(dd->io_base); + goto err_res; + } + dd->phys_base = res.start; + + pm_runtime_enable(dev); + err = pm_runtime_get_sync(dev); + if (err < 0) { + dev_err(dev, "%s: failed to get_sync(%d)\n", + __func__, err); + goto err_res; + } + + omap_aes_dma_stop(dd); + + reg = omap_aes_read(dd, AES_REG_REV(dd)); + + pm_runtime_put_sync(dev); + + dev_info(dev, "OMAP AES hw accel rev: %u.%u\n", + (reg & dd->pdata->major_mask) >> dd->pdata->major_shift, + (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift); + + tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd); + tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd); + + err = omap_aes_dma_init(dd); + if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) { + dd->pio_only = 1; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "can't get IRQ resource\n"); + goto err_irq; + } + + err = devm_request_irq(dev, irq, omap_aes_irq, 0, + dev_name(dev), dd); + if (err) { + dev_err(dev, "Unable to grab omap-aes IRQ\n"); + goto err_irq; + } + } + + + INIT_LIST_HEAD(&dd->list); + spin_lock(&list_lock); + list_add_tail(&dd->list, &dev_list); + spin_unlock(&list_lock); + + for (i = 0; i < dd->pdata->algs_info_size; i++) { + for (j = 0; j < dd->pdata->algs_info[i].size; j++) { + algp = &dd->pdata->algs_info[i].algs_list[j]; + + pr_debug("reg alg: %s\n", algp->cra_name); + INIT_LIST_HEAD(&algp->cra_list); + + err = crypto_register_alg(algp); + if (err) + goto err_algs; + + dd->pdata->algs_info[i].registered++; + } + } + + return 0; +err_algs: + for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) + for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) + crypto_unregister_alg( + &dd->pdata->algs_info[i].algs_list[j]); + if (!dd->pio_only) + omap_aes_dma_cleanup(dd); +err_irq: + tasklet_kill(&dd->done_task); + tasklet_kill(&dd->queue_task); + pm_runtime_disable(dev); +err_res: + dd = NULL; +err_data: + dev_err(dev, "initialization failed.\n"); + return err; +} + +static int omap_aes_remove(struct platform_device *pdev) +{ + struct omap_aes_dev *dd = platform_get_drvdata(pdev); + int i, j; + + if (!dd) + return -ENODEV; + + spin_lock(&list_lock); + list_del(&dd->list); + spin_unlock(&list_lock); + + for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) + for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) + crypto_unregister_alg( + &dd->pdata->algs_info[i].algs_list[j]); + + tasklet_kill(&dd->done_task); + tasklet_kill(&dd->queue_task); + omap_aes_dma_cleanup(dd); + pm_runtime_disable(dd->dev); + dd = NULL; + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int omap_aes_suspend(struct device *dev) +{ + pm_runtime_put_sync(dev); + return 0; +} + +static int omap_aes_resume(struct device *dev) +{ + pm_runtime_get_sync(dev); + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume); + +static struct platform_driver omap_aes_driver = { + .probe = omap_aes_probe, + .remove = omap_aes_remove, + .driver = { + .name = "omap-aes", + .pm = &omap_aes_pm_ops, + .of_match_table = omap_aes_of_match, + }, +}; + +module_platform_driver(omap_aes_driver); + +MODULE_DESCRIPTION("OMAP AES hw acceleration support."); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Dmitry Kasatkin"); + diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c new file mode 100644 index 
000000000..46307098f --- /dev/null +++ b/drivers/crypto/omap-des.c @@ -0,0 +1,1234 @@ +/* + * Support for OMAP DES and Triple DES HW acceleration. + * + * Copyright (c) 2013 Texas Instruments Incorporated + * Author: Joel Fernandes <joelf@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#ifdef DEBUG +#define prn(num) printk(#num "=%d\n", num) +#define prx(num) printk(#num "=%x\n", num) +#else +#define prn(num) do { } while (0) +#define prx(num) do { } while (0) +#endif + +#include <linux/err.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/omap-dma.h> +#include <linux/pm_runtime.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_address.h> +#include <linux/io.h> +#include <linux/crypto.h> +#include <linux/interrupt.h> +#include <crypto/scatterwalk.h> +#include <crypto/des.h> + +#define DST_MAXBURST 2 + +#define DES_BLOCK_WORDS (DES_BLOCK_SIZE >> 2) + +#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset) + +#define DES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \ + ((x ^ 0x01) * 0x04)) + +#define DES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04)) + +#define DES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs) +#define DES_REG_CTRL_CBC BIT(4) +#define DES_REG_CTRL_TDES BIT(3) +#define DES_REG_CTRL_DIRECTION BIT(2) +#define DES_REG_CTRL_INPUT_READY BIT(1) +#define DES_REG_CTRL_OUTPUT_READY BIT(0) + +#define DES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04)) + +#define DES_REG_REV(dd) ((dd)->pdata->rev_ofs) + +#define DES_REG_MASK(dd) ((dd)->pdata->mask_ofs) + +#define DES_REG_LENGTH_N(x) (0x24 + ((x) * 0x04)) + +#define DES_REG_IRQ_STATUS(dd) ((dd)->pdata->irq_status_ofs) +#define DES_REG_IRQ_ENABLE(dd) ((dd)->pdata->irq_enable_ofs) +#define DES_REG_IRQ_DATA_IN BIT(1) +#define DES_REG_IRQ_DATA_OUT BIT(2) + +#define FLAGS_MODE_MASK 0x000f +#define FLAGS_ENCRYPT BIT(0) +#define FLAGS_CBC BIT(1) +#define FLAGS_INIT BIT(4) +#define FLAGS_BUSY BIT(6) + +struct omap_des_ctx { + struct omap_des_dev *dd; + + int keylen; + u32 key[(3 * DES_KEY_SIZE) / sizeof(u32)]; + unsigned long flags; +}; + +struct omap_des_reqctx { + unsigned long mode; +}; + +#define OMAP_DES_QUEUE_LENGTH 1 +#define OMAP_DES_CACHE_SIZE 0 + +struct omap_des_algs_info { + struct crypto_alg *algs_list; + unsigned int size; + unsigned int registered; +}; + +struct omap_des_pdata { + struct omap_des_algs_info *algs_info; + unsigned int algs_info_size; + + void (*trigger)(struct omap_des_dev *dd, int length); + + u32 key_ofs; + u32 iv_ofs; + u32 ctrl_ofs; + u32 data_ofs; + u32 rev_ofs; + u32 mask_ofs; + u32 irq_enable_ofs; + u32 irq_status_ofs; + + u32 dma_enable_in; + u32 dma_enable_out; + u32 dma_start; + + u32 major_mask; + u32 major_shift; + u32 minor_mask; + u32 minor_shift; +}; + +struct omap_des_dev { + struct list_head list; + unsigned long phys_base; + void __iomem *io_base; + struct omap_des_ctx *ctx; + struct device *dev; + unsigned long flags; + int err; + + /* spinlock used for queues */ + spinlock_t lock; + struct crypto_queue queue; + + struct tasklet_struct done_task; + struct tasklet_struct queue_task; + + struct ablkcipher_request *req; + /* + * total is used by 
PIO mode for book keeping so introduce + * variable total_save as need it to calc page_order + */ + size_t total; + size_t total_save; + + struct scatterlist *in_sg; + struct scatterlist *out_sg; + + /* Buffers for copying for unaligned cases */ + struct scatterlist in_sgl; + struct scatterlist out_sgl; + struct scatterlist *orig_out; + int sgs_copied; + + struct scatter_walk in_walk; + struct scatter_walk out_walk; + int dma_in; + struct dma_chan *dma_lch_in; + int dma_out; + struct dma_chan *dma_lch_out; + int in_sg_len; + int out_sg_len; + int pio_only; + const struct omap_des_pdata *pdata; +}; + +/* keep registered devices data here */ +static LIST_HEAD(dev_list); +static DEFINE_SPINLOCK(list_lock); + +#ifdef DEBUG +#define omap_des_read(dd, offset) \ + ({ \ + int _read_ret; \ + _read_ret = __raw_readl(dd->io_base + offset); \ + pr_err("omap_des_read(" #offset "=%#x)= %#x\n", \ + offset, _read_ret); \ + _read_ret; \ + }) +#else +static inline u32 omap_des_read(struct omap_des_dev *dd, u32 offset) +{ + return __raw_readl(dd->io_base + offset); +} +#endif + +#ifdef DEBUG +#define omap_des_write(dd, offset, value) \ + do { \ + pr_err("omap_des_write(" #offset "=%#x) value=%#x\n", \ + offset, value); \ + __raw_writel(value, dd->io_base + offset); \ + } while (0) +#else +static inline void omap_des_write(struct omap_des_dev *dd, u32 offset, + u32 value) +{ + __raw_writel(value, dd->io_base + offset); +} +#endif + +static inline void omap_des_write_mask(struct omap_des_dev *dd, u32 offset, + u32 value, u32 mask) +{ + u32 val; + + val = omap_des_read(dd, offset); + val &= ~mask; + val |= value; + omap_des_write(dd, offset, val); +} + +static void omap_des_write_n(struct omap_des_dev *dd, u32 offset, + u32 *value, int count) +{ + for (; count--; value++, offset += 4) + omap_des_write(dd, offset, *value); +} + +static int omap_des_hw_init(struct omap_des_dev *dd) +{ + int err; + + /* + * clocks are enabled when request starts and disabled when finished. + * It may be long delays between requests. + * Device might go to off mode to save power. 
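+	 * pm_runtime_get_sync() powers the module back up here; the matching
+	 * pm_runtime_put() is issued from omap_des_finish_req() once the
+	 * request completes.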
+ */ + err = pm_runtime_get_sync(dd->dev); + if (err < 0) { + pm_runtime_put_noidle(dd->dev); + dev_err(dd->dev, "%s: failed to get_sync(%d)\n", __func__, err); + return err; + } + + if (!(dd->flags & FLAGS_INIT)) { + dd->flags |= FLAGS_INIT; + dd->err = 0; + } + + return 0; +} + +static int omap_des_write_ctrl(struct omap_des_dev *dd) +{ + unsigned int key32; + int i, err; + u32 val = 0, mask = 0; + + err = omap_des_hw_init(dd); + if (err) + return err; + + key32 = dd->ctx->keylen / sizeof(u32); + + /* it seems a key should always be set even if it has not changed */ + for (i = 0; i < key32; i++) { + omap_des_write(dd, DES_REG_KEY(dd, i), + __le32_to_cpu(dd->ctx->key[i])); + } + + if ((dd->flags & FLAGS_CBC) && dd->req->info) + omap_des_write_n(dd, DES_REG_IV(dd, 0), dd->req->info, 2); + + if (dd->flags & FLAGS_CBC) + val |= DES_REG_CTRL_CBC; + if (dd->flags & FLAGS_ENCRYPT) + val |= DES_REG_CTRL_DIRECTION; + if (key32 == 6) + val |= DES_REG_CTRL_TDES; + + mask |= DES_REG_CTRL_CBC | DES_REG_CTRL_DIRECTION | DES_REG_CTRL_TDES; + + omap_des_write_mask(dd, DES_REG_CTRL(dd), val, mask); + + return 0; +} + +static void omap_des_dma_trigger_omap4(struct omap_des_dev *dd, int length) +{ + u32 mask, val; + + omap_des_write(dd, DES_REG_LENGTH_N(0), length); + + val = dd->pdata->dma_start; + + if (dd->dma_lch_out != NULL) + val |= dd->pdata->dma_enable_out; + if (dd->dma_lch_in != NULL) + val |= dd->pdata->dma_enable_in; + + mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in | + dd->pdata->dma_start; + + omap_des_write_mask(dd, DES_REG_MASK(dd), val, mask); +} + +static void omap_des_dma_stop(struct omap_des_dev *dd) +{ + u32 mask; + + mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in | + dd->pdata->dma_start; + + omap_des_write_mask(dd, DES_REG_MASK(dd), 0, mask); +} + +static struct omap_des_dev *omap_des_find_dev(struct omap_des_ctx *ctx) +{ + struct omap_des_dev *dd = NULL, *tmp; + + spin_lock_bh(&list_lock); + if (!ctx->dd) { + list_for_each_entry(tmp, &dev_list, list) { + /* FIXME: take fist available des core */ + dd = tmp; + break; + } + ctx->dd = dd; + } else { + /* already found before */ + dd = ctx->dd; + } + spin_unlock_bh(&list_lock); + + return dd; +} + +static void omap_des_dma_out_callback(void *data) +{ + struct omap_des_dev *dd = data; + + /* dma_lch_out - completed */ + tasklet_schedule(&dd->done_task); +} + +static int omap_des_dma_init(struct omap_des_dev *dd) +{ + int err = -ENOMEM; + dma_cap_mask_t mask; + + dd->dma_lch_out = NULL; + dd->dma_lch_in = NULL; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + dd->dma_lch_in = dma_request_slave_channel_compat(mask, + omap_dma_filter_fn, + &dd->dma_in, + dd->dev, "rx"); + if (!dd->dma_lch_in) { + dev_err(dd->dev, "Unable to request in DMA channel\n"); + goto err_dma_in; + } + + dd->dma_lch_out = dma_request_slave_channel_compat(mask, + omap_dma_filter_fn, + &dd->dma_out, + dd->dev, "tx"); + if (!dd->dma_lch_out) { + dev_err(dd->dev, "Unable to request out DMA channel\n"); + goto err_dma_out; + } + + return 0; + +err_dma_out: + dma_release_channel(dd->dma_lch_in); +err_dma_in: + if (err) + pr_err("error: %d\n", err); + return err; +} + +static void omap_des_dma_cleanup(struct omap_des_dev *dd) +{ + dma_release_channel(dd->dma_lch_out); + dma_release_channel(dd->dma_lch_in); +} + +static void sg_copy_buf(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes, int out) +{ + struct scatter_walk walk; + + if (!nbytes) + return; + + scatterwalk_start(&walk, sg); + 
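+	/* skip 'start' bytes into the scatterlist, then copy 'nbytes';
+	 * 'out' selects the direction of the copy */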
scatterwalk_advance(&walk, start); + scatterwalk_copychunks(buf, &walk, nbytes, out); + scatterwalk_done(&walk, out, 0); +} + +static int omap_des_crypt_dma(struct crypto_tfm *tfm, + struct scatterlist *in_sg, struct scatterlist *out_sg, + int in_sg_len, int out_sg_len) +{ + struct omap_des_ctx *ctx = crypto_tfm_ctx(tfm); + struct omap_des_dev *dd = ctx->dd; + struct dma_async_tx_descriptor *tx_in, *tx_out; + struct dma_slave_config cfg; + int ret; + + if (dd->pio_only) { + scatterwalk_start(&dd->in_walk, dd->in_sg); + scatterwalk_start(&dd->out_walk, dd->out_sg); + + /* Enable DATAIN interrupt and let it take + care of the rest */ + omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x2); + return 0; + } + + dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE); + + memset(&cfg, 0, sizeof(cfg)); + + cfg.src_addr = dd->phys_base + DES_REG_DATA_N(dd, 0); + cfg.dst_addr = dd->phys_base + DES_REG_DATA_N(dd, 0); + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = DST_MAXBURST; + cfg.dst_maxburst = DST_MAXBURST; + + /* IN */ + ret = dmaengine_slave_config(dd->dma_lch_in, &cfg); + if (ret) { + dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n", + ret); + return ret; + } + + tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx_in) { + dev_err(dd->dev, "IN prep_slave_sg() failed\n"); + return -EINVAL; + } + + /* No callback necessary */ + tx_in->callback_param = dd; + + /* OUT */ + ret = dmaengine_slave_config(dd->dma_lch_out, &cfg); + if (ret) { + dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n", + ret); + return ret; + } + + tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx_out) { + dev_err(dd->dev, "OUT prep_slave_sg() failed\n"); + return -EINVAL; + } + + tx_out->callback = omap_des_dma_out_callback; + tx_out->callback_param = dd; + + dmaengine_submit(tx_in); + dmaengine_submit(tx_out); + + dma_async_issue_pending(dd->dma_lch_in); + dma_async_issue_pending(dd->dma_lch_out); + + /* start DMA */ + dd->pdata->trigger(dd, dd->total); + + return 0; +} + +static int omap_des_crypt_dma_start(struct omap_des_dev *dd) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm( + crypto_ablkcipher_reqtfm(dd->req)); + int err; + + pr_debug("total: %d\n", dd->total); + + if (!dd->pio_only) { + err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len, + DMA_TO_DEVICE); + if (!err) { + dev_err(dd->dev, "dma_map_sg() error\n"); + return -EINVAL; + } + + err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len, + DMA_FROM_DEVICE); + if (!err) { + dev_err(dd->dev, "dma_map_sg() error\n"); + return -EINVAL; + } + } + + err = omap_des_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len, + dd->out_sg_len); + if (err && !dd->pio_only) { + dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); + dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, + DMA_FROM_DEVICE); + } + + return err; +} + +static void omap_des_finish_req(struct omap_des_dev *dd, int err) +{ + struct ablkcipher_request *req = dd->req; + + pr_debug("err: %d\n", err); + + pm_runtime_put(dd->dev); + dd->flags &= ~FLAGS_BUSY; + + req->base.complete(&req->base, err); +} + +static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) +{ + int err = 0; + + pr_debug("total: %d\n", dd->total); + + omap_des_dma_stop(dd); + + dmaengine_terminate_all(dd->dma_lch_in); + dmaengine_terminate_all(dd->dma_lch_out); 
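+	/* both channels are quiesced, so the scatterlists can be unmapped */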
+ + dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); + dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE); + + return err; +} + +static int omap_des_copy_needed(struct scatterlist *sg) +{ + while (sg) { + if (!IS_ALIGNED(sg->offset, 4)) + return -1; + if (!IS_ALIGNED(sg->length, DES_BLOCK_SIZE)) + return -1; + sg = sg_next(sg); + } + return 0; +} + +static int omap_des_copy_sgs(struct omap_des_dev *dd) +{ + void *buf_in, *buf_out; + int pages; + + pages = dd->total >> PAGE_SHIFT; + + if (dd->total & (PAGE_SIZE-1)) + pages++; + + BUG_ON(!pages); + + buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages); + buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages); + + if (!buf_in || !buf_out) { + pr_err("Couldn't allocated pages for unaligned cases.\n"); + return -1; + } + + dd->orig_out = dd->out_sg; + + sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0); + + sg_init_table(&dd->in_sgl, 1); + sg_set_buf(&dd->in_sgl, buf_in, dd->total); + dd->in_sg = &dd->in_sgl; + + sg_init_table(&dd->out_sgl, 1); + sg_set_buf(&dd->out_sgl, buf_out, dd->total); + dd->out_sg = &dd->out_sgl; + + return 0; +} + +static int omap_des_handle_queue(struct omap_des_dev *dd, + struct ablkcipher_request *req) +{ + struct crypto_async_request *async_req, *backlog; + struct omap_des_ctx *ctx; + struct omap_des_reqctx *rctx; + unsigned long flags; + int err, ret = 0; + + spin_lock_irqsave(&dd->lock, flags); + if (req) + ret = ablkcipher_enqueue_request(&dd->queue, req); + if (dd->flags & FLAGS_BUSY) { + spin_unlock_irqrestore(&dd->lock, flags); + return ret; + } + backlog = crypto_get_backlog(&dd->queue); + async_req = crypto_dequeue_request(&dd->queue); + if (async_req) + dd->flags |= FLAGS_BUSY; + spin_unlock_irqrestore(&dd->lock, flags); + + if (!async_req) + return ret; + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + + req = ablkcipher_request_cast(async_req); + + /* assign new request to device */ + dd->req = req; + dd->total = req->nbytes; + dd->total_save = req->nbytes; + dd->in_sg = req->src; + dd->out_sg = req->dst; + + if (omap_des_copy_needed(dd->in_sg) || + omap_des_copy_needed(dd->out_sg)) { + if (omap_des_copy_sgs(dd)) + pr_err("Failed to copy SGs for unaligned cases\n"); + dd->sgs_copied = 1; + } else { + dd->sgs_copied = 0; + } + + dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total); + dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total); + BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0); + + rctx = ablkcipher_request_ctx(req); + ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); + rctx->mode &= FLAGS_MODE_MASK; + dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode; + + dd->ctx = ctx; + ctx->dd = dd; + + err = omap_des_write_ctrl(dd); + if (!err) + err = omap_des_crypt_dma_start(dd); + if (err) { + /* des_task will not finish it, so do it here */ + omap_des_finish_req(dd, err); + tasklet_schedule(&dd->queue_task); + } + + return ret; /* return ret, which is enqueue return value */ +} + +static void omap_des_done_task(unsigned long data) +{ + struct omap_des_dev *dd = (struct omap_des_dev *)data; + void *buf_in, *buf_out; + int pages; + + pr_debug("enter done_task\n"); + + if (!dd->pio_only) { + dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len, + DMA_FROM_DEVICE); + dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); + dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, + DMA_FROM_DEVICE); + omap_des_crypt_dma_stop(dd); + } + + if (dd->sgs_copied) { + buf_in = sg_virt(&dd->in_sgl); + buf_out = sg_virt(&dd->out_sgl); 
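		/*
		 * Editorial note (not in the original source): this branch
		 * undoes omap_des_copy_sgs() - the data the engine wrote into
		 * the aligned bounce buffer is copied back into the caller's
		 * original scatterlist and the temporary pages are freed
		 * below.
		 */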
+ + sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1); + + pages = get_order(dd->total_save); + free_pages((unsigned long)buf_in, pages); + free_pages((unsigned long)buf_out, pages); + } + + omap_des_finish_req(dd, 0); + omap_des_handle_queue(dd, NULL); + + pr_debug("exit\n"); +} + +static void omap_des_queue_task(unsigned long data) +{ + struct omap_des_dev *dd = (struct omap_des_dev *)data; + + omap_des_handle_queue(dd, NULL); +} + +static int omap_des_crypt(struct ablkcipher_request *req, unsigned long mode) +{ + struct omap_des_ctx *ctx = crypto_ablkcipher_ctx( + crypto_ablkcipher_reqtfm(req)); + struct omap_des_reqctx *rctx = ablkcipher_request_ctx(req); + struct omap_des_dev *dd; + + pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes, + !!(mode & FLAGS_ENCRYPT), + !!(mode & FLAGS_CBC)); + + if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) { + pr_err("request size is not exact amount of DES blocks\n"); + return -EINVAL; + } + + dd = omap_des_find_dev(ctx); + if (!dd) + return -ENODEV; + + rctx->mode = mode; + + return omap_des_handle_queue(dd, req); +} + +/* ********************** ALG API ************************************ */ + +static int omap_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(tfm); + + if (keylen != DES_KEY_SIZE && keylen != (3*DES_KEY_SIZE)) + return -EINVAL; + + pr_debug("enter, keylen: %d\n", keylen); + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + return 0; +} + +static int omap_des_ecb_encrypt(struct ablkcipher_request *req) +{ + return omap_des_crypt(req, FLAGS_ENCRYPT); +} + +static int omap_des_ecb_decrypt(struct ablkcipher_request *req) +{ + return omap_des_crypt(req, 0); +} + +static int omap_des_cbc_encrypt(struct ablkcipher_request *req) +{ + return omap_des_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC); +} + +static int omap_des_cbc_decrypt(struct ablkcipher_request *req) +{ + return omap_des_crypt(req, FLAGS_CBC); +} + +static int omap_des_cra_init(struct crypto_tfm *tfm) +{ + pr_debug("enter\n"); + + tfm->crt_ablkcipher.reqsize = sizeof(struct omap_des_reqctx); + + return 0; +} + +static void omap_des_cra_exit(struct crypto_tfm *tfm) +{ + pr_debug("enter\n"); +} + +/* ********************** ALGS ************************************ */ + +static struct crypto_alg algs_ecb_cbc[] = { +{ + .cra_name = "ecb(des)", + .cra_driver_name = "ecb-des-omap", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_des_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = omap_des_cra_init, + .cra_exit = omap_des_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = omap_des_setkey, + .encrypt = omap_des_ecb_encrypt, + .decrypt = omap_des_ecb_decrypt, + } +}, +{ + .cra_name = "cbc(des)", + .cra_driver_name = "cbc-des-omap", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_des_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = omap_des_cra_init, + .cra_exit = omap_des_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = omap_des_setkey, + .encrypt = 
omap_des_cbc_encrypt, + .decrypt = omap_des_cbc_decrypt, + } +}, +{ + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3-omap", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_des_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = omap_des_cra_init, + .cra_exit = omap_des_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = 3*DES_KEY_SIZE, + .max_keysize = 3*DES_KEY_SIZE, + .setkey = omap_des_setkey, + .encrypt = omap_des_ecb_encrypt, + .decrypt = omap_des_ecb_decrypt, + } +}, +{ + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-des3-omap", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_des_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = omap_des_cra_init, + .cra_exit = omap_des_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = 3*DES_KEY_SIZE, + .max_keysize = 3*DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = omap_des_setkey, + .encrypt = omap_des_cbc_encrypt, + .decrypt = omap_des_cbc_decrypt, + } +} +}; + +static struct omap_des_algs_info omap_des_algs_info_ecb_cbc[] = { + { + .algs_list = algs_ecb_cbc, + .size = ARRAY_SIZE(algs_ecb_cbc), + }, +}; + +#ifdef CONFIG_OF +static const struct omap_des_pdata omap_des_pdata_omap4 = { + .algs_info = omap_des_algs_info_ecb_cbc, + .algs_info_size = ARRAY_SIZE(omap_des_algs_info_ecb_cbc), + .trigger = omap_des_dma_trigger_omap4, + .key_ofs = 0x14, + .iv_ofs = 0x18, + .ctrl_ofs = 0x20, + .data_ofs = 0x28, + .rev_ofs = 0x30, + .mask_ofs = 0x34, + .irq_status_ofs = 0x3c, + .irq_enable_ofs = 0x40, + .dma_enable_in = BIT(5), + .dma_enable_out = BIT(6), + .major_mask = 0x0700, + .major_shift = 8, + .minor_mask = 0x003f, + .minor_shift = 0, +}; + +static irqreturn_t omap_des_irq(int irq, void *dev_id) +{ + struct omap_des_dev *dd = dev_id; + u32 status, i; + u32 *src, *dst; + + status = omap_des_read(dd, DES_REG_IRQ_STATUS(dd)); + if (status & DES_REG_IRQ_DATA_IN) { + omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x0); + + BUG_ON(!dd->in_sg); + + BUG_ON(_calc_walked(in) > dd->in_sg->length); + + src = sg_virt(dd->in_sg) + _calc_walked(in); + + for (i = 0; i < DES_BLOCK_WORDS; i++) { + omap_des_write(dd, DES_REG_DATA_N(dd, i), *src); + + scatterwalk_advance(&dd->in_walk, 4); + if (dd->in_sg->length == _calc_walked(in)) { + dd->in_sg = sg_next(dd->in_sg); + if (dd->in_sg) { + scatterwalk_start(&dd->in_walk, + dd->in_sg); + src = sg_virt(dd->in_sg) + + _calc_walked(in); + } + } else { + src++; + } + } + + /* Clear IRQ status */ + status &= ~DES_REG_IRQ_DATA_IN; + omap_des_write(dd, DES_REG_IRQ_STATUS(dd), status); + + /* Enable DATA_OUT interrupt */ + omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x4); + + } else if (status & DES_REG_IRQ_DATA_OUT) { + omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x0); + + BUG_ON(!dd->out_sg); + + BUG_ON(_calc_walked(out) > dd->out_sg->length); + + dst = sg_virt(dd->out_sg) + _calc_walked(out); + + for (i = 0; i < DES_BLOCK_WORDS; i++) { + *dst = omap_des_read(dd, DES_REG_DATA_N(dd, i)); + scatterwalk_advance(&dd->out_walk, 4); + if (dd->out_sg->length == _calc_walked(out)) { + dd->out_sg = sg_next(dd->out_sg); + if (dd->out_sg) { + scatterwalk_start(&dd->out_walk, + dd->out_sg); + dst = sg_virt(dd->out_sg) + + 
_calc_walked(out); + } + } else { + dst++; + } + } + + BUG_ON(dd->total < DES_BLOCK_SIZE); + + dd->total -= DES_BLOCK_SIZE; + + /* Clear IRQ status */ + status &= ~DES_REG_IRQ_DATA_OUT; + omap_des_write(dd, DES_REG_IRQ_STATUS(dd), status); + + if (!dd->total) + /* All bytes read! */ + tasklet_schedule(&dd->done_task); + else + /* Enable DATA_IN interrupt for next block */ + omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x2); + } + + return IRQ_HANDLED; +} + +static const struct of_device_id omap_des_of_match[] = { + { + .compatible = "ti,omap4-des", + .data = &omap_des_pdata_omap4, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_des_of_match); + +static int omap_des_get_of(struct omap_des_dev *dd, + struct platform_device *pdev) +{ + const struct of_device_id *match; + + match = of_match_device(of_match_ptr(omap_des_of_match), &pdev->dev); + if (!match) { + dev_err(&pdev->dev, "no compatible OF match\n"); + return -EINVAL; + } + + dd->dma_out = -1; /* Dummy value that's unused */ + dd->dma_in = -1; /* Dummy value that's unused */ + dd->pdata = match->data; + + return 0; +} +#else +static int omap_des_get_of(struct omap_des_dev *dd, + struct device *dev) +{ + return -EINVAL; +} +#endif + +static int omap_des_get_pdev(struct omap_des_dev *dd, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *r; + int err = 0; + + /* Get the DMA out channel */ + r = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (!r) { + dev_err(dev, "no DMA out resource info\n"); + err = -ENODEV; + goto err; + } + dd->dma_out = r->start; + + /* Get the DMA in channel */ + r = platform_get_resource(pdev, IORESOURCE_DMA, 1); + if (!r) { + dev_err(dev, "no DMA in resource info\n"); + err = -ENODEV; + goto err; + } + dd->dma_in = r->start; + + /* non-DT devices get pdata from pdev */ + dd->pdata = pdev->dev.platform_data; + +err: + return err; +} + +static int omap_des_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct omap_des_dev *dd; + struct crypto_alg *algp; + struct resource *res; + int err = -ENOMEM, i, j, irq = -1; + u32 reg; + + dd = devm_kzalloc(dev, sizeof(struct omap_des_dev), GFP_KERNEL); + if (dd == NULL) { + dev_err(dev, "unable to alloc data struct.\n"); + goto err_data; + } + dd->dev = dev; + platform_set_drvdata(pdev, dd); + + spin_lock_init(&dd->lock); + crypto_init_queue(&dd->queue, OMAP_DES_QUEUE_LENGTH); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "no MEM resource info\n"); + goto err_res; + } + + err = (dev->of_node) ? 
omap_des_get_of(dd, pdev) : + omap_des_get_pdev(dd, pdev); + if (err) + goto err_res; + + dd->io_base = devm_ioremap_resource(dev, res); + if (IS_ERR(dd->io_base)) { + err = PTR_ERR(dd->io_base); + goto err_res; + } + dd->phys_base = res->start; + + pm_runtime_enable(dev); + err = pm_runtime_get_sync(dev); + if (err < 0) { + pm_runtime_put_noidle(dev); + dev_err(dd->dev, "%s: failed to get_sync(%d)\n", __func__, err); + goto err_get; + } + + omap_des_dma_stop(dd); + + reg = omap_des_read(dd, DES_REG_REV(dd)); + + pm_runtime_put_sync(dev); + + dev_info(dev, "OMAP DES hw accel rev: %u.%u\n", + (reg & dd->pdata->major_mask) >> dd->pdata->major_shift, + (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift); + + tasklet_init(&dd->done_task, omap_des_done_task, (unsigned long)dd); + tasklet_init(&dd->queue_task, omap_des_queue_task, (unsigned long)dd); + + err = omap_des_dma_init(dd); + if (err && DES_REG_IRQ_STATUS(dd) && DES_REG_IRQ_ENABLE(dd)) { + dd->pio_only = 1; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "can't get IRQ resource\n"); + goto err_irq; + } + + err = devm_request_irq(dev, irq, omap_des_irq, 0, + dev_name(dev), dd); + if (err) { + dev_err(dev, "Unable to grab omap-des IRQ\n"); + goto err_irq; + } + } + + + INIT_LIST_HEAD(&dd->list); + spin_lock(&list_lock); + list_add_tail(&dd->list, &dev_list); + spin_unlock(&list_lock); + + for (i = 0; i < dd->pdata->algs_info_size; i++) { + for (j = 0; j < dd->pdata->algs_info[i].size; j++) { + algp = &dd->pdata->algs_info[i].algs_list[j]; + + pr_debug("reg alg: %s\n", algp->cra_name); + INIT_LIST_HEAD(&algp->cra_list); + + err = crypto_register_alg(algp); + if (err) + goto err_algs; + + dd->pdata->algs_info[i].registered++; + } + } + + return 0; +err_algs: + for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) + for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) + crypto_unregister_alg( + &dd->pdata->algs_info[i].algs_list[j]); + if (!dd->pio_only) + omap_des_dma_cleanup(dd); +err_irq: + tasklet_kill(&dd->done_task); + tasklet_kill(&dd->queue_task); +err_get: + pm_runtime_disable(dev); +err_res: + dd = NULL; +err_data: + dev_err(dev, "initialization failed.\n"); + return err; +} + +static int omap_des_remove(struct platform_device *pdev) +{ + struct omap_des_dev *dd = platform_get_drvdata(pdev); + int i, j; + + if (!dd) + return -ENODEV; + + spin_lock(&list_lock); + list_del(&dd->list); + spin_unlock(&list_lock); + + for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) + for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) + crypto_unregister_alg( + &dd->pdata->algs_info[i].algs_list[j]); + + tasklet_kill(&dd->done_task); + tasklet_kill(&dd->queue_task); + omap_des_dma_cleanup(dd); + pm_runtime_disable(dd->dev); + dd = NULL; + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int omap_des_suspend(struct device *dev) +{ + pm_runtime_put_sync(dev); + return 0; +} + +static int omap_des_resume(struct device *dev) +{ + int err; + + err = pm_runtime_get_sync(dev); + if (err < 0) { + pm_runtime_put_noidle(dev); + dev_err(dev, "%s: failed to get_sync(%d)\n", __func__, err); + return err; + } + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(omap_des_pm_ops, omap_des_suspend, omap_des_resume); + +static struct platform_driver omap_des_driver = { + .probe = omap_des_probe, + .remove = omap_des_remove, + .driver = { + .name = "omap-des", + .pm = &omap_des_pm_ops, + .of_match_table = of_match_ptr(omap_des_of_match), + }, +}; + +module_platform_driver(omap_des_driver); + +MODULE_DESCRIPTION("OMAP DES hw 
acceleration support."); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Joel Fernandes <joelf@ti.com>"); diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c new file mode 100644 index 000000000..4d63e0d4d --- /dev/null +++ b/drivers/crypto/omap-sham.c @@ -0,0 +1,2044 @@ +/* + * Cryptographic API. + * + * Support for OMAP SHA1/MD5 HW acceleration. + * + * Copyright (c) 2010 Nokia Corporation + * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com> + * Copyright (c) 2011 Texas Instruments Incorporated + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Some ideas are from old omap-sha1-md5.c driver. + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include <linux/err.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/omap-dma.h> +#include <linux/pm_runtime.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/delay.h> +#include <linux/crypto.h> +#include <linux/cryptohash.h> +#include <crypto/scatterwalk.h> +#include <crypto/algapi.h> +#include <crypto/sha.h> +#include <crypto/hash.h> +#include <crypto/internal/hash.h> + +#define MD5_DIGEST_SIZE 16 + +#define SHA_REG_IDIGEST(dd, x) ((dd)->pdata->idigest_ofs + ((x)*0x04)) +#define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04)) +#define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs) + +#define SHA_REG_ODIGEST(dd, x) ((dd)->pdata->odigest_ofs + (x * 0x04)) + +#define SHA_REG_CTRL 0x18 +#define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5) +#define SHA_REG_CTRL_CLOSE_HASH (1 << 4) +#define SHA_REG_CTRL_ALGO_CONST (1 << 3) +#define SHA_REG_CTRL_ALGO (1 << 2) +#define SHA_REG_CTRL_INPUT_READY (1 << 1) +#define SHA_REG_CTRL_OUTPUT_READY (1 << 0) + +#define SHA_REG_REV(dd) ((dd)->pdata->rev_ofs) + +#define SHA_REG_MASK(dd) ((dd)->pdata->mask_ofs) +#define SHA_REG_MASK_DMA_EN (1 << 3) +#define SHA_REG_MASK_IT_EN (1 << 2) +#define SHA_REG_MASK_SOFTRESET (1 << 1) +#define SHA_REG_AUTOIDLE (1 << 0) + +#define SHA_REG_SYSSTATUS(dd) ((dd)->pdata->sysstatus_ofs) +#define SHA_REG_SYSSTATUS_RESETDONE (1 << 0) + +#define SHA_REG_MODE(dd) ((dd)->pdata->mode_ofs) +#define SHA_REG_MODE_HMAC_OUTER_HASH (1 << 7) +#define SHA_REG_MODE_HMAC_KEY_PROC (1 << 5) +#define SHA_REG_MODE_CLOSE_HASH (1 << 4) +#define SHA_REG_MODE_ALGO_CONSTANT (1 << 3) + +#define SHA_REG_MODE_ALGO_MASK (7 << 0) +#define SHA_REG_MODE_ALGO_MD5_128 (0 << 1) +#define SHA_REG_MODE_ALGO_SHA1_160 (1 << 1) +#define SHA_REG_MODE_ALGO_SHA2_224 (2 << 1) +#define SHA_REG_MODE_ALGO_SHA2_256 (3 << 1) +#define SHA_REG_MODE_ALGO_SHA2_384 (1 << 0) +#define SHA_REG_MODE_ALGO_SHA2_512 (3 << 0) + +#define SHA_REG_LENGTH(dd) ((dd)->pdata->length_ofs) + +#define SHA_REG_IRQSTATUS 0x118 +#define SHA_REG_IRQSTATUS_CTX_RDY (1 << 3) +#define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2) +#define SHA_REG_IRQSTATUS_INPUT_RDY (1 << 1) +#define SHA_REG_IRQSTATUS_OUTPUT_RDY (1 << 0) + +#define SHA_REG_IRQENA 0x11C +#define SHA_REG_IRQENA_CTX_RDY (1 << 3) +#define SHA_REG_IRQENA_PARTHASH_RDY (1 << 2) +#define SHA_REG_IRQENA_INPUT_RDY (1 << 1) +#define SHA_REG_IRQENA_OUTPUT_RDY (1 << 0) + 
+#define DEFAULT_TIMEOUT_INTERVAL HZ + +/* mostly device flags */ +#define FLAGS_BUSY 0 +#define FLAGS_FINAL 1 +#define FLAGS_DMA_ACTIVE 2 +#define FLAGS_OUTPUT_READY 3 +#define FLAGS_INIT 4 +#define FLAGS_CPU 5 +#define FLAGS_DMA_READY 6 +#define FLAGS_AUTO_XOR 7 +#define FLAGS_BE32_SHA1 8 +/* context flags */ +#define FLAGS_FINUP 16 +#define FLAGS_SG 17 + +#define FLAGS_MODE_SHIFT 18 +#define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT) +#define FLAGS_MODE_MD5 (SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT) +#define FLAGS_MODE_SHA1 (SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT) +#define FLAGS_MODE_SHA224 (SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT) +#define FLAGS_MODE_SHA256 (SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT) +#define FLAGS_MODE_SHA384 (SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT) +#define FLAGS_MODE_SHA512 (SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT) + +#define FLAGS_HMAC 21 +#define FLAGS_ERROR 22 + +#define OP_UPDATE 1 +#define OP_FINAL 2 + +#define OMAP_ALIGN_MASK (sizeof(u32)-1) +#define OMAP_ALIGNED __attribute__((aligned(sizeof(u32)))) + +#define BUFLEN PAGE_SIZE + +struct omap_sham_dev; + +struct omap_sham_reqctx { + struct omap_sham_dev *dd; + unsigned long flags; + unsigned long op; + + u8 digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED; + size_t digcnt; + size_t bufcnt; + size_t buflen; + dma_addr_t dma_addr; + + /* walk state */ + struct scatterlist *sg; + struct scatterlist sgl; + unsigned int offset; /* offset in current sg */ + unsigned int total; /* total request */ + + u8 buffer[0] OMAP_ALIGNED; +}; + +struct omap_sham_hmac_ctx { + struct crypto_shash *shash; + u8 ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED; + u8 opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED; +}; + +struct omap_sham_ctx { + struct omap_sham_dev *dd; + + unsigned long flags; + + /* fallback stuff */ + struct crypto_shash *fallback; + + struct omap_sham_hmac_ctx base[0]; +}; + +#define OMAP_SHAM_QUEUE_LENGTH 1 + +struct omap_sham_algs_info { + struct ahash_alg *algs_list; + unsigned int size; + unsigned int registered; +}; + +struct omap_sham_pdata { + struct omap_sham_algs_info *algs_info; + unsigned int algs_info_size; + unsigned long flags; + int digest_size; + + void (*copy_hash)(struct ahash_request *req, int out); + void (*write_ctrl)(struct omap_sham_dev *dd, size_t length, + int final, int dma); + void (*trigger)(struct omap_sham_dev *dd, size_t length); + int (*poll_irq)(struct omap_sham_dev *dd); + irqreturn_t (*intr_hdlr)(int irq, void *dev_id); + + u32 odigest_ofs; + u32 idigest_ofs; + u32 din_ofs; + u32 digcnt_ofs; + u32 rev_ofs; + u32 mask_ofs; + u32 sysstatus_ofs; + u32 mode_ofs; + u32 length_ofs; + + u32 major_mask; + u32 major_shift; + u32 minor_mask; + u32 minor_shift; +}; + +struct omap_sham_dev { + struct list_head list; + unsigned long phys_base; + struct device *dev; + void __iomem *io_base; + int irq; + spinlock_t lock; + int err; + unsigned int dma; + struct dma_chan *dma_lch; + struct tasklet_struct done_task; + u8 polling_mode; + + unsigned long flags; + struct crypto_queue queue; + struct ahash_request *req; + + const struct omap_sham_pdata *pdata; +}; + +struct omap_sham_drv { + struct list_head dev_list; + spinlock_t lock; + unsigned long flags; +}; + +static struct omap_sham_drv sham = { + .dev_list = LIST_HEAD_INIT(sham.dev_list), + .lock = __SPIN_LOCK_UNLOCKED(sham.lock), +}; + +static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset) +{ + return __raw_readl(dd->io_base + offset); +} + +static inline void omap_sham_write(struct omap_sham_dev 
*dd, + u32 offset, u32 value) +{ + __raw_writel(value, dd->io_base + offset); +} + +static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address, + u32 value, u32 mask) +{ + u32 val; + + val = omap_sham_read(dd, address); + val &= ~mask; + val |= value; + omap_sham_write(dd, address, val); +} + +static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit) +{ + unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL; + + while (!(omap_sham_read(dd, offset) & bit)) { + if (time_is_before_jiffies(timeout)) + return -ETIMEDOUT; + } + + return 0; +} + +static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + struct omap_sham_dev *dd = ctx->dd; + u32 *hash = (u32 *)ctx->digest; + int i; + + for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) { + if (out) + hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i)); + else + omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]); + } +} + +static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + struct omap_sham_dev *dd = ctx->dd; + int i; + + if (ctx->flags & BIT(FLAGS_HMAC)) { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req); + struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); + struct omap_sham_hmac_ctx *bctx = tctx->base; + u32 *opad = (u32 *)bctx->opad; + + for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) { + if (out) + opad[i] = omap_sham_read(dd, + SHA_REG_ODIGEST(dd, i)); + else + omap_sham_write(dd, SHA_REG_ODIGEST(dd, i), + opad[i]); + } + } + + omap_sham_copy_hash_omap2(req, out); +} + +static void omap_sham_copy_ready_hash(struct ahash_request *req) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + u32 *in = (u32 *)ctx->digest; + u32 *hash = (u32 *)req->result; + int i, d, big_endian = 0; + + if (!hash) + return; + + switch (ctx->flags & FLAGS_MODE_MASK) { + case FLAGS_MODE_MD5: + d = MD5_DIGEST_SIZE / sizeof(u32); + break; + case FLAGS_MODE_SHA1: + /* OMAP2 SHA1 is big endian */ + if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags)) + big_endian = 1; + d = SHA1_DIGEST_SIZE / sizeof(u32); + break; + case FLAGS_MODE_SHA224: + d = SHA224_DIGEST_SIZE / sizeof(u32); + break; + case FLAGS_MODE_SHA256: + d = SHA256_DIGEST_SIZE / sizeof(u32); + break; + case FLAGS_MODE_SHA384: + d = SHA384_DIGEST_SIZE / sizeof(u32); + break; + case FLAGS_MODE_SHA512: + d = SHA512_DIGEST_SIZE / sizeof(u32); + break; + default: + d = 0; + } + + if (big_endian) + for (i = 0; i < d; i++) + hash[i] = be32_to_cpu(in[i]); + else + for (i = 0; i < d; i++) + hash[i] = le32_to_cpu(in[i]); +} + +static int omap_sham_hw_init(struct omap_sham_dev *dd) +{ + pm_runtime_get_sync(dd->dev); + + if (!test_bit(FLAGS_INIT, &dd->flags)) { + set_bit(FLAGS_INIT, &dd->flags); + dd->err = 0; + } + + return 0; +} + +static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length, + int final, int dma) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); + u32 val = length << 5, mask; + + if (likely(ctx->digcnt)) + omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt); + + omap_sham_write_mask(dd, SHA_REG_MASK(dd), + SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0), + SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN); + /* + * Setting ALGO_CONST only for the first iteration + * and CLOSE_HASH only for the last one. 
+ */ + if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1) + val |= SHA_REG_CTRL_ALGO; + if (!ctx->digcnt) + val |= SHA_REG_CTRL_ALGO_CONST; + if (final) + val |= SHA_REG_CTRL_CLOSE_HASH; + + mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH | + SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH; + + omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask); +} + +static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length) +{ +} + +static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd) +{ + return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY); +} + +static int get_block_size(struct omap_sham_reqctx *ctx) +{ + int d; + + switch (ctx->flags & FLAGS_MODE_MASK) { + case FLAGS_MODE_MD5: + case FLAGS_MODE_SHA1: + d = SHA1_BLOCK_SIZE; + break; + case FLAGS_MODE_SHA224: + case FLAGS_MODE_SHA256: + d = SHA256_BLOCK_SIZE; + break; + case FLAGS_MODE_SHA384: + case FLAGS_MODE_SHA512: + d = SHA512_BLOCK_SIZE; + break; + default: + d = 0; + } + + return d; +} + +static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset, + u32 *value, int count) +{ + for (; count--; value++, offset += 4) + omap_sham_write(dd, offset, *value); +} + +static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length, + int final, int dma) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); + u32 val, mask; + + /* + * Setting ALGO_CONST only for the first iteration and + * CLOSE_HASH only for the last one. Note that flags mode bits + * correspond to algorithm encoding in mode register. + */ + val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT); + if (!ctx->digcnt) { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req); + struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); + struct omap_sham_hmac_ctx *bctx = tctx->base; + int bs, nr_dr; + + val |= SHA_REG_MODE_ALGO_CONSTANT; + + if (ctx->flags & BIT(FLAGS_HMAC)) { + bs = get_block_size(ctx); + nr_dr = bs / (2 * sizeof(u32)); + val |= SHA_REG_MODE_HMAC_KEY_PROC; + omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0), + (u32 *)bctx->ipad, nr_dr); + omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0), + (u32 *)bctx->ipad + nr_dr, nr_dr); + ctx->digcnt += bs; + } + } + + if (final) { + val |= SHA_REG_MODE_CLOSE_HASH; + + if (ctx->flags & BIT(FLAGS_HMAC)) + val |= SHA_REG_MODE_HMAC_OUTER_HASH; + } + + mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH | + SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH | + SHA_REG_MODE_HMAC_KEY_PROC; + + dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags); + omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask); + omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY); + omap_sham_write_mask(dd, SHA_REG_MASK(dd), + SHA_REG_MASK_IT_EN | + (dma ? 
SHA_REG_MASK_DMA_EN : 0), + SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN); +} + +static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length) +{ + omap_sham_write(dd, SHA_REG_LENGTH(dd), length); +} + +static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd) +{ + return omap_sham_wait(dd, SHA_REG_IRQSTATUS, + SHA_REG_IRQSTATUS_INPUT_RDY); +} + +static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, + size_t length, int final) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); + int count, len32, bs32, offset = 0; + const u32 *buffer = (const u32 *)buf; + + dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", + ctx->digcnt, length, final); + + dd->pdata->write_ctrl(dd, length, final, 0); + dd->pdata->trigger(dd, length); + + /* should be non-zero before next lines to disable clocks later */ + ctx->digcnt += length; + + if (final) + set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ + + set_bit(FLAGS_CPU, &dd->flags); + + len32 = DIV_ROUND_UP(length, sizeof(u32)); + bs32 = get_block_size(ctx) / sizeof(u32); + + while (len32) { + if (dd->pdata->poll_irq(dd)) + return -ETIMEDOUT; + + for (count = 0; count < min(len32, bs32); count++, offset++) + omap_sham_write(dd, SHA_REG_DIN(dd, count), + buffer[offset]); + len32 -= min(len32, bs32); + } + + return -EINPROGRESS; +} + +static void omap_sham_dma_callback(void *param) +{ + struct omap_sham_dev *dd = param; + + set_bit(FLAGS_DMA_READY, &dd->flags); + tasklet_schedule(&dd->done_task); +} + +static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, + size_t length, int final, int is_sg) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); + struct dma_async_tx_descriptor *tx; + struct dma_slave_config cfg; + int len32, ret, dma_min = get_block_size(ctx); + + dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", + ctx->digcnt, length, final); + + memset(&cfg, 0, sizeof(cfg)); + + cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0); + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_maxburst = dma_min / DMA_SLAVE_BUSWIDTH_4_BYTES; + + ret = dmaengine_slave_config(dd->dma_lch, &cfg); + if (ret) { + pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret); + return ret; + } + + len32 = DIV_ROUND_UP(length, dma_min) * dma_min; + + if (is_sg) { + /* + * The SG entry passed in may not have the 'length' member + * set correctly so use a local SG entry (sgl) with the + * proper value for 'length' instead. If this is not done, + * the dmaengine may try to DMA the incorrect amount of data. 
+ */ + sg_init_table(&ctx->sgl, 1); + ctx->sgl.page_link = ctx->sg->page_link; + ctx->sgl.offset = ctx->sg->offset; + sg_dma_len(&ctx->sgl) = len32; + sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg); + + tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + } else { + tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + } + + if (!tx) { + dev_err(dd->dev, "prep_slave_sg/single() failed\n"); + return -EINVAL; + } + + tx->callback = omap_sham_dma_callback; + tx->callback_param = dd; + + dd->pdata->write_ctrl(dd, length, final, 1); + + ctx->digcnt += length; + + if (final) + set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ + + set_bit(FLAGS_DMA_ACTIVE, &dd->flags); + + dmaengine_submit(tx); + dma_async_issue_pending(dd->dma_lch); + + dd->pdata->trigger(dd, length); + + return -EINPROGRESS; +} + +static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx, + const u8 *data, size_t length) +{ + size_t count = min(length, ctx->buflen - ctx->bufcnt); + + count = min(count, ctx->total); + if (count <= 0) + return 0; + memcpy(ctx->buffer + ctx->bufcnt, data, count); + ctx->bufcnt += count; + + return count; +} + +static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx) +{ + size_t count; + const u8 *vaddr; + + while (ctx->sg) { + vaddr = kmap_atomic(sg_page(ctx->sg)); + vaddr += ctx->sg->offset; + + count = omap_sham_append_buffer(ctx, + vaddr + ctx->offset, + ctx->sg->length - ctx->offset); + + kunmap_atomic((void *)vaddr); + + if (!count) + break; + ctx->offset += count; + ctx->total -= count; + if (ctx->offset == ctx->sg->length) { + ctx->sg = sg_next(ctx->sg); + if (ctx->sg) + ctx->offset = 0; + else + ctx->total = 0; + } + } + + return 0; +} + +static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd, + struct omap_sham_reqctx *ctx, + size_t length, int final) +{ + int ret; + + ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen, + DMA_TO_DEVICE); + if (dma_mapping_error(dd->dev, ctx->dma_addr)) { + dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen); + return -EINVAL; + } + + ctx->flags &= ~BIT(FLAGS_SG); + + ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0); + if (ret != -EINPROGRESS) + dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, + DMA_TO_DEVICE); + + return ret; +} + +static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); + unsigned int final; + size_t count; + + omap_sham_append_sg(ctx); + + final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; + + dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n", + ctx->bufcnt, ctx->digcnt, final); + + if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { + count = ctx->bufcnt; + ctx->bufcnt = 0; + return omap_sham_xmit_dma_map(dd, ctx, count, final); + } + + return 0; +} + +/* Start address alignment */ +#define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32))) +/* SHA1 block size alignment */ +#define SG_SA(sg, bs) (IS_ALIGNED(sg->length, bs)) + +static int omap_sham_update_dma_start(struct omap_sham_dev *dd) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); + unsigned int length, final, tail; + struct scatterlist *sg; + int ret, bs; + + if (!ctx->total) + return 0; + + if (ctx->bufcnt || ctx->offset) + return omap_sham_update_dma_slow(dd); + + /* + * Don't use the sg interface when the transfer size is less + * than the number of elements in a DMA frame. 
Otherwise, + * the dmaengine infrastructure will calculate that it needs + * to transfer 0 frames which ultimately fails. + */ + if (ctx->total < get_block_size(ctx)) + return omap_sham_update_dma_slow(dd); + + dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", + ctx->digcnt, ctx->bufcnt, ctx->total); + + sg = ctx->sg; + bs = get_block_size(ctx); + + if (!SG_AA(sg)) + return omap_sham_update_dma_slow(dd); + + if (!sg_is_last(sg) && !SG_SA(sg, bs)) + /* size is not BLOCK_SIZE aligned */ + return omap_sham_update_dma_slow(dd); + + length = min(ctx->total, sg->length); + + if (sg_is_last(sg)) { + if (!(ctx->flags & BIT(FLAGS_FINUP))) { + /* not last sg must be BLOCK_SIZE aligned */ + tail = length & (bs - 1); + /* without finup() we need one block to close hash */ + if (!tail) + tail = bs; + length -= tail; + } + } + + if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { + dev_err(dd->dev, "dma_map_sg error\n"); + return -EINVAL; + } + + ctx->flags |= BIT(FLAGS_SG); + + ctx->total -= length; + ctx->offset = length; /* offset where to start slow */ + + final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; + + ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1); + if (ret != -EINPROGRESS) + dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); + + return ret; +} + +static int omap_sham_update_cpu(struct omap_sham_dev *dd) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); + int bufcnt, final; + + if (!ctx->total) + return 0; + + omap_sham_append_sg(ctx); + + final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; + + dev_dbg(dd->dev, "cpu: bufcnt: %u, digcnt: %d, final: %d\n", + ctx->bufcnt, ctx->digcnt, final); + + if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { + bufcnt = ctx->bufcnt; + ctx->bufcnt = 0; + return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final); + } + + return 0; +} + +static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); + + dmaengine_terminate_all(dd->dma_lch); + + if (ctx->flags & BIT(FLAGS_SG)) { + dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); + if (ctx->sg->length == ctx->offset) { + ctx->sg = sg_next(ctx->sg); + if (ctx->sg) + ctx->offset = 0; + } + } else { + dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, + DMA_TO_DEVICE); + } + + return 0; +} + +static int omap_sham_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + struct omap_sham_dev *dd = NULL, *tmp; + int bs = 0; + + spin_lock_bh(&sham.lock); + if (!tctx->dd) { + list_for_each_entry(tmp, &sham.dev_list, list) { + dd = tmp; + break; + } + tctx->dd = dd; + } else { + dd = tctx->dd; + } + spin_unlock_bh(&sham.lock); + + ctx->dd = dd; + + ctx->flags = 0; + + dev_dbg(dd->dev, "init: digest size: %d\n", + crypto_ahash_digestsize(tfm)); + + switch (crypto_ahash_digestsize(tfm)) { + case MD5_DIGEST_SIZE: + ctx->flags |= FLAGS_MODE_MD5; + bs = SHA1_BLOCK_SIZE; + break; + case SHA1_DIGEST_SIZE: + ctx->flags |= FLAGS_MODE_SHA1; + bs = SHA1_BLOCK_SIZE; + break; + case SHA224_DIGEST_SIZE: + ctx->flags |= FLAGS_MODE_SHA224; + bs = SHA224_BLOCK_SIZE; + break; + case SHA256_DIGEST_SIZE: + ctx->flags |= FLAGS_MODE_SHA256; + bs = SHA256_BLOCK_SIZE; + break; + case SHA384_DIGEST_SIZE: + ctx->flags |= FLAGS_MODE_SHA384; + bs = SHA384_BLOCK_SIZE; + break; + case SHA512_DIGEST_SIZE: + ctx->flags |= FLAGS_MODE_SHA512; + bs = SHA512_BLOCK_SIZE; + 
break; + } + + ctx->bufcnt = 0; + ctx->digcnt = 0; + ctx->buflen = BUFLEN; + + if (tctx->flags & BIT(FLAGS_HMAC)) { + if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { + struct omap_sham_hmac_ctx *bctx = tctx->base; + + memcpy(ctx->buffer, bctx->ipad, bs); + ctx->bufcnt = bs; + } + + ctx->flags |= BIT(FLAGS_HMAC); + } + + return 0; + +} + +static int omap_sham_update_req(struct omap_sham_dev *dd) +{ + struct ahash_request *req = dd->req; + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + int err; + + dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", + ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0); + + if (ctx->flags & BIT(FLAGS_CPU)) + err = omap_sham_update_cpu(dd); + else + err = omap_sham_update_dma_start(dd); + + /* wait for dma completion before can take more data */ + dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); + + return err; +} + +static int omap_sham_final_req(struct omap_sham_dev *dd) +{ + struct ahash_request *req = dd->req; + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + int err = 0, use_dma = 1; + + if ((ctx->bufcnt <= get_block_size(ctx)) || dd->polling_mode) + /* + * faster to handle last block with cpu or + * use cpu when dma is not present. + */ + use_dma = 0; + + if (use_dma) + err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1); + else + err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1); + + ctx->bufcnt = 0; + + dev_dbg(dd->dev, "final_req: err: %d\n", err); + + return err; +} + +static int omap_sham_finish_hmac(struct ahash_request *req) +{ + struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); + struct omap_sham_hmac_ctx *bctx = tctx->base; + int bs = crypto_shash_blocksize(bctx->shash); + int ds = crypto_shash_digestsize(bctx->shash); + SHASH_DESC_ON_STACK(shash, bctx->shash); + + shash->tfm = bctx->shash; + shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */ + + return crypto_shash_init(shash) ?: + crypto_shash_update(shash, bctx->opad, bs) ?: + crypto_shash_finup(shash, req->result, ds, req->result); +} + +static int omap_sham_finish(struct ahash_request *req) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + struct omap_sham_dev *dd = ctx->dd; + int err = 0; + + if (ctx->digcnt) { + omap_sham_copy_ready_hash(req); + if ((ctx->flags & BIT(FLAGS_HMAC)) && + !test_bit(FLAGS_AUTO_XOR, &dd->flags)) + err = omap_sham_finish_hmac(req); + } + + dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt); + + return err; +} + +static void omap_sham_finish_req(struct ahash_request *req, int err) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + struct omap_sham_dev *dd = ctx->dd; + + if (!err) { + dd->pdata->copy_hash(req, 1); + if (test_bit(FLAGS_FINAL, &dd->flags)) + err = omap_sham_finish(req); + } else { + ctx->flags |= BIT(FLAGS_ERROR); + } + + /* atomic operation is not needed here */ + dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) | + BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY)); + + pm_runtime_put(dd->dev); + + if (req->base.complete) + req->base.complete(&req->base, err); + + /* handle new request */ + tasklet_schedule(&dd->done_task); +} + +static int omap_sham_handle_queue(struct omap_sham_dev *dd, + struct ahash_request *req) +{ + struct crypto_async_request *async_req, *backlog; + struct omap_sham_reqctx *ctx; + unsigned long flags; + int err = 0, ret = 0; + + spin_lock_irqsave(&dd->lock, flags); + if (req) + ret = ahash_enqueue_request(&dd->queue, req); + if (test_bit(FLAGS_BUSY, &dd->flags)) { + 
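		/*
		 * Editorial note (not in the original source): another request
		 * is already running, so the one just enqueued stays on
		 * dd->queue; omap_sham_finish_req() schedules done_task, which
		 * calls back into omap_sham_handle_queue() to dequeue it.
		 */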
spin_unlock_irqrestore(&dd->lock, flags); + return ret; + } + backlog = crypto_get_backlog(&dd->queue); + async_req = crypto_dequeue_request(&dd->queue); + if (async_req) + set_bit(FLAGS_BUSY, &dd->flags); + spin_unlock_irqrestore(&dd->lock, flags); + + if (!async_req) + return ret; + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + + req = ahash_request_cast(async_req); + dd->req = req; + ctx = ahash_request_ctx(req); + + dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", + ctx->op, req->nbytes); + + err = omap_sham_hw_init(dd); + if (err) + goto err1; + + if (ctx->digcnt) + /* request has changed - restore hash */ + dd->pdata->copy_hash(req, 0); + + if (ctx->op == OP_UPDATE) { + err = omap_sham_update_req(dd); + if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP))) + /* no final() after finup() */ + err = omap_sham_final_req(dd); + } else if (ctx->op == OP_FINAL) { + err = omap_sham_final_req(dd); + } +err1: + if (err != -EINPROGRESS) + /* done_task will not finish it, so do it here */ + omap_sham_finish_req(req, err); + + dev_dbg(dd->dev, "exit, err: %d\n", err); + + return ret; +} + +static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); + struct omap_sham_dev *dd = tctx->dd; + + ctx->op = op; + + return omap_sham_handle_queue(dd, req); +} + +static int omap_sham_update(struct ahash_request *req) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + struct omap_sham_dev *dd = ctx->dd; + int bs = get_block_size(ctx); + + if (!req->nbytes) + return 0; + + ctx->total = req->nbytes; + ctx->sg = req->src; + ctx->offset = 0; + + if (ctx->flags & BIT(FLAGS_FINUP)) { + if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) { + /* + * OMAP HW accel works only with buffers >= 9 + * will switch to bypass in final() + * final has the same request and data + */ + omap_sham_append_sg(ctx); + return 0; + } else if ((ctx->bufcnt + ctx->total <= bs) || + dd->polling_mode) { + /* + * faster to use CPU for short transfers or + * use cpu when dma is not present. 
+ */ + ctx->flags |= BIT(FLAGS_CPU); + } + } else if (ctx->bufcnt + ctx->total < ctx->buflen) { + omap_sham_append_sg(ctx); + return 0; + } + + if (dd->polling_mode) + ctx->flags |= BIT(FLAGS_CPU); + + return omap_sham_enqueue(req, OP_UPDATE); +} + +static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags, + const u8 *data, unsigned int len, u8 *out) +{ + SHASH_DESC_ON_STACK(shash, tfm); + + shash->tfm = tfm; + shash->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_shash_digest(shash, data, len, out); +} + +static int omap_sham_final_shash(struct ahash_request *req) +{ + struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + + return omap_sham_shash_digest(tctx->fallback, req->base.flags, + ctx->buffer, ctx->bufcnt, req->result); +} + +static int omap_sham_final(struct ahash_request *req) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + + ctx->flags |= BIT(FLAGS_FINUP); + + if (ctx->flags & BIT(FLAGS_ERROR)) + return 0; /* uncompleted hash is not needed */ + + /* OMAP HW accel works only with buffers >= 9 */ + /* HMAC is always >= 9 because ipad == block size */ + if ((ctx->digcnt + ctx->bufcnt) < 9) + return omap_sham_final_shash(req); + else if (ctx->bufcnt) + return omap_sham_enqueue(req, OP_FINAL); + + /* copy ready hash (+ finalize hmac) */ + return omap_sham_finish(req); +} + +static int omap_sham_finup(struct ahash_request *req) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + int err1, err2; + + ctx->flags |= BIT(FLAGS_FINUP); + + err1 = omap_sham_update(req); + if (err1 == -EINPROGRESS || err1 == -EBUSY) + return err1; + /* + * final() has to be always called to cleanup resources + * even if udpate() failed, except EINPROGRESS + */ + err2 = omap_sham_final(req); + + return err1 ?: err2; +} + +static int omap_sham_digest(struct ahash_request *req) +{ + return omap_sham_init(req) ?: omap_sham_finup(req); +} + +static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); + struct omap_sham_hmac_ctx *bctx = tctx->base; + int bs = crypto_shash_blocksize(bctx->shash); + int ds = crypto_shash_digestsize(bctx->shash); + struct omap_sham_dev *dd = NULL, *tmp; + int err, i; + + spin_lock_bh(&sham.lock); + if (!tctx->dd) { + list_for_each_entry(tmp, &sham.dev_list, list) { + dd = tmp; + break; + } + tctx->dd = dd; + } else { + dd = tctx->dd; + } + spin_unlock_bh(&sham.lock); + + err = crypto_shash_setkey(tctx->fallback, key, keylen); + if (err) + return err; + + if (keylen > bs) { + err = omap_sham_shash_digest(bctx->shash, + crypto_shash_get_flags(bctx->shash), + key, keylen, bctx->ipad); + if (err) + return err; + keylen = ds; + } else { + memcpy(bctx->ipad, key, keylen); + } + + memset(bctx->ipad + keylen, 0, bs - keylen); + + if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { + memcpy(bctx->opad, bctx->ipad, bs); + + for (i = 0; i < bs; i++) { + bctx->ipad[i] ^= 0x36; + bctx->opad[i] ^= 0x5c; + } + } + + return err; +} + +static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) +{ + struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); + const char *alg_name = crypto_tfm_alg_name(tfm); + + /* Allocate a fallback and abort if it failed. 
*/ + tctx->fallback = crypto_alloc_shash(alg_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(tctx->fallback)) { + pr_err("omap-sham: fallback driver '%s' " + "could not be loaded.\n", alg_name); + return PTR_ERR(tctx->fallback); + } + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct omap_sham_reqctx) + BUFLEN); + + if (alg_base) { + struct omap_sham_hmac_ctx *bctx = tctx->base; + tctx->flags |= BIT(FLAGS_HMAC); + bctx->shash = crypto_alloc_shash(alg_base, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(bctx->shash)) { + pr_err("omap-sham: base driver '%s' " + "could not be loaded.\n", alg_base); + crypto_free_shash(tctx->fallback); + return PTR_ERR(bctx->shash); + } + + } + + return 0; +} + +static int omap_sham_cra_init(struct crypto_tfm *tfm) +{ + return omap_sham_cra_init_alg(tfm, NULL); +} + +static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm) +{ + return omap_sham_cra_init_alg(tfm, "sha1"); +} + +static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm) +{ + return omap_sham_cra_init_alg(tfm, "sha224"); +} + +static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm) +{ + return omap_sham_cra_init_alg(tfm, "sha256"); +} + +static int omap_sham_cra_md5_init(struct crypto_tfm *tfm) +{ + return omap_sham_cra_init_alg(tfm, "md5"); +} + +static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm) +{ + return omap_sham_cra_init_alg(tfm, "sha384"); +} + +static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm) +{ + return omap_sham_cra_init_alg(tfm, "sha512"); +} + +static void omap_sham_cra_exit(struct crypto_tfm *tfm) +{ + struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); + + crypto_free_shash(tctx->fallback); + tctx->fallback = NULL; + + if (tctx->flags & BIT(FLAGS_HMAC)) { + struct omap_sham_hmac_ctx *bctx = tctx->base; + crypto_free_shash(bctx->shash); + } +} + +static struct ahash_alg algs_sha1_md5[] = { +{ + .init = omap_sham_init, + .update = omap_sham_update, + .final = omap_sham_final, + .finup = omap_sham_finup, + .digest = omap_sham_digest, + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha1", + .cra_driver_name = "omap-sha1", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_sham_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = omap_sham_cra_init, + .cra_exit = omap_sham_cra_exit, + } +}, +{ + .init = omap_sham_init, + .update = omap_sham_update, + .final = omap_sham_final, + .finup = omap_sham_finup, + .digest = omap_sham_digest, + .halg.digestsize = MD5_DIGEST_SIZE, + .halg.base = { + .cra_name = "md5", + .cra_driver_name = "omap-md5", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_sham_ctx), + .cra_alignmask = OMAP_ALIGN_MASK, + .cra_module = THIS_MODULE, + .cra_init = omap_sham_cra_init, + .cra_exit = omap_sham_cra_exit, + } +}, +{ + .init = omap_sham_init, + .update = omap_sham_update, + .final = omap_sham_final, + .finup = omap_sham_finup, + .digest = omap_sham_digest, + .setkey = omap_sham_setkey, + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "omap-hmac-sha1", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + 
.cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_sham_ctx) + + sizeof(struct omap_sham_hmac_ctx), + .cra_alignmask = OMAP_ALIGN_MASK, + .cra_module = THIS_MODULE, + .cra_init = omap_sham_cra_sha1_init, + .cra_exit = omap_sham_cra_exit, + } +}, +{ + .init = omap_sham_init, + .update = omap_sham_update, + .final = omap_sham_final, + .finup = omap_sham_finup, + .digest = omap_sham_digest, + .setkey = omap_sham_setkey, + .halg.digestsize = MD5_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(md5)", + .cra_driver_name = "omap-hmac-md5", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_sham_ctx) + + sizeof(struct omap_sham_hmac_ctx), + .cra_alignmask = OMAP_ALIGN_MASK, + .cra_module = THIS_MODULE, + .cra_init = omap_sham_cra_md5_init, + .cra_exit = omap_sham_cra_exit, + } +} +}; + +/* OMAP4 has some algs in addition to what OMAP2 has */ +static struct ahash_alg algs_sha224_sha256[] = { +{ + .init = omap_sham_init, + .update = omap_sham_update, + .final = omap_sham_final, + .finup = omap_sham_finup, + .digest = omap_sham_digest, + .halg.digestsize = SHA224_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha224", + .cra_driver_name = "omap-sha224", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_sham_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = omap_sham_cra_init, + .cra_exit = omap_sham_cra_exit, + } +}, +{ + .init = omap_sham_init, + .update = omap_sham_update, + .final = omap_sham_final, + .finup = omap_sham_finup, + .digest = omap_sham_digest, + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha256", + .cra_driver_name = "omap-sha256", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_sham_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = omap_sham_cra_init, + .cra_exit = omap_sham_cra_exit, + } +}, +{ + .init = omap_sham_init, + .update = omap_sham_update, + .final = omap_sham_final, + .finup = omap_sham_finup, + .digest = omap_sham_digest, + .setkey = omap_sham_setkey, + .halg.digestsize = SHA224_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha224)", + .cra_driver_name = "omap-hmac-sha224", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_sham_ctx) + + sizeof(struct omap_sham_hmac_ctx), + .cra_alignmask = OMAP_ALIGN_MASK, + .cra_module = THIS_MODULE, + .cra_init = omap_sham_cra_sha224_init, + .cra_exit = omap_sham_cra_exit, + } +}, +{ + .init = omap_sham_init, + .update = omap_sham_update, + .final = omap_sham_final, + .finup = omap_sham_finup, + .digest = omap_sham_digest, + .setkey = omap_sham_setkey, + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "omap-hmac-sha256", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_sham_ctx) + + sizeof(struct omap_sham_hmac_ctx), + .cra_alignmask = OMAP_ALIGN_MASK, + .cra_module = THIS_MODULE, + .cra_init 
= omap_sham_cra_sha256_init, + .cra_exit = omap_sham_cra_exit, + } +}, +}; + +static struct ahash_alg algs_sha384_sha512[] = { +{ + .init = omap_sham_init, + .update = omap_sham_update, + .final = omap_sham_final, + .finup = omap_sham_finup, + .digest = omap_sham_digest, + .halg.digestsize = SHA384_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha384", + .cra_driver_name = "omap-sha384", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_sham_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = omap_sham_cra_init, + .cra_exit = omap_sham_cra_exit, + } +}, +{ + .init = omap_sham_init, + .update = omap_sham_update, + .final = omap_sham_final, + .finup = omap_sham_finup, + .digest = omap_sham_digest, + .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha512", + .cra_driver_name = "omap-sha512", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_sham_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = omap_sham_cra_init, + .cra_exit = omap_sham_cra_exit, + } +}, +{ + .init = omap_sham_init, + .update = omap_sham_update, + .final = omap_sham_final, + .finup = omap_sham_finup, + .digest = omap_sham_digest, + .setkey = omap_sham_setkey, + .halg.digestsize = SHA384_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha384)", + .cra_driver_name = "omap-hmac-sha384", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_sham_ctx) + + sizeof(struct omap_sham_hmac_ctx), + .cra_alignmask = OMAP_ALIGN_MASK, + .cra_module = THIS_MODULE, + .cra_init = omap_sham_cra_sha384_init, + .cra_exit = omap_sham_cra_exit, + } +}, +{ + .init = omap_sham_init, + .update = omap_sham_update, + .final = omap_sham_final, + .finup = omap_sham_finup, + .digest = omap_sham_digest, + .setkey = omap_sham_setkey, + .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha512)", + .cra_driver_name = "omap-hmac-sha512", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_sham_ctx) + + sizeof(struct omap_sham_hmac_ctx), + .cra_alignmask = OMAP_ALIGN_MASK, + .cra_module = THIS_MODULE, + .cra_init = omap_sham_cra_sha512_init, + .cra_exit = omap_sham_cra_exit, + } +}, +}; + +static void omap_sham_done_task(unsigned long data) +{ + struct omap_sham_dev *dd = (struct omap_sham_dev *)data; + int err = 0; + + if (!test_bit(FLAGS_BUSY, &dd->flags)) { + omap_sham_handle_queue(dd, NULL); + return; + } + + if (test_bit(FLAGS_CPU, &dd->flags)) { + if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { + /* hash or semi-hash ready */ + err = omap_sham_update_cpu(dd); + if (err != -EINPROGRESS) + goto finish; + } + } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { + if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { + omap_sham_update_dma_stop(dd); + if (dd->err) { + err = dd->err; + goto finish; + } + } + if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { + /* hash or semi-hash ready */ + clear_bit(FLAGS_DMA_READY, &dd->flags); + err = omap_sham_update_dma_start(dd); + if (err != -EINPROGRESS) + goto finish; + } + } + + return; 
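	/*
	 * Editorial note (not in the original source): falling through to
	 * this return means processing is still in progress (more CPU data
	 * to feed or another DMA descriptor was issued); once a step ends
	 * with anything other than -EINPROGRESS, the finish label below
	 * hands the request back via omap_sham_finish_req().
	 */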
+ +finish: + dev_dbg(dd->dev, "update done: err: %d\n", err); + /* finish curent request */ + omap_sham_finish_req(dd->req, err); +} + +static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd) +{ + if (!test_bit(FLAGS_BUSY, &dd->flags)) { + dev_warn(dd->dev, "Interrupt when no active requests.\n"); + } else { + set_bit(FLAGS_OUTPUT_READY, &dd->flags); + tasklet_schedule(&dd->done_task); + } + + return IRQ_HANDLED; +} + +static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id) +{ + struct omap_sham_dev *dd = dev_id; + + if (unlikely(test_bit(FLAGS_FINAL, &dd->flags))) + /* final -> allow device to go to power-saving mode */ + omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH); + + omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY, + SHA_REG_CTRL_OUTPUT_READY); + omap_sham_read(dd, SHA_REG_CTRL); + + return omap_sham_irq_common(dd); +} + +static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id) +{ + struct omap_sham_dev *dd = dev_id; + + omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN); + + return omap_sham_irq_common(dd); +} + +static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = { + { + .algs_list = algs_sha1_md5, + .size = ARRAY_SIZE(algs_sha1_md5), + }, +}; + +static const struct omap_sham_pdata omap_sham_pdata_omap2 = { + .algs_info = omap_sham_algs_info_omap2, + .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap2), + .flags = BIT(FLAGS_BE32_SHA1), + .digest_size = SHA1_DIGEST_SIZE, + .copy_hash = omap_sham_copy_hash_omap2, + .write_ctrl = omap_sham_write_ctrl_omap2, + .trigger = omap_sham_trigger_omap2, + .poll_irq = omap_sham_poll_irq_omap2, + .intr_hdlr = omap_sham_irq_omap2, + .idigest_ofs = 0x00, + .din_ofs = 0x1c, + .digcnt_ofs = 0x14, + .rev_ofs = 0x5c, + .mask_ofs = 0x60, + .sysstatus_ofs = 0x64, + .major_mask = 0xf0, + .major_shift = 4, + .minor_mask = 0x0f, + .minor_shift = 0, +}; + +#ifdef CONFIG_OF +static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = { + { + .algs_list = algs_sha1_md5, + .size = ARRAY_SIZE(algs_sha1_md5), + }, + { + .algs_list = algs_sha224_sha256, + .size = ARRAY_SIZE(algs_sha224_sha256), + }, +}; + +static const struct omap_sham_pdata omap_sham_pdata_omap4 = { + .algs_info = omap_sham_algs_info_omap4, + .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap4), + .flags = BIT(FLAGS_AUTO_XOR), + .digest_size = SHA256_DIGEST_SIZE, + .copy_hash = omap_sham_copy_hash_omap4, + .write_ctrl = omap_sham_write_ctrl_omap4, + .trigger = omap_sham_trigger_omap4, + .poll_irq = omap_sham_poll_irq_omap4, + .intr_hdlr = omap_sham_irq_omap4, + .idigest_ofs = 0x020, + .odigest_ofs = 0x0, + .din_ofs = 0x080, + .digcnt_ofs = 0x040, + .rev_ofs = 0x100, + .mask_ofs = 0x110, + .sysstatus_ofs = 0x114, + .mode_ofs = 0x44, + .length_ofs = 0x48, + .major_mask = 0x0700, + .major_shift = 8, + .minor_mask = 0x003f, + .minor_shift = 0, +}; + +static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = { + { + .algs_list = algs_sha1_md5, + .size = ARRAY_SIZE(algs_sha1_md5), + }, + { + .algs_list = algs_sha224_sha256, + .size = ARRAY_SIZE(algs_sha224_sha256), + }, + { + .algs_list = algs_sha384_sha512, + .size = ARRAY_SIZE(algs_sha384_sha512), + }, +}; + +static const struct omap_sham_pdata omap_sham_pdata_omap5 = { + .algs_info = omap_sham_algs_info_omap5, + .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap5), + .flags = BIT(FLAGS_AUTO_XOR), + .digest_size = SHA512_DIGEST_SIZE, + .copy_hash = omap_sham_copy_hash_omap4, + .write_ctrl = omap_sham_write_ctrl_omap4, + .trigger = 
omap_sham_trigger_omap4, + .poll_irq = omap_sham_poll_irq_omap4, + .intr_hdlr = omap_sham_irq_omap4, + .idigest_ofs = 0x240, + .odigest_ofs = 0x200, + .din_ofs = 0x080, + .digcnt_ofs = 0x280, + .rev_ofs = 0x100, + .mask_ofs = 0x110, + .sysstatus_ofs = 0x114, + .mode_ofs = 0x284, + .length_ofs = 0x288, + .major_mask = 0x0700, + .major_shift = 8, + .minor_mask = 0x003f, + .minor_shift = 0, +}; + +static const struct of_device_id omap_sham_of_match[] = { + { + .compatible = "ti,omap2-sham", + .data = &omap_sham_pdata_omap2, + }, + { + .compatible = "ti,omap4-sham", + .data = &omap_sham_pdata_omap4, + }, + { + .compatible = "ti,omap5-sham", + .data = &omap_sham_pdata_omap5, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_sham_of_match); + +static int omap_sham_get_res_of(struct omap_sham_dev *dd, + struct device *dev, struct resource *res) +{ + struct device_node *node = dev->of_node; + const struct of_device_id *match; + int err = 0; + + match = of_match_device(of_match_ptr(omap_sham_of_match), dev); + if (!match) { + dev_err(dev, "no compatible OF match\n"); + err = -EINVAL; + goto err; + } + + err = of_address_to_resource(node, 0, res); + if (err < 0) { + dev_err(dev, "can't translate OF node address\n"); + err = -EINVAL; + goto err; + } + + dd->irq = irq_of_parse_and_map(node, 0); + if (!dd->irq) { + dev_err(dev, "can't translate OF irq value\n"); + err = -EINVAL; + goto err; + } + + dd->dma = -1; /* Dummy value that's unused */ + dd->pdata = match->data; + +err: + return err; +} +#else +static const struct of_device_id omap_sham_of_match[] = { + {}, +}; + +static int omap_sham_get_res_of(struct omap_sham_dev *dd, + struct device *dev, struct resource *res) +{ + return -EINVAL; +} +#endif + +static int omap_sham_get_res_pdev(struct omap_sham_dev *dd, + struct platform_device *pdev, struct resource *res) +{ + struct device *dev = &pdev->dev; + struct resource *r; + int err = 0; + + /* Get the base address */ + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(dev, "no MEM resource info\n"); + err = -ENODEV; + goto err; + } + memcpy(res, r, sizeof(*res)); + + /* Get the IRQ */ + dd->irq = platform_get_irq(pdev, 0); + if (dd->irq < 0) { + dev_err(dev, "no IRQ resource info\n"); + err = dd->irq; + goto err; + } + + /* Get the DMA */ + r = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (!r) { + dev_err(dev, "no DMA resource info\n"); + err = -ENODEV; + goto err; + } + dd->dma = r->start; + + /* Only OMAP2/3 can be non-DT */ + dd->pdata = &omap_sham_pdata_omap2; + +err: + return err; +} + +static int omap_sham_probe(struct platform_device *pdev) +{ + struct omap_sham_dev *dd; + struct device *dev = &pdev->dev; + struct resource res; + dma_cap_mask_t mask; + int err, i, j; + u32 rev; + + dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL); + if (dd == NULL) { + dev_err(dev, "unable to alloc data struct.\n"); + err = -ENOMEM; + goto data_err; + } + dd->dev = dev; + platform_set_drvdata(pdev, dd); + + INIT_LIST_HEAD(&dd->list); + spin_lock_init(&dd->lock); + tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd); + crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH); + + err = (dev->of_node) ? 
omap_sham_get_res_of(dd, dev, &res) : + omap_sham_get_res_pdev(dd, pdev, &res); + if (err) + goto data_err; + + dd->io_base = devm_ioremap_resource(dev, &res); + if (IS_ERR(dd->io_base)) { + err = PTR_ERR(dd->io_base); + goto data_err; + } + dd->phys_base = res.start; + + err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr, + IRQF_TRIGGER_NONE, dev_name(dev), dd); + if (err) { + dev_err(dev, "unable to request irq %d, err = %d\n", + dd->irq, err); + goto data_err; + } + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn, + &dd->dma, dev, "rx"); + if (!dd->dma_lch) { + dd->polling_mode = 1; + dev_dbg(dev, "using polling mode instead of dma\n"); + } + + dd->flags |= dd->pdata->flags; + + pm_runtime_enable(dev); + pm_runtime_irq_safe(dev); + pm_runtime_get_sync(dev); + rev = omap_sham_read(dd, SHA_REG_REV(dd)); + pm_runtime_put_sync(&pdev->dev); + + dev_info(dev, "hw accel on OMAP rev %u.%u\n", + (rev & dd->pdata->major_mask) >> dd->pdata->major_shift, + (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift); + + spin_lock(&sham.lock); + list_add_tail(&dd->list, &sham.dev_list); + spin_unlock(&sham.lock); + + for (i = 0; i < dd->pdata->algs_info_size; i++) { + for (j = 0; j < dd->pdata->algs_info[i].size; j++) { + err = crypto_register_ahash( + &dd->pdata->algs_info[i].algs_list[j]); + if (err) + goto err_algs; + + dd->pdata->algs_info[i].registered++; + } + } + + return 0; + +err_algs: + for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) + for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) + crypto_unregister_ahash( + &dd->pdata->algs_info[i].algs_list[j]); + pm_runtime_disable(dev); + if (dd->dma_lch) + dma_release_channel(dd->dma_lch); +data_err: + dev_err(dev, "initialization failed.\n"); + + return err; +} + +static int omap_sham_remove(struct platform_device *pdev) +{ + static struct omap_sham_dev *dd; + int i, j; + + dd = platform_get_drvdata(pdev); + if (!dd) + return -ENODEV; + spin_lock(&sham.lock); + list_del(&dd->list); + spin_unlock(&sham.lock); + for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) + for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) + crypto_unregister_ahash( + &dd->pdata->algs_info[i].algs_list[j]); + tasklet_kill(&dd->done_task); + pm_runtime_disable(&pdev->dev); + + if (dd->dma_lch) + dma_release_channel(dd->dma_lch); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int omap_sham_suspend(struct device *dev) +{ + pm_runtime_put_sync(dev); + return 0; +} + +static int omap_sham_resume(struct device *dev) +{ + pm_runtime_get_sync(dev); + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume); + +static struct platform_driver omap_sham_driver = { + .probe = omap_sham_probe, + .remove = omap_sham_remove, + .driver = { + .name = "omap-sham", + .pm = &omap_sham_pm_ops, + .of_match_table = omap_sham_of_match, + }, +}; + +module_platform_driver(omap_sham_driver); + +MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support."); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Dmitry Kasatkin"); +MODULE_ALIAS("platform:omap-sham"); diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c new file mode 100644 index 000000000..c178ed8c3 --- /dev/null +++ b/drivers/crypto/padlock-aes.c @@ -0,0 +1,566 @@ +/* + * Cryptographic API. + * + * Support for VIA PadLock hardware crypto engine. 
+ * + * Copyright (c) 2004 Michal Ludvig <michal@logix.cz> + * + */ + +#include <crypto/algapi.h> +#include <crypto/aes.h> +#include <crypto/padlock.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/percpu.h> +#include <linux/smp.h> +#include <linux/slab.h> +#include <asm/cpu_device_id.h> +#include <asm/byteorder.h> +#include <asm/processor.h> +#include <asm/i387.h> + +/* + * Number of data blocks actually fetched for each xcrypt insn. + * Processors with prefetch errata will fetch extra blocks. + */ +static unsigned int ecb_fetch_blocks = 2; +#define MAX_ECB_FETCH_BLOCKS (8) +#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE) + +static unsigned int cbc_fetch_blocks = 1; +#define MAX_CBC_FETCH_BLOCKS (4) +#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE) + +/* Control word. */ +struct cword { + unsigned int __attribute__ ((__packed__)) + rounds:4, + algo:3, + keygen:1, + interm:1, + encdec:1, + ksize:2; +} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); + +/* Whenever making any changes to the following + * structure *make sure* you keep E, d_data + * and cword aligned on 16 Bytes boundaries and + * the Hardware can access 16 * 16 bytes of E and d_data + * (only the first 15 * 16 bytes matter but the HW reads + * more). + */ +struct aes_ctx { + u32 E[AES_MAX_KEYLENGTH_U32] + __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); + u32 d_data[AES_MAX_KEYLENGTH_U32] + __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); + struct { + struct cword encrypt; + struct cword decrypt; + } cword; + u32 *D; +}; + +static DEFINE_PER_CPU(struct cword *, paes_last_cword); + +/* Tells whether the ACE is capable to generate + the extended key for a given key_len. */ +static inline int +aes_hw_extkey_available(uint8_t key_len) +{ + /* TODO: We should check the actual CPU model/stepping + as it's possible that the capability will be + added in the next CPU revisions. */ + if (key_len == 16) + return 1; + return 0; +} + +static inline struct aes_ctx *aes_ctx_common(void *ctx) +{ + unsigned long addr = (unsigned long)ctx; + unsigned long align = PADLOCK_ALIGNMENT; + + if (align <= crypto_tfm_ctx_alignment()) + align = 1; + return (struct aes_ctx *)ALIGN(addr, align); +} + +static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) +{ + return aes_ctx_common(crypto_tfm_ctx(tfm)); +} + +static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm) +{ + return aes_ctx_common(crypto_blkcipher_ctx(tfm)); +} + +static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + const __le32 *key = (const __le32 *)in_key; + u32 *flags = &tfm->crt_flags; + struct crypto_aes_ctx gen_aes; + int cpu; + + if (key_len % 8) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + /* + * If the hardware is capable of generating the extended key + * itself we must supply the plain key for both encryption + * and decryption. + */ + ctx->D = ctx->E; + + ctx->E[0] = le32_to_cpu(key[0]); + ctx->E[1] = le32_to_cpu(key[1]); + ctx->E[2] = le32_to_cpu(key[2]); + ctx->E[3] = le32_to_cpu(key[3]); + + /* Prepare control words. 
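+ *
+ * The rounds/ksize values written below follow directly from the key
+ * length:
+ *   AES-128 (key_len 16): rounds = 10, ksize = 0
+ *   AES-192 (key_len 24): rounds = 12, ksize = 1
+ *   AES-256 (key_len 32): rounds = 14, ksize = 2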
*/ + memset(&ctx->cword, 0, sizeof(ctx->cword)); + + ctx->cword.decrypt.encdec = 1; + ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4; + ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds; + ctx->cword.encrypt.ksize = (key_len - 16) / 8; + ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize; + + /* Don't generate extended keys if the hardware can do it. */ + if (aes_hw_extkey_available(key_len)) + goto ok; + + ctx->D = ctx->d_data; + ctx->cword.encrypt.keygen = 1; + ctx->cword.decrypt.keygen = 1; + + if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH); + memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH); + +ok: + for_each_online_cpu(cpu) + if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) || + &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu)) + per_cpu(paes_last_cword, cpu) = NULL; + + return 0; +} + +/* ====== Encryption/decryption routines ====== */ + +/* These are the real call to PadLock. */ +static inline void padlock_reset_key(struct cword *cword) +{ + int cpu = raw_smp_processor_id(); + + if (cword != per_cpu(paes_last_cword, cpu)) +#ifndef CONFIG_X86_64 + asm volatile ("pushfl; popfl"); +#else + asm volatile ("pushfq; popfq"); +#endif +} + +static inline void padlock_store_cword(struct cword *cword) +{ + per_cpu(paes_last_cword, raw_smp_processor_id()) = cword; +} + +/* + * While the padlock instructions don't use FP/SSE registers, they + * generate a spurious DNA fault when cr0.ts is '1'. These instructions + * should be used only inside the irq_ts_save/restore() context + */ + +static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key, + struct cword *control_word, int count) +{ + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(count)); +} + +static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key, + u8 *iv, struct cword *control_word, int count) +{ + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (count)); + return iv; +} + +static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key, + struct cword *cword, int count) +{ + /* + * Padlock prefetches extra data so we must provide mapped input buffers. + * Assume there are at least 16 bytes of stack already in use. + */ + u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; + u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + memcpy(tmp, in, count * AES_BLOCK_SIZE); + rep_xcrypt_ecb(tmp, out, key, cword, count); +} + +static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key, + u8 *iv, struct cword *cword, int count) +{ + /* + * Padlock prefetches extra data so we must provide mapped input buffers. + * Assume there are at least 16 bytes of stack already in use. + */ + u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; + u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + memcpy(tmp, in, count * AES_BLOCK_SIZE); + return rep_xcrypt_cbc(tmp, out, key, iv, cword, count); +} + +static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key, + struct cword *cword, int count) +{ + /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data. + * We could avoid some copying here but it's probably not worth it. 
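+ * The page-boundary check below bounces the request through an aligned
+ * on-stack buffer (ecb_crypt_copy) whenever the prefetch would run past
+ * the end of the source page, where the extra bytes might not be mapped.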
+ */ + if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) { + ecb_crypt_copy(in, out, key, cword, count); + return; + } + + rep_xcrypt_ecb(in, out, key, cword, count); +} + +static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key, + u8 *iv, struct cword *cword, int count) +{ + /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */ + if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE)) + return cbc_crypt_copy(in, out, key, iv, cword, count); + + return rep_xcrypt_cbc(in, out, key, iv, cword, count); +} + +static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, + void *control_word, u32 count) +{ + u32 initial = count & (ecb_fetch_blocks - 1); + + if (count < ecb_fetch_blocks) { + ecb_crypt(input, output, key, control_word, count); + return; + } + + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(initial)); + + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(count - initial)); +} + +static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, + u8 *iv, void *control_word, u32 count) +{ + u32 initial = count & (cbc_fetch_blocks - 1); + + if (count < cbc_fetch_blocks) + return cbc_crypt(input, output, key, iv, control_word, count); + + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (initial)); + + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (count-initial)); + return iv; +} + +static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + int ts_state; + + padlock_reset_key(&ctx->cword.encrypt); + ts_state = irq_ts_save(); + ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1); + irq_ts_restore(ts_state); + padlock_store_cword(&ctx->cword.encrypt); +} + +static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + int ts_state; + + padlock_reset_key(&ctx->cword.encrypt); + ts_state = irq_ts_save(); + ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1); + irq_ts_restore(ts_state); + padlock_store_cword(&ctx->cword.encrypt); +} + +static struct crypto_alg aes_alg = { + .cra_name = "aes", + .cra_driver_name = "aes-padlock", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_alignmask = PADLOCK_ALIGNMENT - 1, + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = aes_set_key, + .cia_encrypt = aes_encrypt, + .cia_decrypt = aes_decrypt, + } + } +}; + +static int ecb_aes_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + int ts_state; + + padlock_reset_key(&ctx->cword.encrypt); + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + ts_state = irq_ts_save(); + while ((nbytes = walk.nbytes)) { + padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, + ctx->E, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= 
AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + irq_ts_restore(ts_state); + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static int ecb_aes_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + int ts_state; + + padlock_reset_key(&ctx->cword.decrypt); + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + ts_state = irq_ts_save(); + while ((nbytes = walk.nbytes)) { + padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, + ctx->D, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + irq_ts_restore(ts_state); + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static struct crypto_alg ecb_aes_alg = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-padlock", + .cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_alignmask = PADLOCK_ALIGNMENT - 1, + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_u = { + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_set_key, + .encrypt = ecb_aes_encrypt, + .decrypt = ecb_aes_decrypt, + } + } +}; + +static int cbc_aes_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + int ts_state; + + padlock_reset_key(&ctx->cword.encrypt); + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + ts_state = irq_ts_save(); + while ((nbytes = walk.nbytes)) { + u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, + walk.dst.virt.addr, ctx->E, + walk.iv, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + memcpy(walk.iv, iv, AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + irq_ts_restore(ts_state); + + padlock_store_cword(&ctx->cword.decrypt); + + return err; +} + +static int cbc_aes_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + int ts_state; + + padlock_reset_key(&ctx->cword.encrypt); + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + ts_state = irq_ts_save(); + while ((nbytes = walk.nbytes)) { + padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, + ctx->D, walk.iv, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + irq_ts_restore(ts_state); + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static struct crypto_alg cbc_aes_alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-padlock", + .cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_alignmask = PADLOCK_ALIGNMENT - 1, + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_u = { + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, 
+ .setkey = aes_set_key, + .encrypt = cbc_aes_encrypt, + .decrypt = cbc_aes_decrypt, + } + } +}; + +static struct x86_cpu_id padlock_cpu_id[] = { + X86_FEATURE_MATCH(X86_FEATURE_XCRYPT), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id); + +static int __init padlock_init(void) +{ + int ret; + struct cpuinfo_x86 *c = &cpu_data(0); + + if (!x86_match_cpu(padlock_cpu_id)) + return -ENODEV; + + if (!cpu_has_xcrypt_enabled) { + printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); + return -ENODEV; + } + + if ((ret = crypto_register_alg(&aes_alg))) + goto aes_err; + + if ((ret = crypto_register_alg(&ecb_aes_alg))) + goto ecb_aes_err; + + if ((ret = crypto_register_alg(&cbc_aes_alg))) + goto cbc_aes_err; + + printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); + + if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) { + ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS; + cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS; + printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n"); + } + +out: + return ret; + +cbc_aes_err: + crypto_unregister_alg(&ecb_aes_alg); +ecb_aes_err: + crypto_unregister_alg(&aes_alg); +aes_err: + printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n"); + goto out; +} + +static void __exit padlock_fini(void) +{ + crypto_unregister_alg(&cbc_aes_alg); + crypto_unregister_alg(&ecb_aes_alg); + crypto_unregister_alg(&aes_alg); +} + +module_init(padlock_init); +module_exit(padlock_fini); + +MODULE_DESCRIPTION("VIA PadLock AES algorithm support"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michal Ludvig"); + +MODULE_ALIAS_CRYPTO("aes"); diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c new file mode 100644 index 000000000..95f7d27ce --- /dev/null +++ b/drivers/crypto/padlock-sha.c @@ -0,0 +1,599 @@ +/* + * Cryptographic API. + * + * Support for VIA PadLock hardware crypto engine. + * + * Copyright (c) 2006 Michal Ludvig <michal@logix.cz> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ + +#include <crypto/internal/hash.h> +#include <crypto/padlock.h> +#include <crypto/sha.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/scatterlist.h> +#include <asm/cpu_device_id.h> +#include <asm/i387.h> + +struct padlock_sha_desc { + struct shash_desc fallback; +}; + +struct padlock_sha_ctx { + struct crypto_shash *fallback; +}; + +static int padlock_sha_init(struct shash_desc *desc) +{ + struct padlock_sha_desc *dctx = shash_desc_ctx(desc); + struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); + + dctx->fallback.tfm = ctx->fallback; + dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; + return crypto_shash_init(&dctx->fallback); +} + +static int padlock_sha_update(struct shash_desc *desc, + const u8 *data, unsigned int length) +{ + struct padlock_sha_desc *dctx = shash_desc_ctx(desc); + + dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; + return crypto_shash_update(&dctx->fallback, data, length); +} + +static int padlock_sha_export(struct shash_desc *desc, void *out) +{ + struct padlock_sha_desc *dctx = shash_desc_ctx(desc); + + return crypto_shash_export(&dctx->fallback, out); +} + +static int padlock_sha_import(struct shash_desc *desc, const void *in) +{ + struct padlock_sha_desc *dctx = shash_desc_ctx(desc); + struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); + + dctx->fallback.tfm = ctx->fallback; + dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; + return crypto_shash_import(&dctx->fallback, in); +} + +static inline void padlock_output_block(uint32_t *src, + uint32_t *dst, size_t count) +{ + while (count--) + *dst++ = swab32(*src++); +} + +static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in, + unsigned int count, u8 *out) +{ + /* We can't store directly to *out as it may be unaligned. */ + /* BTW Don't reduce the buffer size below 128 Bytes! + * PadLock microcode needs it that big. */ + char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ + ((aligned(STACK_ALIGN))); + char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + struct padlock_sha_desc *dctx = shash_desc_ctx(desc); + struct sha1_state state; + unsigned int space; + unsigned int leftover; + int ts_state; + int err; + + dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; + err = crypto_shash_export(&dctx->fallback, &state); + if (err) + goto out; + + if (state.count + count > ULONG_MAX) + return crypto_shash_finup(&dctx->fallback, in, count, out); + + leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1; + space = SHA1_BLOCK_SIZE - leftover; + if (space) { + if (count > space) { + err = crypto_shash_update(&dctx->fallback, in, space) ?: + crypto_shash_export(&dctx->fallback, &state); + if (err) + goto out; + count -= space; + in += space; + } else { + memcpy(state.buffer + leftover, in, count); + in = state.buffer; + count += leftover; + state.count &= ~(SHA1_BLOCK_SIZE - 1); + } + } + + memcpy(result, &state.state, SHA1_DIGEST_SIZE); + + /* prevent taking the spurious DNA fault with padlock. 
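+ * xsha1 touches no FP/SSE state, but like the xcrypt instructions it
+ * raises a DNA (#NM) fault while CR0.TS is set, hence the
+ * irq_ts_save()/irq_ts_restore() bracket around the asm below.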
*/ + ts_state = irq_ts_save(); + asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ + : \ + : "c"((unsigned long)state.count + count), \ + "a"((unsigned long)state.count), \ + "S"(in), "D"(result)); + irq_ts_restore(ts_state); + + padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); + +out: + return err; +} + +static int padlock_sha1_final(struct shash_desc *desc, u8 *out) +{ + u8 buf[4]; + + return padlock_sha1_finup(desc, buf, 0, out); +} + +static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in, + unsigned int count, u8 *out) +{ + /* We can't store directly to *out as it may be unaligned. */ + /* BTW Don't reduce the buffer size below 128 Bytes! + * PadLock microcode needs it that big. */ + char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ + ((aligned(STACK_ALIGN))); + char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + struct padlock_sha_desc *dctx = shash_desc_ctx(desc); + struct sha256_state state; + unsigned int space; + unsigned int leftover; + int ts_state; + int err; + + dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; + err = crypto_shash_export(&dctx->fallback, &state); + if (err) + goto out; + + if (state.count + count > ULONG_MAX) + return crypto_shash_finup(&dctx->fallback, in, count, out); + + leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1; + space = SHA256_BLOCK_SIZE - leftover; + if (space) { + if (count > space) { + err = crypto_shash_update(&dctx->fallback, in, space) ?: + crypto_shash_export(&dctx->fallback, &state); + if (err) + goto out; + count -= space; + in += space; + } else { + memcpy(state.buf + leftover, in, count); + in = state.buf; + count += leftover; + state.count &= ~(SHA1_BLOCK_SIZE - 1); + } + } + + memcpy(result, &state.state, SHA256_DIGEST_SIZE); + + /* prevent taking the spurious DNA fault with padlock. */ + ts_state = irq_ts_save(); + asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ + : \ + : "c"((unsigned long)state.count + count), \ + "a"((unsigned long)state.count), \ + "S"(in), "D"(result)); + irq_ts_restore(ts_state); + + padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); + +out: + return err; +} + +static int padlock_sha256_final(struct shash_desc *desc, u8 *out) +{ + u8 buf[4]; + + return padlock_sha256_finup(desc, buf, 0, out); +} + +static int padlock_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_shash *hash = __crypto_shash_cast(tfm); + const char *fallback_driver_name = crypto_tfm_alg_name(tfm); + struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_shash *fallback_tfm; + int err = -ENOMEM; + + /* Allocate a fallback and abort if it failed. 
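+ * Passing CRYPTO_ALG_NEED_FALLBACK as the mask keeps crypto_alloc_shash()
+ * from selecting another NEED_FALLBACK implementation (i.e. this driver
+ * itself). descsize is then grown by the fallback's descsize so the
+ * fallback state fits behind struct padlock_sha_desc.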
*/ + fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback_tfm)) { + printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", + fallback_driver_name); + err = PTR_ERR(fallback_tfm); + goto out; + } + + ctx->fallback = fallback_tfm; + hash->descsize += crypto_shash_descsize(fallback_tfm); + return 0; + +out: + return err; +} + +static void padlock_cra_exit(struct crypto_tfm *tfm) +{ + struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_shash(ctx->fallback); +} + +static struct shash_alg sha1_alg = { + .digestsize = SHA1_DIGEST_SIZE, + .init = padlock_sha_init, + .update = padlock_sha_update, + .finup = padlock_sha1_finup, + .final = padlock_sha1_final, + .export = padlock_sha_export, + .import = padlock_sha_import, + .descsize = sizeof(struct padlock_sha_desc), + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-padlock", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SHASH | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct padlock_sha_ctx), + .cra_module = THIS_MODULE, + .cra_init = padlock_cra_init, + .cra_exit = padlock_cra_exit, + } +}; + +static struct shash_alg sha256_alg = { + .digestsize = SHA256_DIGEST_SIZE, + .init = padlock_sha_init, + .update = padlock_sha_update, + .finup = padlock_sha256_finup, + .final = padlock_sha256_final, + .export = padlock_sha_export, + .import = padlock_sha_import, + .descsize = sizeof(struct padlock_sha_desc), + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-padlock", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SHASH | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct padlock_sha_ctx), + .cra_module = THIS_MODULE, + .cra_init = padlock_cra_init, + .cra_exit = padlock_cra_exit, + } +}; + +/* Add two shash_alg instance for hardware-implemented * +* multiple-parts hash supported by VIA Nano Processor.*/ +static int padlock_sha1_init_nano(struct shash_desc *desc) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + + *sctx = (struct sha1_state){ + .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, + }; + + return 0; +} + +static int padlock_sha1_update_nano(struct shash_desc *desc, + const u8 *data, unsigned int len) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + unsigned int partial, done; + const u8 *src; + /*The PHE require the out buffer must 128 bytes and 16-bytes aligned*/ + u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ + ((aligned(STACK_ALIGN))); + u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + int ts_state; + + partial = sctx->count & 0x3f; + sctx->count += len; + done = 0; + src = data; + memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE); + + if ((partial + len) >= SHA1_BLOCK_SIZE) { + + /* Append the bytes in state's buffer to a block to handle */ + if (partial) { + done = -partial; + memcpy(sctx->buffer + partial, data, + done + SHA1_BLOCK_SIZE); + src = sctx->buffer; + ts_state = irq_ts_save(); + asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" + : "+S"(src), "+D"(dst) \ + : "a"((long)-1), "c"((unsigned long)1)); + irq_ts_restore(ts_state); + done += SHA1_BLOCK_SIZE; + src = data + done; + } + + /* Process the left bytes from the input data */ + if (len - done >= SHA1_BLOCK_SIZE) { + ts_state = irq_ts_save(); + asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" + : "+S"(src), "+D"(dst) + : 
"a"((long)-1), + "c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE))); + irq_ts_restore(ts_state); + done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE); + src = data + done; + } + partial = 0; + } + memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE); + memcpy(sctx->buffer + partial, src, len - done); + + return 0; +} + +static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out) +{ + struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc); + unsigned int partial, padlen; + __be64 bits; + static const u8 padding[64] = { 0x80, }; + + bits = cpu_to_be64(state->count << 3); + + /* Pad out to 56 mod 64 */ + partial = state->count & 0x3f; + padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial); + padlock_sha1_update_nano(desc, padding, padlen); + + /* Append length field bytes */ + padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits)); + + /* Swap to output */ + padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5); + + return 0; +} + +static int padlock_sha256_init_nano(struct shash_desc *desc) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + *sctx = (struct sha256_state){ + .state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, \ + SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7}, + }; + + return 0; +} + +static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + unsigned int partial, done; + const u8 *src; + /*The PHE require the out buffer must 128 bytes and 16-bytes aligned*/ + u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ + ((aligned(STACK_ALIGN))); + u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + int ts_state; + + partial = sctx->count & 0x3f; + sctx->count += len; + done = 0; + src = data; + memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE); + + if ((partial + len) >= SHA256_BLOCK_SIZE) { + + /* Append the bytes in state's buffer to a block to handle */ + if (partial) { + done = -partial; + memcpy(sctx->buf + partial, data, + done + SHA256_BLOCK_SIZE); + src = sctx->buf; + ts_state = irq_ts_save(); + asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" + : "+S"(src), "+D"(dst) + : "a"((long)-1), "c"((unsigned long)1)); + irq_ts_restore(ts_state); + done += SHA256_BLOCK_SIZE; + src = data + done; + } + + /* Process the left bytes from input data*/ + if (len - done >= SHA256_BLOCK_SIZE) { + ts_state = irq_ts_save(); + asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" + : "+S"(src), "+D"(dst) + : "a"((long)-1), + "c"((unsigned long)((len - done) / 64))); + irq_ts_restore(ts_state); + done += ((len - done) - (len - done) % 64); + src = data + done; + } + partial = 0; + } + memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE); + memcpy(sctx->buf + partial, src, len - done); + + return 0; +} + +static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out) +{ + struct sha256_state *state = + (struct sha256_state *)shash_desc_ctx(desc); + unsigned int partial, padlen; + __be64 bits; + static const u8 padding[64] = { 0x80, }; + + bits = cpu_to_be64(state->count << 3); + + /* Pad out to 56 mod 64 */ + partial = state->count & 0x3f; + padlen = (partial < 56) ? 
(56 - partial) : ((64+56) - partial); + padlock_sha256_update_nano(desc, padding, padlen); + + /* Append length field bytes */ + padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits)); + + /* Swap to output */ + padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8); + + return 0; +} + +static int padlock_sha_export_nano(struct shash_desc *desc, + void *out) +{ + int statesize = crypto_shash_statesize(desc->tfm); + void *sctx = shash_desc_ctx(desc); + + memcpy(out, sctx, statesize); + return 0; +} + +static int padlock_sha_import_nano(struct shash_desc *desc, + const void *in) +{ + int statesize = crypto_shash_statesize(desc->tfm); + void *sctx = shash_desc_ctx(desc); + + memcpy(sctx, in, statesize); + return 0; +} + +static struct shash_alg sha1_alg_nano = { + .digestsize = SHA1_DIGEST_SIZE, + .init = padlock_sha1_init_nano, + .update = padlock_sha1_update_nano, + .final = padlock_sha1_final_nano, + .export = padlock_sha_export_nano, + .import = padlock_sha_import_nano, + .descsize = sizeof(struct sha1_state), + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-padlock-nano", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static struct shash_alg sha256_alg_nano = { + .digestsize = SHA256_DIGEST_SIZE, + .init = padlock_sha256_init_nano, + .update = padlock_sha256_update_nano, + .final = padlock_sha256_final_nano, + .export = padlock_sha_export_nano, + .import = padlock_sha_import_nano, + .descsize = sizeof(struct sha256_state), + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-padlock-nano", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static struct x86_cpu_id padlock_sha_ids[] = { + X86_FEATURE_MATCH(X86_FEATURE_PHE), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids); + +static int __init padlock_init(void) +{ + int rc = -ENODEV; + struct cpuinfo_x86 *c = &cpu_data(0); + struct shash_alg *sha1; + struct shash_alg *sha256; + + if (!x86_match_cpu(padlock_sha_ids) || !cpu_has_phe_enabled) + return -ENODEV; + + /* Register the newly added algorithm module if on * + * VIA Nano processor, or else just do as before */ + if (c->x86_model < 0x0f) { + sha1 = &sha1_alg; + sha256 = &sha256_alg; + } else { + sha1 = &sha1_alg_nano; + sha256 = &sha256_alg_nano; + } + + rc = crypto_register_shash(sha1); + if (rc) + goto out; + + rc = crypto_register_shash(sha256); + if (rc) + goto out_unreg1; + + printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n"); + + return 0; + +out_unreg1: + crypto_unregister_shash(sha1); + +out: + printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n"); + return rc; +} + +static void __exit padlock_fini(void) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + + if (c->x86_model >= 0x0f) { + crypto_unregister_shash(&sha1_alg_nano); + crypto_unregister_shash(&sha256_alg_nano); + } else { + crypto_unregister_shash(&sha1_alg); + crypto_unregister_shash(&sha256_alg); + } +} + +module_init(padlock_init); +module_exit(padlock_fini); + +MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support."); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michal Ludvig"); + +MODULE_ALIAS_CRYPTO("sha1-all"); +MODULE_ALIAS_CRYPTO("sha256-all"); +MODULE_ALIAS_CRYPTO("sha1-padlock"); 
+MODULE_ALIAS_CRYPTO("sha256-padlock"); diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c new file mode 100644 index 000000000..5da5b98b8 --- /dev/null +++ b/drivers/crypto/picoxcell_crypto.c @@ -0,0 +1,1861 @@ +/* + * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include <crypto/aead.h> +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <crypto/authenc.h> +#include <crypto/des.h> +#include <crypto/md5.h> +#include <crypto/sha.h> +#include <crypto/internal/skcipher.h> +#include <linux/clk.h> +#include <linux/crypto.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/rtnetlink.h> +#include <linux/scatterlist.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/timer.h> + +#include "picoxcell_crypto_regs.h" + +/* + * The threshold for the number of entries in the CMD FIFO available before + * the CMD0_CNT interrupt is raised. Increasing this value will reduce the + * number of interrupts raised to the CPU. + */ +#define CMD0_IRQ_THRESHOLD 1 + +/* + * The timeout period (in jiffies) for a PDU. When the the number of PDUs in + * flight is greater than the STAT_IRQ_THRESHOLD or 0 the timer is disabled. + * When there are packets in flight but lower than the threshold, we enable + * the timer and at expiry, attempt to remove any processed packets from the + * queue and if there are still packets left, schedule the timer again. + */ +#define PACKET_TIMEOUT 1 + +/* The priority to register each algorithm with. */ +#define SPACC_CRYPTO_ALG_PRIORITY 10000 + +#define SPACC_CRYPTO_KASUMI_F8_KEY_LEN 16 +#define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64 +#define SPACC_CRYPTO_IPSEC_HASH_PG_SZ 64 +#define SPACC_CRYPTO_IPSEC_MAX_CTXS 32 +#define SPACC_CRYPTO_IPSEC_FIFO_SZ 32 +#define SPACC_CRYPTO_L2_CIPHER_PG_SZ 64 +#define SPACC_CRYPTO_L2_HASH_PG_SZ 64 +#define SPACC_CRYPTO_L2_MAX_CTXS 128 +#define SPACC_CRYPTO_L2_FIFO_SZ 128 + +#define MAX_DDT_LEN 16 + +/* DDT format. This must match the hardware DDT format exactly. */ +struct spacc_ddt { + dma_addr_t p; + u32 len; +}; + +/* + * Asynchronous crypto request structure. + * + * This structure defines a request that is either queued for processing or + * being processed. + */ +struct spacc_req { + struct list_head list; + struct spacc_engine *engine; + struct crypto_async_request *req; + int result; + bool is_encrypt; + unsigned ctx_id; + dma_addr_t src_addr, dst_addr; + struct spacc_ddt *src_ddt, *dst_ddt; + void (*complete)(struct spacc_req *req); + + /* AEAD specific bits. 
*/ + u8 *giv; + size_t giv_len; + dma_addr_t giv_pa; +}; + +struct spacc_engine { + void __iomem *regs; + struct list_head pending; + int next_ctx; + spinlock_t hw_lock; + int in_flight; + struct list_head completed; + struct list_head in_progress; + struct tasklet_struct complete; + unsigned long fifo_sz; + void __iomem *cipher_ctx_base; + void __iomem *hash_key_base; + struct spacc_alg *algs; + unsigned num_algs; + struct list_head registered_algs; + size_t cipher_pg_sz; + size_t hash_pg_sz; + const char *name; + struct clk *clk; + struct device *dev; + unsigned max_ctxs; + struct timer_list packet_timeout; + unsigned stat_irq_thresh; + struct dma_pool *req_pool; +}; + +/* Algorithm type mask. */ +#define SPACC_CRYPTO_ALG_MASK 0x7 + +/* SPACC definition of a crypto algorithm. */ +struct spacc_alg { + unsigned long ctrl_default; + unsigned long type; + struct crypto_alg alg; + struct spacc_engine *engine; + struct list_head entry; + int key_offs; + int iv_offs; +}; + +/* Generic context structure for any algorithm type. */ +struct spacc_generic_ctx { + struct spacc_engine *engine; + int flags; + int key_offs; + int iv_offs; +}; + +/* Block cipher context. */ +struct spacc_ablk_ctx { + struct spacc_generic_ctx generic; + u8 key[AES_MAX_KEY_SIZE]; + u8 key_len; + /* + * The fallback cipher. If the operation can't be done in hardware, + * fallback to a software version. + */ + struct crypto_ablkcipher *sw_cipher; +}; + +/* AEAD cipher context. */ +struct spacc_aead_ctx { + struct spacc_generic_ctx generic; + u8 cipher_key[AES_MAX_KEY_SIZE]; + u8 hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ]; + u8 cipher_key_len; + u8 hash_key_len; + struct crypto_aead *sw_cipher; + size_t auth_size; + u8 salt[AES_BLOCK_SIZE]; +}; + +static int spacc_ablk_submit(struct spacc_req *req); + +static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg) +{ + return alg ? container_of(alg, struct spacc_alg, alg) : NULL; +} + +static inline int spacc_fifo_cmd_full(struct spacc_engine *engine) +{ + u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET); + + return fifo_stat & SPA_FIFO_CMD_FULL; +} + +/* + * Given a cipher context, and a context number, get the base address of the + * context page. + * + * Returns the address of the context page where the key/context may + * be written. + */ +static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx, + unsigned indx, + bool is_cipher_ctx) +{ + return is_cipher_ctx ? ctx->engine->cipher_ctx_base + + (indx * ctx->engine->cipher_pg_sz) : + ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz); +} + +/* The context pages can only be written with 32-bit accesses. */ +static inline void memcpy_toio32(u32 __iomem *dst, const void *src, + unsigned count) +{ + const u32 *src32 = (const u32 *) src; + + while (count--) + writel(*src32++, dst++); +} + +static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx, + void __iomem *page_addr, const u8 *key, + size_t key_len, const u8 *iv, size_t iv_len) +{ + void __iomem *key_ptr = page_addr + ctx->key_offs; + void __iomem *iv_ptr = page_addr + ctx->iv_offs; + + memcpy_toio32(key_ptr, key, key_len / 4); + memcpy_toio32(iv_ptr, iv, iv_len / 4); +} + +/* + * Load a context into the engines context memory. + * + * Returns the index of the context page where the context was loaded. 
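+ *
+ * Context pages are handed out round-robin; next_ctx simply wraps
+ * modulo the command FIFO size (which is a power of two).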
+ */ +static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx, + const u8 *ciph_key, size_t ciph_len, + const u8 *iv, size_t ivlen, const u8 *hash_key, + size_t hash_len) +{ + unsigned indx = ctx->engine->next_ctx++; + void __iomem *ciph_page_addr, *hash_page_addr; + + ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1); + hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0); + + ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1; + spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv, + ivlen); + writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) | + (1 << SPA_KEY_SZ_CIPHER_OFFSET), + ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET); + + if (hash_key) { + memcpy_toio32(hash_page_addr, hash_key, hash_len / 4); + writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET), + ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET); + } + + return indx; +} + +/* Count the number of scatterlist entries in a scatterlist. */ +static int sg_count(struct scatterlist *sg_list, int nbytes) +{ + struct scatterlist *sg = sg_list; + int sg_nents = 0; + + while (nbytes > 0) { + ++sg_nents; + nbytes -= sg->length; + sg = sg_next(sg); + } + + return sg_nents; +} + +static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len) +{ + ddt->p = phys; + ddt->len = len; +} + +/* + * Take a crypto request and scatterlists for the data and turn them into DDTs + * for passing to the crypto engines. This also DMA maps the data so that the + * crypto engines can DMA to/from them. + */ +static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine, + struct scatterlist *payload, + unsigned nbytes, + enum dma_data_direction dir, + dma_addr_t *ddt_phys) +{ + unsigned nents, mapped_ents; + struct scatterlist *cur; + struct spacc_ddt *ddt; + int i; + + nents = sg_count(payload, nbytes); + mapped_ents = dma_map_sg(engine->dev, payload, nents, dir); + + if (mapped_ents + 1 > MAX_DDT_LEN) + goto out; + + ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys); + if (!ddt) + goto out; + + for_each_sg(payload, cur, mapped_ents, i) + ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur)); + ddt_set(&ddt[mapped_ents], 0, 0); + + return ddt; + +out: + dma_unmap_sg(engine->dev, payload, nents, dir); + return NULL; +} + +static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv) +{ + struct aead_request *areq = container_of(req->req, struct aead_request, + base); + struct spacc_engine *engine = req->engine; + struct spacc_ddt *src_ddt, *dst_ddt; + unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq)); + unsigned nents = sg_count(areq->src, areq->cryptlen); + dma_addr_t iv_addr; + struct scatterlist *cur; + int i, dst_ents, src_ents, assoc_ents; + u8 *iv = giv ? giv : areq->iv; + + src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr); + if (!src_ddt) + return -ENOMEM; + + dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr); + if (!dst_ddt) { + dma_pool_free(engine->req_pool, src_ddt, req->src_addr); + return -ENOMEM; + } + + req->src_ddt = src_ddt; + req->dst_ddt = dst_ddt; + + assoc_ents = dma_map_sg(engine->dev, areq->assoc, + sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE); + if (areq->src != areq->dst) { + src_ents = dma_map_sg(engine->dev, areq->src, nents, + DMA_TO_DEVICE); + dst_ents = dma_map_sg(engine->dev, areq->dst, nents, + DMA_FROM_DEVICE); + } else { + src_ents = dma_map_sg(engine->dev, areq->src, nents, + DMA_BIDIRECTIONAL); + dst_ents = 0; + } + + /* + * Map the IV/GIV. 
For the GIV it needs to be bidirectional as it is + * formed by the crypto block and sent as the ESP IV for IPSEC. + */ + iv_addr = dma_map_single(engine->dev, iv, ivsize, + giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + req->giv_pa = iv_addr; + + /* + * Map the associated data. For decryption we don't copy the + * associated data. + */ + for_each_sg(areq->assoc, cur, assoc_ents, i) { + ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur)); + if (req->is_encrypt) + ddt_set(dst_ddt++, sg_dma_address(cur), + sg_dma_len(cur)); + } + ddt_set(src_ddt++, iv_addr, ivsize); + + if (giv || req->is_encrypt) + ddt_set(dst_ddt++, iv_addr, ivsize); + + /* + * Now map in the payload for the source and destination and terminate + * with the NULL pointers. + */ + for_each_sg(areq->src, cur, src_ents, i) { + ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur)); + if (areq->src == areq->dst) + ddt_set(dst_ddt++, sg_dma_address(cur), + sg_dma_len(cur)); + } + + for_each_sg(areq->dst, cur, dst_ents, i) + ddt_set(dst_ddt++, sg_dma_address(cur), + sg_dma_len(cur)); + + ddt_set(src_ddt, 0, 0); + ddt_set(dst_ddt, 0, 0); + + return 0; +} + +static void spacc_aead_free_ddts(struct spacc_req *req) +{ + struct aead_request *areq = container_of(req->req, struct aead_request, + base); + struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg); + struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm); + struct spacc_engine *engine = aead_ctx->generic.engine; + unsigned ivsize = alg->alg.cra_aead.ivsize; + unsigned nents = sg_count(areq->src, areq->cryptlen); + + if (areq->src != areq->dst) { + dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE); + dma_unmap_sg(engine->dev, areq->dst, + sg_count(areq->dst, areq->cryptlen), + DMA_FROM_DEVICE); + } else + dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL); + + dma_unmap_sg(engine->dev, areq->assoc, + sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE); + + dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL); + + dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr); + dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr); +} + +static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt, + dma_addr_t ddt_addr, struct scatterlist *payload, + unsigned nbytes, enum dma_data_direction dir) +{ + unsigned nents = sg_count(payload, nbytes); + + dma_unmap_sg(req->engine->dev, payload, nents, dir); + dma_pool_free(req->engine->req_pool, ddt, ddt_addr); +} + +/* + * Set key for a DES operation in an AEAD cipher. This also performs weak key + * checking if required. + */ +static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(aead); + struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); + u32 tmp[DES_EXPKEY_WORDS]; + + if (unlikely(!des_ekey(tmp, key)) && + (crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) { + tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; + return -EINVAL; + } + + memcpy(ctx->cipher_key, key, len); + ctx->cipher_key_len = len; + + return 0; +} + +/* Set the key for the AES block cipher component of the AEAD transform. */ +static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(aead); + struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); + + /* + * IPSec engine only supports 128 and 256 bit AES keys. If we get a + * request for any other size (192 bits) then we need to do a software + * fallback. 
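+ * For those sizes the key is handed straight to the fallback tfm and the
+ * copy into the hardware context below is skipped.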
+ */ + if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) { + /* + * Set the fallback transform to use the same request flags as + * the hardware transform. + */ + ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; + ctx->sw_cipher->base.crt_flags |= + tfm->crt_flags & CRYPTO_TFM_REQ_MASK; + return crypto_aead_setkey(ctx->sw_cipher, key, len); + } + + memcpy(ctx->cipher_key, key, len); + ctx->cipher_key_len = len; + + return 0; +} + +static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen) +{ + struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); + struct crypto_authenc_keys keys; + int err = -EINVAL; + + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) + goto badkey; + + if (keys.enckeylen > AES_MAX_KEY_SIZE) + goto badkey; + + if (keys.authkeylen > sizeof(ctx->hash_ctx)) + goto badkey; + + if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == + SPA_CTRL_CIPH_ALG_AES) + err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen); + else + err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen); + + if (err) + goto badkey; + + memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen); + ctx->hash_key_len = keys.authkeylen; + + return 0; + +badkey: + crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; +} + +static int spacc_aead_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm)); + + ctx->auth_size = authsize; + + return 0; +} + +/* + * Check if an AEAD request requires a fallback operation. Some requests can't + * be completed in hardware because the hardware may not support certain key + * sizes. In these cases we need to complete the request in software. + */ +static int spacc_aead_need_fallback(struct spacc_req *req) +{ + struct aead_request *aead_req; + struct crypto_tfm *tfm = req->req->tfm; + struct crypto_alg *alg = req->req->tfm->__crt_alg; + struct spacc_alg *spacc_alg = to_spacc_alg(alg); + struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); + + aead_req = container_of(req->req, struct aead_request, base); + /* + * If we have a non-supported key-length, then we need to do a + * software fallback. + */ + if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == + SPA_CTRL_CIPH_ALG_AES && + ctx->cipher_key_len != AES_KEYSIZE_128 && + ctx->cipher_key_len != AES_KEYSIZE_256) + return 1; + + return 0; +} + +static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type, + bool is_encrypt) +{ + struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req)); + struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm); + int err; + + if (ctx->sw_cipher) { + /* + * Change the request to use the software fallback transform, + * and once the ciphering has completed, put the old transform + * back into the request. + */ + aead_request_set_tfm(req, ctx->sw_cipher); + err = is_encrypt ? 
crypto_aead_encrypt(req) : + crypto_aead_decrypt(req); + aead_request_set_tfm(req, __crypto_aead_cast(old_tfm)); + } else + err = -EINVAL; + + return err; +} + +static void spacc_aead_complete(struct spacc_req *req) +{ + spacc_aead_free_ddts(req); + req->req->complete(req->req, req->result); +} + +static int spacc_aead_submit(struct spacc_req *req) +{ + struct crypto_tfm *tfm = req->req->tfm; + struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_alg *alg = req->req->tfm->__crt_alg; + struct spacc_alg *spacc_alg = to_spacc_alg(alg); + struct spacc_engine *engine = ctx->generic.engine; + u32 ctrl, proc_len, assoc_len; + struct aead_request *aead_req = + container_of(req->req, struct aead_request, base); + + req->result = -EINPROGRESS; + req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key, + ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize, + ctx->hash_ctx, ctx->hash_key_len); + + /* Set the source and destination DDT pointers. */ + writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET); + writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET); + writel(0, engine->regs + SPA_OFFSET_REG_OFFSET); + + assoc_len = aead_req->assoclen; + proc_len = aead_req->cryptlen + assoc_len; + + /* + * If we aren't generating an IV, then we need to include the IV in the + * associated data so that it is included in the hash. + */ + if (!req->giv) { + assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req)); + proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req)); + } else + proc_len += req->giv_len; + + /* + * If we are decrypting, we need to take the length of the ICV out of + * the processing length. + */ + if (!req->is_encrypt) + proc_len -= ctx->auth_size; + + writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET); + writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET); + writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET); + writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET); + writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET); + + ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) | + (1 << SPA_CTRL_ICV_APPEND); + if (req->is_encrypt) + ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY); + else + ctrl |= (1 << SPA_CTRL_KEY_EXP); + + mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT); + + writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET); + + return -EINPROGRESS; +} + +static int spacc_req_submit(struct spacc_req *req); + +static void spacc_push(struct spacc_engine *engine) +{ + struct spacc_req *req; + + while (!list_empty(&engine->pending) && + engine->in_flight + 1 <= engine->fifo_sz) { + + ++engine->in_flight; + req = list_first_entry(&engine->pending, struct spacc_req, + list); + list_move_tail(&req->list, &engine->in_progress); + + req->result = spacc_req_submit(req); + } +} + +/* + * Setup an AEAD request for processing. This will configure the engine, load + * the context and then start the packet processing. + * + * @giv Pointer to destination address for a generated IV. If the + * request does not need to generate an IV then this should be set to NULL. 
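A minimal sketch of the assoc_len/proc_len arithmetic performed by spacc_aead_submit() above; the helper below is hypothetical and not part of the driver, and it relies on giv_len being equal to ivsize, which is how spacc_aead_setup() initialises it.

/* Hypothetical helper mirroring the length bookkeeping above. */
static u32 spacc_aead_proc_len(u32 cryptlen, u32 assoclen, u32 ivsize,
			       u32 auth_size, bool have_giv, bool is_encrypt,
			       u32 *assoc_len)
{
	u32 proc_len = cryptlen + assoclen + ivsize;

	/* Without a generated IV, the IV is hashed as associated data. */
	*assoc_len = have_giv ? assoclen : assoclen + ivsize;

	/* On decrypt the trailing ICV is not part of the processed data. */
	if (!is_encrypt)
		proc_len -= auth_size;

	return proc_len;
}

For example, a decrypt with cryptlen = 64, assoclen = 8, ivsize = 16 and auth_size = 12 gives assoc_len = 24 and proc_len = 76.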
+ */ +static int spacc_aead_setup(struct aead_request *req, u8 *giv, + unsigned alg_type, bool is_encrypt) +{ + struct crypto_alg *alg = req->base.tfm->__crt_alg; + struct spacc_engine *engine = to_spacc_alg(alg)->engine; + struct spacc_req *dev_req = aead_request_ctx(req); + int err = -EINPROGRESS; + unsigned long flags; + unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req)); + + dev_req->giv = giv; + dev_req->giv_len = ivsize; + dev_req->req = &req->base; + dev_req->is_encrypt = is_encrypt; + dev_req->result = -EBUSY; + dev_req->engine = engine; + dev_req->complete = spacc_aead_complete; + + if (unlikely(spacc_aead_need_fallback(dev_req))) + return spacc_aead_do_fallback(req, alg_type, is_encrypt); + + spacc_aead_make_ddts(dev_req, dev_req->giv); + + err = -EINPROGRESS; + spin_lock_irqsave(&engine->hw_lock, flags); + if (unlikely(spacc_fifo_cmd_full(engine)) || + engine->in_flight + 1 > engine->fifo_sz) { + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { + err = -EBUSY; + spin_unlock_irqrestore(&engine->hw_lock, flags); + goto out_free_ddts; + } + list_add_tail(&dev_req->list, &engine->pending); + } else { + list_add_tail(&dev_req->list, &engine->pending); + spacc_push(engine); + } + spin_unlock_irqrestore(&engine->hw_lock, flags); + + goto out; + +out_free_ddts: + spacc_aead_free_ddts(dev_req); +out: + return err; +} + +static int spacc_aead_encrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_tfm *tfm = crypto_aead_tfm(aead); + struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg); + + return spacc_aead_setup(req, NULL, alg->type, 1); +} + +static int spacc_aead_givencrypt(struct aead_givcrypt_request *req) +{ + struct crypto_aead *tfm = aead_givcrypt_reqtfm(req); + struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); + size_t ivsize = crypto_aead_ivsize(tfm); + struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); + unsigned len; + __be64 seq; + + memcpy(req->areq.iv, ctx->salt, ivsize); + len = ivsize; + if (ivsize > sizeof(u64)) { + memset(req->giv, 0, ivsize - sizeof(u64)); + len = sizeof(u64); + } + seq = cpu_to_be64(req->seq); + memcpy(req->giv + ivsize - len, &seq, len); + + return spacc_aead_setup(&req->areq, req->giv, alg->type, 1); +} + +static int spacc_aead_decrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_tfm *tfm = crypto_aead_tfm(aead); + struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg); + + return spacc_aead_setup(req, NULL, alg->type, 0); +} + +/* + * Initialise a new AEAD context. This is responsible for allocating the + * fallback cipher and initialising the context. + */ +static int spacc_aead_cra_init(struct crypto_tfm *tfm) +{ + struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_alg *alg = tfm->__crt_alg; + struct spacc_alg *spacc_alg = to_spacc_alg(alg); + struct spacc_engine *engine = spacc_alg->engine; + + ctx->generic.flags = spacc_alg->type; + ctx->generic.engine = engine; + ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->sw_cipher)) { + dev_warn(engine->dev, "failed to allocate fallback for %s\n", + alg->cra_name); + ctx->sw_cipher = NULL; + } + ctx->generic.key_offs = spacc_alg->key_offs; + ctx->generic.iv_offs = spacc_alg->iv_offs; + + get_random_bytes(ctx->salt, sizeof(ctx->salt)); + + tfm->crt_aead.reqsize = sizeof(struct spacc_req); + + return 0; +} + +/* + * Destructor for an AEAD context. 
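A worked example of the IV material produced by spacc_aead_givencrypt() above, assuming a 16-byte IV and req->seq = 5; the salt bytes themselves are whatever get_random_bytes() filled in at cra_init time.

/*
 * areq.iv = salt[0..15]                        (per-tfm random salt)
 * giv     = 8 zero bytes || cpu_to_be64(5)
 *         = 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 05
 */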
This is called when the transform is freed + * and must free the fallback cipher. + */ +static void spacc_aead_cra_exit(struct crypto_tfm *tfm) +{ + struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->sw_cipher) + crypto_free_aead(ctx->sw_cipher); + ctx->sw_cipher = NULL; +} + +/* + * Set the DES key for a block cipher transform. This also performs weak key + * checking if the transform has requested it. + */ +static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); + u32 tmp[DES_EXPKEY_WORDS]; + + if (len > DES3_EDE_KEY_SIZE) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + if (unlikely(!des_ekey(tmp, key)) && + (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) { + tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; + return -EINVAL; + } + + memcpy(ctx->key, key, len); + ctx->key_len = len; + + return 0; +} + +/* + * Set the key for an AES block cipher. Some key lengths are not supported in + * hardware so this must also check whether a fallback is needed. + */ +static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); + int err = 0; + + if (len > AES_MAX_KEY_SIZE) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + /* + * IPSec engine only supports 128 and 256 bit AES keys. If we get a + * request for any other size (192 bits) then we need to do a software + * fallback. + */ + if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 && + ctx->sw_cipher) { + /* + * Set the fallback transform to use the same request flags as + * the hardware transform. 
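The key-size rule behind the fallback handling here and in the AEAD setkey path can be written as a one-line predicate; the helper below is hypothetical and not part of the driver.

static inline bool spacc_aes_key_len_ok(unsigned int len)
{
	/* The engine handles 128- and 256-bit AES keys; 192-bit keys are
	 * valid AES keys but must go to the CRYPTO_ALG_NEED_FALLBACK
	 * software tfm instead. */
	return len == AES_KEYSIZE_128 || len == AES_KEYSIZE_256;
}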
+ */ + ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; + ctx->sw_cipher->base.crt_flags |= + cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK; + + err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len); + if (err) + goto sw_setkey_failed; + } else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 && + !ctx->sw_cipher) + err = -EINVAL; + + memcpy(ctx->key, key, len); + ctx->key_len = len; + +sw_setkey_failed: + if (err && ctx->sw_cipher) { + tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; + tfm->crt_flags |= + ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK; + } + + return err; +} + +static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int len) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); + int err = 0; + + if (len > AES_MAX_KEY_SIZE) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + err = -EINVAL; + goto out; + } + + memcpy(ctx->key, key, len); + ctx->key_len = len; + +out: + return err; +} + +static int spacc_ablk_need_fallback(struct spacc_req *req) +{ + struct spacc_ablk_ctx *ctx; + struct crypto_tfm *tfm = req->req->tfm; + struct crypto_alg *alg = req->req->tfm->__crt_alg; + struct spacc_alg *spacc_alg = to_spacc_alg(alg); + + ctx = crypto_tfm_ctx(tfm); + + return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == + SPA_CTRL_CIPH_ALG_AES && + ctx->key_len != AES_KEYSIZE_128 && + ctx->key_len != AES_KEYSIZE_256; +} + +static void spacc_ablk_complete(struct spacc_req *req) +{ + struct ablkcipher_request *ablk_req = + container_of(req->req, struct ablkcipher_request, base); + + if (ablk_req->src != ablk_req->dst) { + spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src, + ablk_req->nbytes, DMA_TO_DEVICE); + spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst, + ablk_req->nbytes, DMA_FROM_DEVICE); + } else + spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst, + ablk_req->nbytes, DMA_BIDIRECTIONAL); + + req->req->complete(req->req, req->result); +} + +static int spacc_ablk_submit(struct spacc_req *req) +{ + struct crypto_tfm *tfm = req->req->tfm; + struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); + struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req); + struct crypto_alg *alg = req->req->tfm->__crt_alg; + struct spacc_alg *spacc_alg = to_spacc_alg(alg); + struct spacc_engine *engine = ctx->generic.engine; + u32 ctrl; + + req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key, + ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize, + NULL, 0); + + writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET); + writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET); + writel(0, engine->regs + SPA_OFFSET_REG_OFFSET); + + writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET); + writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET); + writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET); + writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET); + + ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) | + (req->is_encrypt ? 
(1 << SPA_CTRL_ENCRYPT_IDX) : + (1 << SPA_CTRL_KEY_EXP)); + + mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT); + + writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET); + + return -EINPROGRESS; +} + +static int spacc_ablk_do_fallback(struct ablkcipher_request *req, + unsigned alg_type, bool is_encrypt) +{ + struct crypto_tfm *old_tfm = + crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); + struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm); + int err; + + if (!ctx->sw_cipher) + return -EINVAL; + + /* + * Change the request to use the software fallback transform, and once + * the ciphering has completed, put the old transform back into the + * request. + */ + ablkcipher_request_set_tfm(req, ctx->sw_cipher); + err = is_encrypt ? crypto_ablkcipher_encrypt(req) : + crypto_ablkcipher_decrypt(req); + ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm)); + + return err; +} + +static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type, + bool is_encrypt) +{ + struct crypto_alg *alg = req->base.tfm->__crt_alg; + struct spacc_engine *engine = to_spacc_alg(alg)->engine; + struct spacc_req *dev_req = ablkcipher_request_ctx(req); + unsigned long flags; + int err = -ENOMEM; + + dev_req->req = &req->base; + dev_req->is_encrypt = is_encrypt; + dev_req->engine = engine; + dev_req->complete = spacc_ablk_complete; + dev_req->result = -EINPROGRESS; + + if (unlikely(spacc_ablk_need_fallback(dev_req))) + return spacc_ablk_do_fallback(req, alg_type, is_encrypt); + + /* + * Create the DDT's for the engine. If we share the same source and + * destination then we can optimize by reusing the DDT's. + */ + if (req->src != req->dst) { + dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src, + req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr); + if (!dev_req->src_ddt) + goto out; + + dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst, + req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr); + if (!dev_req->dst_ddt) + goto out_free_src; + } else { + dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst, + req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr); + if (!dev_req->dst_ddt) + goto out; + + dev_req->src_ddt = NULL; + dev_req->src_addr = dev_req->dst_addr; + } + + err = -EINPROGRESS; + spin_lock_irqsave(&engine->hw_lock, flags); + /* + * Check if the engine will accept the operation now. If it won't then + * we either stick it on the end of a pending list if we can backlog, + * or bailout with an error if not. + */ + if (unlikely(spacc_fifo_cmd_full(engine)) || + engine->in_flight + 1 > engine->fifo_sz) { + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { + err = -EBUSY; + spin_unlock_irqrestore(&engine->hw_lock, flags); + goto out_free_ddts; + } + list_add_tail(&dev_req->list, &engine->pending); + } else { + list_add_tail(&dev_req->list, &engine->pending); + spacc_push(engine); + } + spin_unlock_irqrestore(&engine->hw_lock, flags); + + goto out; + +out_free_ddts: + spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst, + req->nbytes, req->src == req->dst ? 
+ DMA_BIDIRECTIONAL : DMA_FROM_DEVICE); +out_free_src: + if (req->src != req->dst) + spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr, + req->src, req->nbytes, DMA_TO_DEVICE); +out: + return err; +} + +static int spacc_ablk_cra_init(struct crypto_tfm *tfm) +{ + struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_alg *alg = tfm->__crt_alg; + struct spacc_alg *spacc_alg = to_spacc_alg(alg); + struct spacc_engine *engine = spacc_alg->engine; + + ctx->generic.flags = spacc_alg->type; + ctx->generic.engine = engine; + if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) { + ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->sw_cipher)) { + dev_warn(engine->dev, "failed to allocate fallback for %s\n", + alg->cra_name); + ctx->sw_cipher = NULL; + } + } + ctx->generic.key_offs = spacc_alg->key_offs; + ctx->generic.iv_offs = spacc_alg->iv_offs; + + tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req); + + return 0; +} + +static void spacc_ablk_cra_exit(struct crypto_tfm *tfm) +{ + struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->sw_cipher) + crypto_free_ablkcipher(ctx->sw_cipher); + ctx->sw_cipher = NULL; +} + +static int spacc_ablk_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req); + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg); + + return spacc_ablk_setup(req, alg->type, 1); +} + +static int spacc_ablk_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req); + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg); + + return spacc_ablk_setup(req, alg->type, 0); +} + +static inline int spacc_fifo_stat_empty(struct spacc_engine *engine) +{ + return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) & + SPA_FIFO_STAT_EMPTY; +} + +static void spacc_process_done(struct spacc_engine *engine) +{ + struct spacc_req *req; + unsigned long flags; + + spin_lock_irqsave(&engine->hw_lock, flags); + + while (!spacc_fifo_stat_empty(engine)) { + req = list_first_entry(&engine->in_progress, struct spacc_req, + list); + list_move_tail(&req->list, &engine->completed); + --engine->in_flight; + + /* POP the status register. */ + writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET); + req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) & + SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET; + + /* + * Convert the SPAcc error status into the standard POSIX error + * codes. 
+ */ + if (unlikely(req->result)) { + switch (req->result) { + case SPA_STATUS_ICV_FAIL: + req->result = -EBADMSG; + break; + + case SPA_STATUS_MEMORY_ERROR: + dev_warn(engine->dev, + "memory error triggered\n"); + req->result = -EFAULT; + break; + + case SPA_STATUS_BLOCK_ERROR: + dev_warn(engine->dev, + "block error triggered\n"); + req->result = -EIO; + break; + } + } + } + + tasklet_schedule(&engine->complete); + + spin_unlock_irqrestore(&engine->hw_lock, flags); +} + +static irqreturn_t spacc_spacc_irq(int irq, void *dev) +{ + struct spacc_engine *engine = (struct spacc_engine *)dev; + u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET); + + writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET); + spacc_process_done(engine); + + return IRQ_HANDLED; +} + +static void spacc_packet_timeout(unsigned long data) +{ + struct spacc_engine *engine = (struct spacc_engine *)data; + + spacc_process_done(engine); +} + +static int spacc_req_submit(struct spacc_req *req) +{ + struct crypto_alg *alg = req->req->tfm->__crt_alg; + + if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags)) + return spacc_aead_submit(req); + else + return spacc_ablk_submit(req); +} + +static void spacc_spacc_complete(unsigned long data) +{ + struct spacc_engine *engine = (struct spacc_engine *)data; + struct spacc_req *req, *tmp; + unsigned long flags; + LIST_HEAD(completed); + + spin_lock_irqsave(&engine->hw_lock, flags); + + list_splice_init(&engine->completed, &completed); + spacc_push(engine); + if (engine->in_flight) + mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT); + + spin_unlock_irqrestore(&engine->hw_lock, flags); + + list_for_each_entry_safe(req, tmp, &completed, list) { + list_del(&req->list); + req->complete(req); + } +} + +#ifdef CONFIG_PM +static int spacc_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct spacc_engine *engine = platform_get_drvdata(pdev); + + /* + * We only support standby mode. All we have to do is gate the clock to + * the spacc. The hardware will preserve state until we turn it back + * on again. + */ + clk_disable(engine->clk); + + return 0; +} + +static int spacc_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct spacc_engine *engine = platform_get_drvdata(pdev); + + return clk_enable(engine->clk); +} + +static const struct dev_pm_ops spacc_pm_ops = { + .suspend = spacc_suspend, + .resume = spacc_resume, +}; +#endif /* CONFIG_PM */ + +static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev) +{ + return dev ? 
platform_get_drvdata(to_platform_device(dev)) : NULL; +} + +static ssize_t spacc_stat_irq_thresh_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct spacc_engine *engine = spacc_dev_to_engine(dev); + + return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh); +} + +static ssize_t spacc_stat_irq_thresh_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct spacc_engine *engine = spacc_dev_to_engine(dev); + unsigned long thresh; + + if (kstrtoul(buf, 0, &thresh)) + return -EINVAL; + + thresh = clamp(thresh, 1UL, engine->fifo_sz - 1); + + engine->stat_irq_thresh = thresh; + writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET, + engine->regs + SPA_IRQ_CTRL_REG_OFFSET); + + return len; +} +static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show, + spacc_stat_irq_thresh_store); + +static struct spacc_alg ipsec_engine_algs[] = { + { + .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC, + .key_offs = 0, + .iv_offs = AES_MAX_KEY_SIZE, + .alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_ablk_ctx), + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_ablkcipher = { + .setkey = spacc_aes_setkey, + .encrypt = spacc_ablk_encrypt, + .decrypt = spacc_ablk_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .cra_init = spacc_ablk_cra_init, + .cra_exit = spacc_ablk_cra_exit, + }, + }, + { + .key_offs = 0, + .iv_offs = AES_MAX_KEY_SIZE, + .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB, + .alg = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_ablk_ctx), + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_ablkcipher = { + .setkey = spacc_aes_setkey, + .encrypt = spacc_ablk_encrypt, + .decrypt = spacc_ablk_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + }, + .cra_init = spacc_ablk_cra_init, + .cra_exit = spacc_ablk_cra_exit, + }, + }, + { + .key_offs = DES_BLOCK_SIZE, + .iv_offs = 0, + .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC, + .alg = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc-des-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_ablk_ctx), + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_ablkcipher = { + .setkey = spacc_des_setkey, + .encrypt = spacc_ablk_encrypt, + .decrypt = spacc_ablk_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + }, + .cra_init = spacc_ablk_cra_init, + .cra_exit = spacc_ablk_cra_exit, + }, + }, + { + .key_offs = DES_BLOCK_SIZE, + .iv_offs = 0, + .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB, + .alg = { + .cra_name = "ecb(des)", + .cra_driver_name = "ecb-des-picoxcell", + .cra_priority = 
SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_ablk_ctx), + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_ablkcipher = { + .setkey = spacc_des_setkey, + .encrypt = spacc_ablk_encrypt, + .decrypt = spacc_ablk_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + }, + .cra_init = spacc_ablk_cra_init, + .cra_exit = spacc_ablk_cra_exit, + }, + }, + { + .key_offs = DES_BLOCK_SIZE, + .iv_offs = 0, + .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC, + .alg = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-des3-ede-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_ablk_ctx), + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_ablkcipher = { + .setkey = spacc_des_setkey, + .encrypt = spacc_ablk_encrypt, + .decrypt = spacc_ablk_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + }, + .cra_init = spacc_ablk_cra_init, + .cra_exit = spacc_ablk_cra_exit, + }, + }, + { + .key_offs = DES_BLOCK_SIZE, + .iv_offs = 0, + .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB, + .alg = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3-ede-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_ablk_ctx), + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_ablkcipher = { + .setkey = spacc_des_setkey, + .encrypt = spacc_ablk_encrypt, + .decrypt = spacc_ablk_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + }, + .cra_init = spacc_ablk_cra_init, + .cra_exit = spacc_ablk_cra_exit, + }, + }, + { + .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | + SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC, + .key_offs = 0, + .iv_offs = AES_MAX_KEY_SIZE, + .alg = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_aead_ctx), + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_aead = { + .setkey = spacc_aead_setkey, + .setauthsize = spacc_aead_setauthsize, + .encrypt = spacc_aead_encrypt, + .decrypt = spacc_aead_decrypt, + .givencrypt = spacc_aead_givencrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .cra_init = spacc_aead_cra_init, + .cra_exit = spacc_aead_cra_exit, + }, + }, + { + .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | + SPA_CTRL_HASH_ALG_SHA256 | + SPA_CTRL_HASH_MODE_HMAC, + .key_offs = 0, + .iv_offs = AES_MAX_KEY_SIZE, + .alg = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_aead_ctx), + 
.cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_aead = { + .setkey = spacc_aead_setkey, + .setauthsize = spacc_aead_setauthsize, + .encrypt = spacc_aead_encrypt, + .decrypt = spacc_aead_decrypt, + .givencrypt = spacc_aead_givencrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .cra_init = spacc_aead_cra_init, + .cra_exit = spacc_aead_cra_exit, + }, + }, + { + .key_offs = 0, + .iv_offs = AES_MAX_KEY_SIZE, + .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | + SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC, + .alg = { + .cra_name = "authenc(hmac(md5),cbc(aes))", + .cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_aead_ctx), + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_aead = { + .setkey = spacc_aead_setkey, + .setauthsize = spacc_aead_setauthsize, + .encrypt = spacc_aead_encrypt, + .decrypt = spacc_aead_decrypt, + .givencrypt = spacc_aead_givencrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .cra_init = spacc_aead_cra_init, + .cra_exit = spacc_aead_cra_exit, + }, + }, + { + .key_offs = DES_BLOCK_SIZE, + .iv_offs = 0, + .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | + SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC, + .alg = { + .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_aead_ctx), + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_aead = { + .setkey = spacc_aead_setkey, + .setauthsize = spacc_aead_setauthsize, + .encrypt = spacc_aead_encrypt, + .decrypt = spacc_aead_decrypt, + .givencrypt = spacc_aead_givencrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .cra_init = spacc_aead_cra_init, + .cra_exit = spacc_aead_cra_exit, + }, + }, + { + .key_offs = DES_BLOCK_SIZE, + .iv_offs = 0, + .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | + SPA_CTRL_HASH_ALG_SHA256 | + SPA_CTRL_HASH_MODE_HMAC, + .alg = { + .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_aead_ctx), + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_aead = { + .setkey = spacc_aead_setkey, + .setauthsize = spacc_aead_setauthsize, + .encrypt = spacc_aead_encrypt, + .decrypt = spacc_aead_decrypt, + .givencrypt = spacc_aead_givencrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .cra_init = spacc_aead_cra_init, + .cra_exit = spacc_aead_cra_exit, + }, + }, + { + .key_offs = DES_BLOCK_SIZE, + .iv_offs = 0, + .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | + SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC, + .alg = { + .cra_name = "authenc(hmac(md5),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_ASYNC 
| + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_aead_ctx), + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_aead = { + .setkey = spacc_aead_setkey, + .setauthsize = spacc_aead_setauthsize, + .encrypt = spacc_aead_encrypt, + .decrypt = spacc_aead_decrypt, + .givencrypt = spacc_aead_givencrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .cra_init = spacc_aead_cra_init, + .cra_exit = spacc_aead_cra_exit, + }, + }, +}; + +static struct spacc_alg l2_engine_algs[] = { + { + .key_offs = 0, + .iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN, + .ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI | + SPA_CTRL_CIPH_MODE_F8, + .alg = { + .cra_name = "f8(kasumi)", + .cra_driver_name = "f8-kasumi-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 8, + .cra_ctxsize = sizeof(struct spacc_ablk_ctx), + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_ablkcipher = { + .setkey = spacc_kasumi_f8_setkey, + .encrypt = spacc_ablk_encrypt, + .decrypt = spacc_ablk_decrypt, + .min_keysize = 16, + .max_keysize = 16, + .ivsize = 8, + }, + .cra_init = spacc_ablk_cra_init, + .cra_exit = spacc_ablk_cra_exit, + }, + }, +}; + +#ifdef CONFIG_OF +static const struct of_device_id spacc_of_id_table[] = { + { .compatible = "picochip,spacc-ipsec" }, + { .compatible = "picochip,spacc-l2" }, + {} +}; +#endif /* CONFIG_OF */ + +static bool spacc_is_compatible(struct platform_device *pdev, + const char *spacc_type) +{ + const struct platform_device_id *platid = platform_get_device_id(pdev); + + if (platid && !strcmp(platid->name, spacc_type)) + return true; + +#ifdef CONFIG_OF + if (of_device_is_compatible(pdev->dev.of_node, spacc_type)) + return true; +#endif /* CONFIG_OF */ + + return false; +} + +static int spacc_probe(struct platform_device *pdev) +{ + int i, err, ret = -EINVAL; + struct resource *mem, *irq; + struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine), + GFP_KERNEL); + if (!engine) + return -ENOMEM; + + if (spacc_is_compatible(pdev, "picochip,spacc-ipsec")) { + engine->max_ctxs = SPACC_CRYPTO_IPSEC_MAX_CTXS; + engine->cipher_pg_sz = SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ; + engine->hash_pg_sz = SPACC_CRYPTO_IPSEC_HASH_PG_SZ; + engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ; + engine->algs = ipsec_engine_algs; + engine->num_algs = ARRAY_SIZE(ipsec_engine_algs); + } else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) { + engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS; + engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ; + engine->hash_pg_sz = SPACC_CRYPTO_L2_HASH_PG_SZ; + engine->fifo_sz = SPACC_CRYPTO_L2_FIFO_SZ; + engine->algs = l2_engine_algs; + engine->num_algs = ARRAY_SIZE(l2_engine_algs); + } else { + return -EINVAL; + } + + engine->name = dev_name(&pdev->dev); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + engine->regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(engine->regs)) + return PTR_ERR(engine->regs); + + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq) { + dev_err(&pdev->dev, "no memory/irq resource for engine\n"); + return -ENXIO; + } + + if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0, + engine->name, engine)) { + dev_err(engine->dev, "failed to request IRQ\n"); + return -EBUSY; + } + + engine->dev = &pdev->dev; + engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET; + 
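To illustrate how the algorithm tables above are consumed once registration succeeds: a kernel user simply asks the crypto API for the algorithm name, and the higher cra_priority makes the hardware implementation win over the generic software one. This is an illustrative sketch using the era-appropriate ablkcipher API, not code from this driver.

	struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);

	if (!IS_ERR(tfm)) {
		/* Expected to report "cbc-aes-picoxcell" while this driver
		 * is bound and has the highest priority. */
		pr_info("cbc(aes) backed by %s\n",
			crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm)));
		crypto_free_ablkcipher(tfm);
	}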
engine->hash_key_base = engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET; + + engine->req_pool = dmam_pool_create(engine->name, engine->dev, + MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K); + if (!engine->req_pool) + return -ENOMEM; + + spin_lock_init(&engine->hw_lock); + + engine->clk = clk_get(&pdev->dev, "ref"); + if (IS_ERR(engine->clk)) { + dev_info(&pdev->dev, "clk unavailable\n"); + device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh); + return PTR_ERR(engine->clk); + } + + if (clk_enable(engine->clk)) { + dev_info(&pdev->dev, "unable to enable clk\n"); + clk_put(engine->clk); + return -EIO; + } + + err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); + if (err) { + clk_disable(engine->clk); + clk_put(engine->clk); + return err; + } + + + /* + * Use an IRQ threshold of 50% as a default. This seems to be a + * reasonable trade off of latency against throughput but can be + * changed at runtime. + */ + engine->stat_irq_thresh = (engine->fifo_sz / 2); + + /* + * Configure the interrupts. We only use the STAT_CNT interrupt as we + * only submit a new packet for processing when we complete another in + * the queue. This minimizes time spent in the interrupt handler. + */ + writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET, + engine->regs + SPA_IRQ_CTRL_REG_OFFSET); + writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN, + engine->regs + SPA_IRQ_EN_REG_OFFSET); + + setup_timer(&engine->packet_timeout, spacc_packet_timeout, + (unsigned long)engine); + + INIT_LIST_HEAD(&engine->pending); + INIT_LIST_HEAD(&engine->completed); + INIT_LIST_HEAD(&engine->in_progress); + engine->in_flight = 0; + tasklet_init(&engine->complete, spacc_spacc_complete, + (unsigned long)engine); + + platform_set_drvdata(pdev, engine); + + INIT_LIST_HEAD(&engine->registered_algs); + for (i = 0; i < engine->num_algs; ++i) { + engine->algs[i].engine = engine; + err = crypto_register_alg(&engine->algs[i].alg); + if (!err) { + list_add_tail(&engine->algs[i].entry, + &engine->registered_algs); + ret = 0; + } + if (err) + dev_err(engine->dev, "failed to register alg \"%s\"\n", + engine->algs[i].alg.cra_name); + else + dev_dbg(engine->dev, "registered alg \"%s\"\n", + engine->algs[i].alg.cra_name); + } + + return ret; +} + +static int spacc_remove(struct platform_device *pdev) +{ + struct spacc_alg *alg, *next; + struct spacc_engine *engine = platform_get_drvdata(pdev); + + del_timer_sync(&engine->packet_timeout); + device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh); + + list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) { + list_del(&alg->entry); + crypto_unregister_alg(&alg->alg); + } + + clk_disable(engine->clk); + clk_put(engine->clk); + + return 0; +} + +static const struct platform_device_id spacc_id_table[] = { + { "picochip,spacc-ipsec", }, + { "picochip,spacc-l2", }, + { } +}; + +static struct platform_driver spacc_driver = { + .probe = spacc_probe, + .remove = spacc_remove, + .driver = { + .name = "picochip,spacc", +#ifdef CONFIG_PM + .pm = &spacc_pm_ops, +#endif /* CONFIG_PM */ + .of_match_table = of_match_ptr(spacc_of_id_table), + }, + .id_table = spacc_id_table, +}; + +module_platform_driver(spacc_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jamie Iles"); diff --git a/drivers/crypto/picoxcell_crypto_regs.h b/drivers/crypto/picoxcell_crypto_regs.h new file mode 100644 index 000000000..af9344256 --- /dev/null +++ b/drivers/crypto/picoxcell_crypto_regs.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2010 Picochip Ltd., Jamie Iles + * + * This program is free 
software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef __PICOXCELL_CRYPTO_REGS_H__ +#define __PICOXCELL_CRYPTO_REGS_H__ + +#define SPA_STATUS_OK 0 +#define SPA_STATUS_ICV_FAIL 1 +#define SPA_STATUS_MEMORY_ERROR 2 +#define SPA_STATUS_BLOCK_ERROR 3 + +#define SPA_IRQ_CTRL_STAT_CNT_OFFSET 16 +#define SPA_IRQ_STAT_STAT_MASK (1 << 4) +#define SPA_FIFO_STAT_STAT_OFFSET 16 +#define SPA_FIFO_STAT_STAT_CNT_MASK (0x3F << SPA_FIFO_STAT_STAT_OFFSET) +#define SPA_STATUS_RES_CODE_OFFSET 24 +#define SPA_STATUS_RES_CODE_MASK (0x3 << SPA_STATUS_RES_CODE_OFFSET) +#define SPA_KEY_SZ_CTX_INDEX_OFFSET 8 +#define SPA_KEY_SZ_CIPHER_OFFSET 31 + +#define SPA_IRQ_EN_REG_OFFSET 0x00000000 +#define SPA_IRQ_STAT_REG_OFFSET 0x00000004 +#define SPA_IRQ_CTRL_REG_OFFSET 0x00000008 +#define SPA_FIFO_STAT_REG_OFFSET 0x0000000C +#define SPA_SDMA_BRST_SZ_REG_OFFSET 0x00000010 +#define SPA_SRC_PTR_REG_OFFSET 0x00000020 +#define SPA_DST_PTR_REG_OFFSET 0x00000024 +#define SPA_OFFSET_REG_OFFSET 0x00000028 +#define SPA_AAD_LEN_REG_OFFSET 0x0000002C +#define SPA_PROC_LEN_REG_OFFSET 0x00000030 +#define SPA_ICV_LEN_REG_OFFSET 0x00000034 +#define SPA_ICV_OFFSET_REG_OFFSET 0x00000038 +#define SPA_SW_CTRL_REG_OFFSET 0x0000003C +#define SPA_CTRL_REG_OFFSET 0x00000040 +#define SPA_AUX_INFO_REG_OFFSET 0x0000004C +#define SPA_STAT_POP_REG_OFFSET 0x00000050 +#define SPA_STATUS_REG_OFFSET 0x00000054 +#define SPA_KEY_SZ_REG_OFFSET 0x00000100 +#define SPA_CIPH_KEY_BASE_REG_OFFSET 0x00004000 +#define SPA_HASH_KEY_BASE_REG_OFFSET 0x00008000 +#define SPA_RC4_CTX_BASE_REG_OFFSET 0x00020000 + +#define SPA_IRQ_EN_REG_RESET 0x00000000 +#define SPA_IRQ_CTRL_REG_RESET 0x00000000 +#define SPA_FIFO_STAT_REG_RESET 0x00000000 +#define SPA_SDMA_BRST_SZ_REG_RESET 0x00000000 +#define SPA_SRC_PTR_REG_RESET 0x00000000 +#define SPA_DST_PTR_REG_RESET 0x00000000 +#define SPA_OFFSET_REG_RESET 0x00000000 +#define SPA_AAD_LEN_REG_RESET 0x00000000 +#define SPA_PROC_LEN_REG_RESET 0x00000000 +#define SPA_ICV_LEN_REG_RESET 0x00000000 +#define SPA_ICV_OFFSET_REG_RESET 0x00000000 +#define SPA_SW_CTRL_REG_RESET 0x00000000 +#define SPA_CTRL_REG_RESET 0x00000000 +#define SPA_AUX_INFO_REG_RESET 0x00000000 +#define SPA_STAT_POP_REG_RESET 0x00000000 +#define SPA_STATUS_REG_RESET 0x00000000 +#define SPA_KEY_SZ_REG_RESET 0x00000000 + +#define SPA_CTRL_HASH_ALG_IDX 4 +#define SPA_CTRL_CIPH_MODE_IDX 8 +#define SPA_CTRL_HASH_MODE_IDX 12 +#define SPA_CTRL_CTX_IDX 16 +#define SPA_CTRL_ENCRYPT_IDX 24 +#define SPA_CTRL_AAD_COPY 25 +#define SPA_CTRL_ICV_PT 26 +#define SPA_CTRL_ICV_ENC 27 +#define SPA_CTRL_ICV_APPEND 28 +#define SPA_CTRL_KEY_EXP 29 + +#define SPA_KEY_SZ_CXT_IDX 8 +#define SPA_KEY_SZ_CIPHER_IDX 31 + +#define SPA_IRQ_EN_CMD0_EN (1 << 0) +#define SPA_IRQ_EN_STAT_EN (1 << 4) +#define SPA_IRQ_EN_GLBL_EN (1 << 31) + +#define SPA_CTRL_CIPH_ALG_NULL 0x00 +#define SPA_CTRL_CIPH_ALG_DES 0x01 +#define SPA_CTRL_CIPH_ALG_AES 0x02 +#define 
SPA_CTRL_CIPH_ALG_RC4 0x03 +#define SPA_CTRL_CIPH_ALG_MULTI2 0x04 +#define SPA_CTRL_CIPH_ALG_KASUMI 0x05 + +#define SPA_CTRL_HASH_ALG_NULL (0x00 << SPA_CTRL_HASH_ALG_IDX) +#define SPA_CTRL_HASH_ALG_MD5 (0x01 << SPA_CTRL_HASH_ALG_IDX) +#define SPA_CTRL_HASH_ALG_SHA (0x02 << SPA_CTRL_HASH_ALG_IDX) +#define SPA_CTRL_HASH_ALG_SHA224 (0x03 << SPA_CTRL_HASH_ALG_IDX) +#define SPA_CTRL_HASH_ALG_SHA256 (0x04 << SPA_CTRL_HASH_ALG_IDX) +#define SPA_CTRL_HASH_ALG_SHA384 (0x05 << SPA_CTRL_HASH_ALG_IDX) +#define SPA_CTRL_HASH_ALG_SHA512 (0x06 << SPA_CTRL_HASH_ALG_IDX) +#define SPA_CTRL_HASH_ALG_AESMAC (0x07 << SPA_CTRL_HASH_ALG_IDX) +#define SPA_CTRL_HASH_ALG_AESCMAC (0x08 << SPA_CTRL_HASH_ALG_IDX) +#define SPA_CTRL_HASH_ALG_KASF9 (0x09 << SPA_CTRL_HASH_ALG_IDX) + +#define SPA_CTRL_CIPH_MODE_NULL (0x00 << SPA_CTRL_CIPH_MODE_IDX) +#define SPA_CTRL_CIPH_MODE_ECB (0x00 << SPA_CTRL_CIPH_MODE_IDX) +#define SPA_CTRL_CIPH_MODE_CBC (0x01 << SPA_CTRL_CIPH_MODE_IDX) +#define SPA_CTRL_CIPH_MODE_CTR (0x02 << SPA_CTRL_CIPH_MODE_IDX) +#define SPA_CTRL_CIPH_MODE_CCM (0x03 << SPA_CTRL_CIPH_MODE_IDX) +#define SPA_CTRL_CIPH_MODE_GCM (0x05 << SPA_CTRL_CIPH_MODE_IDX) +#define SPA_CTRL_CIPH_MODE_OFB (0x07 << SPA_CTRL_CIPH_MODE_IDX) +#define SPA_CTRL_CIPH_MODE_CFB (0x08 << SPA_CTRL_CIPH_MODE_IDX) +#define SPA_CTRL_CIPH_MODE_F8 (0x09 << SPA_CTRL_CIPH_MODE_IDX) + +#define SPA_CTRL_HASH_MODE_RAW (0x00 << SPA_CTRL_HASH_MODE_IDX) +#define SPA_CTRL_HASH_MODE_SSLMAC (0x01 << SPA_CTRL_HASH_MODE_IDX) +#define SPA_CTRL_HASH_MODE_HMAC (0x02 << SPA_CTRL_HASH_MODE_IDX) + +#define SPA_FIFO_STAT_EMPTY (1 << 31) +#define SPA_FIFO_CMD_FULL (1 << 7) + +#endif /* __PICOXCELL_CRYPTO_REGS_H__ */ diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig new file mode 100644 index 000000000..49bede2a9 --- /dev/null +++ b/drivers/crypto/qat/Kconfig @@ -0,0 +1,23 @@ +config CRYPTO_DEV_QAT + tristate + select CRYPTO_AEAD + select CRYPTO_AUTHENC + select CRYPTO_ALGAPI + select CRYPTO_AES + select CRYPTO_CBC + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA512 + select FW_LOADER + +config CRYPTO_DEV_QAT_DH895xCC + tristate "Support for Intel(R) DH895xCC" + depends on X86 && PCI + default n + select CRYPTO_DEV_QAT + help + Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology + for accelerating crypto and compression workloads. + + To compile this as a module, choose M here: the module + will be called qat_dh895xcc. diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile new file mode 100644 index 000000000..d11481be2 --- /dev/null +++ b/drivers/crypto/qat/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/ +obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile new file mode 100644 index 000000000..e0424dc38 --- /dev/null +++ b/drivers/crypto/qat/qat_common/Makefile @@ -0,0 +1,14 @@ +obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o +intel_qat-objs := adf_cfg.o \ + adf_ctl_drv.o \ + adf_dev_mgr.o \ + adf_init.o \ + adf_accel_engine.o \ + adf_aer.o \ + adf_transport.o \ + qat_crypto.o \ + qat_algs.o \ + qat_uclo.o \ + qat_hal.o + +intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h new file mode 100644 index 000000000..f22ce7169 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -0,0 +1,205 @@ +/* + This file is provided under a dual BSD/GPLv2 license. 
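Tying the picoxcell_crypto_regs.h definitions above back to the driver: a control word is assembled by OR-ing one value from each field group plus the per-request bits. The example below is illustrative only.

/*
 * ctrl = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
 *        SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC |
 *        (ctx_id << SPA_CTRL_CTX_IDX) | (1 << SPA_CTRL_ICV_APPEND);
 *
 * This is what spacc_aead_submit() writes to SPA_CTRL_REG_OFFSET for the
 * authenc(hmac(sha1),cbc(aes)) entry, with the encrypt/AAD-copy or
 * key-expand bits added per request.
 */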
When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef ADF_ACCEL_DEVICES_H_ +#define ADF_ACCEL_DEVICES_H_ +#include <linux/module.h> +#include <linux/list.h> +#include <linux/proc_fs.h> +#include <linux/io.h> +#include "adf_cfg_common.h" + +#define ADF_DH895XCC_DEVICE_NAME "dh895xcc" +#define ADF_DH895XCC_PCI_DEVICE_ID 0x435 +#define ADF_PCI_MAX_BARS 3 +#define ADF_DEVICE_NAME_LENGTH 32 +#define ADF_ETR_MAX_RINGS_PER_BANK 16 +#define ADF_MAX_MSIX_VECTOR_NAME 16 +#define ADF_DEVICE_NAME_PREFIX "qat_" + +enum adf_accel_capabilities { + ADF_ACCEL_CAPABILITIES_NULL = 0, + ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1, + ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2, + ADF_ACCEL_CAPABILITIES_CIPHER = 4, + ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8, + ADF_ACCEL_CAPABILITIES_COMPRESSION = 32, + ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64, + ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128 +}; + +struct adf_bar { + resource_size_t base_addr; + void __iomem *virt_addr; + resource_size_t size; +} __packed; + +struct adf_accel_msix { + struct msix_entry *entries; + char **names; +} __packed; + +struct adf_accel_pci { + struct pci_dev *pci_dev; + struct adf_accel_msix msix_entries; + struct adf_bar pci_bars[ADF_PCI_MAX_BARS]; + uint8_t revid; + uint8_t sku; +} __packed; + +enum dev_state { + DEV_DOWN = 0, + DEV_UP +}; + +enum dev_sku_info { + DEV_SKU_1 = 0, + DEV_SKU_2, + DEV_SKU_3, + DEV_SKU_4, + DEV_SKU_UNKNOWN, +}; + +static inline const char *get_sku_info(enum dev_sku_info info) +{ + switch (info) { + case DEV_SKU_1: + return "SKU1"; + case DEV_SKU_2: + return "SKU2"; + case DEV_SKU_3: + return "SKU3"; + case DEV_SKU_4: + return "SKU4"; + case DEV_SKU_UNKNOWN: + default: + break; + } + return "Unknown SKU"; +} + +struct adf_hw_device_class { + const char *name; + const enum adf_device_type type; + uint32_t instances; +} __packed; + +struct adf_cfg_device_data; +struct adf_accel_dev; +struct adf_etr_data; +struct adf_etr_ring_data; + +struct adf_hw_device_data { + struct adf_hw_device_class *dev_class; + uint32_t (*get_accel_mask)(uint32_t fuse); + uint32_t (*get_ae_mask)(uint32_t fuse); + uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self); + uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self); + uint32_t (*get_num_aes)(struct adf_hw_device_data *self); + uint32_t (*get_num_accels)(struct adf_hw_device_data *self); + enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self); + void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring); + void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring); + int (*alloc_irq)(struct adf_accel_dev *accel_dev); + void (*free_irq)(struct adf_accel_dev *accel_dev); + void (*enable_error_correction)(struct adf_accel_dev *accel_dev); + int (*init_admin_comms)(struct adf_accel_dev *accel_dev); + void (*exit_admin_comms)(struct adf_accel_dev *accel_dev); + int (*init_arb)(struct adf_accel_dev *accel_dev); + void (*exit_arb)(struct adf_accel_dev *accel_dev); + void (*enable_ints)(struct adf_accel_dev *accel_dev); + const char *fw_name; + uint32_t pci_dev_id; + uint32_t fuses; + uint32_t accel_capabilities_mask; + uint16_t accel_mask; + uint16_t ae_mask; + uint16_t tx_rings_mask; + uint8_t tx_rx_gap; + uint8_t instance_id; + uint8_t num_banks; + uint8_t num_accel; + uint8_t num_logical_accel; + uint8_t num_engines; +} __packed; + +/* CSR write macro */ +#define ADF_CSR_WR(csr_base, csr_offset, val) \ + __raw_writel(val, csr_base + csr_offset) + +/* CSR read macro */ +#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset) + +#define GET_DEV(accel_dev) 
((accel_dev)->accel_pci_dev.pci_dev->dev) +#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars) +#define GET_HW_DATA(accel_dev) (accel_dev->hw_device) +#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks) +#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines) +#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev + +struct adf_admin_comms; +struct icp_qat_fw_loader_handle; +struct adf_fw_loader_data { + struct icp_qat_fw_loader_handle *fw_loader; + const struct firmware *uof_fw; +}; + +struct adf_accel_dev { + struct adf_etr_data *transport; + struct adf_hw_device_data *hw_device; + struct adf_cfg_device_data *cfg; + struct adf_fw_loader_data *fw_loader; + struct adf_admin_comms *admin; + struct list_head crypto_list; + unsigned long status; + atomic_t ref_count; + struct dentry *debugfs_dir; + struct list_head list; + struct module *owner; + struct adf_accel_pci accel_pci_dev; + uint8_t accel_id; +} __packed; +#endif diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c new file mode 100644 index 000000000..eee371037 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c @@ -0,0 +1,177 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
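A short sketch of how the CSR helpers from adf_accel_devices.h above are meant to be used, given a struct adf_accel_dev *accel_dev; the register offset here is purely illustrative and not taken from the hardware documentation.

	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	void __iomem *csr =
		GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)].virt_addr;
	u32 val;

	val = ADF_CSR_RD(csr, 0x100);		/* illustrative offset */
	ADF_CSR_WR(csr, 0x100, val | 0x1);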
+*/ +#include <linux/firmware.h> +#include <linux/pci.h> +#include "adf_cfg.h" +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "icp_qat_uclo.h" + +int adf_ae_fw_load(struct adf_accel_dev *accel_dev) +{ + struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + void *uof_addr; + uint32_t uof_size; + + if (reject_firmware(&loader_data->uof_fw, hw_device->fw_name, + &accel_dev->accel_pci_dev.pci_dev->dev)) { + dev_err(&GET_DEV(accel_dev), "Failed to load firmware %s\n", + hw_device->fw_name); + return -EFAULT; + } + + uof_size = loader_data->uof_fw->size; + uof_addr = (void *)loader_data->uof_fw->data; + if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) { + dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n"); + goto out_err; + } + if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) { + dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n"); + goto out_err; + } + return 0; + +out_err: + adf_ae_fw_release(accel_dev); + return -EFAULT; +} + +void adf_ae_fw_release(struct adf_accel_dev *accel_dev) +{ + struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; + + qat_uclo_del_uof_obj(loader_data->fw_loader); + qat_hal_deinit(loader_data->fw_loader); + + if (loader_data->uof_fw) + release_firmware(loader_data->uof_fw); + + loader_data->uof_fw = NULL; + loader_data->fw_loader = NULL; +} + +int adf_ae_start(struct adf_accel_dev *accel_dev) +{ + struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); + + for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) { + if (hw_data->ae_mask & (1 << ae)) { + qat_hal_start(loader_data->fw_loader, ae, 0xFF); + ae_ctr++; + } + } + dev_info(&GET_DEV(accel_dev), + "qat_dev%d started %d acceleration engines\n", + accel_dev->accel_id, ae_ctr); + return 0; +} + +int adf_ae_stop(struct adf_accel_dev *accel_dev) +{ + struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); + + for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) { + if (hw_data->ae_mask & (1 << ae)) { + qat_hal_stop(loader_data->fw_loader, ae, 0xFF); + ae_ctr++; + } + } + dev_info(&GET_DEV(accel_dev), + "qat_dev%d stopped %d acceleration engines\n", + accel_dev->accel_id, ae_ctr); + return 0; +} + +static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae) +{ + struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; + + qat_hal_reset(loader_data->fw_loader); + if (qat_hal_clr_reset(loader_data->fw_loader)) + return -EFAULT; + + return 0; +} + +int adf_ae_init(struct adf_accel_dev *accel_dev) +{ + struct adf_fw_loader_data *loader_data; + + loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL); + if (!loader_data) + return -ENOMEM; + + accel_dev->fw_loader = loader_data; + if (qat_hal_init(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "Failed to init the AEs\n"); + kfree(loader_data); + return -EFAULT; + } + if (adf_ae_reset(accel_dev, 0)) { + dev_err(&GET_DEV(accel_dev), "Failed to reset the AEs\n"); + qat_hal_deinit(loader_data->fw_loader); + kfree(loader_data); + return -EFAULT; + } + return 0; +} + +int adf_ae_shutdown(struct adf_accel_dev *accel_dev) +{ + struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; + + qat_hal_deinit(loader_data->fw_loader); + kfree(accel_dev->fw_loader); + accel_dev->fw_loader = NULL; + 
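The engine counts reported by adf_ae_start() and adf_ae_stop() above reduce to a population count of ae_mask; the values below are hypothetical.

/*
 * ae_mask = 0x0FFF, max_aes = 12  ->  ae_ctr = 12
 * ae_mask = 0x00FF, max_aes = 12  ->  ae_ctr = 8
 * i.e. ae_ctr == hweight16(hw_data->ae_mask & ((1 << max_aes) - 1))
 */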
return 0; +} diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c new file mode 100644 index 000000000..2dbc733b8 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_aer.c @@ -0,0 +1,250 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
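Before the AER code continues, a sketch of how the accel-engine helpers just added in adf_accel_engine.c are meant to be sequenced at bring-up. Only the three calls and their order come from the code above; the wrapper itself is illustrative:

        /* Illustrative ordering only -- error unwinding trimmed; not part of the patch. */
        static int example_bring_up_aes(struct adf_accel_dev *accel_dev)
        {
                int ret;

                ret = adf_ae_init(accel_dev);     /* alloc fw_loader, qat_hal_init + AE reset */
                if (ret)
                        return ret;
                ret = adf_ae_fw_load(accel_dev);  /* request the UOF image and map it */
                if (ret)
                        return ret;
                return adf_ae_start(accel_dev);   /* kick every AE set in hw_data->ae_mask */
        }

Teardown mirrors it with adf_ae_stop(), then adf_ae_fw_release() and adf_ae_shutdown().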
+*/ +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/aer.h> +#include <linux/completion.h> +#include <linux/workqueue.h> +#include <linux/delay.h> +#include "adf_accel_devices.h" +#include "adf_common_drv.h" + +static struct workqueue_struct *device_reset_wq; + +static pci_ers_result_t adf_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + dev_info(&pdev->dev, "Acceleration driver hardware error detected.\n"); + if (!accel_dev) { + dev_err(&pdev->dev, "Can't find acceleration device\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + if (state == pci_channel_io_perm_failure) { + dev_err(&pdev->dev, "Can't recover from device error\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_NEED_RESET; +} + +/* reset dev data */ +struct adf_reset_dev_data { + int mode; + struct adf_accel_dev *accel_dev; + struct completion compl; + struct work_struct reset_work; +}; + +static void adf_dev_restore(struct adf_accel_dev *accel_dev) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + struct pci_dev *parent = pdev->bus->self; + uint16_t bridge_ctl = 0; + + dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n", + accel_dev->accel_id); + + if (!pci_wait_for_pending_transaction(pdev)) + dev_info(&GET_DEV(accel_dev), + "Transaction still in progress. Proceeding\n"); + + pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl); + bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl); + msleep(100); + bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl); + msleep(100); + pci_restore_state(pdev); + pci_save_state(pdev); +} + +static void adf_device_reset_worker(struct work_struct *work) +{ + struct adf_reset_dev_data *reset_data = + container_of(work, struct adf_reset_dev_data, reset_work); + struct adf_accel_dev *accel_dev = reset_data->accel_dev; + + adf_dev_restarting_notify(accel_dev); + adf_dev_stop(accel_dev); + adf_dev_shutdown(accel_dev); + adf_dev_restore(accel_dev); + if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) { + /* The device hanged and we can't restart it so stop here */ + dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); + kfree(reset_data); + WARN(1, "QAT: device restart failed. Device is unusable\n"); + return; + } + adf_dev_restarted_notify(accel_dev); + clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); + + /* The dev is back alive. 
Notify the caller if in sync mode */ + if (reset_data->mode == ADF_DEV_RESET_SYNC) + complete(&reset_data->compl); + else + kfree(reset_data); +} + +static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, + enum adf_dev_reset_mode mode) +{ + struct adf_reset_dev_data *reset_data; + + if (!adf_dev_started(accel_dev) || + test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) + return 0; + + set_bit(ADF_STATUS_RESTARTING, &accel_dev->status); + reset_data = kzalloc(sizeof(*reset_data), GFP_ATOMIC); + if (!reset_data) + return -ENOMEM; + reset_data->accel_dev = accel_dev; + init_completion(&reset_data->compl); + reset_data->mode = mode; + INIT_WORK(&reset_data->reset_work, adf_device_reset_worker); + queue_work(device_reset_wq, &reset_data->reset_work); + + /* If in sync mode wait for the result */ + if (mode == ADF_DEV_RESET_SYNC) { + int ret = 0; + /* Maximum device reset time is 10 seconds */ + unsigned long wait_jiffies = msecs_to_jiffies(10000); + unsigned long timeout = wait_for_completion_timeout( + &reset_data->compl, wait_jiffies); + if (!timeout) { + dev_err(&GET_DEV(accel_dev), + "Reset device timeout expired\n"); + ret = -EFAULT; + } + kfree(reset_data); + return ret; + } + return 0; +} + +static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + if (!accel_dev) { + pr_err("QAT: Can't find acceleration device\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_cleanup_aer_uncorrect_error_status(pdev); + if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC)) + return PCI_ERS_RESULT_DISCONNECT; + + return PCI_ERS_RESULT_RECOVERED; +} + +static void adf_resume(struct pci_dev *pdev) +{ + dev_info(&pdev->dev, "Acceleration driver reset completed\n"); + dev_info(&pdev->dev, "Device is up and runnig\n"); +} + +static struct pci_error_handlers adf_err_handler = { + .error_detected = adf_error_detected, + .slot_reset = adf_slot_reset, + .resume = adf_resume, +}; + +/** + * adf_enable_aer() - Enable Advance Error Reporting for acceleration device + * @accel_dev: Pointer to acceleration device. + * @adf: PCI device driver owning the given acceleration device. + * + * Function enables PCI Advance Error Reporting for the + * QAT acceleration device accel_dev. + * To be used by QAT device specific drivers. + * + * Return: 0 on success, error code othewise. + */ +int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + + adf->err_handler = &adf_err_handler; + pci_enable_pcie_error_reporting(pdev); + return 0; +} +EXPORT_SYMBOL_GPL(adf_enable_aer); + +/** + * adf_disable_aer() - Enable Advance Error Reporting for acceleration device + * @accel_dev: Pointer to acceleration device. + * + * Function disables PCI Advance Error Reporting for the + * QAT acceleration device accel_dev. + * To be used by QAT device specific drivers. + * + * Return: void + */ +void adf_disable_aer(struct adf_accel_dev *accel_dev) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + + pci_disable_pcie_error_reporting(pdev); +} +EXPORT_SYMBOL_GPL(adf_disable_aer); + +int adf_init_aer(void) +{ + device_reset_wq = create_workqueue("qat_device_reset_wq"); + return !device_reset_wq ? 
-EFAULT : 0; +} + +void adf_exit_aer(void) +{ + if (device_reset_wq) + destroy_workqueue(device_reset_wq); + device_reset_wq = NULL; +} diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c new file mode 100644 index 000000000..ab65bc274 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg.c @@ -0,0 +1,364 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
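Stepping back to adf_aer.c above: a device-specific PCI driver only needs one call at probe time; the error_detected/slot_reset/resume callbacks and the reset worker are wired up behind it. A hypothetical fragment (the driver name is a placeholder):

        /* Hypothetical fragment -- not part of the patch. */
        static struct pci_driver example_qat_driver;    /* filled in by the device driver */

        static int example_setup_aer(struct adf_accel_dev *accel_dev)
        {
                /* installs adf_err_handler as example_qat_driver.err_handler and
                 * enables PCIe error reporting; adf_disable_aer() undoes the latter */
                return adf_enable_aer(accel_dev, &example_qat_driver);
        }

The reset workqueue itself is created once per module through adf_init_aer(), which the ctl driver below calls from its module init.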
+*/ +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/seq_file.h> +#include "adf_accel_devices.h" +#include "adf_cfg.h" +#include "adf_common_drv.h" + +static DEFINE_MUTEX(qat_cfg_read_lock); + +static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos) +{ + struct adf_cfg_device_data *dev_cfg = sfile->private; + + mutex_lock(&qat_cfg_read_lock); + return seq_list_start(&dev_cfg->sec_list, *pos); +} + +static int qat_dev_cfg_show(struct seq_file *sfile, void *v) +{ + struct list_head *list; + struct adf_cfg_section *sec = + list_entry(v, struct adf_cfg_section, list); + + seq_printf(sfile, "[%s]\n", sec->name); + list_for_each(list, &sec->param_head) { + struct adf_cfg_key_val *ptr = + list_entry(list, struct adf_cfg_key_val, list); + seq_printf(sfile, "%s = %s\n", ptr->key, ptr->val); + } + return 0; +} + +static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos) +{ + struct adf_cfg_device_data *dev_cfg = sfile->private; + + return seq_list_next(v, &dev_cfg->sec_list, pos); +} + +static void qat_dev_cfg_stop(struct seq_file *sfile, void *v) +{ + mutex_unlock(&qat_cfg_read_lock); +} + +static const struct seq_operations qat_dev_cfg_sops = { + .start = qat_dev_cfg_start, + .next = qat_dev_cfg_next, + .stop = qat_dev_cfg_stop, + .show = qat_dev_cfg_show +}; + +static int qat_dev_cfg_open(struct inode *inode, struct file *file) +{ + int ret = seq_open(file, &qat_dev_cfg_sops); + + if (!ret) { + struct seq_file *seq_f = file->private_data; + + seq_f->private = inode->i_private; + } + return ret; +} + +static const struct file_operations qat_dev_cfg_fops = { + .open = qat_dev_cfg_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +/** + * adf_cfg_dev_add() - Create an acceleration device configuration table. + * @accel_dev: Pointer to acceleration device. + * + * Function creates a configuration table for the given acceleration device. + * The table stores device specific config values. + * To be used by QAT device specific drivers. + * + * Return: 0 on success, error code othewise. + */ +int adf_cfg_dev_add(struct adf_accel_dev *accel_dev) +{ + struct adf_cfg_device_data *dev_cfg_data; + + dev_cfg_data = kzalloc(sizeof(*dev_cfg_data), GFP_KERNEL); + if (!dev_cfg_data) + return -ENOMEM; + INIT_LIST_HEAD(&dev_cfg_data->sec_list); + init_rwsem(&dev_cfg_data->lock); + accel_dev->cfg = dev_cfg_data; + + /* accel_dev->debugfs_dir should always be non-NULL here */ + dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR, + accel_dev->debugfs_dir, + dev_cfg_data, + &qat_dev_cfg_fops); + if (!dev_cfg_data->debug) { + dev_err(&GET_DEV(accel_dev), + "Failed to create qat cfg debugfs entry.\n"); + kfree(dev_cfg_data); + accel_dev->cfg = NULL; + return -EFAULT; + } + return 0; +} +EXPORT_SYMBOL_GPL(adf_cfg_dev_add); + +static void adf_cfg_section_del_all(struct list_head *head); + +void adf_cfg_del_all(struct adf_accel_dev *accel_dev) +{ + struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; + + down_write(&dev_cfg_data->lock); + adf_cfg_section_del_all(&dev_cfg_data->sec_list); + up_write(&dev_cfg_data->lock); + clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); +} + +/** + * adf_cfg_dev_remove() - Clears acceleration device configuration table. + * @accel_dev: Pointer to acceleration device. + * + * Function removes configuration table from the given acceleration device + * and frees all allocated memory. + * To be used by QAT device specific drivers. 
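As an aside on the seq_file hooks at the top of adf_cfg.c: the dev_cfg node they back prints one "[section]" header per adf_cfg_section and one "key = value" line per entry, so it reads like a small INI file. A hypothetical userspace peek (the debugfs path and the qat_dev0 directory name are assumptions, not taken from the patch):

        /* Hypothetical userspace fragment -- the path is an assumption. */
        #include <stdio.h>

        int main(void)
        {
                char line[256];
                FILE *f = fopen("/sys/kernel/debug/qat_dev0/dev_cfg", "r");

                if (!f)
                        return 1;
                while (fgets(line, sizeof(line), f))
                        fputs(line, stdout);   /* e.g. "[KERNEL]" then "NumberCyInstances = 1" */
                fclose(f);
                return 0;
        }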
+ * + * Return: void + */ +void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev) +{ + struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; + + down_write(&dev_cfg_data->lock); + adf_cfg_section_del_all(&dev_cfg_data->sec_list); + up_write(&dev_cfg_data->lock); + debugfs_remove(dev_cfg_data->debug); + kfree(dev_cfg_data); + accel_dev->cfg = NULL; +} +EXPORT_SYMBOL_GPL(adf_cfg_dev_remove); + +static void adf_cfg_keyval_add(struct adf_cfg_key_val *new, + struct adf_cfg_section *sec) +{ + list_add_tail(&new->list, &sec->param_head); +} + +static void adf_cfg_keyval_del_all(struct list_head *head) +{ + struct list_head *list_ptr, *tmp; + + list_for_each_prev_safe(list_ptr, tmp, head) { + struct adf_cfg_key_val *ptr = + list_entry(list_ptr, struct adf_cfg_key_val, list); + list_del(list_ptr); + kfree(ptr); + } +} + +static void adf_cfg_section_del_all(struct list_head *head) +{ + struct adf_cfg_section *ptr; + struct list_head *list, *tmp; + + list_for_each_prev_safe(list, tmp, head) { + ptr = list_entry(list, struct adf_cfg_section, list); + adf_cfg_keyval_del_all(&ptr->param_head); + list_del(list); + kfree(ptr); + } +} + +static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s, + const char *key) +{ + struct list_head *list; + + list_for_each(list, &s->param_head) { + struct adf_cfg_key_val *ptr = + list_entry(list, struct adf_cfg_key_val, list); + if (!strcmp(ptr->key, key)) + return ptr; + } + return NULL; +} + +static struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev, + const char *sec_name) +{ + struct adf_cfg_device_data *cfg = accel_dev->cfg; + struct list_head *list; + + list_for_each(list, &cfg->sec_list) { + struct adf_cfg_section *ptr = + list_entry(list, struct adf_cfg_section, list); + if (!strcmp(ptr->name, sec_name)) + return ptr; + } + return NULL; +} + +static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev, + const char *sec_name, + const char *key_name, + char *val) +{ + struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name); + struct adf_cfg_key_val *keyval = NULL; + + if (sec) + keyval = adf_cfg_key_value_find(sec, key_name); + if (keyval) { + memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES); + return 0; + } + return -1; +} + +/** + * adf_cfg_add_key_value_param() - Add key-value config entry to config table. + * @accel_dev: Pointer to acceleration device. + * @section_name: Name of the section where the param will be added + * @key: The key string + * @val: Value pain for the given @key + * @type: Type - string, int or address + * + * Function adds configuration key - value entry in the appropriate section + * in the given acceleration device + * To be used by QAT device specific drivers. + * + * Return: 0 on success, error code othewise. 
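A hedged sketch of how a device-specific driver is expected to populate the table with the helpers above: create a section, add typed key/value pairs, then read values back. The section and key names come from adf_cfg_strings.h further down; the wrapper function itself is illustrative:

        /* Illustrative use of the config API -- not part of the patch. */
        static int example_fill_cfg(struct adf_accel_dev *accel_dev)
        {
                char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
                long num_cy = 1;
                int ret;

                ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
                if (ret)
                        return ret;

                /* ADF_DEC values are read through a pointer to long and stored as text */
                ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
                                                  ADF_NUM_CY, &num_cy, ADF_DEC);
                if (ret)
                        return ret;

                /* returns the stored text; takes the table rwsem for reading */
                return adf_cfg_get_param_value(accel_dev, ADF_KERNEL_SEC,
                                               ADF_NUM_CY, val);
        }

adf_cfg_add_key_value_param() takes the same rwsem for writing, so additions and lookups can be freely interleaved.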
+ */ +int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, + const char *section_name, + const char *key, const void *val, + enum adf_cfg_val_type type) +{ + struct adf_cfg_device_data *cfg = accel_dev->cfg; + struct adf_cfg_key_val *key_val; + struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev, + section_name); + if (!section) + return -EFAULT; + + key_val = kzalloc(sizeof(*key_val), GFP_KERNEL); + if (!key_val) + return -ENOMEM; + + INIT_LIST_HEAD(&key_val->list); + strlcpy(key_val->key, key, sizeof(key_val->key)); + + if (type == ADF_DEC) { + snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES, + "%ld", (*((long *)val))); + } else if (type == ADF_STR) { + strlcpy(key_val->val, (char *)val, sizeof(key_val->val)); + } else if (type == ADF_HEX) { + snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES, + "0x%lx", (unsigned long)val); + } else { + dev_err(&GET_DEV(accel_dev), "Unknown type given.\n"); + kfree(key_val); + return -1; + } + key_val->type = type; + down_write(&cfg->lock); + adf_cfg_keyval_add(key_val, section); + up_write(&cfg->lock); + return 0; +} +EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param); + +/** + * adf_cfg_section_add() - Add config section entry to config table. + * @accel_dev: Pointer to acceleration device. + * @name: Name of the section + * + * Function adds configuration section where key - value entries + * will be stored. + * To be used by QAT device specific drivers. + * + * Return: 0 on success, error code othewise. + */ +int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name) +{ + struct adf_cfg_device_data *cfg = accel_dev->cfg; + struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name); + + if (sec) + return 0; + + sec = kzalloc(sizeof(*sec), GFP_KERNEL); + if (!sec) + return -ENOMEM; + + strlcpy(sec->name, name, sizeof(sec->name)); + INIT_LIST_HEAD(&sec->param_head); + down_write(&cfg->lock); + list_add_tail(&sec->list, &cfg->sec_list); + up_write(&cfg->lock); + return 0; +} +EXPORT_SYMBOL_GPL(adf_cfg_section_add); + +int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev, + const char *section, const char *name, + char *value) +{ + struct adf_cfg_device_data *cfg = accel_dev->cfg; + int ret; + + down_read(&cfg->lock); + ret = adf_cfg_key_val_get(accel_dev, section, name, value); + up_read(&cfg->lock); + return ret; +} diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/qat/qat_common/adf_cfg.h new file mode 100644 index 000000000..6a9c6f6b5 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg.h @@ -0,0 +1,87 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. 
+ Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_CFG_H_ +#define ADF_CFG_H_ + +#include <linux/list.h> +#include <linux/rwsem.h> +#include <linux/debugfs.h> +#include "adf_accel_devices.h" +#include "adf_cfg_common.h" +#include "adf_cfg_strings.h" + +struct adf_cfg_key_val { + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + enum adf_cfg_val_type type; + struct list_head list; +}; + +struct adf_cfg_section { + char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; + struct list_head list; + struct list_head param_head; +}; + +struct adf_cfg_device_data { + struct list_head sec_list; + struct dentry *debug; + struct rw_semaphore lock; +}; + +int adf_cfg_dev_add(struct adf_accel_dev *accel_dev); +void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev); +int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name); +void adf_cfg_del_all(struct adf_accel_dev *accel_dev); +int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, + const char *section_name, + const char *key, const void *val, + enum adf_cfg_val_type type); +int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev, + const char *section, const char *name, char *value); + +#endif diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h new file mode 100644 index 000000000..88b82187a --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h @@ -0,0 +1,100 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_CFG_COMMON_H_ +#define ADF_CFG_COMMON_H_ + +#include <linux/types.h> +#include <linux/ioctl.h> + +#define ADF_CFG_MAX_STR_LEN 64 +#define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN +#define ADF_CFG_MAX_VAL_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN +#define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN +#define ADF_CFG_BASE_DEC 10 +#define ADF_CFG_BASE_HEX 16 +#define ADF_CFG_ALL_DEVICES 0xFE +#define ADF_CFG_NO_DEVICE 0xFF +#define ADF_CFG_AFFINITY_WHATEVER 0xFF +#define MAX_DEVICE_NAME_SIZE 32 +#define ADF_MAX_DEVICES 32 + +enum adf_cfg_val_type { + ADF_DEC, + ADF_HEX, + ADF_STR +}; + +enum adf_device_type { + DEV_UNKNOWN = 0, + DEV_DH895XCC, +}; + +struct adf_dev_status_info { + enum adf_device_type type; + uint8_t accel_id; + uint8_t instance_id; + uint8_t num_ae; + uint8_t num_accel; + uint8_t num_logical_accel; + uint8_t banks_per_accel; + uint8_t state; + uint8_t bus; + uint8_t dev; + uint8_t fun; + char name[MAX_DEVICE_NAME_SIZE]; +}; + +#define ADF_CTL_IOC_MAGIC 'a' +#define IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS _IOW(ADF_CTL_IOC_MAGIC, 0, \ + struct adf_user_cfg_ctl_data) +#define IOCTL_STOP_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 1, \ + struct adf_user_cfg_ctl_data) +#define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \ + struct adf_user_cfg_ctl_data) +#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, uint32_t) +#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, int32_t) +#endif diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h new file mode 100644 index 000000000..135751113 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h @@ -0,0 +1,83 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. 
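The ioctl numbers defined in adf_cfg_common.h above are the entire userspace control surface of the driver. A hypothetical userspace query of the device count; the /dev node name follows DEVICE_NAME in adf_ctl_drv.c further down, but the exact path under /dev is an assumption:

        /* Hypothetical userspace fragment -- not part of the patch. */
        #include <stdint.h>
        #include <stdio.h>
        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include "adf_cfg_common.h"     /* IOCTL_GET_NUM_DEVICES */

        int main(void)
        {
                uint32_t num = 0;
                int fd = open("/dev/qat_adf_ctl", O_RDWR);

                if (fd < 0)
                        return 1;
                if (ioctl(fd, IOCTL_GET_NUM_DEVICES, &num) == 0)
                        printf("%u QAT devices\n", num);
                close(fd);
                return 0;
        }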
+ This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef ADF_CFG_STRINGS_H_ +#define ADF_CFG_STRINGS_H_ + +#define ADF_GENERAL_SEC "GENERAL" +#define ADF_KERNEL_SEC "KERNEL" +#define ADF_ACCEL_SEC "Accelerator" +#define ADF_NUM_CY "NumberCyInstances" +#define ADF_NUM_DC "NumberDcInstances" +#define ADF_RING_SYM_SIZE "NumConcurrentSymRequests" +#define ADF_RING_ASYM_SIZE "NumConcurrentAsymRequests" +#define ADF_RING_DC_SIZE "NumConcurrentRequests" +#define ADF_RING_ASYM_TX "RingAsymTx" +#define ADF_RING_SYM_TX "RingSymTx" +#define ADF_RING_RND_TX "RingNrbgTx" +#define ADF_RING_ASYM_RX "RingAsymRx" +#define ADF_RING_SYM_RX "RingSymRx" +#define ADF_RING_RND_RX "RingNrbgRx" +#define ADF_RING_DC_TX "RingTx" +#define ADF_RING_DC_RX "RingRx" +#define ADF_ETRMGR_BANK "Bank" +#define ADF_RING_BANK_NUM "BankNumber" +#define ADF_CY "Cy" +#define ADF_DC "Dc" +#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled" +#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \ + ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_ENABLED +#define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs" +#define ADF_ETRMGR_COALESCE_TIMER_FORMAT \ + ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCE_TIMER +#define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses" +#define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \ + ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_MSG_ENABLED +#define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity" +#define ADF_ETRMGR_CORE_AFFINITY_FORMAT \ + ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY +#define ADF_ACCEL_STR "Accelerator%d" +#endif diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h new file mode 100644 index 000000000..0c38a155a --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h @@ -0,0 +1,94 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
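The *_FORMAT macros in adf_cfg_strings.h above are plain string concatenations with a "%d" in the middle, meant to be fed to snprintf() together with a bank number. A small illustrative helper (not part of the patch):

        /* ADF_ETRMGR_COALESCE_TIMER_FORMAT expands to "Bank" "%d" "InterruptCoalescingTimerNs",
         * i.e. "Bank%dInterruptCoalescingTimerNs". Illustrative only. */
        static void example_bank_key(char *key, size_t len, int bank)
        {
                /* bank 0 yields the key "Bank0InterruptCoalescingTimerNs" */
                snprintf(key, len, ADF_ETRMGR_COALESCE_TIMER_FORMAT, bank);
        }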
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_CFG_USER_H_ +#define ADF_CFG_USER_H_ + +#include "adf_cfg_common.h" +#include "adf_cfg_strings.h" + +struct adf_user_cfg_key_val { + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + union { + char *user_val_ptr; + uint64_t padding1; + }; + union { + struct adf_user_cfg_key_val *prev; + uint64_t padding2; + }; + union { + struct adf_user_cfg_key_val *next; + uint64_t padding3; + }; + enum adf_cfg_val_type type; +}; + +struct adf_user_cfg_section { + char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; + union { + struct adf_user_cfg_key_val *params; + uint64_t padding1; + }; + union { + struct adf_user_cfg_section *prev; + uint64_t padding2; + }; + union { + struct adf_user_cfg_section *next; + uint64_t padding3; + }; +}; + +struct adf_user_cfg_ctl_data { + union { + struct adf_user_cfg_section *config_section; + uint64_t padding; + }; + uint8_t device_id; +}; +#endif diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h new file mode 100644 index 000000000..0666ee6a3 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -0,0 +1,192 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
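The user-space mirrors in adf_cfg_user.h above exist so a management tool can hand the kernel an entire configuration in one IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS call; adf_copy_key_value_data() in adf_ctl_drv.c below walks the section list and each section's params list with copy_from_user(). A hypothetical userspace fragment building a one-section, one-key configuration (the device id and the value "1" are made up):

        /* Hypothetical userspace fragment -- not part of the patch. */
        #include <string.h>
        #include <sys/ioctl.h>
        #include "adf_cfg_user.h"

        static int example_configure(int fd)
        {
                struct adf_user_cfg_key_val kv = { .type = ADF_STR, .next = NULL };
                struct adf_user_cfg_section sec = { .params = &kv, .next = NULL };
                struct adf_user_cfg_ctl_data ctl = { .config_section = &sec,
                                                     .device_id = 0 };

                strncpy(sec.name, ADF_KERNEL_SEC, sizeof(sec.name) - 1);
                strncpy(kv.key, ADF_NUM_CY, sizeof(kv.key) - 1);
                /* ADF_STR values travel as text; per adf_cfg_add_key_value_param(),
                 * ADF_DEC and ADF_HEX instead expect a binary long placed in .val */
                strncpy(kv.val, "1", sizeof(kv.val) - 1);

                return ioctl(fd, IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS, &ctl);
        }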
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_DRV_H +#define ADF_DRV_H + +#include <linux/list.h> +#include <linux/pci.h> +#include "adf_accel_devices.h" +#include "icp_qat_fw_loader_handle.h" +#include "icp_qat_hal.h" + +#define ADF_STATUS_RESTARTING 0 +#define ADF_STATUS_STARTING 1 +#define ADF_STATUS_CONFIGURED 2 +#define ADF_STATUS_STARTED 3 +#define ADF_STATUS_AE_INITIALISED 4 +#define ADF_STATUS_AE_UCODE_LOADED 5 +#define ADF_STATUS_AE_STARTED 6 +#define ADF_STATUS_ORPHAN_TH_RUNNING 7 +#define ADF_STATUS_IRQ_ALLOCATED 8 + +enum adf_dev_reset_mode { + ADF_DEV_RESET_ASYNC = 0, + ADF_DEV_RESET_SYNC +}; + +enum adf_event { + ADF_EVENT_INIT = 0, + ADF_EVENT_START, + ADF_EVENT_STOP, + ADF_EVENT_SHUTDOWN, + ADF_EVENT_RESTARTING, + ADF_EVENT_RESTARTED, +}; + +struct service_hndl { + int (*event_hld)(struct adf_accel_dev *accel_dev, + enum adf_event event); + unsigned long init_status; + unsigned long start_status; + char *name; + struct list_head list; + int admin; +}; + +int adf_service_register(struct service_hndl *service); +int adf_service_unregister(struct service_hndl *service); + +int adf_dev_init(struct adf_accel_dev *accel_dev); +int adf_dev_start(struct adf_accel_dev *accel_dev); +int adf_dev_stop(struct adf_accel_dev *accel_dev); +void adf_dev_shutdown(struct adf_accel_dev *accel_dev); + +int adf_ctl_dev_register(void); +void adf_ctl_dev_unregister(void); +int adf_processes_dev_register(void); +void adf_processes_dev_unregister(void); + +int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev); +void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev); +struct list_head *adf_devmgr_get_head(void); +struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id); +struct adf_accel_dev *adf_devmgr_get_first(void); +struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev); +int adf_devmgr_verify_id(uint32_t id); +void adf_devmgr_get_num_dev(uint32_t *num); +int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev); +int adf_dev_started(struct adf_accel_dev *accel_dev); +int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev); +int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev); +int adf_ae_init(struct adf_accel_dev *accel_dev); +int adf_ae_shutdown(struct adf_accel_dev *accel_dev); +int adf_ae_fw_load(struct adf_accel_dev *accel_dev); +void adf_ae_fw_release(struct adf_accel_dev *accel_dev); +int adf_ae_start(struct adf_accel_dev *accel_dev); +int adf_ae_stop(struct adf_accel_dev *accel_dev); + +int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf); +void adf_disable_aer(struct adf_accel_dev *accel_dev); +int adf_init_aer(void); +void adf_exit_aer(void); + +int adf_dev_get(struct adf_accel_dev *accel_dev); +void adf_dev_put(struct adf_accel_dev *accel_dev); +int adf_dev_in_use(struct adf_accel_dev *accel_dev); +int adf_init_etr_data(struct adf_accel_dev *accel_dev); +void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev); +int qat_crypto_register(void); +int qat_crypto_unregister(void); +struct qat_crypto_instance 
*qat_crypto_get_instance_node(int node); +void qat_crypto_put_instance(struct qat_crypto_instance *inst); +void qat_alg_callback(void *resp); +int qat_algs_init(void); +void qat_algs_exit(void); +int qat_algs_register(void); +int qat_algs_unregister(void); + +int qat_hal_init(struct adf_accel_dev *accel_dev); +void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle); +void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae, + unsigned int ctx_mask); +void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae, + unsigned int ctx_mask); +void qat_hal_reset(struct icp_qat_fw_loader_handle *handle); +int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle); +void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask); +int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, enum icp_qat_uof_regtype lm_type, + unsigned char mode); +int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char mode); +int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char mode); +void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask, unsigned int upc); +void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int uaddr, + unsigned int words_num, uint64_t *uword); +void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae, + unsigned int uword_addr, unsigned int words_num, + unsigned int *data); +int qat_hal_get_ins_num(void); +int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, + struct icp_qat_uof_batch_init *lm_init_header); +int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int regdata); +int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int regdata); +int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int regdata); +int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + unsigned short reg_num, unsigned int regdata); +int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned short lm_addr, unsigned int value); +int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle); +void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle); +int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, + void *addr_ptr, int mem_size); +#endif diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c new file mode 100644 index 000000000..cb5f066e9 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -0,0 +1,506 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. 
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/bitops.h> +#include <linux/pci.h> +#include <linux/cdev.h> +#include <linux/uaccess.h> +#include <linux/crypto.h> + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_cfg.h" +#include "adf_cfg_common.h" +#include "adf_cfg_user.h" + +#define DEVICE_NAME "qat_adf_ctl" + +static DEFINE_MUTEX(adf_ctl_lock); +static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); + +static const struct file_operations adf_ctl_ops = { + .owner = THIS_MODULE, + .unlocked_ioctl = adf_ctl_ioctl, + .compat_ioctl = adf_ctl_ioctl, +}; + +struct adf_ctl_drv_info { + unsigned int major; + struct cdev drv_cdev; + struct class *drv_class; +}; + +static struct adf_ctl_drv_info adf_ctl_drv; + +static void adf_chr_drv_destroy(void) +{ + device_destroy(adf_ctl_drv.drv_class, MKDEV(adf_ctl_drv.major, 0)); + cdev_del(&adf_ctl_drv.drv_cdev); + class_destroy(adf_ctl_drv.drv_class); + unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1); +} + +static int adf_chr_drv_create(void) +{ + dev_t dev_id; + struct device *drv_device; + + if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) { + pr_err("QAT: unable to allocate chrdev region\n"); + return -EFAULT; + } + + adf_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME); + if (IS_ERR(adf_ctl_drv.drv_class)) { + pr_err("QAT: class_create failed for adf_ctl\n"); + goto err_chrdev_unreg; + } + adf_ctl_drv.major = MAJOR(dev_id); + cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops); + if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) { + pr_err("QAT: cdev add failed\n"); + goto err_class_destr; + } + + drv_device = device_create(adf_ctl_drv.drv_class, NULL, + MKDEV(adf_ctl_drv.major, 0), + NULL, DEVICE_NAME); + if (IS_ERR(drv_device)) { + pr_err("QAT: failed to create device\n"); + goto err_cdev_del; + } + return 0; +err_cdev_del: + cdev_del(&adf_ctl_drv.drv_cdev); +err_class_destr: + class_destroy(adf_ctl_drv.drv_class); +err_chrdev_unreg: + unregister_chrdev_region(dev_id, 1); + return -EFAULT; +} + +static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data, + unsigned long arg) +{ + struct adf_user_cfg_ctl_data *cfg_data; + + cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL); + if (!cfg_data) + return -ENOMEM; + + /* Initialize device id to NO DEVICE as 0 is a valid device id */ + cfg_data->device_id = ADF_CFG_NO_DEVICE; + + if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) { + pr_err("QAT: failed to copy from user cfg_data.\n"); + kfree(cfg_data); + return -EIO; + } + + *ctl_data = cfg_data; + return 0; +} + +static int adf_add_key_value_data(struct adf_accel_dev *accel_dev, + const char *section, + const struct adf_user_cfg_key_val *key_val) +{ + if (key_val->type == ADF_HEX) { + long *ptr = (long *)key_val->val; + long val = *ptr; + + if (adf_cfg_add_key_value_param(accel_dev, section, + key_val->key, (void *)val, + key_val->type)) { + dev_err(&GET_DEV(accel_dev), + "failed to add hex keyvalue.\n"); + return -EFAULT; + } + } else { + if (adf_cfg_add_key_value_param(accel_dev, section, + key_val->key, key_val->val, + key_val->type)) { + dev_err(&GET_DEV(accel_dev), + "failed to add keyvalue.\n"); + return -EFAULT; + } + } + return 0; +} + +static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev, + struct adf_user_cfg_ctl_data *ctl_data) +{ + struct adf_user_cfg_key_val key_val; + struct adf_user_cfg_key_val *params_head; + struct adf_user_cfg_section section, 
*section_head; + + section_head = ctl_data->config_section; + + while (section_head) { + if (copy_from_user(§ion, (void __user *)section_head, + sizeof(*section_head))) { + dev_err(&GET_DEV(accel_dev), + "failed to copy section info\n"); + goto out_err; + } + + if (adf_cfg_section_add(accel_dev, section.name)) { + dev_err(&GET_DEV(accel_dev), + "failed to add section.\n"); + goto out_err; + } + + params_head = section_head->params; + + while (params_head) { + if (copy_from_user(&key_val, (void __user *)params_head, + sizeof(key_val))) { + dev_err(&GET_DEV(accel_dev), + "Failed to copy keyvalue.\n"); + goto out_err; + } + if (adf_add_key_value_data(accel_dev, section.name, + &key_val)) { + goto out_err; + } + params_head = key_val.next; + } + section_head = section.next; + } + return 0; +out_err: + adf_cfg_del_all(accel_dev); + return -EFAULT; +} + +static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + int ret; + struct adf_user_cfg_ctl_data *ctl_data; + struct adf_accel_dev *accel_dev; + + ret = adf_ctl_alloc_resources(&ctl_data, arg); + if (ret) + return ret; + + accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id); + if (!accel_dev) { + ret = -EFAULT; + goto out; + } + + if (adf_dev_started(accel_dev)) { + ret = -EFAULT; + goto out; + } + + if (adf_copy_key_value_data(accel_dev, ctl_data)) { + ret = -EFAULT; + goto out; + } + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); +out: + kfree(ctl_data); + return ret; +} + +static int adf_ctl_is_device_in_use(int id) +{ + struct list_head *itr, *head = adf_devmgr_get_head(); + + list_for_each(itr, head) { + struct adf_accel_dev *dev = + list_entry(itr, struct adf_accel_dev, list); + + if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) { + if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) { + dev_info(&GET_DEV(dev), + "device qat_dev%d is busy\n", + dev->accel_id); + return -EBUSY; + } + } + } + return 0; +} + +static int adf_ctl_stop_devices(uint32_t id) +{ + struct list_head *itr, *head = adf_devmgr_get_head(); + int ret = 0; + + list_for_each(itr, head) { + struct adf_accel_dev *accel_dev = + list_entry(itr, struct adf_accel_dev, list); + if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) { + if (!adf_dev_started(accel_dev)) + continue; + + if (adf_dev_stop(accel_dev)) { + dev_err(&GET_DEV(accel_dev), + "Failed to stop qat_dev%d\n", id); + ret = -EFAULT; + } else { + adf_dev_shutdown(accel_dev); + } + } + } + return ret; +} + +static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + int ret; + struct adf_user_cfg_ctl_data *ctl_data; + + ret = adf_ctl_alloc_resources(&ctl_data, arg); + if (ret) + return ret; + + if (adf_devmgr_verify_id(ctl_data->device_id)) { + pr_err("QAT: Device %d not found\n", ctl_data->device_id); + ret = -ENODEV; + goto out; + } + + ret = adf_ctl_is_device_in_use(ctl_data->device_id); + if (ret) + goto out; + + if (ctl_data->device_id == ADF_CFG_ALL_DEVICES) + pr_info("QAT: Stopping all acceleration devices.\n"); + else + pr_info("QAT: Stopping acceleration device qat_dev%d.\n", + ctl_data->device_id); + + ret = adf_ctl_stop_devices(ctl_data->device_id); + if (ret) + pr_err("QAT: failed to stop device.\n"); +out: + kfree(ctl_data); + return ret; +} + +static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + int ret; + struct adf_user_cfg_ctl_data *ctl_data; + struct adf_accel_dev *accel_dev; + + ret = adf_ctl_alloc_resources(&ctl_data, arg); + if (ret) + return ret; + + 
accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id); + if (!accel_dev) { + pr_err("QAT: Device %d not found\n", ctl_data->device_id); + ret = -ENODEV; + goto out; + } + + if (!adf_dev_started(accel_dev)) { + dev_info(&GET_DEV(accel_dev), + "Starting acceleration device qat_dev%d.\n", + ctl_data->device_id); + ret = adf_dev_init(accel_dev); + if (!ret) + ret = adf_dev_start(accel_dev); + } else { + dev_info(&GET_DEV(accel_dev), + "Acceleration device qat_dev%d already started.\n", + ctl_data->device_id); + } + if (ret) { + dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n", + ctl_data->device_id); + adf_dev_stop(accel_dev); + adf_dev_shutdown(accel_dev); + } +out: + kfree(ctl_data); + return ret; +} + +static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + uint32_t num_devices = 0; + + adf_devmgr_get_num_dev(&num_devices); + if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices))) + return -EFAULT; + + return 0; +} + +static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + struct adf_hw_device_data *hw_data; + struct adf_dev_status_info dev_info; + struct adf_accel_dev *accel_dev; + + if (copy_from_user(&dev_info, (void __user *)arg, + sizeof(struct adf_dev_status_info))) { + pr_err("QAT: failed to copy from user.\n"); + return -EFAULT; + } + + accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id); + if (!accel_dev) { + pr_err("QAT: Device %d not found\n", dev_info.accel_id); + return -ENODEV; + } + hw_data = accel_dev->hw_device; + dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN; + dev_info.num_ae = hw_data->get_num_aes(hw_data); + dev_info.num_accel = hw_data->get_num_accels(hw_data); + dev_info.num_logical_accel = hw_data->num_logical_accel; + dev_info.banks_per_accel = hw_data->num_banks + / hw_data->num_logical_accel; + strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name)); + dev_info.instance_id = hw_data->instance_id; + dev_info.type = hw_data->dev_class->type; + dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number; + dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn); + dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn); + + if (copy_to_user((void __user *)arg, &dev_info, + sizeof(struct adf_dev_status_info))) { + dev_err(&GET_DEV(accel_dev), "failed to copy status.\n"); + return -EFAULT; + } + return 0; +} + +static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) +{ + int ret; + + if (mutex_lock_interruptible(&adf_ctl_lock)) + return -EFAULT; + + switch (cmd) { + case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS: + ret = adf_ctl_ioctl_dev_config(fp, cmd, arg); + break; + + case IOCTL_STOP_ACCEL_DEV: + ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg); + break; + + case IOCTL_START_ACCEL_DEV: + ret = adf_ctl_ioctl_dev_start(fp, cmd, arg); + break; + + case IOCTL_GET_NUM_DEVICES: + ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg); + break; + + case IOCTL_STATUS_ACCEL_DEV: + ret = adf_ctl_ioctl_get_status(fp, cmd, arg); + break; + default: + pr_err("QAT: Invalid ioctl\n"); + ret = -EFAULT; + break; + } + mutex_unlock(&adf_ctl_lock); + return ret; +} + +static int __init adf_register_ctl_device_driver(void) +{ + mutex_init(&adf_ctl_lock); + + if (qat_algs_init()) + goto err_algs_init; + + if (adf_chr_drv_create()) + goto err_chr_dev; + + if (adf_init_aer()) + goto err_aer; + + if (qat_crypto_register()) + goto err_crypto_register; + + return 0; + +err_crypto_register: + adf_exit_aer(); +err_aer: + 
adf_chr_drv_destroy(); +err_chr_dev: + qat_algs_exit(); +err_algs_init: + mutex_destroy(&adf_ctl_lock); + return -EFAULT; +} + +static void __exit adf_unregister_ctl_device_driver(void) +{ + adf_chr_drv_destroy(); + adf_exit_aer(); + qat_crypto_unregister(); + qat_algs_exit(); + mutex_destroy(&adf_ctl_lock); +} + +module_init(adf_register_ctl_device_driver); +module_exit(adf_unregister_ctl_device_driver); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel"); +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); +MODULE_ALIAS_CRYPTO("intel_qat"); diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c new file mode 100644 index 000000000..3f0ff9e7d --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c @@ -0,0 +1,220 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include <linux/mutex.h> +#include <linux/list.h> +#include "adf_cfg.h" +#include "adf_common_drv.h" + +static LIST_HEAD(accel_table); +static DEFINE_MUTEX(table_lock); +static uint32_t num_devices; + +/** + * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework + * @accel_dev: Pointer to acceleration device. + * + * Function adds acceleration device to the acceleration framework. + * To be used by QAT device specific drivers. + * + * Return: 0 on success, error code othewise. 
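adf_devmgr_add_dev() above is the registration point the kernel-doc keeps referring to. A hedged sketch of the order a device-specific probe plausibly follows, pieced together from the helpers declared in adf_common_drv.h; the wrapper and its unwinding are illustrative, not a description of the real dh895xcc probe:

        /* Illustrative registration order -- not part of the patch. */
        static int example_register_dev(struct adf_accel_dev *accel_dev)
        {
                int ret;

                ret = adf_devmgr_add_dev(accel_dev);  /* assigns accel_id, joins accel_table */
                if (ret)
                        return ret;
                /* assumes accel_dev->debugfs_dir was created beforehand */
                ret = adf_cfg_dev_add(accel_dev);     /* per-device config table + debugfs node */
                if (ret)
                        goto err_rm;
                return 0;
        err_rm:
                adf_devmgr_rm_dev(accel_dev);
                return ret;
        }

Actual start-up (adf_dev_init() followed by adf_dev_start()) is then triggered separately, for example through the IOCTL_START_ACCEL_DEV path in adf_ctl_drv.c above.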
+ */ +int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev) +{ + struct list_head *itr; + + if (num_devices == ADF_MAX_DEVICES) { + dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n", + ADF_MAX_DEVICES); + return -EFAULT; + } + + mutex_lock(&table_lock); + list_for_each(itr, &accel_table) { + struct adf_accel_dev *ptr = + list_entry(itr, struct adf_accel_dev, list); + + if (ptr == accel_dev) { + mutex_unlock(&table_lock); + return -EEXIST; + } + } + atomic_set(&accel_dev->ref_count, 0); + list_add_tail(&accel_dev->list, &accel_table); + accel_dev->accel_id = num_devices++; + mutex_unlock(&table_lock); + return 0; +} +EXPORT_SYMBOL_GPL(adf_devmgr_add_dev); + +struct list_head *adf_devmgr_get_head(void) +{ + return &accel_table; +} + +/** + * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework. + * @accel_dev: Pointer to acceleration device. + * + * Function removes acceleration device from the acceleration framework. + * To be used by QAT device specific drivers. + * + * Return: void + */ +void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev) +{ + mutex_lock(&table_lock); + list_del(&accel_dev->list); + num_devices--; + mutex_unlock(&table_lock); +} +EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev); + +struct adf_accel_dev *adf_devmgr_get_first(void) +{ + struct adf_accel_dev *dev = NULL; + + if (!list_empty(&accel_table)) + dev = list_first_entry(&accel_table, struct adf_accel_dev, + list); + return dev; +} + +/** + * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev. + * @accel_dev: Pointer to pci device. + * + * Function returns acceleration device associated with the given pci device. + * To be used by QAT device specific drivers. + * + * Return: pointer to accel_dev or NULL if not found. + */ +struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev) +{ + struct list_head *itr; + + mutex_lock(&table_lock); + list_for_each(itr, &accel_table) { + struct adf_accel_dev *ptr = + list_entry(itr, struct adf_accel_dev, list); + + if (ptr->accel_pci_dev.pci_dev == pci_dev) { + mutex_unlock(&table_lock); + return ptr; + } + } + mutex_unlock(&table_lock); + return NULL; +} +EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev); + +struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id) +{ + struct list_head *itr; + + mutex_lock(&table_lock); + list_for_each(itr, &accel_table) { + struct adf_accel_dev *ptr = + list_entry(itr, struct adf_accel_dev, list); + + if (ptr->accel_id == id) { + mutex_unlock(&table_lock); + return ptr; + } + } + mutex_unlock(&table_lock); + return NULL; +} + +int adf_devmgr_verify_id(uint32_t id) +{ + if (id == ADF_CFG_ALL_DEVICES) + return 0; + + if (adf_devmgr_get_dev_by_id(id)) + return 0; + + return -ENODEV; +} + +void adf_devmgr_get_num_dev(uint32_t *num) +{ + struct list_head *itr; + + *num = 0; + list_for_each(itr, &accel_table) { + (*num)++; + } +} + +int adf_dev_in_use(struct adf_accel_dev *accel_dev) +{ + return atomic_read(&accel_dev->ref_count) != 0; +} + +int adf_dev_get(struct adf_accel_dev *accel_dev) +{ + if (atomic_add_return(1, &accel_dev->ref_count) == 1) + if (!try_module_get(accel_dev->owner)) + return -EFAULT; + return 0; +} + +void adf_dev_put(struct adf_accel_dev *accel_dev) +{ + if (atomic_sub_return(1, &accel_dev->ref_count) == 0) + module_put(accel_dev->owner); +} + +int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev) +{ + return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status); +} + +int adf_dev_started(struct adf_accel_dev *accel_dev) +{ + return test_bit(ADF_STATUS_STARTED, 
&accel_dev->status); +} diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c new file mode 100644 index 000000000..245f43237 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -0,0 +1,470 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include "adf_accel_devices.h" +#include "adf_cfg.h" +#include "adf_common_drv.h" + +static LIST_HEAD(service_table); +static DEFINE_MUTEX(service_lock); + +static void adf_service_add(struct service_hndl *service) +{ + mutex_lock(&service_lock); + list_add(&service->list, &service_table); + mutex_unlock(&service_lock); +} + +/** + * adf_service_register() - Register acceleration service in the accel framework + * @service: Pointer to the service + * + * Function adds the acceleration service to the acceleration framework. + * To be used by QAT device specific drivers. + * + * Return: 0 on success, error code othewise. 
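The device-manager helpers in adf_dev_mgr.c above (adf_devmgr_get_dev_by_id(), adf_dev_get(), adf_dev_put(), adf_dev_started()) are how the rest of the driver pins an accelerator while it is in use. A minimal sketch of a caller inside the common driver, assuming the helpers are declared in adf_common_drv.h as the includes in that file suggest:

/* Illustrative sketch only; not part of the patch. */
#include <linux/types.h>
#include <linux/errno.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"

static int use_accel_dev(uint32_t accel_id)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_get_dev_by_id(accel_id);

	if (!accel_dev || !adf_dev_started(accel_dev))
		return -ENODEV;

	/* Takes a reference on the device and on its owning module;
	 * adf_dev_get() returns an error if the module is going away. */
	if (adf_dev_get(accel_dev))
		return -EFAULT;

	/* ... submit work to accel_dev here ... */

	adf_dev_put(accel_dev);
	return 0;
}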
+ */ +int adf_service_register(struct service_hndl *service) +{ + service->init_status = 0; + service->start_status = 0; + adf_service_add(service); + return 0; +} +EXPORT_SYMBOL_GPL(adf_service_register); + +static void adf_service_remove(struct service_hndl *service) +{ + mutex_lock(&service_lock); + list_del(&service->list); + mutex_unlock(&service_lock); +} + +/** + * adf_service_unregister() - Unregister acceleration service from the framework + * @service: Pointer to the service + * + * Function remove the acceleration service from the acceleration framework. + * To be used by QAT device specific drivers. + * + * Return: 0 on success, error code othewise. + */ +int adf_service_unregister(struct service_hndl *service) +{ + if (service->init_status || service->start_status) { + pr_err("QAT: Could not remove active service\n"); + return -EFAULT; + } + adf_service_remove(service); + return 0; +} +EXPORT_SYMBOL_GPL(adf_service_unregister); + +/** + * adf_dev_init() - Init data structures and services for the given accel device + * @accel_dev: Pointer to acceleration device. + * + * Initialize the ring data structures and the admin comms and arbitration + * services. + * + * Return: 0 on success, error code othewise. + */ +int adf_dev_init(struct adf_accel_dev *accel_dev) +{ + struct service_hndl *service; + struct list_head *list_itr; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + + if (!hw_data) { + dev_err(&GET_DEV(accel_dev), + "Failed to init device - hw_data not set\n"); + return -EFAULT; + } + + if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) { + dev_err(&GET_DEV(accel_dev), "Device not configured\n"); + return -EFAULT; + } + + if (adf_init_etr_data(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "Failed initialize etr\n"); + return -EFAULT; + } + + if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n"); + return -EFAULT; + } + + if (hw_data->init_arb && hw_data->init_arb(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n"); + return -EFAULT; + } + + hw_data->enable_ints(accel_dev); + + if (adf_ae_init(accel_dev)) { + dev_err(&GET_DEV(accel_dev), + "Failed to initialise Acceleration Engine\n"); + return -EFAULT; + } + set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status); + + if (adf_ae_fw_load(accel_dev)) { + dev_err(&GET_DEV(accel_dev), + "Failed to load acceleration FW\n"); + return -EFAULT; + } + set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status); + + if (hw_data->alloc_irq(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n"); + return -EFAULT; + } + set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); + + /* + * Subservice initialisation is divided into two stages: init and start. + * This is to facilitate any ordering dependencies between services + * prior to starting any of the accelerators. 
+ */ + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (!service->admin) + continue; + if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { + dev_err(&GET_DEV(accel_dev), + "Failed to initialise service %s\n", + service->name); + return -EFAULT; + } + set_bit(accel_dev->accel_id, &service->init_status); + } + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (service->admin) + continue; + if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { + dev_err(&GET_DEV(accel_dev), + "Failed to initialise service %s\n", + service->name); + return -EFAULT; + } + set_bit(accel_dev->accel_id, &service->init_status); + } + + hw_data->enable_error_correction(accel_dev); + + return 0; +} +EXPORT_SYMBOL_GPL(adf_dev_init); + +/** + * adf_dev_start() - Start acceleration service for the given accel device + * @accel_dev: Pointer to acceleration device. + * + * Function notifies all the registered services that the acceleration device + * is ready to be used. + * To be used by QAT device specific drivers. + * + * Return: 0 on success, error code othewise. + */ +int adf_dev_start(struct adf_accel_dev *accel_dev) +{ + struct service_hndl *service; + struct list_head *list_itr; + + set_bit(ADF_STATUS_STARTING, &accel_dev->status); + + if (adf_ae_start(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "AE Start Failed\n"); + return -EFAULT; + } + set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); + + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (!service->admin) + continue; + if (service->event_hld(accel_dev, ADF_EVENT_START)) { + dev_err(&GET_DEV(accel_dev), + "Failed to start service %s\n", + service->name); + return -EFAULT; + } + set_bit(accel_dev->accel_id, &service->start_status); + } + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (service->admin) + continue; + if (service->event_hld(accel_dev, ADF_EVENT_START)) { + dev_err(&GET_DEV(accel_dev), + "Failed to start service %s\n", + service->name); + return -EFAULT; + } + set_bit(accel_dev->accel_id, &service->start_status); + } + + clear_bit(ADF_STATUS_STARTING, &accel_dev->status); + set_bit(ADF_STATUS_STARTED, &accel_dev->status); + + if (qat_algs_register()) { + dev_err(&GET_DEV(accel_dev), + "Failed to register crypto algs\n"); + set_bit(ADF_STATUS_STARTING, &accel_dev->status); + clear_bit(ADF_STATUS_STARTED, &accel_dev->status); + return -EFAULT; + } + return 0; +} +EXPORT_SYMBOL_GPL(adf_dev_start); + +/** + * adf_dev_stop() - Stop acceleration service for the given accel device + * @accel_dev: Pointer to acceleration device. + * + * Function notifies all the registered services that the acceleration device + * is shuting down. + * To be used by QAT device specific drivers. + * + * Return: 0 on success, error code othewise. 
+ */ +int adf_dev_stop(struct adf_accel_dev *accel_dev) +{ + struct service_hndl *service; + struct list_head *list_itr; + bool wait = false; + int ret; + + if (!adf_dev_started(accel_dev) && + !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) { + return 0; + } + clear_bit(ADF_STATUS_STARTING, &accel_dev->status); + clear_bit(ADF_STATUS_STARTED, &accel_dev->status); + + if (qat_algs_unregister()) + dev_err(&GET_DEV(accel_dev), + "Failed to unregister crypto algs\n"); + + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (service->admin) + continue; + if (!test_bit(accel_dev->accel_id, &service->start_status)) + continue; + ret = service->event_hld(accel_dev, ADF_EVENT_STOP); + if (!ret) { + clear_bit(accel_dev->accel_id, &service->start_status); + } else if (ret == -EAGAIN) { + wait = true; + clear_bit(accel_dev->accel_id, &service->start_status); + } + } + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (!service->admin) + continue; + if (!test_bit(accel_dev->accel_id, &service->start_status)) + continue; + if (service->event_hld(accel_dev, ADF_EVENT_STOP)) + dev_err(&GET_DEV(accel_dev), + "Failed to shutdown service %s\n", + service->name); + else + clear_bit(accel_dev->accel_id, &service->start_status); + } + + if (wait) + msleep(100); + + if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) { + if (adf_ae_stop(accel_dev)) + dev_err(&GET_DEV(accel_dev), "failed to stop AE\n"); + else + clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); + } + + return 0; +} +EXPORT_SYMBOL_GPL(adf_dev_stop); + +/** + * adf_dev_shutdown() - shutdown acceleration services and data strucutures + * @accel_dev: Pointer to acceleration device + * + * Cleanup the ring data structures and the admin comms and arbitration + * services. 
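The init/start/stop/shutdown sequence in this file drives every registered struct service_hndl through its event_hld callback in two passes (admin services first or last, depending on direction). A minimal sketch of a subservice hooking into that sequence; the exact layout of struct service_hndl and the name of the event enumeration live in adf_common_drv.h, which is not part of this hunk, so the enum type name (enum adf_event) and the field set used here are assumptions:

/* Illustrative sketch only; not part of the patch. */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"

static int my_service_event(struct adf_accel_dev *accel_dev,
			    enum adf_event event)
{
	switch (event) {
	case ADF_EVENT_INIT:
		/* Allocate per-device state, claim rings, etc. */
		return 0;
	case ADF_EVENT_START:
		/* The device is usable from this point on. */
		return 0;
	case ADF_EVENT_STOP:
	case ADF_EVENT_SHUTDOWN:
		/* Release per-device state. */
		return 0;
	default:
		return 0;
	}
}

static struct service_hndl my_service = {
	.event_hld = my_service_event,
	.name = "my_service",
	.admin = 0,
};

static int my_service_init(void)
{
	/* adf_service_register() clears init_status/start_status itself. */
	return adf_service_register(&my_service);
}

static void my_service_exit(void)
{
	adf_service_unregister(&my_service);
}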
+ */ +void adf_dev_shutdown(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct service_hndl *service; + struct list_head *list_itr; + + if (!hw_data) { + dev_err(&GET_DEV(accel_dev), + "QAT: Failed to shutdown device - hw_data not set\n"); + return; + } + + if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) { + adf_ae_fw_release(accel_dev); + clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status); + } + + if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) { + if (adf_ae_shutdown(accel_dev)) + dev_err(&GET_DEV(accel_dev), + "Failed to shutdown Accel Engine\n"); + else + clear_bit(ADF_STATUS_AE_INITIALISED, + &accel_dev->status); + } + + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (service->admin) + continue; + if (!test_bit(accel_dev->accel_id, &service->init_status)) + continue; + if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) + dev_err(&GET_DEV(accel_dev), + "Failed to shutdown service %s\n", + service->name); + else + clear_bit(accel_dev->accel_id, &service->init_status); + } + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (!service->admin) + continue; + if (!test_bit(accel_dev->accel_id, &service->init_status)) + continue; + if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) + dev_err(&GET_DEV(accel_dev), + "Failed to shutdown service %s\n", + service->name); + else + clear_bit(accel_dev->accel_id, &service->init_status); + } + + if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) { + hw_data->free_irq(accel_dev); + clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); + } + + /* Delete configuration only if not restarting */ + if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) + adf_cfg_del_all(accel_dev); + + if (hw_data->exit_arb) + hw_data->exit_arb(accel_dev); + + if (hw_data->exit_admin_comms) + hw_data->exit_admin_comms(accel_dev); + + adf_cleanup_etr_data(accel_dev); +} +EXPORT_SYMBOL_GPL(adf_dev_shutdown); + +int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) +{ + struct service_hndl *service; + struct list_head *list_itr; + + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (service->admin) + continue; + if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) + dev_err(&GET_DEV(accel_dev), + "Failed to restart service %s.\n", + service->name); + } + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (!service->admin) + continue; + if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) + dev_err(&GET_DEV(accel_dev), + "Failed to restart service %s.\n", + service->name); + } + return 0; +} + +int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) +{ + struct service_hndl *service; + struct list_head *list_itr; + + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (service->admin) + continue; + if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) + dev_err(&GET_DEV(accel_dev), + "Failed to restart service %s.\n", + service->name); + } + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (!service->admin) + continue; + if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) + dev_err(&GET_DEV(accel_dev), + "Failed to restart service %s.\n", + service->name); + } + return 0; +} diff --git 
a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c new file mode 100644 index 000000000..ccec32748 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -0,0 +1,575 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include <linux/delay.h> +#include "adf_accel_devices.h" +#include "adf_transport_internal.h" +#include "adf_transport_access_macros.h" +#include "adf_cfg.h" +#include "adf_common_drv.h" + +static inline uint32_t adf_modulo(uint32_t data, uint32_t shift) +{ + uint32_t div = data >> shift; + uint32_t mult = div << shift; + + return data - mult; +} + +static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size) +{ + if (((size - 1) & addr) != 0) + return -EFAULT; + return 0; +} + +static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num) +{ + int i = ADF_MIN_RING_SIZE; + + for (; i <= ADF_MAX_RING_SIZE; i++) + if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) + return i; + + return ADF_DEFAULT_RING_SIZE; +} + +static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) +{ + spin_lock(&bank->lock); + if (bank->ring_mask & (1 << ring)) { + spin_unlock(&bank->lock); + return -EFAULT; + } + bank->ring_mask |= (1 << ring); + spin_unlock(&bank->lock); + return 0; +} + +static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) +{ + spin_lock(&bank->lock); + bank->ring_mask &= ~(1 << ring); + spin_unlock(&bank->lock); +} + +static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) +{ + spin_lock_bh(&bank->lock); + bank->irq_mask |= (1 << ring); + spin_unlock_bh(&bank->lock); + WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask); + WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number, + bank->irq_coalesc_timer); +} + +static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) +{ + spin_lock_bh(&bank->lock); + bank->irq_mask &= ~(1 << ring); + spin_unlock_bh(&bank->lock); + WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask); +} + +int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) +{ + if (atomic_add_return(1, ring->inflights) > + ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) { + atomic_dec(ring->inflights); + return -EAGAIN; + } + spin_lock_bh(&ring->lock); + memcpy(ring->base_addr + ring->tail, msg, + ADF_MSG_SIZE_TO_BYTES(ring->msg_size)); + + ring->tail = adf_modulo(ring->tail + + ADF_MSG_SIZE_TO_BYTES(ring->msg_size), + ADF_RING_SIZE_MODULO(ring->ring_size)); + WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number, + ring->ring_number, ring->tail); + spin_unlock_bh(&ring->lock); + return 0; +} + +static int adf_handle_response(struct adf_etr_ring_data *ring) +{ + uint32_t msg_counter = 0; + uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head); + + while (*msg != ADF_RING_EMPTY_SIG) { + ring->callback((uint32_t *)msg); + *msg = ADF_RING_EMPTY_SIG; + ring->head = adf_modulo(ring->head + + ADF_MSG_SIZE_TO_BYTES(ring->msg_size), + ADF_RING_SIZE_MODULO(ring->ring_size)); + msg_counter++; + msg = (uint32_t *)(ring->base_addr + ring->head); + } + if (msg_counter > 0) { + WRITE_CSR_RING_HEAD(ring->bank->csr_addr, + ring->bank->bank_number, + ring->ring_number, ring->head); + atomic_sub(msg_counter, ring->inflights); + } + return 0; +} + +static void adf_configure_tx_ring(struct adf_etr_ring_data *ring) +{ + uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size); + + WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number, + ring->ring_number, ring_config); +} + +static void adf_configure_rx_ring(struct adf_etr_ring_data *ring) +{ + uint32_t ring_config = + BUILD_RESP_RING_CONFIG(ring->ring_size, + ADF_RING_NEAR_WATERMARK_512, + ADF_RING_NEAR_WATERMARK_0); + + 
WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number, + ring->ring_number, ring_config); +} + +static int adf_init_ring(struct adf_etr_ring_data *ring) +{ + struct adf_etr_bank_data *bank = ring->bank; + struct adf_accel_dev *accel_dev = bank->accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + uint64_t ring_base; + uint32_t ring_size_bytes = + ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); + + ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); + ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev), + ring_size_bytes, &ring->dma_addr, + GFP_KERNEL); + if (!ring->base_addr) + return -ENOMEM; + + memset(ring->base_addr, 0x7F, ring_size_bytes); + /* The base_addr has to be aligned to the size of the buffer */ + if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) { + dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n"); + dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes, + ring->base_addr, ring->dma_addr); + return -EFAULT; + } + + if (hw_data->tx_rings_mask & (1 << ring->ring_number)) + adf_configure_tx_ring(ring); + + else + adf_configure_rx_ring(ring); + + ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size); + WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number, + ring->ring_number, ring_base); + spin_lock_init(&ring->lock); + return 0; +} + +static void adf_cleanup_ring(struct adf_etr_ring_data *ring) +{ + uint32_t ring_size_bytes = + ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); + ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); + + if (ring->base_addr) { + memset(ring->base_addr, 0x7F, ring_size_bytes); + dma_free_coherent(&GET_DEV(ring->bank->accel_dev), + ring_size_bytes, ring->base_addr, + ring->dma_addr); + } +} + +int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, + uint32_t bank_num, uint32_t num_msgs, + uint32_t msg_size, const char *ring_name, + adf_callback_fn callback, int poll_mode, + struct adf_etr_ring_data **ring_ptr) +{ + struct adf_etr_data *transport_data = accel_dev->transport; + struct adf_etr_bank_data *bank; + struct adf_etr_ring_data *ring; + char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + uint32_t ring_num; + int ret; + + if (bank_num >= GET_MAX_BANKS(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "Invalid bank number\n"); + return -EFAULT; + } + if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) { + dev_err(&GET_DEV(accel_dev), "Invalid msg size\n"); + return -EFAULT; + } + if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs), + ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) { + dev_err(&GET_DEV(accel_dev), + "Invalid ring size for given msg size\n"); + return -EFAULT; + } + if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) { + dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n", + section, ring_name); + return -EFAULT; + } + if (kstrtouint(val, 10, &ring_num)) { + dev_err(&GET_DEV(accel_dev), "Can't get ring number\n"); + return -EFAULT; + } + + bank = &transport_data->banks[bank_num]; + if (adf_reserve_ring(bank, ring_num)) { + dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n", + ring_num, ring_name); + return -EFAULT; + } + ring = &bank->rings[ring_num]; + ring->ring_number = ring_num; + ring->bank = bank; + ring->callback = callback; + ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size); + ring->ring_size = adf_verify_ring_size(msg_size, num_msgs); + ring->head = 0; + ring->tail = 0; + atomic_set(ring->inflights, 0); + ret = adf_init_ring(ring); + if (ret) + goto err; + + /* Enable HW 
arbitration for the given ring */ + accel_dev->hw_device->hw_arb_ring_enable(ring); + + if (adf_ring_debugfs_add(ring, ring_name)) { + dev_err(&GET_DEV(accel_dev), + "Couldn't add ring debugfs entry\n"); + ret = -EFAULT; + goto err; + } + + /* Enable interrupts if needed */ + if (callback && (!poll_mode)) + adf_enable_ring_irq(bank, ring->ring_number); + *ring_ptr = ring; + return 0; +err: + adf_cleanup_ring(ring); + adf_unreserve_ring(bank, ring_num); + accel_dev->hw_device->hw_arb_ring_disable(ring); + return ret; +} + +void adf_remove_ring(struct adf_etr_ring_data *ring) +{ + struct adf_etr_bank_data *bank = ring->bank; + struct adf_accel_dev *accel_dev = bank->accel_dev; + + /* Disable interrupts for the given ring */ + adf_disable_ring_irq(bank, ring->ring_number); + + /* Clear PCI config space */ + WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number, + ring->ring_number, 0); + WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number, + ring->ring_number, 0); + adf_ring_debugfs_rm(ring); + adf_unreserve_ring(bank, ring->ring_number); + /* Disable HW arbitration for the given ring */ + accel_dev->hw_device->hw_arb_ring_disable(ring); + adf_cleanup_ring(ring); +} + +static void adf_ring_response_handler(struct adf_etr_bank_data *bank) +{ + uint32_t empty_rings, i; + + empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number); + empty_rings = ~empty_rings & bank->irq_mask; + + for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) { + if (empty_rings & (1 << i)) + adf_handle_response(&bank->rings[i]); + } +} + +/** + * adf_response_handler() - Bottom half handler response handler + * @bank_addr: Address of a ring bank for with the BH was scheduled. + * + * Function is the bottom half handler for the response from acceleration + * device. There is one handler for every ring bank. Function checks all + * communication rings in the bank. + * To be used by QAT device specific drivers. 
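adf_response_handler(), implemented just below, is written to run as a per-bank tasklet bottom half; struct adf_etr_bank_data (see adf_transport_internal.h further down) carries the tasklet_struct resp_handler member for it. The interrupt code that schedules the tasklet is not part of this hunk, so the following is only a sketch of how a device-specific ISR might defer to it; my_bank_isr() and my_setup_bank_bh() are hypothetical names.

/* Illustrative sketch only; not part of the patch. */
#include <linux/interrupt.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"

static void my_setup_bank_bh(struct adf_etr_bank_data *bank)
{
	/* adf_response_handler() takes the bank address as its argument. */
	tasklet_init(&bank->resp_handler, adf_response_handler,
		     (unsigned long)bank);
}

static irqreturn_t my_bank_isr(int irq, void *privdata)
{
	struct adf_etr_bank_data *bank = privdata;

	/* Defer response processing to the per-bank bottom half. */
	tasklet_hi_schedule(&bank->resp_handler);
	return IRQ_HANDLED;
}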
+ * + * Return: void + */ +void adf_response_handler(unsigned long bank_addr) +{ + struct adf_etr_bank_data *bank = (void *)bank_addr; + + /* Handle all the responses nad reenable IRQs */ + adf_ring_response_handler(bank); + WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, + bank->irq_mask); +} +EXPORT_SYMBOL_GPL(adf_response_handler); + +static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev, + const char *section, const char *format, + uint32_t key, uint32_t *value) +{ + char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + + snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key); + + if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf)) + return -EFAULT; + + if (kstrtouint(val_buf, 10, value)) + return -EFAULT; + return 0; +} + +static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank, + const char *section, + uint32_t bank_num_in_accel) +{ + if (adf_get_cfg_int(bank->accel_dev, section, + ADF_ETRMGR_COALESCE_TIMER_FORMAT, + bank_num_in_accel, &bank->irq_coalesc_timer)) + bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME; + + if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer || + ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer) + bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME; +} + +static int adf_init_bank(struct adf_accel_dev *accel_dev, + struct adf_etr_bank_data *bank, + uint32_t bank_num, void __iomem *csr_addr) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_etr_ring_data *ring; + struct adf_etr_ring_data *tx_ring; + uint32_t i, coalesc_enabled = 0; + + memset(bank, 0, sizeof(*bank)); + bank->bank_number = bank_num; + bank->csr_addr = csr_addr; + bank->accel_dev = accel_dev; + spin_lock_init(&bank->lock); + + /* Enable IRQ coalescing always. This will allow to use + * the optimised flag and coalesc register. + * If it is disabled in the config file just use min time value */ + if ((adf_get_cfg_int(accel_dev, "Accelerator0", + ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num, + &coalesc_enabled) == 0) && coalesc_enabled) + adf_get_coalesc_timer(bank, "Accelerator0", bank_num); + else + bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME; + + for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) { + WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0); + WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0); + ring = &bank->rings[i]; + if (hw_data->tx_rings_mask & (1 << i)) { + ring->inflights = + kzalloc_node(sizeof(atomic_t), + GFP_KERNEL, + dev_to_node(&GET_DEV(accel_dev))); + if (!ring->inflights) + goto err; + } else { + if (i < hw_data->tx_rx_gap) { + dev_err(&GET_DEV(accel_dev), + "Invalid tx rings mask config\n"); + goto err; + } + tx_ring = &bank->rings[i - hw_data->tx_rx_gap]; + ring->inflights = tx_ring->inflights; + } + } + if (adf_bank_debugfs_add(bank)) { + dev_err(&GET_DEV(accel_dev), + "Failed to add bank debugfs entry\n"); + goto err; + } + + WRITE_CSR_INT_SRCSEL(csr_addr, bank_num); + return 0; +err: + for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) { + ring = &bank->rings[i]; + if (hw_data->tx_rings_mask & (1 << i) && ring->inflights) + kfree(ring->inflights); + } + return -ENOMEM; +} + +/** + * adf_init_etr_data() - Initialize transport rings for acceleration device + * @accel_dev: Pointer to acceleration device. + * + * Function is the initializes the communications channels (rings) to the + * acceleration device accel_dev. + * To be used by QAT device specific drivers. + * + * Return: 0 on success, error code othewise. 
+ */ +int adf_init_etr_data(struct adf_accel_dev *accel_dev) +{ + struct adf_etr_data *etr_data; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + void __iomem *csr_addr; + uint32_t size; + uint32_t num_banks = 0; + int i, ret; + + etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL, + dev_to_node(&GET_DEV(accel_dev))); + if (!etr_data) + return -ENOMEM; + + num_banks = GET_MAX_BANKS(accel_dev); + size = num_banks * sizeof(struct adf_etr_bank_data); + etr_data->banks = kzalloc_node(size, GFP_KERNEL, + dev_to_node(&GET_DEV(accel_dev))); + if (!etr_data->banks) { + ret = -ENOMEM; + goto err_bank; + } + + accel_dev->transport = etr_data; + i = hw_data->get_etr_bar_id(hw_data); + csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr; + + /* accel_dev->debugfs_dir should always be non-NULL here */ + etr_data->debug = debugfs_create_dir("transport", + accel_dev->debugfs_dir); + if (!etr_data->debug) { + dev_err(&GET_DEV(accel_dev), + "Unable to create transport debugfs entry\n"); + ret = -ENOENT; + goto err_bank_debug; + } + + for (i = 0; i < num_banks; i++) { + ret = adf_init_bank(accel_dev, &etr_data->banks[i], i, + csr_addr); + if (ret) + goto err_bank_all; + } + + return 0; + +err_bank_all: + debugfs_remove(etr_data->debug); +err_bank_debug: + kfree(etr_data->banks); +err_bank: + kfree(etr_data); + accel_dev->transport = NULL; + return ret; +} +EXPORT_SYMBOL_GPL(adf_init_etr_data); + +static void cleanup_bank(struct adf_etr_bank_data *bank) +{ + uint32_t i; + + for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) { + struct adf_accel_dev *accel_dev = bank->accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_etr_ring_data *ring = &bank->rings[i]; + + if (bank->ring_mask & (1 << i)) + adf_cleanup_ring(ring); + + if (hw_data->tx_rings_mask & (1 << i)) + kfree(ring->inflights); + } + adf_bank_debugfs_rm(bank); + memset(bank, 0, sizeof(*bank)); +} + +static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev) +{ + struct adf_etr_data *etr_data = accel_dev->transport; + uint32_t i, num_banks = GET_MAX_BANKS(accel_dev); + + for (i = 0; i < num_banks; i++) + cleanup_bank(&etr_data->banks[i]); +} + +/** + * adf_cleanup_etr_data() - Clear transport rings for acceleration device + * @accel_dev: Pointer to acceleration device. + * + * Function is the clears the communications channels (rings) of the + * acceleration device accel_dev. + * To be used by QAT device specific drivers. + * + * Return: void + */ +void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev) +{ + struct adf_etr_data *etr_data = accel_dev->transport; + + if (etr_data) { + adf_cleanup_etr_handles(accel_dev); + debugfs_remove(etr_data->debug); + kfree(etr_data->banks); + kfree(etr_data); + accel_dev->transport = NULL; + } +} +EXPORT_SYMBOL_GPL(adf_cleanup_etr_data); diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h new file mode 100644 index 000000000..386485bd9 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_transport.h @@ -0,0 +1,63 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. 
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_TRANSPORT_H +#define ADF_TRANSPORT_H + +#include "adf_accel_devices.h" + +struct adf_etr_ring_data; + +typedef void (*adf_callback_fn)(void *resp_msg); + +int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, + uint32_t bank_num, uint32_t num_mgs, uint32_t msg_size, + const char *ring_name, adf_callback_fn callback, + int poll_mode, struct adf_etr_ring_data **ring_ptr); + +int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg); +void adf_remove_ring(struct adf_etr_ring_data *ring); +#endif diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h new file mode 100644 index 000000000..160c9a36c --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h @@ -0,0 +1,163 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. 
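The ring API declared in adf_transport.h above (adf_create_ring(), adf_send_message(), adf_remove_ring()) is what a service such as the crypto layer uses to talk to the firmware. A minimal sketch of a caller follows; the configuration section and ring-name keys ("MY_SECTION", "MyRingTx", "MyRingRx") are placeholders, since the real keys come from the device configuration, which is outside this hunk, and the message layout is reduced to a raw buffer.

/* Illustrative sketch only; not part of the patch. Section/key names and
 * the 64-byte message size are placeholders. */
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"

#define MY_MSG_BYTES	64	/* must correspond to a valid ADF_MSG_SIZE_* */

static struct adf_etr_ring_data *my_tx_ring;
static struct adf_etr_ring_data *my_rx_ring;

static void my_response_callback(void *resp)
{
	/* Invoked from adf_response_handler() for every response message. */
}

static int my_create_rings(struct adf_accel_dev *accel_dev)
{
	int ret;

	/* TX ring: no callback, no interrupts needed. */
	ret = adf_create_ring(accel_dev, "MY_SECTION", 0 /* bank */,
			      256 /* msgs */, MY_MSG_BYTES, "MyRingTx",
			      NULL, 0, &my_tx_ring);
	if (ret)
		return ret;

	/* RX ring: responses are delivered through the callback. */
	ret = adf_create_ring(accel_dev, "MY_SECTION", 0, 256, MY_MSG_BYTES,
			      "MyRingRx", my_response_callback, 0,
			      &my_rx_ring);
	if (ret)
		adf_remove_ring(my_tx_ring);
	return ret;
}

static int my_send(uint32_t *msg /* MY_MSG_BYTES long */)
{
	/* Returns -EAGAIN when the ring is full; callers back off and retry. */
	return adf_send_message(my_tx_ring, msg);
}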
+ Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_TRANSPORT_ACCESS_MACROS_H +#define ADF_TRANSPORT_ACCESS_MACROS_H + +#include "adf_accel_devices.h" +#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL +#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL +#define ADF_RING_CSR_RING_CONFIG 0x000 +#define ADF_RING_CSR_RING_LBASE 0x040 +#define ADF_RING_CSR_RING_UBASE 0x080 +#define ADF_RING_CSR_RING_HEAD 0x0C0 +#define ADF_RING_CSR_RING_TAIL 0x100 +#define ADF_RING_CSR_E_STAT 0x14C +#define ADF_RING_CSR_INT_SRCSEL 0x174 +#define ADF_RING_CSR_INT_SRCSEL_2 0x178 +#define ADF_RING_CSR_INT_COL_EN 0x17C +#define ADF_RING_CSR_INT_COL_CTL 0x180 +#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 +#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 +#define ADF_RING_BUNDLE_SIZE 0x1000 +#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A +#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05 +#define ADF_COALESCING_MIN_TIME 0x1FF +#define ADF_COALESCING_MAX_TIME 0xFFFFF +#define ADF_COALESCING_DEF_TIME 0x27FF +#define ADF_RING_NEAR_WATERMARK_512 0x08 +#define ADF_RING_NEAR_WATERMARK_0 0x00 +#define ADF_RING_EMPTY_SIG 0x7F7F7F7F + +/* Valid internal ring size values */ +#define ADF_RING_SIZE_128 0x01 +#define ADF_RING_SIZE_256 0x02 +#define ADF_RING_SIZE_512 0x03 +#define ADF_RING_SIZE_4K 0x06 +#define ADF_RING_SIZE_16K 0x08 +#define ADF_RING_SIZE_4M 0x10 +#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128 +#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M +#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K + +/* Valid internal msg size values */ +#define ADF_MSG_SIZE_32 0x01 +#define ADF_MSG_SIZE_64 0x02 +#define ADF_MSG_SIZE_128 0x04 +#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32 +#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128 + +/* Size to bytes conversion macros for ring and msg size values */ +#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5) +#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5) +#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7) +#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7) + +/* Minimum ring bufer size for memory allocation */ +#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? 
\ + ADF_RING_SIZE_4K : SIZE) +#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6) +#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \ + SIZE) & ~0x4) +/* Max outstanding requests */ +#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \ + ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1) +#define BUILD_RING_CONFIG(size) \ + ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \ + | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \ + | size) +#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \ + ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \ + | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \ + | size) +#define BUILD_RING_BASE_ADDR(addr, size) \ + ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size)) +#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_HEAD + (ring << 2)) +#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_TAIL + (ring << 2)) +#define READ_CSR_E_STAT(csr_base_addr, bank) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_E_STAT) +#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_CONFIG + (ring << 2), value) +#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ +do { \ + uint32_t l_base = 0, u_base = 0; \ + l_base = (uint32_t)(value & 0xFFFFFFFF); \ + u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \ +} while (0) +#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_HEAD + (ring << 2), value) +#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_TAIL + (ring << 2), value) +#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ +do { \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \ +} while (0) +#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_INT_COL_EN, value) +#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_INT_COL_CTL, \ + ADF_RING_CSR_INT_COL_CTL_ENABLE | value) +#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_INT_FLAG_AND_COL, value) +#endif diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c new file mode 100644 index 000000000..e41986967 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c @@ -0,0 +1,306 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. 
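A worked example of the sizing macros above, for a 16K ring carrying 64-byte messages (derived purely from the macro definitions):

/*
 * ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_16K)
 *	= (1 << (0x08 - 1)) << 7		= 16384 bytes
 * ADF_MSG_SIZE_TO_BYTES(ADF_MSG_SIZE_64)
 *	= 0x02 << 5				= 64 bytes  -> 256 slots
 * ADF_SIZE_TO_POW(ADF_MSG_SIZE_64)		= 2
 * ADF_MAX_INFLIGHTS(ADF_RING_SIZE_16K, ADF_MSG_SIZE_64)
 *	= (((1 << 7) << 3) >> 2) - 1		= 255
 *
 * In other words, a 16K ring of 64-byte messages has 256 slots, of which
 * at most 255 may be in flight at once; adf_send_message() returns -EAGAIN
 * rather than filling the last slot.
 */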
+ This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/seq_file.h> +#include "adf_accel_devices.h" +#include "adf_transport_internal.h" +#include "adf_transport_access_macros.h" + +static DEFINE_MUTEX(ring_read_lock); +static DEFINE_MUTEX(bank_read_lock); + +static void *adf_ring_start(struct seq_file *sfile, loff_t *pos) +{ + struct adf_etr_ring_data *ring = sfile->private; + + mutex_lock(&ring_read_lock); + if (*pos == 0) + return SEQ_START_TOKEN; + + if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) / + ADF_MSG_SIZE_TO_BYTES(ring->msg_size))) + return NULL; + + return ring->base_addr + + (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++); +} + +static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos) +{ + struct adf_etr_ring_data *ring = sfile->private; + + if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) / + ADF_MSG_SIZE_TO_BYTES(ring->msg_size))) + return NULL; + + return ring->base_addr + + (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++); +} + +static int adf_ring_show(struct seq_file *sfile, void *v) +{ + struct adf_etr_ring_data *ring = sfile->private; + struct adf_etr_bank_data *bank = ring->bank; + uint32_t *msg = v; + void __iomem *csr = ring->bank->csr_addr; + int i, x; + + if (v == SEQ_START_TOKEN) { + int head, tail, empty; + + head = READ_CSR_RING_HEAD(csr, bank->bank_number, + ring->ring_number); + tail = READ_CSR_RING_TAIL(csr, bank->bank_number, + ring->ring_number); + empty = READ_CSR_E_STAT(csr, bank->bank_number); + + seq_puts(sfile, "------- Ring configuration -------\n"); + seq_printf(sfile, "ring name: %s\n", + ring->ring_debug->ring_name); + seq_printf(sfile, "ring num %d, bank num %d\n", + ring->ring_number, ring->bank->bank_number); + seq_printf(sfile, "head %x, tail %x, empty: %d\n", + head, tail, (empty & 1 << ring->ring_number) + >> ring->ring_number); + seq_printf(sfile, "ring size %d, msg size %d\n", + ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size), + ADF_MSG_SIZE_TO_BYTES(ring->msg_size)); + seq_puts(sfile, "----------- Ring data ------------\n"); + return 0; + } + seq_printf(sfile, "%p:", msg); + x = 0; + i = 0; + for (; i < (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2); i++) { + seq_printf(sfile, " %08X", *(msg + i)); + if ((ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2) != i + 1 && + (++x == 8)) { + seq_printf(sfile, "\n%p:", msg + i + 1); + x = 0; + } + } + seq_puts(sfile, "\n"); + return 0; +} + +static void adf_ring_stop(struct seq_file *sfile, void *v) +{ + mutex_unlock(&ring_read_lock); +} + +static const struct seq_operations adf_ring_sops = { + .start = adf_ring_start, + .next = adf_ring_next, + .stop = adf_ring_stop, + .show = adf_ring_show +}; + +static int adf_ring_open(struct inode *inode, struct file *file) +{ + int ret = seq_open(file, &adf_ring_sops); + + if (!ret) { + struct seq_file *seq_f = file->private_data; + + seq_f->private = inode->i_private; + } + return ret; +} + +static const struct file_operations adf_ring_debug_fops = { + .open = adf_ring_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name) +{ + struct adf_etr_ring_debug_entry *ring_debug; + char entry_name[8]; + + ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL); + if (!ring_debug) + return -ENOMEM; + + strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name)); + snprintf(entry_name, sizeof(entry_name), "ring_%02d", + ring->ring_number); + + ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR, + 
ring->bank->bank_debug_dir, + ring, &adf_ring_debug_fops); + if (!ring_debug->debug) { + pr_err("QAT: Failed to create ring debug entry.\n"); + kfree(ring_debug); + return -EFAULT; + } + ring->ring_debug = ring_debug; + return 0; +} + +void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring) +{ + if (ring->ring_debug) { + debugfs_remove(ring->ring_debug->debug); + kfree(ring->ring_debug); + ring->ring_debug = NULL; + } +} + +static void *adf_bank_start(struct seq_file *sfile, loff_t *pos) +{ + mutex_lock(&bank_read_lock); + if (*pos == 0) + return SEQ_START_TOKEN; + + if (*pos >= ADF_ETR_MAX_RINGS_PER_BANK) + return NULL; + + return pos; +} + +static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos) +{ + if (++(*pos) >= ADF_ETR_MAX_RINGS_PER_BANK) + return NULL; + + return pos; +} + +static int adf_bank_show(struct seq_file *sfile, void *v) +{ + struct adf_etr_bank_data *bank = sfile->private; + + if (v == SEQ_START_TOKEN) { + seq_printf(sfile, "------- Bank %d configuration -------\n", + bank->bank_number); + } else { + int ring_id = *((int *)v) - 1; + struct adf_etr_ring_data *ring = &bank->rings[ring_id]; + void __iomem *csr = bank->csr_addr; + int head, tail, empty; + + if (!(bank->ring_mask & 1 << ring_id)) + return 0; + + head = READ_CSR_RING_HEAD(csr, bank->bank_number, + ring->ring_number); + tail = READ_CSR_RING_TAIL(csr, bank->bank_number, + ring->ring_number); + empty = READ_CSR_E_STAT(csr, bank->bank_number); + + seq_printf(sfile, + "ring num %02d, head %04x, tail %04x, empty: %d\n", + ring->ring_number, head, tail, + (empty & 1 << ring->ring_number) >> + ring->ring_number); + } + return 0; +} + +static void adf_bank_stop(struct seq_file *sfile, void *v) +{ + mutex_unlock(&bank_read_lock); +} + +static const struct seq_operations adf_bank_sops = { + .start = adf_bank_start, + .next = adf_bank_next, + .stop = adf_bank_stop, + .show = adf_bank_show +}; + +static int adf_bank_open(struct inode *inode, struct file *file) +{ + int ret = seq_open(file, &adf_bank_sops); + + if (!ret) { + struct seq_file *seq_f = file->private_data; + + seq_f->private = inode->i_private; + } + return ret; +} + +static const struct file_operations adf_bank_debug_fops = { + .open = adf_bank_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +int adf_bank_debugfs_add(struct adf_etr_bank_data *bank) +{ + struct adf_accel_dev *accel_dev = bank->accel_dev; + struct dentry *parent = accel_dev->transport->debug; + char name[8]; + + snprintf(name, sizeof(name), "bank_%02d", bank->bank_number); + bank->bank_debug_dir = debugfs_create_dir(name, parent); + if (!bank->bank_debug_dir) { + pr_err("QAT: Failed to create bank debug dir.\n"); + return -EFAULT; + } + + bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR, + bank->bank_debug_dir, bank, + &adf_bank_debug_fops); + if (!bank->bank_debug_cfg) { + pr_err("QAT: Failed to create bank debug entry.\n"); + debugfs_remove(bank->bank_debug_dir); + return -EFAULT; + } + return 0; +} + +void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank) +{ + debugfs_remove(bank->bank_debug_cfg); + debugfs_remove(bank->bank_debug_dir); +} diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h new file mode 100644 index 000000000..a4869627f --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h @@ -0,0 +1,117 @@ +/* + This file is provided under a dual BSD/GPLv2 license. 
When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef ADF_TRANSPORT_INTRN_H +#define ADF_TRANSPORT_INTRN_H + +#include <linux/interrupt.h> +#include <linux/spinlock_types.h> +#include "adf_transport.h" + +struct adf_etr_ring_debug_entry { + char ring_name[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + struct dentry *debug; +}; + +struct adf_etr_ring_data { + void *base_addr; + atomic_t *inflights; + spinlock_t lock; /* protects ring data struct */ + adf_callback_fn callback; + struct adf_etr_bank_data *bank; + dma_addr_t dma_addr; + uint16_t head; + uint16_t tail; + uint8_t ring_number; + uint8_t ring_size; + uint8_t msg_size; + uint8_t reserved; + struct adf_etr_ring_debug_entry *ring_debug; +} __packed; + +struct adf_etr_bank_data { + struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK]; + struct tasklet_struct resp_handler; + void __iomem *csr_addr; + struct adf_accel_dev *accel_dev; + uint32_t irq_coalesc_timer; + uint16_t ring_mask; + uint16_t irq_mask; + spinlock_t lock; /* protects bank data struct */ + struct dentry *bank_debug_dir; + struct dentry *bank_debug_cfg; + uint32_t bank_number; +} __packed; + +struct adf_etr_data { + struct adf_etr_bank_data *banks; + struct dentry *debug; +}; + +void adf_response_handler(unsigned long bank_addr); +#ifdef CONFIG_DEBUG_FS +#include <linux/debugfs.h> +int adf_bank_debugfs_add(struct adf_etr_bank_data *bank); +void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank); +int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name); +void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring); +#else +static inline int adf_bank_debugfs_add(struct adf_etr_bank_data *bank) +{ + return 0; +} + +#define adf_bank_debugfs_rm(bank) do {} while (0) + +static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, + const char *name) +{ + return 0; +} + +#define adf_ring_debugfs_rm(ring) do {} while (0) +#endif +#endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h new file mode 100644 index 000000000..f1e30e24a --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h @@ -0,0 +1,316 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef _ICP_QAT_FW_H_ +#define _ICP_QAT_FW_H_ +#include <linux/types.h> +#include "icp_qat_hw.h" + +#define QAT_FIELD_SET(flags, val, bitpos, mask) \ +{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \ + (((val) & (mask)) << (bitpos))) ; } + +#define QAT_FIELD_GET(flags, bitpos, mask) \ + (((flags) >> (bitpos)) & (mask)) + +#define ICP_QAT_FW_REQ_DEFAULT_SZ 128 +#define ICP_QAT_FW_RESP_DEFAULT_SZ 32 +#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8 +#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF +#define ICP_QAT_FW_NUM_LONGWORDS_1 1 +#define ICP_QAT_FW_NUM_LONGWORDS_2 2 +#define ICP_QAT_FW_NUM_LONGWORDS_3 3 +#define ICP_QAT_FW_NUM_LONGWORDS_4 4 +#define ICP_QAT_FW_NUM_LONGWORDS_5 5 +#define ICP_QAT_FW_NUM_LONGWORDS_6 6 +#define ICP_QAT_FW_NUM_LONGWORDS_7 7 +#define ICP_QAT_FW_NUM_LONGWORDS_10 10 +#define ICP_QAT_FW_NUM_LONGWORDS_13 13 +#define ICP_QAT_FW_NULL_REQ_SERV_ID 1 + +enum icp_qat_fw_comn_resp_serv_id { + ICP_QAT_FW_COMN_RESP_SERV_NULL, + ICP_QAT_FW_COMN_RESP_SERV_CPM_FW, + ICP_QAT_FW_COMN_RESP_SERV_DELIMITER +}; + +enum icp_qat_fw_comn_request_id { + ICP_QAT_FW_COMN_REQ_NULL = 0, + ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3, + ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4, + ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7, + ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9, + ICP_QAT_FW_COMN_REQ_DELIMITER +}; + +struct icp_qat_fw_comn_req_hdr_cd_pars { + union { + struct { + uint64_t content_desc_addr; + uint16_t content_desc_resrvd1; + uint8_t content_desc_params_sz; + uint8_t content_desc_hdr_resrvd2; + uint32_t content_desc_resrvd3; + } s; + struct { + uint32_t serv_specif_fields[4]; + } s1; + } u; +}; + +struct icp_qat_fw_comn_req_mid { + uint64_t opaque_data; + uint64_t src_data_addr; + uint64_t dest_data_addr; + uint32_t src_length; + uint32_t dst_length; +}; + +struct icp_qat_fw_comn_req_cd_ctrl { + uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5]; +}; + +struct icp_qat_fw_comn_req_hdr { + uint8_t resrvd1; + uint8_t service_cmd_id; + uint8_t service_type; + uint8_t hdr_flags; + uint16_t serv_specif_flags; + uint16_t comn_req_flags; +}; + +struct icp_qat_fw_comn_req_rqpars { + uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13]; +}; + +struct icp_qat_fw_comn_req { + struct icp_qat_fw_comn_req_hdr comn_hdr; + struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; + struct icp_qat_fw_comn_req_mid comn_mid; + struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; + struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; +}; + +struct icp_qat_fw_comn_error { + uint8_t xlat_err_code; + uint8_t cmp_err_code; +}; + +struct icp_qat_fw_comn_resp_hdr { + uint8_t resrvd1; + uint8_t service_id; + uint8_t response_type; + uint8_t hdr_flags; + struct icp_qat_fw_comn_error comn_error; + uint8_t comn_status; + uint8_t cmd_id; +}; + +struct icp_qat_fw_comn_resp { + struct 
icp_qat_fw_comn_resp_hdr comn_hdr; + uint64_t opaque_data; + uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; +}; + +#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1 +#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0 +#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7 +#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1 +#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F + +#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \ + icp_qat_fw_comn_req_hdr_t.service_type + +#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \ + icp_qat_fw_comn_req_hdr_t.service_type = val + +#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \ + icp_qat_fw_comn_req_hdr_t.service_cmd_id + +#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \ + icp_qat_fw_comn_req_hdr_t.service_cmd_id = val + +#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \ + ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags) + +#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \ + ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) + +#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \ + QAT_FIELD_GET(hdr_flags, \ + ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_VALID_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \ + (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK) + +#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \ + QAT_FIELD_SET((hdr_t.hdr_flags), (val), \ + ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_VALID_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \ + (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \ + ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) + +#define QAT_COMN_PTR_TYPE_BITPOS 0 +#define QAT_COMN_PTR_TYPE_MASK 0x1 +#define QAT_COMN_CD_FLD_TYPE_BITPOS 1 +#define QAT_COMN_CD_FLD_TYPE_MASK 0x1 +#define QAT_COMN_PTR_TYPE_FLAT 0x0 +#define QAT_COMN_PTR_TYPE_SGL 0x1 +#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0 +#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1 + +#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \ + ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \ + | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS)) + +#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \ + QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK) + +#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \ + QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \ + QAT_COMN_CD_FLD_TYPE_MASK) + +#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \ + QAT_COMN_PTR_TYPE_MASK) + +#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \ + QAT_COMN_CD_FLD_TYPE_MASK) + +#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4 +#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0 +#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0 +#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F + +#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \ + ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ + >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) + +#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ + { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK)); } + +#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \ + (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) + +#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \ + { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & 
ICP_QAT_FW_COMN_CURR_ID_MASK)); } + +#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7 +#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1 +#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5 +#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1 +#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4 +#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1 +#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3 +#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1 + +#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \ + ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \ + QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \ + (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \ + QAT_COMN_RESP_CMP_STATUS_BITPOS) | \ + (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \ + QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \ + (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \ + QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS)) + +#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \ + QAT_COMN_RESP_CRYPTO_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \ + QAT_COMN_RESP_CMP_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \ + QAT_COMN_RESP_XLAT_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \ + QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) + +#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0 +#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1 +#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0 +#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1 +#define ERR_CODE_NO_ERROR 0 +#define ERR_CODE_INVALID_BLOCK_TYPE -1 +#define ERR_CODE_NO_MATCH_ONES_COMP -2 +#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3 +#define ERR_CODE_INCOMPLETE_LEN -4 +#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5 +#define ERR_CODE_RPT_GT_SPEC_LEN -6 +#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7 +#define ERR_CODE_INV_DIS_CODE_LEN -8 +#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9 +#define ERR_CODE_DIS_TOO_FAR_BACK -10 +#define ERR_CODE_OVERFLOW_ERROR -11 +#define ERR_CODE_SOFT_ERROR -12 +#define ERR_CODE_FATAL_ERROR -13 +#define ERR_CODE_SSM_ERROR -14 +#define ERR_CODE_ENDPOINT_ERROR -15 + +enum icp_qat_fw_slice { + ICP_QAT_FW_SLICE_NULL = 0, + ICP_QAT_FW_SLICE_CIPHER = 1, + ICP_QAT_FW_SLICE_AUTH = 2, + ICP_QAT_FW_SLICE_DRAM_RD = 3, + ICP_QAT_FW_SLICE_DRAM_WR = 4, + ICP_QAT_FW_SLICE_COMP = 5, + ICP_QAT_FW_SLICE_XLAT = 6, + ICP_QAT_FW_SLICE_DELIMITER +}; +#endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h new file mode 100644 index 000000000..72a59faa9 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h @@ -0,0 +1,131 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef _ICP_QAT_FW_INIT_ADMIN_H_ +#define _ICP_QAT_FW_INIT_ADMIN_H_ + +#include "icp_qat_fw.h" + +enum icp_qat_fw_init_admin_cmd_id { + ICP_QAT_FW_INIT_ME = 0, + ICP_QAT_FW_TRNG_ENABLE = 1, + ICP_QAT_FW_TRNG_DISABLE = 2, + ICP_QAT_FW_CONSTANTS_CFG = 3, + ICP_QAT_FW_STATUS_GET = 4, + ICP_QAT_FW_COUNTERS_GET = 5, + ICP_QAT_FW_LOOPBACK = 6, + ICP_QAT_FW_HEARTBEAT_SYNC = 7, + ICP_QAT_FW_HEARTBEAT_GET = 8 +}; + +enum icp_qat_fw_init_admin_resp_status { + ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0, + ICP_QAT_FW_INIT_RESP_STATUS_FAIL +}; + +struct icp_qat_fw_init_admin_req { + uint16_t init_cfg_sz; + uint8_t resrvd1; + uint8_t init_admin_cmd_id; + uint32_t resrvd2; + uint64_t opaque_data; + uint64_t init_cfg_ptr; + uint64_t resrvd3; +}; + +struct icp_qat_fw_init_admin_resp_hdr { + uint8_t flags; + uint8_t resrvd1; + uint8_t status; + uint8_t init_admin_cmd_id; +}; + +struct icp_qat_fw_init_admin_resp_pars { + union { + uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4]; + struct { + uint32_t version_patch_num; + uint8_t context_id; + uint8_t ae_id; + uint16_t resrvd1; + uint64_t resrvd2; + } s1; + struct { + uint64_t req_rec_count; + uint64_t resp_sent_count; + } s2; + } u; +}; + +struct icp_qat_fw_init_admin_resp { + struct icp_qat_fw_init_admin_resp_hdr init_resp_hdr; + union { + uint32_t resrvd2; + struct { + uint16_t version_minor_num; + uint16_t version_major_num; + } s; + } u; + uint64_t opaque_data; + struct icp_qat_fw_init_admin_resp_pars init_resp_pars; +}; + +#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0 +#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1 +#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0 +#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1 +#define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE +#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \ + ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags) + +#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \ + ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val) + +#define 
ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, \ + ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK) +#endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h new file mode 100644 index 000000000..c8d26697e --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h @@ -0,0 +1,404 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef _ICP_QAT_FW_LA_H_ +#define _ICP_QAT_FW_LA_H_ +#include "icp_qat_fw.h" + +enum icp_qat_fw_la_cmd_id { + ICP_QAT_FW_LA_CMD_CIPHER = 0, + ICP_QAT_FW_LA_CMD_AUTH = 1, + ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2, + ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3, + ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4, + ICP_QAT_FW_LA_CMD_TRNG_TEST = 5, + ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6, + ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7, + ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8, + ICP_QAT_FW_LA_CMD_MGF1 = 9, + ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10, + ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11, + ICP_QAT_FW_LA_CMD_DELIMITER = 12 +}; + +#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK +#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR +#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK +#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR + +struct icp_qat_fw_la_bulk_req { + struct icp_qat_fw_comn_req_hdr comn_hdr; + struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; + struct icp_qat_fw_comn_req_mid comn_mid; + struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; + struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; +}; + +#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1 +#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0 +#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12 +#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1 +#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1 +#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11 +#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1 +#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1 +#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0 +#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10 +#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1 +#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4 +#define ICP_QAT_FW_LA_GCM_PROTO 2 +#define ICP_QAT_FW_LA_CCM_PROTO 1 +#define ICP_QAT_FW_LA_NO_PROTO 0 +#define QAT_LA_PROTO_BITPOS 7 +#define QAT_LA_PROTO_MASK 0x7 +#define ICP_QAT_FW_LA_CMP_AUTH_RES 1 +#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0 +#define QAT_LA_CMP_AUTH_RES_BITPOS 6 +#define QAT_LA_CMP_AUTH_RES_MASK 0x1 +#define ICP_QAT_FW_LA_RET_AUTH_RES 1 +#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0 +#define QAT_LA_RET_AUTH_RES_BITPOS 5 +#define QAT_LA_RET_AUTH_RES_MASK 0x1 +#define ICP_QAT_FW_LA_UPDATE_STATE 1 +#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0 +#define QAT_LA_UPDATE_STATE_BITPOS 4 +#define QAT_LA_UPDATE_STATE_MASK 0x1 +#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0 +#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1 +#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3 +#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1 +#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0 +#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1 +#define QAT_LA_CIPH_IV_FLD_BITPOS 2 +#define QAT_LA_CIPH_IV_FLD_MASK 0x1 +#define ICP_QAT_FW_LA_PARTIAL_NONE 0 +#define ICP_QAT_FW_LA_PARTIAL_START 1 +#define ICP_QAT_FW_LA_PARTIAL_MID 3 +#define ICP_QAT_FW_LA_PARTIAL_END 2 +#define QAT_LA_PARTIAL_BITPOS 0 +#define QAT_LA_PARTIAL_MASK 0x3 +#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \ + cmp_auth, ret_auth, update_state, \ + ciph_iv, ciphcfg, partial) \ + (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \ + QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \ + ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \ + QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \ + ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \ + QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \ + ((proto & QAT_LA_PROTO_MASK) << \ + QAT_LA_PROTO_BITPOS) | \ + ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \ + QAT_LA_CMP_AUTH_RES_BITPOS) | \ + ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \ + QAT_LA_RET_AUTH_RES_BITPOS) | 
\ + ((update_state & QAT_LA_UPDATE_STATE_MASK) << \ + QAT_LA_UPDATE_STATE_BITPOS) | \ + ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \ + QAT_LA_CIPH_IV_FLD_BITPOS) | \ + ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \ + QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \ + ((partial & QAT_LA_PARTIAL_MASK) << \ + QAT_LA_PARTIAL_BITPOS)) + +#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \ + QAT_LA_CIPH_IV_FLD_MASK) + +#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ + QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) + +#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ + QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) + +#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ + QAT_LA_GCM_IV_LEN_FLAG_MASK) + +#define ICP_QAT_FW_LA_PROTO_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK) + +#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \ + QAT_LA_CMP_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \ + QAT_LA_RET_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ + QAT_LA_DIGEST_IN_BUFFER_MASK) + +#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \ + QAT_LA_UPDATE_STATE_MASK) + +#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \ + QAT_LA_PARTIAL_MASK) + +#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \ + QAT_LA_CIPH_IV_FLD_MASK) + +#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ + QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) + +#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ + QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) + +#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ + QAT_LA_GCM_IV_LEN_FLAG_MASK) + +#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \ + QAT_LA_PROTO_MASK) + +#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \ + QAT_LA_CMP_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \ + QAT_LA_RET_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ + QAT_LA_DIGEST_IN_BUFFER_MASK) + +#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \ + QAT_LA_UPDATE_STATE_MASK) + +#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \ + QAT_LA_PARTIAL_MASK) + +struct icp_qat_fw_cipher_req_hdr_cd_pars { + union { + struct { + uint64_t content_desc_addr; + uint16_t content_desc_resrvd1; + uint8_t content_desc_params_sz; + uint8_t content_desc_hdr_resrvd2; + uint32_t content_desc_resrvd3; + } s; + struct { + uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + } s1; + } u; +}; + +struct icp_qat_fw_cipher_auth_req_hdr_cd_pars { + union { + struct { + uint64_t content_desc_addr; + 
uint16_t content_desc_resrvd1; + uint8_t content_desc_params_sz; + uint8_t content_desc_hdr_resrvd2; + uint32_t content_desc_resrvd3; + } s; + struct { + uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + } sl; + } u; +}; + +struct icp_qat_fw_cipher_cd_ctrl_hdr { + uint8_t cipher_state_sz; + uint8_t cipher_key_sz; + uint8_t cipher_cfg_offset; + uint8_t next_curr_id; + uint8_t cipher_padding_sz; + uint8_t resrvd1; + uint16_t resrvd2; + uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3]; +}; + +struct icp_qat_fw_auth_cd_ctrl_hdr { + uint32_t resrvd1; + uint8_t resrvd2; + uint8_t hash_flags; + uint8_t hash_cfg_offset; + uint8_t next_curr_id; + uint8_t resrvd3; + uint8_t outer_prefix_sz; + uint8_t final_sz; + uint8_t inner_res_sz; + uint8_t resrvd4; + uint8_t inner_state1_sz; + uint8_t inner_state2_offset; + uint8_t inner_state2_sz; + uint8_t outer_config_offset; + uint8_t outer_state1_sz; + uint8_t outer_res_sz; + uint8_t outer_prefix_offset; +}; + +struct icp_qat_fw_cipher_auth_cd_ctrl_hdr { + uint8_t cipher_state_sz; + uint8_t cipher_key_sz; + uint8_t cipher_cfg_offset; + uint8_t next_curr_id_cipher; + uint8_t cipher_padding_sz; + uint8_t hash_flags; + uint8_t hash_cfg_offset; + uint8_t next_curr_id_auth; + uint8_t resrvd1; + uint8_t outer_prefix_sz; + uint8_t final_sz; + uint8_t inner_res_sz; + uint8_t resrvd2; + uint8_t inner_state1_sz; + uint8_t inner_state2_offset; + uint8_t inner_state2_sz; + uint8_t outer_config_offset; + uint8_t outer_state1_sz; + uint8_t outer_res_sz; + uint8_t outer_prefix_offset; +}; + +#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1 +#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0 +#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240 +#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \ + (sizeof(struct icp_qat_fw_la_cipher_req_params_t)) +#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0) + +struct icp_qat_fw_la_cipher_req_params { + uint32_t cipher_offset; + uint32_t cipher_length; + union { + uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + struct { + uint64_t cipher_IV_ptr; + uint64_t resrvd1; + } s; + } u; +}; + +struct icp_qat_fw_la_auth_req_params { + uint32_t auth_off; + uint32_t auth_len; + union { + uint64_t auth_partial_st_prefix; + uint64_t aad_adr; + } u1; + uint64_t auth_res_addr; + union { + uint8_t inner_prefix_sz; + uint8_t aad_sz; + } u2; + uint8_t resrvd1; + uint8_t hash_state_sz; + uint8_t auth_res_sz; +} __packed; + +struct icp_qat_fw_la_auth_req_params_resrvd_flds { + uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6]; + union { + uint8_t inner_prefix_sz; + uint8_t aad_sz; + } u2; + uint8_t resrvd1; + uint16_t resrvd2; +}; + +struct icp_qat_fw_la_resp { + struct icp_qat_fw_comn_resp_hdr comn_resp; + uint64_t opaque_data; + uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; +}; + +#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \ + ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \ + ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) + +#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \ + ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } + +#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \ + (((cd_ctrl_hdr_t)->next_curr_id_cipher) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) + +#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \ + ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & 
ICP_QAT_FW_COMN_CURR_ID_MASK)) } + +#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \ + ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ + >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) + +#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_auth = \ + ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } + +#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \ + (((cd_ctrl_hdr_t)->next_curr_id_auth) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) + +#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_auth = \ + ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) } + +#endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h new file mode 100644 index 000000000..5e1aa40c0 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h @@ -0,0 +1,78 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef __ICP_QAT_FW_LOADER_HANDLE_H__ +#define __ICP_QAT_FW_LOADER_HANDLE_H__ +#include "icp_qat_uclo.h" + +struct icp_qat_fw_loader_ae_data { + unsigned int state; + unsigned int ustore_size; + unsigned int free_addr; + unsigned int free_size; + unsigned int live_ctx_mask; +}; + +struct icp_qat_fw_loader_hal_handle { + struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE]; + unsigned int ae_mask; + unsigned int slice_mask; + unsigned int revision_id; + unsigned int ae_max_num; + unsigned int upc_mask; + unsigned int max_ustore; +}; + +struct icp_qat_fw_loader_handle { + struct icp_qat_fw_loader_hal_handle *hal_handle; + void *obj_handle; + void __iomem *hal_sram_addr_v; + void __iomem *hal_cap_g_ctl_csr_addr_v; + void __iomem *hal_cap_ae_xfer_csr_addr_v; + void __iomem *hal_cap_ae_local_csr_addr_v; + void __iomem *hal_ep_csr_addr_v; +}; +#endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h new file mode 100644 index 000000000..85b6d241e --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_hal.h @@ -0,0 +1,125 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef __ICP_QAT_HAL_H +#define __ICP_QAT_HAL_H +#include "icp_qat_fw_loader_handle.h" + +enum hal_global_csr { + MISC_CONTROL = 0x04, + ICP_RESET = 0x0c, + ICP_GLOBAL_CLK_ENABLE = 0x50 +}; + +enum hal_ae_csr { + USTORE_ADDRESS = 0x000, + USTORE_DATA_LOWER = 0x004, + USTORE_DATA_UPPER = 0x008, + ALU_OUT = 0x010, + CTX_ARB_CNTL = 0x014, + CTX_ENABLES = 0x018, + CC_ENABLE = 0x01c, + CSR_CTX_POINTER = 0x020, + CTX_STS_INDIRECT = 0x040, + ACTIVE_CTX_STATUS = 0x044, + CTX_SIG_EVENTS_INDIRECT = 0x048, + CTX_SIG_EVENTS_ACTIVE = 0x04c, + CTX_WAKEUP_EVENTS_INDIRECT = 0x050, + LM_ADDR_0_INDIRECT = 0x060, + LM_ADDR_1_INDIRECT = 0x068, + INDIRECT_LM_ADDR_0_BYTE_INDEX = 0x0e0, + INDIRECT_LM_ADDR_1_BYTE_INDEX = 0x0e8, + FUTURE_COUNT_SIGNAL_INDIRECT = 0x078, + TIMESTAMP_LOW = 0x0c0, + TIMESTAMP_HIGH = 0x0c4, + PROFILE_COUNT = 0x144, + SIGNATURE_ENABLE = 0x150, + AE_MISC_CONTROL = 0x160, + LOCAL_CSR_STATUS = 0x180, +}; + +#define UA_ECS (0x1 << 31) +#define ACS_ABO_BITPOS 31 +#define ACS_ACNO 0x7 +#define CE_ENABLE_BITPOS 0x8 +#define CE_LMADDR_0_GLOBAL_BITPOS 16 +#define CE_LMADDR_1_GLOBAL_BITPOS 17 +#define CE_NN_MODE_BITPOS 20 +#define CE_REG_PAR_ERR_BITPOS 25 +#define CE_BREAKPOINT_BITPOS 27 +#define CE_CNTL_STORE_PARITY_ERROR_BITPOS 29 +#define CE_INUSE_CONTEXTS_BITPOS 31 +#define CE_NN_MODE (0x1 << CE_NN_MODE_BITPOS) +#define CE_INUSE_CONTEXTS (0x1 << CE_INUSE_CONTEXTS_BITPOS) +#define XCWE_VOLUNTARY (0x1) +#define LCS_STATUS (0x1) +#define MMC_SHARE_CS_BITPOS 2 +#define GLOBAL_CSR 0xA00 + +#define SET_CAP_CSR(handle, csr, val) \ + ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val) +#define GET_CAP_CSR(handle, csr) \ + ADF_CSR_RD(handle->hal_cap_g_ctl_csr_addr_v, csr) +#define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val) +#define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr) +#define AE_CSR(handle, ae) \ + (handle->hal_cap_ae_local_csr_addr_v + \ + ((ae & handle->hal_handle->ae_mask) << 12)) +#define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & csr)) +#define SET_AE_CSR(handle, ae, csr, val) \ + ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val) +#define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0) +#define AE_XFER(handle, ae) \ + (handle->hal_cap_ae_xfer_csr_addr_v + \ + ((ae & handle->hal_handle->ae_mask) << 12)) +#define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \ + ((reg & 0xff) << 2)) +#define SET_AE_XFER(handle, ae, reg, val) \ + ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val) +#define SRAM_WRITE(handle, addr, val) \ + ADF_CSR_WR(handle->hal_sram_addr_v, addr, val) +#define SRAM_READ(handle, addr) ADF_CSR_RD(handle->hal_sram_addr_v, addr) +#endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h new file mode 100644 index 000000000..121d5e6e4 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h @@ -0,0 +1,305 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef _ICP_QAT_HW_H_ +#define _ICP_QAT_HW_H_ + +enum icp_qat_hw_ae_id { + ICP_QAT_HW_AE_0 = 0, + ICP_QAT_HW_AE_1 = 1, + ICP_QAT_HW_AE_2 = 2, + ICP_QAT_HW_AE_3 = 3, + ICP_QAT_HW_AE_4 = 4, + ICP_QAT_HW_AE_5 = 5, + ICP_QAT_HW_AE_6 = 6, + ICP_QAT_HW_AE_7 = 7, + ICP_QAT_HW_AE_8 = 8, + ICP_QAT_HW_AE_9 = 9, + ICP_QAT_HW_AE_10 = 10, + ICP_QAT_HW_AE_11 = 11, + ICP_QAT_HW_AE_DELIMITER = 12 +}; + +enum icp_qat_hw_qat_id { + ICP_QAT_HW_QAT_0 = 0, + ICP_QAT_HW_QAT_1 = 1, + ICP_QAT_HW_QAT_2 = 2, + ICP_QAT_HW_QAT_3 = 3, + ICP_QAT_HW_QAT_4 = 4, + ICP_QAT_HW_QAT_5 = 5, + ICP_QAT_HW_QAT_DELIMITER = 6 +}; + +enum icp_qat_hw_auth_algo { + ICP_QAT_HW_AUTH_ALGO_NULL = 0, + ICP_QAT_HW_AUTH_ALGO_SHA1 = 1, + ICP_QAT_HW_AUTH_ALGO_MD5 = 2, + ICP_QAT_HW_AUTH_ALGO_SHA224 = 3, + ICP_QAT_HW_AUTH_ALGO_SHA256 = 4, + ICP_QAT_HW_AUTH_ALGO_SHA384 = 5, + ICP_QAT_HW_AUTH_ALGO_SHA512 = 6, + ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7, + ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8, + ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9, + ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10, + ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11, + ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12, + ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13, + ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14, + ICP_QAT_HW_AUTH_RESERVED_1 = 15, + ICP_QAT_HW_AUTH_RESERVED_2 = 16, + ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17, + ICP_QAT_HW_AUTH_RESERVED_3 = 18, + ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19, + ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20 +}; + +enum icp_qat_hw_auth_mode { + ICP_QAT_HW_AUTH_MODE0 = 0, + ICP_QAT_HW_AUTH_MODE1 = 1, + ICP_QAT_HW_AUTH_MODE2 = 2, + ICP_QAT_HW_AUTH_MODE_DELIMITER = 3 +}; + +struct icp_qat_hw_auth_config { + uint32_t config; + uint32_t reserved; +}; + +#define QAT_AUTH_MODE_BITPOS 4 +#define QAT_AUTH_MODE_MASK 0xF +#define QAT_AUTH_ALGO_BITPOS 0 +#define QAT_AUTH_ALGO_MASK 0xF +#define QAT_AUTH_CMP_BITPOS 8 +#define QAT_AUTH_CMP_MASK 0x7F +#define QAT_AUTH_SHA3_PADDING_BITPOS 16 +#define QAT_AUTH_SHA3_PADDING_MASK 0x1 
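/*
 * Editorial illustration, not part of the imported header: the QAT_AUTH_*
 * bit positions and masks above describe the layout of the 32-bit auth
 * config word that ICP_QAT_HW_AUTH_CONFIG_BUILD(), defined just below,
 * packs into struct icp_qat_hw_auth_config.config. As a worked example,
 * taking mode-1 SHA1 with a 20-byte comparison length (values chosen purely
 * for illustration):
 *
 *   ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
 *                                ICP_QAT_HW_AUTH_ALGO_SHA1, 20)
 *     == (1  << QAT_AUTH_MODE_BITPOS)      0x00000010
 *      | (1  << QAT_AUTH_ALGO_BITPOS)      0x00000001
 *      | (20 << QAT_AUTH_CMP_BITPOS)       0x00001400
 *     == 0x00001411
 *
 * The SHA-3 algorithm and padding bits stay clear here, since SHA1 is not
 * one of the SHA-3 algorithms.
 */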
+#define QAT_AUTH_ALGO_SHA3_BITPOS 22 +#define QAT_AUTH_ALGO_SHA3_MASK 0x3 +#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \ + (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \ + ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \ + (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \ + QAT_AUTH_ALGO_SHA3_BITPOS) | \ + (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \ + (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \ + & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \ + ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS)) + +struct icp_qat_hw_auth_counter { + __be32 counter; + uint32_t reserved; +}; + +#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF +#define QAT_AUTH_COUNT_BITPOS 0 +#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \ + (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS) + +struct icp_qat_hw_auth_setup { + struct icp_qat_hw_auth_config auth_config; + struct icp_qat_hw_auth_counter auth_counter; +}; + +#define QAT_HW_DEFAULT_ALIGNMENT 8 +#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~(n - 1))) +#define ICP_QAT_HW_NULL_STATE1_SZ 32 +#define ICP_QAT_HW_MD5_STATE1_SZ 16 +#define ICP_QAT_HW_SHA1_STATE1_SZ 20 +#define ICP_QAT_HW_SHA224_STATE1_SZ 32 +#define ICP_QAT_HW_SHA256_STATE1_SZ 32 +#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32 +#define ICP_QAT_HW_SHA384_STATE1_SZ 64 +#define ICP_QAT_HW_SHA512_STATE1_SZ 64 +#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64 +#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28 +#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48 +#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16 +#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16 +#define ICP_QAT_HW_AES_F9_STATE1_SZ 32 +#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16 +#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16 +#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8 +#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8 +#define ICP_QAT_HW_NULL_STATE2_SZ 32 +#define ICP_QAT_HW_MD5_STATE2_SZ 16 +#define ICP_QAT_HW_SHA1_STATE2_SZ 20 +#define ICP_QAT_HW_SHA224_STATE2_SZ 32 +#define ICP_QAT_HW_SHA256_STATE2_SZ 32 +#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0 +#define ICP_QAT_HW_SHA384_STATE2_SZ 64 +#define ICP_QAT_HW_SHA512_STATE2_SZ 64 +#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0 +#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0 +#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0 +#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16 +#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16 +#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16 +#define ICP_QAT_HW_F9_IK_SZ 16 +#define ICP_QAT_HW_F9_FK_SZ 16 +#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \ + ICP_QAT_HW_F9_FK_SZ) +#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ +#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24 +#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32 +#define ICP_QAT_HW_GALOIS_H_SZ 16 +#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8 +#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16 + +struct icp_qat_hw_auth_sha512 { + struct icp_qat_hw_auth_setup inner_setup; + uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ]; + struct icp_qat_hw_auth_setup outer_setup; + uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ]; +}; + +struct icp_qat_hw_auth_algo_blk { + struct icp_qat_hw_auth_sha512 sha; +}; + +#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0 +#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF + +enum icp_qat_hw_cipher_algo { + ICP_QAT_HW_CIPHER_ALGO_NULL = 0, + ICP_QAT_HW_CIPHER_ALGO_DES = 1, + ICP_QAT_HW_CIPHER_ALGO_3DES = 2, + ICP_QAT_HW_CIPHER_ALGO_AES128 = 3, + ICP_QAT_HW_CIPHER_ALGO_AES192 = 4, + ICP_QAT_HW_CIPHER_ALGO_AES256 = 5, + ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6, + ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7, + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 
8, + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9, + ICP_QAT_HW_CIPHER_DELIMITER = 10 +}; + +enum icp_qat_hw_cipher_mode { + ICP_QAT_HW_CIPHER_ECB_MODE = 0, + ICP_QAT_HW_CIPHER_CBC_MODE = 1, + ICP_QAT_HW_CIPHER_CTR_MODE = 2, + ICP_QAT_HW_CIPHER_F8_MODE = 3, + ICP_QAT_HW_CIPHER_XTS_MODE = 6, + ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7 +}; + +struct icp_qat_hw_cipher_config { + uint32_t val; + uint32_t reserved; +}; + +enum icp_qat_hw_cipher_dir { + ICP_QAT_HW_CIPHER_ENCRYPT = 0, + ICP_QAT_HW_CIPHER_DECRYPT = 1, +}; + +enum icp_qat_hw_cipher_convert { + ICP_QAT_HW_CIPHER_NO_CONVERT = 0, + ICP_QAT_HW_CIPHER_KEY_CONVERT = 1, +}; + +#define QAT_CIPHER_MODE_BITPOS 4 +#define QAT_CIPHER_MODE_MASK 0xF +#define QAT_CIPHER_ALGO_BITPOS 0 +#define QAT_CIPHER_ALGO_MASK 0xF +#define QAT_CIPHER_CONVERT_BITPOS 9 +#define QAT_CIPHER_CONVERT_MASK 0x1 +#define QAT_CIPHER_DIR_BITPOS 8 +#define QAT_CIPHER_DIR_MASK 0x1 +#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2 +#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2 +#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \ + (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \ + ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \ + ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \ + ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS)) +#define ICP_QAT_HW_DES_BLK_SZ 8 +#define ICP_QAT_HW_3DES_BLK_SZ 8 +#define ICP_QAT_HW_NULL_BLK_SZ 8 +#define ICP_QAT_HW_AES_BLK_SZ 16 +#define ICP_QAT_HW_KASUMI_BLK_SZ 8 +#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8 +#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8 +#define ICP_QAT_HW_NULL_KEY_SZ 256 +#define ICP_QAT_HW_DES_KEY_SZ 8 +#define ICP_QAT_HW_3DES_KEY_SZ 24 +#define ICP_QAT_HW_AES_128_KEY_SZ 16 +#define ICP_QAT_HW_AES_192_KEY_SZ 24 +#define ICP_QAT_HW_AES_256_KEY_SZ 32 +#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ + QAT_CIPHER_MODE_F8_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \ + QAT_CIPHER_MODE_F8_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ + QAT_CIPHER_MODE_F8_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ + QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ + QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) +#define ICP_QAT_HW_KASUMI_KEY_SZ 16 +#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \ + QAT_CIPHER_MODE_F8_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ + QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ + QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) +#define ICP_QAT_HW_ARC4_KEY_SZ 256 +#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16 +#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16 +#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16 +#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16 +#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2 +#define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024 + +struct icp_qat_hw_cipher_aes256_f8 { + struct icp_qat_hw_cipher_config cipher_config; + uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ]; +}; + +struct icp_qat_hw_cipher_algo_blk { + struct icp_qat_hw_cipher_aes256_f8 aes; +} __aligned(64); +#endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h new file mode 100644 index 000000000..2132a8cbc --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h @@ -0,0 +1,377 @@ +/* + This file is provided under a dual BSD/GPLv2 license. 
When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef __ICP_QAT_UCLO_H__ +#define __ICP_QAT_UCLO_H__ + +#define ICP_QAT_AC_C_CPU_TYPE 0x00400000 +#define ICP_QAT_UCLO_MAX_AE 12 +#define ICP_QAT_UCLO_MAX_CTX 8 +#define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX) +#define ICP_QAT_UCLO_MAX_USTORE 0x4000 +#define ICP_QAT_UCLO_MAX_XFER_REG 128 +#define ICP_QAT_UCLO_MAX_GPR_REG 128 +#define ICP_QAT_UCLO_MAX_NN_REG 128 +#define ICP_QAT_UCLO_MAX_LMEM_REG 1024 +#define ICP_QAT_UCLO_AE_ALL_CTX 0xff +#define ICP_QAT_UOF_OBJID_LEN 8 +#define ICP_QAT_UOF_FID 0xc6c2 +#define ICP_QAT_UOF_MAJVER 0x4 +#define ICP_QAT_UOF_MINVER 0x11 +#define ICP_QAT_UOF_NN_MODE_NOTCARE 0xff +#define ICP_QAT_UOF_OBJS "UOF_OBJS" +#define ICP_QAT_UOF_STRT "UOF_STRT" +#define ICP_QAT_UOF_GTID "UOF_GTID" +#define ICP_QAT_UOF_IMAG "UOF_IMAG" +#define ICP_QAT_UOF_IMEM "UOF_IMEM" +#define ICP_QAT_UOF_MSEG "UOF_MSEG" +#define ICP_QAT_UOF_LOCAL_SCOPE 1 +#define ICP_QAT_UOF_INIT_EXPR 0 +#define ICP_QAT_UOF_INIT_REG 1 +#define ICP_QAT_UOF_INIT_REG_CTX 2 +#define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP 3 + +#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf) +#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf) +#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1) +#define RELOADABLE_CTX_SHARED_MODE(ae_mode) (((ae_mode) >> 0xc) & 0x1) + +#define ICP_QAT_LOC_MEM0_MODE(ae_mode) (((ae_mode) >> 0x8) & 0x1) +#define ICP_QAT_LOC_MEM1_MODE(ae_mode) (((ae_mode) >> 0x9) & 0x1) + +enum icp_qat_uof_mem_region { + ICP_QAT_UOF_SRAM_REGION = 0x0, + ICP_QAT_UOF_LMEM_REGION = 0x3, + ICP_QAT_UOF_UMEM_REGION = 0x5 +}; + +enum icp_qat_uof_regtype { + ICP_NO_DEST, + ICP_GPA_REL, + ICP_GPA_ABS, + ICP_GPB_REL, + ICP_GPB_ABS, + ICP_SR_REL, + ICP_SR_RD_REL, + ICP_SR_WR_REL, + ICP_SR_ABS, + ICP_SR_RD_ABS, + ICP_SR_WR_ABS, + ICP_DR_REL, + ICP_DR_RD_REL, + ICP_DR_WR_REL, + ICP_DR_ABS, + ICP_DR_RD_ABS, + ICP_DR_WR_ABS, + ICP_LMEM, + ICP_LMEM0, + ICP_LMEM1, + ICP_NEIGH_REL, +}; + +struct icp_qat_uclo_page { + struct icp_qat_uclo_encap_page *encap_page; + struct icp_qat_uclo_region *region; + unsigned int flags; +}; + +struct icp_qat_uclo_region { + struct icp_qat_uclo_page *loaded; + struct icp_qat_uclo_page *page; +}; + +struct icp_qat_uclo_aeslice { + struct icp_qat_uclo_region *region; + struct icp_qat_uclo_page *page; + struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX]; + struct icp_qat_uclo_encapme *encap_image; + unsigned int ctx_mask_assigned; + unsigned int new_uaddr[ICP_QAT_UCLO_MAX_CTX]; +}; + +struct icp_qat_uclo_aedata { + unsigned int slice_num; + unsigned int eff_ustore_size; + struct icp_qat_uclo_aeslice ae_slices[ICP_QAT_UCLO_MAX_CTX]; +}; + +struct icp_qat_uof_encap_obj { + char *beg_uof; + struct icp_qat_uof_objhdr *obj_hdr; + struct icp_qat_uof_chunkhdr *chunk_hdr; + struct icp_qat_uof_varmem_seg *var_mem_seg; +}; + +struct icp_qat_uclo_encap_uwblock { + unsigned int start_addr; + unsigned int words_num; + uint64_t micro_words; +}; + +struct icp_qat_uclo_encap_page { + unsigned int def_page; + unsigned int page_region; + unsigned int beg_addr_v; + unsigned int beg_addr_p; + unsigned int micro_words_num; + unsigned int uwblock_num; + struct icp_qat_uclo_encap_uwblock *uwblock; +}; + +struct icp_qat_uclo_encapme { + struct icp_qat_uof_image *img_ptr; + struct icp_qat_uclo_encap_page *page; + unsigned int ae_reg_num; + struct icp_qat_uof_ae_reg *ae_reg; + unsigned int init_regsym_num; + struct icp_qat_uof_init_regsym *init_regsym; + unsigned int sbreak_num; + struct icp_qat_uof_sbreak *sbreak; + unsigned int uwords_num; +}; + +struct 
icp_qat_uclo_init_mem_table { + unsigned int entry_num; + struct icp_qat_uof_initmem *init_mem; +}; + +struct icp_qat_uclo_objhdr { + char *file_buff; + unsigned int checksum; + unsigned int size; +}; + +struct icp_qat_uof_strtable { + unsigned int table_len; + unsigned int reserved; + uint64_t strings; +}; + +struct icp_qat_uclo_objhandle { + unsigned int prod_type; + unsigned int prod_rev; + struct icp_qat_uclo_objhdr *obj_hdr; + struct icp_qat_uof_encap_obj encap_uof_obj; + struct icp_qat_uof_strtable str_table; + struct icp_qat_uclo_encapme ae_uimage[ICP_QAT_UCLO_MAX_UIMAGE]; + struct icp_qat_uclo_aedata ae_data[ICP_QAT_UCLO_MAX_AE]; + struct icp_qat_uclo_init_mem_table init_mem_tab; + struct icp_qat_uof_batch_init *lm_init_tab[ICP_QAT_UCLO_MAX_AE]; + struct icp_qat_uof_batch_init *umem_init_tab[ICP_QAT_UCLO_MAX_AE]; + int uimage_num; + int uword_in_bytes; + int global_inited; + unsigned int ae_num; + unsigned int ustore_phy_size; + void *obj_buf; + uint64_t *uword_buf; +}; + +struct icp_qat_uof_uword_block { + unsigned int start_addr; + unsigned int words_num; + unsigned int uword_offset; + unsigned int reserved; +}; + +struct icp_qat_uof_filehdr { + unsigned short file_id; + unsigned short reserved1; + char min_ver; + char maj_ver; + unsigned short reserved2; + unsigned short max_chunks; + unsigned short num_chunks; +}; + +struct icp_qat_uof_filechunkhdr { + char chunk_id[ICP_QAT_UOF_OBJID_LEN]; + unsigned int checksum; + unsigned int offset; + unsigned int size; +}; + +struct icp_qat_uof_objhdr { + unsigned int cpu_type; + unsigned short min_cpu_ver; + unsigned short max_cpu_ver; + short max_chunks; + short num_chunks; + unsigned int reserved1; + unsigned int reserved2; +}; + +struct icp_qat_uof_chunkhdr { + char chunk_id[ICP_QAT_UOF_OBJID_LEN]; + unsigned int offset; + unsigned int size; +}; + +struct icp_qat_uof_memvar_attr { + unsigned int offset_in_byte; + unsigned int value; +}; + +struct icp_qat_uof_initmem { + unsigned int sym_name; + char region; + char scope; + unsigned short reserved1; + unsigned int addr; + unsigned int num_in_bytes; + unsigned int val_attr_num; +}; + +struct icp_qat_uof_init_regsym { + unsigned int sym_name; + char init_type; + char value_type; + char reg_type; + unsigned char ctx; + unsigned int reg_addr; + unsigned int value; +}; + +struct icp_qat_uof_varmem_seg { + unsigned int sram_base; + unsigned int sram_size; + unsigned int sram_alignment; + unsigned int sdram_base; + unsigned int sdram_size; + unsigned int sdram_alignment; + unsigned int sdram1_base; + unsigned int sdram1_size; + unsigned int sdram1_alignment; + unsigned int scratch_base; + unsigned int scratch_size; + unsigned int scratch_alignment; +}; + +struct icp_qat_uof_gtid { + char tool_id[ICP_QAT_UOF_OBJID_LEN]; + int tool_ver; + unsigned int reserved1; + unsigned int reserved2; +}; + +struct icp_qat_uof_sbreak { + unsigned int page_num; + unsigned int virt_uaddr; + unsigned char sbreak_type; + unsigned char reg_type; + unsigned short reserved1; + unsigned int addr_offset; + unsigned int reg_addr; +}; + +struct icp_qat_uof_code_page { + unsigned int page_region; + unsigned int page_num; + unsigned char def_page; + unsigned char reserved2; + unsigned short reserved1; + unsigned int beg_addr_v; + unsigned int beg_addr_p; + unsigned int neigh_reg_tab_offset; + unsigned int uc_var_tab_offset; + unsigned int imp_var_tab_offset; + unsigned int imp_expr_tab_offset; + unsigned int code_area_offset; +}; + +struct icp_qat_uof_image { + unsigned int img_name; + unsigned int ae_assigned; + unsigned 
int ctx_assigned; + unsigned int cpu_type; + unsigned int entry_address; + unsigned int fill_pattern[2]; + unsigned int reloadable_size; + unsigned char sensitivity; + unsigned char reserved; + unsigned short ae_mode; + unsigned short max_ver; + unsigned short min_ver; + unsigned short image_attrib; + unsigned short reserved2; + unsigned short page_region_num; + unsigned short numpages; + unsigned int reg_tab_offset; + unsigned int init_reg_sym_tab; + unsigned int sbreak_tab; + unsigned int app_metadata; +}; + +struct icp_qat_uof_objtable { + unsigned int entry_num; +}; + +struct icp_qat_uof_ae_reg { + unsigned int name; + unsigned int vis_name; + unsigned short type; + unsigned short addr; + unsigned short access_mode; + unsigned char visible; + unsigned char reserved1; + unsigned short ref_count; + unsigned short reserved2; + unsigned int xo_id; +}; + +struct icp_qat_uof_code_area { + unsigned int micro_words_num; + unsigned int uword_block_tab; +}; + +struct icp_qat_uof_batch_init { + unsigned int ae; + unsigned int addr; + unsigned int *value; + unsigned int size; + struct icp_qat_uof_batch_init *next; +}; +#endif diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c new file mode 100644 index 000000000..1dc5b0a17 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -0,0 +1,1305 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/crypto.h> +#include <crypto/aead.h> +#include <crypto/aes.h> +#include <crypto/sha.h> +#include <crypto/hash.h> +#include <crypto/algapi.h> +#include <crypto/authenc.h> +#include <crypto/rng.h> +#include <linux/dma-mapping.h> +#include "adf_accel_devices.h" +#include "adf_transport.h" +#include "adf_common_drv.h" +#include "qat_crypto.h" +#include "icp_qat_hw.h" +#include "icp_qat_fw.h" +#include "icp_qat_fw_la.h" + +#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \ + ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ + ICP_QAT_HW_CIPHER_NO_CONVERT, \ + ICP_QAT_HW_CIPHER_ENCRYPT) + +#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \ + ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ + ICP_QAT_HW_CIPHER_KEY_CONVERT, \ + ICP_QAT_HW_CIPHER_DECRYPT) + +static atomic_t active_dev; + +struct qat_alg_buf { + uint32_t len; + uint32_t resrvd; + uint64_t addr; +} __packed; + +struct qat_alg_buf_list { + uint64_t resrvd; + uint32_t num_bufs; + uint32_t num_mapped_bufs; + struct qat_alg_buf bufers[]; +} __packed __aligned(64); + +/* Common content descriptor */ +struct qat_alg_cd { + union { + struct qat_enc { /* Encrypt content desc */ + struct icp_qat_hw_cipher_algo_blk cipher; + struct icp_qat_hw_auth_algo_blk hash; + } qat_enc_cd; + struct qat_dec { /* Decrytp content desc */ + struct icp_qat_hw_auth_algo_blk hash; + struct icp_qat_hw_cipher_algo_blk cipher; + } qat_dec_cd; + }; +} __aligned(64); + +struct qat_alg_aead_ctx { + struct qat_alg_cd *enc_cd; + struct qat_alg_cd *dec_cd; + dma_addr_t enc_cd_paddr; + dma_addr_t dec_cd_paddr; + struct icp_qat_fw_la_bulk_req enc_fw_req; + struct icp_qat_fw_la_bulk_req dec_fw_req; + struct crypto_shash *hash_tfm; + enum icp_qat_hw_auth_algo qat_hash_alg; + struct qat_crypto_instance *inst; + struct crypto_tfm *tfm; + uint8_t salt[AES_BLOCK_SIZE]; + spinlock_t lock; /* protects qat_alg_aead_ctx struct */ +}; + +struct qat_alg_ablkcipher_ctx { + struct icp_qat_hw_cipher_algo_blk *enc_cd; + struct icp_qat_hw_cipher_algo_blk *dec_cd; + dma_addr_t enc_cd_paddr; + dma_addr_t dec_cd_paddr; + struct icp_qat_fw_la_bulk_req enc_fw_req; + struct icp_qat_fw_la_bulk_req dec_fw_req; + struct qat_crypto_instance *inst; + struct crypto_tfm *tfm; + spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */ +}; + +static int get_current_node(void) +{ + return cpu_data(current_thread_info()->cpu).phys_proc_id; +} + +static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) +{ + switch (qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + return ICP_QAT_HW_SHA1_STATE1_SZ; + case ICP_QAT_HW_AUTH_ALGO_SHA256: + return ICP_QAT_HW_SHA256_STATE1_SZ; + case ICP_QAT_HW_AUTH_ALGO_SHA512: + return ICP_QAT_HW_SHA512_STATE1_SZ; + default: + return -EFAULT; + }; + return -EFAULT; +} + +static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, + struct qat_alg_aead_ctx *ctx, + const uint8_t *auth_key, + unsigned int auth_keylen) +{ + 
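+ /*
+  * HMAC precompute, in outline: an auth key longer than the shash block
+  * size is first digested down; the (zero-padded) key is then XORed with
+  * the usual ipad/opad bytes (0x36 / 0x5c), one partial hash is run over
+  * each pad, and the exported intermediate states are stored, byte-swapped
+  * to big endian, in the content descriptor's state1 area -- e.g. for
+  * HMAC-SHA256 eight 32-bit words of the partial state after absorbing
+  * K ^ ipad, followed at an 8-byte-aligned offset by the K ^ opad state --
+  * so the firmware can resume the HMAC without needing the raw key.
+  */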
SHASH_DESC_ON_STACK(shash, ctx->hash_tfm); + struct sha1_state sha1; + struct sha256_state sha256; + struct sha512_state sha512; + int block_size = crypto_shash_blocksize(ctx->hash_tfm); + int digest_size = crypto_shash_digestsize(ctx->hash_tfm); + char ipad[block_size]; + char opad[block_size]; + __be32 *hash_state_out; + __be64 *hash512_state_out; + int i, offset; + + memset(ipad, 0, block_size); + memset(opad, 0, block_size); + shash->tfm = ctx->hash_tfm; + shash->flags = 0x0; + + if (auth_keylen > block_size) { + int ret = crypto_shash_digest(shash, auth_key, + auth_keylen, ipad); + if (ret) + return ret; + + memcpy(opad, ipad, digest_size); + } else { + memcpy(ipad, auth_key, auth_keylen); + memcpy(opad, auth_key, auth_keylen); + } + + for (i = 0; i < block_size; i++) { + char *ipad_ptr = ipad + i; + char *opad_ptr = opad + i; + *ipad_ptr ^= 0x36; + *opad_ptr ^= 0x5C; + } + + if (crypto_shash_init(shash)) + return -EFAULT; + + if (crypto_shash_update(shash, ipad, block_size)) + return -EFAULT; + + hash_state_out = (__be32 *)hash->sha.state1; + hash512_state_out = (__be64 *)hash_state_out; + + switch (ctx->qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + if (crypto_shash_export(shash, &sha1)) + return -EFAULT; + for (i = 0; i < digest_size >> 2; i++, hash_state_out++) + *hash_state_out = cpu_to_be32(*(sha1.state + i)); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA256: + if (crypto_shash_export(shash, &sha256)) + return -EFAULT; + for (i = 0; i < digest_size >> 2; i++, hash_state_out++) + *hash_state_out = cpu_to_be32(*(sha256.state + i)); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA512: + if (crypto_shash_export(shash, &sha512)) + return -EFAULT; + for (i = 0; i < digest_size >> 3; i++, hash512_state_out++) + *hash512_state_out = cpu_to_be64(*(sha512.state + i)); + break; + default: + return -EFAULT; + } + + if (crypto_shash_init(shash)) + return -EFAULT; + + if (crypto_shash_update(shash, opad, block_size)) + return -EFAULT; + + offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8); + hash_state_out = (__be32 *)(hash->sha.state1 + offset); + hash512_state_out = (__be64 *)hash_state_out; + + switch (ctx->qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + if (crypto_shash_export(shash, &sha1)) + return -EFAULT; + for (i = 0; i < digest_size >> 2; i++, hash_state_out++) + *hash_state_out = cpu_to_be32(*(sha1.state + i)); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA256: + if (crypto_shash_export(shash, &sha256)) + return -EFAULT; + for (i = 0; i < digest_size >> 2; i++, hash_state_out++) + *hash_state_out = cpu_to_be32(*(sha256.state + i)); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA512: + if (crypto_shash_export(shash, &sha512)) + return -EFAULT; + for (i = 0; i < digest_size >> 3; i++, hash512_state_out++) + *hash512_state_out = cpu_to_be64(*(sha512.state + i)); + break; + default: + return -EFAULT; + } + memzero_explicit(ipad, block_size); + memzero_explicit(opad, block_size); + return 0; +} + +static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header) +{ + header->hdr_flags = + ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); + header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA; + header->comn_req_flags = + ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR, + QAT_COMN_PTR_TYPE_SGL); + ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_PARTIAL_NONE); + ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, + ICP_QAT_FW_CIPH_IV_16BYTE_DATA); + ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_PROTO); 
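+ /*
+  * Together with the UPDATE_STATE flag below this fills in a request
+  * header template that both the AEAD and the ablkcipher session setup
+  * paths reuse; only the service_cmd_id and the digest/auth-result flags
+  * are specialised afterwards.  All requests use SGL pointers, 64-bit
+  * content descriptor addresses and no partial-packet processing.
+  */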
+ ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_UPDATE_STATE); +} + +static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx, + int alg, + struct crypto_authenc_keys *keys) +{ + struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); + unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; + struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd; + struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher; + struct icp_qat_hw_auth_algo_blk *hash = + (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx + + sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen); + struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req; + struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + void *ptr = &req_tmpl->cd_ctrl; + struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr; + struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; + + /* CD setup */ + cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg); + memcpy(cipher->aes.key, keys->enckey, keys->enckeylen); + hash->sha.inner_setup.auth_config.config = + ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, + ctx->qat_hash_alg, digestsize); + hash->sha.inner_setup.auth_counter.counter = + cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm)); + + if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen)) + return -EFAULT; + + /* Request setup */ + qat_alg_init_common_hdr(header); + header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH; + ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_DIGEST_IN_BUFFER); + ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_RET_AUTH_RES); + ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_CMP_AUTH_RES); + cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr; + cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3; + + /* Cipher CD config setup */ + cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3; + cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3; + cipher_cd_ctrl->cipher_cfg_offset = 0; + ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); + ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH); + /* Auth CD config setup */ + hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3; + hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED; + hash_cd_ctrl->inner_res_sz = digestsize; + hash_cd_ctrl->final_sz = digestsize; + + switch (ctx->qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + hash_cd_ctrl->inner_state1_sz = + round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8); + hash_cd_ctrl->inner_state2_sz = + round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA256: + hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ; + hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ; + break; + case ICP_QAT_HW_AUTH_ALGO_SHA512: + hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ; + hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ; + break; + default: + break; + } + hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset + + ((sizeof(struct icp_qat_hw_auth_setup) + + round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3); + ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH); + ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); + return 0; +} + +static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx, + int alg, + struct 
crypto_authenc_keys *keys) +{ + struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); + unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; + struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd; + struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash; + struct icp_qat_hw_cipher_algo_blk *cipher = + (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx + + sizeof(struct icp_qat_hw_auth_setup) + + roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2); + struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req; + struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + void *ptr = &req_tmpl->cd_ctrl; + struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr; + struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; + struct icp_qat_fw_la_auth_req_params *auth_param = + (struct icp_qat_fw_la_auth_req_params *) + ((char *)&req_tmpl->serv_specif_rqpars + + sizeof(struct icp_qat_fw_la_cipher_req_params)); + + /* CD setup */ + cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg); + memcpy(cipher->aes.key, keys->enckey, keys->enckeylen); + hash->sha.inner_setup.auth_config.config = + ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, + ctx->qat_hash_alg, + digestsize); + hash->sha.inner_setup.auth_counter.counter = + cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm)); + + if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen)) + return -EFAULT; + + /* Request setup */ + qat_alg_init_common_hdr(header); + header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER; + ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_DIGEST_IN_BUFFER); + ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_RET_AUTH_RES); + ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_CMP_AUTH_RES); + cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr; + cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3; + + /* Cipher CD config setup */ + cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3; + cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3; + cipher_cd_ctrl->cipher_cfg_offset = + (sizeof(struct icp_qat_hw_auth_setup) + + roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3; + ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); + ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); + + /* Auth CD config setup */ + hash_cd_ctrl->hash_cfg_offset = 0; + hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED; + hash_cd_ctrl->inner_res_sz = digestsize; + hash_cd_ctrl->final_sz = digestsize; + + switch (ctx->qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + hash_cd_ctrl->inner_state1_sz = + round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8); + hash_cd_ctrl->inner_state2_sz = + round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA256: + hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ; + hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ; + break; + case ICP_QAT_HW_AUTH_ALGO_SHA512: + hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ; + hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ; + break; + default: + break; + } + + hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset + + ((sizeof(struct icp_qat_hw_auth_setup) + + round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3); + auth_param->auth_res_sz = digestsize; + ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH); + 
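+ /*
+  * Decrypt slice chain: auth first, then cipher (the digest over
+  * assoc + IV + ciphertext is checked before decryption), the mirror of
+  * the encrypt template above, which chains cipher -> auth -> DRAM write.
+  */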
ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); + return 0; +} + +static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx, + struct icp_qat_fw_la_bulk_req *req, + struct icp_qat_hw_cipher_algo_blk *cd, + const uint8_t *key, unsigned int keylen) +{ + struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; + struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr; + struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl; + + memcpy(cd->aes.key, key, keylen); + qat_alg_init_common_hdr(header); + header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER; + cd_pars->u.s.content_desc_params_sz = + sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3; + /* Cipher CD config setup */ + cd_ctrl->cipher_key_sz = keylen >> 3; + cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3; + cd_ctrl->cipher_cfg_offset = 0; + ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); + ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); +} + +static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx, + int alg, const uint8_t *key, + unsigned int keylen) +{ + struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd; + struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req; + struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; + + qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen); + cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr; + enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg); +} + +static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx, + int alg, const uint8_t *key, + unsigned int keylen) +{ + struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd; + struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req; + struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; + + qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen); + cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr; + dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg); +} + +static int qat_alg_validate_key(int key_len, int *alg) +{ + switch (key_len) { + case AES_KEYSIZE_128: + *alg = ICP_QAT_HW_CIPHER_ALGO_AES128; + break; + case AES_KEYSIZE_192: + *alg = ICP_QAT_HW_CIPHER_ALGO_AES192; + break; + case AES_KEYSIZE_256: + *alg = ICP_QAT_HW_CIPHER_ALGO_AES256; + break; + default: + return -EINVAL; + } + return 0; +} + +static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx, + const uint8_t *key, unsigned int keylen) +{ + struct crypto_authenc_keys keys; + int alg; + + if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE)) + return -EFAULT; + + if (crypto_authenc_extractkeys(&keys, key, keylen)) + goto bad_key; + + if (qat_alg_validate_key(keys.enckeylen, &alg)) + goto bad_key; + + if (qat_alg_aead_init_enc_session(ctx, alg, &keys)) + goto error; + + if (qat_alg_aead_init_dec_session(ctx, alg, &keys)) + goto error; + + return 0; +bad_key: + crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; +error: + return -EFAULT; +} + +static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx, + const uint8_t *key, + unsigned int keylen) +{ + int alg; + + if (qat_alg_validate_key(keylen, &alg)) + goto bad_key; + + qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen); + qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen); + return 0; +bad_key: + crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; +} + +static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, + unsigned int keylen) +{ + struct 
qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct device *dev; + + spin_lock(&ctx->lock); + if (ctx->enc_cd) { + /* rekeying */ + dev = &GET_DEV(ctx->inst->accel_dev); + memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); + memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); + memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); + memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); + } else { + /* new key */ + int node = get_current_node(); + struct qat_crypto_instance *inst = + qat_crypto_get_instance_node(node); + if (!inst) { + spin_unlock(&ctx->lock); + return -EINVAL; + } + + dev = &GET_DEV(inst->accel_dev); + ctx->inst = inst; + ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), + &ctx->enc_cd_paddr, + GFP_ATOMIC); + if (!ctx->enc_cd) { + spin_unlock(&ctx->lock); + return -ENOMEM; + } + ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), + &ctx->dec_cd_paddr, + GFP_ATOMIC); + if (!ctx->dec_cd) { + spin_unlock(&ctx->lock); + goto out_free_enc; + } + } + spin_unlock(&ctx->lock); + if (qat_alg_aead_init_sessions(ctx, key, keylen)) + goto out_free_all; + + return 0; + +out_free_all: + memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd)); + dma_free_coherent(dev, sizeof(struct qat_alg_cd), + ctx->dec_cd, ctx->dec_cd_paddr); + ctx->dec_cd = NULL; +out_free_enc: + memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd)); + dma_free_coherent(dev, sizeof(struct qat_alg_cd), + ctx->enc_cd, ctx->enc_cd_paddr); + ctx->enc_cd = NULL; + return -ENOMEM; +} + +static void qat_alg_free_bufl(struct qat_crypto_instance *inst, + struct qat_crypto_request *qat_req) +{ + struct device *dev = &GET_DEV(inst->accel_dev); + struct qat_alg_buf_list *bl = qat_req->buf.bl; + struct qat_alg_buf_list *blout = qat_req->buf.blout; + dma_addr_t blp = qat_req->buf.blp; + dma_addr_t blpout = qat_req->buf.bloutp; + size_t sz = qat_req->buf.sz; + size_t sz_out = qat_req->buf.sz_out; + int i; + + for (i = 0; i < bl->num_bufs; i++) + dma_unmap_single(dev, bl->bufers[i].addr, + bl->bufers[i].len, DMA_BIDIRECTIONAL); + + dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); + kfree(bl); + if (blp != blpout) { + /* If out of place operation dma unmap only data */ + int bufless = blout->num_bufs - blout->num_mapped_bufs; + + for (i = bufless; i < blout->num_bufs; i++) { + dma_unmap_single(dev, blout->bufers[i].addr, + blout->bufers[i].len, + DMA_BIDIRECTIONAL); + } + dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE); + kfree(blout); + } +} + +static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, + struct scatterlist *assoc, + struct scatterlist *sgl, + struct scatterlist *sglout, uint8_t *iv, + uint8_t ivlen, + struct qat_crypto_request *qat_req) +{ + struct device *dev = &GET_DEV(inst->accel_dev); + int i, bufs = 0, sg_nctr = 0; + int n = sg_nents(sgl), assoc_n = sg_nents(assoc); + struct qat_alg_buf_list *bufl; + struct qat_alg_buf_list *buflout = NULL; + dma_addr_t blp; + dma_addr_t bloutp = 0; + struct scatterlist *sg; + size_t sz_out, sz = sizeof(struct qat_alg_buf_list) + + ((1 + n + assoc_n) * sizeof(struct qat_alg_buf)); + + if (unlikely(!n)) + return -EINVAL; + + bufl = kzalloc_node(sz, GFP_ATOMIC, + dev_to_node(&GET_DEV(inst->accel_dev))); + if (unlikely(!bufl)) + return -ENOMEM; + + blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, blp))) + goto err; + + for_each_sg(assoc, sg, assoc_n, i) { + if (!sg->length) + continue; + bufl->bufers[bufs].addr = dma_map_single(dev, + sg_virt(sg), + sg->length, + DMA_BIDIRECTIONAL); + bufl->bufers[bufs].len = sg->length; + if 
(unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) + goto err; + bufs++; + } + if (ivlen) { + bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen, + DMA_BIDIRECTIONAL); + bufl->bufers[bufs].len = ivlen; + if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) + goto err; + bufs++; + } + + for_each_sg(sgl, sg, n, i) { + int y = sg_nctr + bufs; + + if (!sg->length) + continue; + + bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), + sg->length, + DMA_BIDIRECTIONAL); + bufl->bufers[y].len = sg->length; + if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) + goto err; + sg_nctr++; + } + bufl->num_bufs = sg_nctr + bufs; + qat_req->buf.bl = bufl; + qat_req->buf.blp = blp; + qat_req->buf.sz = sz; + /* Handle out of place operation */ + if (sgl != sglout) { + struct qat_alg_buf *bufers; + + n = sg_nents(sglout); + sz_out = sizeof(struct qat_alg_buf_list) + + ((1 + n + assoc_n) * sizeof(struct qat_alg_buf)); + sg_nctr = 0; + buflout = kzalloc_node(sz_out, GFP_ATOMIC, + dev_to_node(&GET_DEV(inst->accel_dev))); + if (unlikely(!buflout)) + goto err; + bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, bloutp))) + goto err; + bufers = buflout->bufers; + /* For out of place operation dma map only data and + * reuse assoc mapping and iv */ + for (i = 0; i < bufs; i++) { + bufers[i].len = bufl->bufers[i].len; + bufers[i].addr = bufl->bufers[i].addr; + } + for_each_sg(sglout, sg, n, i) { + int y = sg_nctr + bufs; + + if (!sg->length) + continue; + + bufers[y].addr = dma_map_single(dev, sg_virt(sg), + sg->length, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, bufers[y].addr))) + goto err; + bufers[y].len = sg->length; + sg_nctr++; + } + buflout->num_bufs = sg_nctr + bufs; + buflout->num_mapped_bufs = sg_nctr; + qat_req->buf.blout = buflout; + qat_req->buf.bloutp = bloutp; + qat_req->buf.sz_out = sz_out; + } else { + /* Otherwise set the src and dst to the same address */ + qat_req->buf.bloutp = qat_req->buf.blp; + qat_req->buf.sz_out = 0; + } + return 0; +err: + dev_err(dev, "Failed to map buf for dma\n"); + sg_nctr = 0; + for (i = 0; i < n + bufs; i++) + if (!dma_mapping_error(dev, bufl->bufers[i].addr)) + dma_unmap_single(dev, bufl->bufers[i].addr, + bufl->bufers[i].len, + DMA_BIDIRECTIONAL); + + if (!dma_mapping_error(dev, blp)) + dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); + kfree(bufl); + if (sgl != sglout && buflout) { + n = sg_nents(sglout); + for (i = bufs; i < n + bufs; i++) + if (!dma_mapping_error(dev, buflout->bufers[i].addr)) + dma_unmap_single(dev, buflout->bufers[i].addr, + buflout->bufers[i].len, + DMA_BIDIRECTIONAL); + if (!dma_mapping_error(dev, bloutp)) + dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); + kfree(buflout); + } + return -ENOMEM; +} + +static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp, + struct qat_crypto_request *qat_req) +{ + struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx; + struct qat_crypto_instance *inst = ctx->inst; + struct aead_request *areq = qat_req->aead_req; + uint8_t stat_filed = qat_resp->comn_resp.comn_status; + int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); + + qat_alg_free_bufl(inst, qat_req); + if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) + res = -EBADMSG; + areq->base.complete(&areq->base, res); +} + +static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp, + struct qat_crypto_request *qat_req) +{ + struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx; + struct 
qat_crypto_instance *inst = ctx->inst; + struct ablkcipher_request *areq = qat_req->ablkcipher_req; + uint8_t stat_filed = qat_resp->comn_resp.comn_status; + int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); + + qat_alg_free_bufl(inst, qat_req); + if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) + res = -EINVAL; + areq->base.complete(&areq->base, res); +} + +void qat_alg_callback(void *resp) +{ + struct icp_qat_fw_la_resp *qat_resp = resp; + struct qat_crypto_request *qat_req = + (void *)(__force long)qat_resp->opaque_data; + + qat_req->cb(qat_resp, qat_req); +} + +static int qat_alg_aead_dec(struct aead_request *areq) +{ + struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq); + struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); + struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); + struct qat_crypto_request *qat_req = aead_request_ctx(areq); + struct icp_qat_fw_la_cipher_req_params *cipher_param; + struct icp_qat_fw_la_auth_req_params *auth_param; + struct icp_qat_fw_la_bulk_req *msg; + int digst_size = crypto_aead_crt(aead_tfm)->authsize; + int ret, ctr = 0; + + ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst, + areq->iv, AES_BLOCK_SIZE, qat_req); + if (unlikely(ret)) + return ret; + + msg = &qat_req->req; + *msg = ctx->dec_fw_req; + qat_req->aead_ctx = ctx; + qat_req->aead_req = areq; + qat_req->cb = qat_aead_alg_callback; + qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; + qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; + qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; + cipher_param = (void *)&qat_req->req.serv_specif_rqpars; + cipher_param->cipher_length = areq->cryptlen - digst_size; + cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE; + memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE); + auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); + auth_param->auth_off = 0; + auth_param->auth_len = areq->assoclen + + cipher_param->cipher_length + AES_BLOCK_SIZE; + do { + ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); + } while (ret == -EAGAIN && ctr++ < 10); + + if (ret == -EAGAIN) { + qat_alg_free_bufl(ctx->inst, qat_req); + return -EBUSY; + } + return -EINPROGRESS; +} + +static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv, + int enc_iv) +{ + struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq); + struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); + struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); + struct qat_crypto_request *qat_req = aead_request_ctx(areq); + struct icp_qat_fw_la_cipher_req_params *cipher_param; + struct icp_qat_fw_la_auth_req_params *auth_param; + struct icp_qat_fw_la_bulk_req *msg; + int ret, ctr = 0; + + ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst, + iv, AES_BLOCK_SIZE, qat_req); + if (unlikely(ret)) + return ret; + + msg = &qat_req->req; + *msg = ctx->enc_fw_req; + qat_req->aead_ctx = ctx; + qat_req->aead_req = areq; + qat_req->cb = qat_aead_alg_callback; + qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; + qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; + qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; + cipher_param = (void *)&qat_req->req.serv_specif_rqpars; + auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); + + if (enc_iv) { + cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE; + cipher_param->cipher_offset = areq->assoclen; + } else { + 
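+ /*
+  * Plain encrypt path: the caller's IV is passed via the request
+  * parameters and the cipher region starts after the assoc data plus one
+  * IV block, so the IV is not itself encrypted; the enc_iv branch above
+  * instead encrypts the generated IV together with the payload.
+  */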
memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE); + cipher_param->cipher_length = areq->cryptlen; + cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE; + } + auth_param->auth_off = 0; + auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE; + + do { + ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); + } while (ret == -EAGAIN && ctr++ < 10); + + if (ret == -EAGAIN) { + qat_alg_free_bufl(ctx->inst, qat_req); + return -EBUSY; + } + return -EINPROGRESS; +} + +static int qat_alg_aead_enc(struct aead_request *areq) +{ + return qat_alg_aead_enc_internal(areq, areq->iv, 0); +} + +static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req) +{ + struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq); + struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); + struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); + __be64 seq; + + memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE); + seq = cpu_to_be64(req->seq); + memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t), + &seq, sizeof(uint64_t)); + return qat_alg_aead_enc_internal(&req->areq, req->giv, 1); +} + +static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, + const uint8_t *key, + unsigned int keylen) +{ + struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct device *dev; + + spin_lock(&ctx->lock); + if (ctx->enc_cd) { + /* rekeying */ + dev = &GET_DEV(ctx->inst->accel_dev); + memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); + memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); + memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); + memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); + } else { + /* new key */ + int node = get_current_node(); + struct qat_crypto_instance *inst = + qat_crypto_get_instance_node(node); + if (!inst) { + spin_unlock(&ctx->lock); + return -EINVAL; + } + + dev = &GET_DEV(inst->accel_dev); + ctx->inst = inst; + ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), + &ctx->enc_cd_paddr, + GFP_ATOMIC); + if (!ctx->enc_cd) { + spin_unlock(&ctx->lock); + return -ENOMEM; + } + ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), + &ctx->dec_cd_paddr, + GFP_ATOMIC); + if (!ctx->dec_cd) { + spin_unlock(&ctx->lock); + goto out_free_enc; + } + } + spin_unlock(&ctx->lock); + if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen)) + goto out_free_all; + + return 0; + +out_free_all: + memset(ctx->dec_cd, 0, sizeof(*ctx->enc_cd)); + dma_free_coherent(dev, sizeof(*ctx->enc_cd), + ctx->dec_cd, ctx->dec_cd_paddr); + ctx->dec_cd = NULL; +out_free_enc: + memset(ctx->enc_cd, 0, sizeof(*ctx->dec_cd)); + dma_free_coherent(dev, sizeof(*ctx->dec_cd), + ctx->enc_cd, ctx->enc_cd_paddr); + ctx->enc_cd = NULL; + return -ENOMEM; +} + +static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm); + struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); + struct icp_qat_fw_la_cipher_req_params *cipher_param; + struct icp_qat_fw_la_bulk_req *msg; + int ret, ctr = 0; + + ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst, + NULL, 0, qat_req); + if (unlikely(ret)) + return ret; + + msg = &qat_req->req; + *msg = ctx->enc_fw_req; + qat_req->ablkcipher_ctx = ctx; + qat_req->ablkcipher_req = req; + qat_req->cb = qat_ablkcipher_alg_callback; + qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; + qat_req->req.comn_mid.src_data_addr = 
qat_req->buf.blp; + qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; + cipher_param = (void *)&qat_req->req.serv_specif_rqpars; + cipher_param->cipher_length = req->nbytes; + cipher_param->cipher_offset = 0; + memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE); + do { + ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); + } while (ret == -EAGAIN && ctr++ < 10); + + if (ret == -EAGAIN) { + qat_alg_free_bufl(ctx->inst, qat_req); + return -EBUSY; + } + return -EINPROGRESS; +} + +static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm); + struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); + struct icp_qat_fw_la_cipher_req_params *cipher_param; + struct icp_qat_fw_la_bulk_req *msg; + int ret, ctr = 0; + + ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst, + NULL, 0, qat_req); + if (unlikely(ret)) + return ret; + + msg = &qat_req->req; + *msg = ctx->dec_fw_req; + qat_req->ablkcipher_ctx = ctx; + qat_req->ablkcipher_req = req; + qat_req->cb = qat_ablkcipher_alg_callback; + qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; + qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; + qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; + cipher_param = (void *)&qat_req->req.serv_specif_rqpars; + cipher_param->cipher_length = req->nbytes; + cipher_param->cipher_offset = 0; + memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE); + do { + ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); + } while (ret == -EAGAIN && ctr++ < 10); + + if (ret == -EAGAIN) { + qat_alg_free_bufl(ctx->inst, qat_req); + return -EBUSY; + } + return -EINPROGRESS; +} + +static int qat_alg_aead_init(struct crypto_tfm *tfm, + enum icp_qat_hw_auth_algo hash, + const char *hash_name) +{ + struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); + if (IS_ERR(ctx->hash_tfm)) + return -EFAULT; + spin_lock_init(&ctx->lock); + ctx->qat_hash_alg = hash; + tfm->crt_aead.reqsize = sizeof(struct aead_request) + + sizeof(struct qat_crypto_request); + ctx->tfm = tfm; + return 0; +} + +static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm) +{ + return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1"); +} + +static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm) +{ + return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256"); +} + +static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm) +{ + return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512"); +} + +static void qat_alg_aead_exit(struct crypto_tfm *tfm) +{ + struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); + struct qat_crypto_instance *inst = ctx->inst; + struct device *dev; + + if (!IS_ERR(ctx->hash_tfm)) + crypto_free_shash(ctx->hash_tfm); + + if (!inst) + return; + + dev = &GET_DEV(inst->accel_dev); + if (ctx->enc_cd) { + memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd)); + dma_free_coherent(dev, sizeof(struct qat_alg_cd), + ctx->enc_cd, ctx->enc_cd_paddr); + } + if (ctx->dec_cd) { + memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd)); + dma_free_coherent(dev, sizeof(struct qat_alg_cd), + ctx->dec_cd, ctx->dec_cd_paddr); + } + qat_crypto_put_instance(inst); +} + +static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm) +{ + struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); + + 
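+ /*
+  * The reqsize set below makes room for the driver's qat_crypto_request
+  * behind each ablkcipher request (retrieved later with
+  * ablkcipher_request_ctx()); the DMA-coherent content descriptors are
+  * only allocated on the first setkey() call.
+  */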
spin_lock_init(&ctx->lock); + tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) + + sizeof(struct qat_crypto_request); + ctx->tfm = tfm; + return 0; +} + +static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm) +{ + struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct qat_crypto_instance *inst = ctx->inst; + struct device *dev; + + if (!inst) + return; + + dev = &GET_DEV(inst->accel_dev); + if (ctx->enc_cd) { + memset(ctx->enc_cd, 0, + sizeof(struct icp_qat_hw_cipher_algo_blk)); + dma_free_coherent(dev, + sizeof(struct icp_qat_hw_cipher_algo_blk), + ctx->enc_cd, ctx->enc_cd_paddr); + } + if (ctx->dec_cd) { + memset(ctx->dec_cd, 0, + sizeof(struct icp_qat_hw_cipher_algo_blk)); + dma_free_coherent(dev, + sizeof(struct icp_qat_hw_cipher_algo_blk), + ctx->dec_cd, ctx->dec_cd_paddr); + } + qat_crypto_put_instance(inst); +} + +static struct crypto_alg qat_algs[] = { { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = "qat_aes_cbc_hmac_sha1", + .cra_priority = 4001, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_init = qat_alg_aead_sha1_init, + .cra_exit = qat_alg_aead_exit, + .cra_u = { + .aead = { + .setkey = qat_alg_aead_setkey, + .decrypt = qat_alg_aead_dec, + .encrypt = qat_alg_aead_enc, + .givencrypt = qat_alg_aead_genivenc, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + }, +}, { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = "qat_aes_cbc_hmac_sha256", + .cra_priority = 4001, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_init = qat_alg_aead_sha256_init, + .cra_exit = qat_alg_aead_exit, + .cra_u = { + .aead = { + .setkey = qat_alg_aead_setkey, + .decrypt = qat_alg_aead_dec, + .encrypt = qat_alg_aead_enc, + .givencrypt = qat_alg_aead_genivenc, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + }, +}, { + .cra_name = "authenc(hmac(sha512),cbc(aes))", + .cra_driver_name = "qat_aes_cbc_hmac_sha512", + .cra_priority = 4001, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_init = qat_alg_aead_sha512_init, + .cra_exit = qat_alg_aead_exit, + .cra_u = { + .aead = { + .setkey = qat_alg_aead_setkey, + .decrypt = qat_alg_aead_dec, + .encrypt = qat_alg_aead_enc, + .givencrypt = qat_alg_aead_genivenc, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + }, +}, { + .cra_name = "cbc(aes)", + .cra_driver_name = "qat_aes_cbc", + .cra_priority = 4001, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = qat_alg_ablkcipher_init, + .cra_exit = qat_alg_ablkcipher_exit, + .cra_u = { + .ablkcipher = { + .setkey = qat_alg_ablkcipher_setkey, + .decrypt = qat_alg_ablkcipher_decrypt, + .encrypt = qat_alg_ablkcipher_encrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + }, +} }; + +int 
qat_algs_register(void) +{ + if (atomic_add_return(1, &active_dev) == 1) { + int i; + + for (i = 0; i < ARRAY_SIZE(qat_algs); i++) + qat_algs[i].cra_flags = + (qat_algs[i].cra_type == &crypto_aead_type) ? + CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC : + CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; + + return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); + } + return 0; +} + +int qat_algs_unregister(void) +{ + if (atomic_sub_return(1, &active_dev) == 0) + return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); + return 0; +} + +int qat_algs_init(void) +{ + atomic_set(&active_dev, 0); + crypto_get_default_rng(); + return 0; +} + +void qat_algs_exit(void) +{ + crypto_put_default_rng(); +} diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c new file mode 100644 index 000000000..3bd705ca5 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -0,0 +1,287 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include <linux/module.h> +#include <linux/slab.h> +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_transport.h" +#include "adf_cfg.h" +#include "adf_cfg_strings.h" +#include "qat_crypto.h" +#include "icp_qat_fw.h" + +#define SEC ADF_KERNEL_SEC + +static struct service_hndl qat_crypto; + +void qat_crypto_put_instance(struct qat_crypto_instance *inst) +{ + if (atomic_sub_return(1, &inst->refctr) == 0) + adf_dev_put(inst->accel_dev); +} + +static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev) +{ + struct qat_crypto_instance *inst; + struct list_head *list_ptr, *tmp; + int i; + + list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) { + inst = list_entry(list_ptr, struct qat_crypto_instance, list); + + for (i = 0; i < atomic_read(&inst->refctr); i++) + qat_crypto_put_instance(inst); + + if (inst->sym_tx) + adf_remove_ring(inst->sym_tx); + + if (inst->sym_rx) + adf_remove_ring(inst->sym_rx); + + if (inst->pke_tx) + adf_remove_ring(inst->pke_tx); + + if (inst->pke_rx) + adf_remove_ring(inst->pke_rx); + + if (inst->rnd_tx) + adf_remove_ring(inst->rnd_tx); + + if (inst->rnd_rx) + adf_remove_ring(inst->rnd_rx); + + list_del(list_ptr); + kfree(inst); + } + return 0; +} + +struct qat_crypto_instance *qat_crypto_get_instance_node(int node) +{ + struct adf_accel_dev *accel_dev = NULL; + struct qat_crypto_instance *inst_best = NULL; + struct list_head *itr; + unsigned long best = ~0; + + list_for_each(itr, adf_devmgr_get_head()) { + accel_dev = list_entry(itr, struct adf_accel_dev, list); + if ((node == dev_to_node(&GET_DEV(accel_dev)) || + dev_to_node(&GET_DEV(accel_dev)) < 0) && + adf_dev_started(accel_dev)) + break; + accel_dev = NULL; + } + if (!accel_dev) { + pr_err("QAT: Could not find a device on node %d\n", node); + accel_dev = adf_devmgr_get_first(); + } + if (!accel_dev || !adf_dev_started(accel_dev)) + return NULL; + + list_for_each(itr, &accel_dev->crypto_list) { + struct qat_crypto_instance *inst; + unsigned long cur; + + inst = list_entry(itr, struct qat_crypto_instance, list); + cur = atomic_read(&inst->refctr); + if (best > cur) { + inst_best = inst; + best = cur; + } + } + if (inst_best) { + if (atomic_add_return(1, &inst_best->refctr) == 1) { + if (adf_dev_get(accel_dev)) { + atomic_dec(&inst_best->refctr); + dev_err(&GET_DEV(accel_dev), + "Could not increment dev refctr\n"); + return NULL; + } + } + } + return inst_best; +} + +static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) +{ + int i; + unsigned long bank; + unsigned long num_inst, num_msg_sym, num_msg_asym; + int msg_size; + struct qat_crypto_instance *inst; + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + + INIT_LIST_HEAD(&accel_dev->crypto_list); + strlcpy(key, ADF_NUM_CY, sizeof(key)); + + if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) + return -EFAULT; + + if (kstrtoul(val, 0, &num_inst)) + return -EFAULT; + + for (i = 0; i < num_inst; i++) { + inst = kzalloc_node(sizeof(*inst), GFP_KERNEL, + dev_to_node(&GET_DEV(accel_dev))); + if (!inst) + goto err; + + list_add_tail(&inst->list, &accel_dev->crypto_list); + inst->id = i; + atomic_set(&inst->refctr, 0); + inst->accel_dev = accel_dev; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i); + if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) + goto err; + + if (kstrtoul(val, 10, &bank)) + goto err; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); + if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) + goto err; + + if 
(kstrtoul(val, 10, &num_msg_sym)) + goto err; + num_msg_sym = num_msg_sym >> 1; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); + if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) + goto err; + + if (kstrtoul(val, 10, &num_msg_asym)) + goto err; + num_msg_asym = num_msg_asym >> 1; + + msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); + if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym, + msg_size, key, NULL, 0, &inst->sym_tx)) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i); + if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, + msg_size, key, NULL, 0, &inst->rnd_tx)) + goto err; + + msg_size = msg_size >> 1; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); + if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, + msg_size, key, NULL, 0, &inst->pke_tx)) + goto err; + + msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); + if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym, + msg_size, key, qat_alg_callback, 0, + &inst->sym_rx)) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i); + if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, + msg_size, key, qat_alg_callback, 0, + &inst->rnd_rx)) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); + if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, + msg_size, key, qat_alg_callback, 0, + &inst->pke_rx)) + goto err; + } + return 0; +err: + qat_crypto_free_instances(accel_dev); + return -ENOMEM; +} + +static int qat_crypto_init(struct adf_accel_dev *accel_dev) +{ + if (qat_crypto_create_instances(accel_dev)) + return -EFAULT; + + return 0; +} + +static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev) +{ + return qat_crypto_free_instances(accel_dev); +} + +static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev, + enum adf_event event) +{ + int ret; + + switch (event) { + case ADF_EVENT_INIT: + ret = qat_crypto_init(accel_dev); + break; + case ADF_EVENT_SHUTDOWN: + ret = qat_crypto_shutdown(accel_dev); + break; + case ADF_EVENT_RESTARTING: + case ADF_EVENT_RESTARTED: + case ADF_EVENT_START: + case ADF_EVENT_STOP: + default: + ret = 0; + } + return ret; +} + +int qat_crypto_register(void) +{ + memset(&qat_crypto, 0, sizeof(qat_crypto)); + qat_crypto.event_hld = qat_crypto_event_handler; + qat_crypto.name = "qat_crypto"; + return adf_service_register(&qat_crypto); +} + +int qat_crypto_unregister(void) +{ + return adf_service_unregister(&qat_crypto); +} diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h new file mode 100644 index 000000000..d503007b4 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_crypto.h @@ -0,0 +1,95 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. 
+ Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef _QAT_CRYPTO_INSTANCE_H_ +#define _QAT_CRYPTO_INSTANCE_H_ + +#include <linux/list.h> +#include <linux/slab.h> +#include "adf_accel_devices.h" +#include "icp_qat_fw_la.h" + +struct qat_crypto_instance { + struct adf_etr_ring_data *sym_tx; + struct adf_etr_ring_data *sym_rx; + struct adf_etr_ring_data *pke_tx; + struct adf_etr_ring_data *pke_rx; + struct adf_etr_ring_data *rnd_tx; + struct adf_etr_ring_data *rnd_rx; + struct adf_accel_dev *accel_dev; + struct list_head list; + unsigned long state; + int id; + atomic_t refctr; +}; + +struct qat_crypto_request_buffs { + struct qat_alg_buf_list *bl; + dma_addr_t blp; + struct qat_alg_buf_list *blout; + dma_addr_t bloutp; + size_t sz; + size_t sz_out; +}; + +struct qat_crypto_request; + +struct qat_crypto_request { + struct icp_qat_fw_la_bulk_req req; + union { + struct qat_alg_aead_ctx *aead_ctx; + struct qat_alg_ablkcipher_ctx *ablkcipher_ctx; + }; + union { + struct aead_request *aead_req; + struct ablkcipher_request *ablkcipher_req; + }; + struct qat_crypto_request_buffs buf; + void (*cb)(struct icp_qat_fw_la_resp *resp, + struct qat_crypto_request *req); +}; + +#endif diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c new file mode 100644 index 000000000..274ff7e9d --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -0,0 +1,1394 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include <linux/slab.h> + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "icp_qat_hal.h" +#include "icp_qat_uclo.h" + +#define BAD_REGADDR 0xffff +#define MAX_RETRY_TIMES 10000 +#define INIT_CTX_ARB_VALUE 0x0 +#define INIT_CTX_ENABLE_VALUE 0x0 +#define INIT_PC_VALUE 0x0 +#define INIT_WAKEUP_EVENTS_VALUE 0x1 +#define INIT_SIG_EVENTS_VALUE 0x1 +#define INIT_CCENABLE_VALUE 0x2000 +#define RST_CSR_QAT_LSB 20 +#define RST_CSR_AE_LSB 0 +#define MC_TIMESTAMP_ENABLE (0x1 << 7) + +#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \ + (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \ + (~(1 << CE_REG_PAR_ERR_BITPOS))) +#define INSERT_IMMED_GPRA_CONST(inst, const_val) \ + (inst = ((inst & 0xFFFF00C03FFull) | \ + ((((const_val) << 12) & 0x0FF00000ull) | \ + (((const_val) << 10) & 0x0003FC00ull)))) +#define INSERT_IMMED_GPRB_CONST(inst, const_val) \ + (inst = ((inst & 0xFFFF00FFF00ull) | \ + ((((const_val) << 12) & 0x0FF00000ull) | \ + (((const_val) << 0) & 0x000000FFull)))) + +#define AE(handle, ae) handle->hal_handle->aes[ae] + +static const uint64_t inst_4b[] = { + 0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull, + 0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, + 0x0A021000000ull +}; + +static const uint64_t inst[] = { + 0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull, + 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, + 0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull, + 0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, + 0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull, + 0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull, + 0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull, + 0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull, + 0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull, + 
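/*
 * Note: the 64-bit constants in this table are pre-assembled AE microwords,
 * not data. qat_hal_clear_gpr() further down copies the whole inst[] array
 * into the control store at address 0 and runs it on each enabled AE so the
 * GPR banks start from a known state before any firmware image is loaded.
 */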
0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull, + 0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull, + 0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull, + 0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull, + 0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull, + 0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull, + 0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull, + 0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull, + 0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull, + 0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull, + 0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull, + 0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull, + 0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull, +}; + +void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask) +{ + AE(handle, ae).live_ctx_mask = ctx_mask; +} + +#define CSR_RETRY_TIMES 500 +static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int csr, + unsigned int *value) +{ + unsigned int iterations = CSR_RETRY_TIMES; + + do { + *value = GET_AE_CSR(handle, ae, csr); + if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS)) + return 0; + } while (iterations--); + + pr_err("QAT: Read CSR timeout\n"); + return -EFAULT; +} + +static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int csr, + unsigned int value) +{ + unsigned int iterations = CSR_RETRY_TIMES; + + do { + SET_AE_CSR(handle, ae, csr, value); + if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS)) + return 0; + } while (iterations--); + + pr_err("QAT: Write CSR Timeout\n"); + return -EFAULT; +} + +static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + unsigned int *events) +{ + unsigned int cur_ctx; + + qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); + qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events); + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); +} + +static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int cycles, + int chk_inactive) +{ + unsigned int base_cnt = 0, cur_cnt = 0; + unsigned int csr = (1 << ACS_ABO_BITPOS); + int times = MAX_RETRY_TIMES; + int elapsed_cycles = 0; + + qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt); + base_cnt &= 0xffff; + while ((int)cycles > elapsed_cycles && times--) { + if (chk_inactive) + qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr); + + qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt); + cur_cnt &= 0xffff; + elapsed_cycles = cur_cnt - base_cnt; + + if (elapsed_cycles < 0) + elapsed_cycles += 0x10000; + + /* ensure at least 8 time cycles elapsed in wait_cycles */ + if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS))) + return 0; + } + if (!times) { + pr_err("QAT: wait_num_cycles time out\n"); + return -EFAULT; + } + return 0; +} + +#define CLR_BIT(wrd, bit) (wrd & ~(1 << bit)) +#define SET_BIT(wrd, bit) (wrd | 1 << bit) + +int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char mode) +{ + unsigned int csr, new_csr; + + if ((mode != 4) && (mode != 8)) { + pr_err("QAT: bad ctx 
mode=%d\n", mode); + return -EINVAL; + } + + /* Sets the accelaration engine context mode to either four or eight */ + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); + csr = IGNORE_W1C_MASK & csr; + new_csr = (mode == 4) ? + SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) : + CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS); + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr); + return 0; +} + +int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char mode) +{ + unsigned int csr, new_csr; + + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); + csr &= IGNORE_W1C_MASK; + + new_csr = (mode) ? + SET_BIT(csr, CE_NN_MODE_BITPOS) : + CLR_BIT(csr, CE_NN_MODE_BITPOS); + + if (new_csr != csr) + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr); + + return 0; +} + +int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, enum icp_qat_uof_regtype lm_type, + unsigned char mode) +{ + unsigned int csr, new_csr; + + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); + csr &= IGNORE_W1C_MASK; + switch (lm_type) { + case ICP_LMEM0: + new_csr = (mode) ? + SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) : + CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS); + break; + case ICP_LMEM1: + new_csr = (mode) ? + SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) : + CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS); + break; + default: + pr_err("QAT: lmType = 0x%x\n", lm_type); + return -EINVAL; + } + + if (new_csr != csr) + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr); + return 0; +} + +static unsigned short qat_hal_get_reg_addr(unsigned int type, + unsigned short reg_num) +{ + unsigned short reg_addr; + + switch (type) { + case ICP_GPA_ABS: + case ICP_GPB_ABS: + reg_addr = 0x80 | (reg_num & 0x7f); + break; + case ICP_GPA_REL: + case ICP_GPB_REL: + reg_addr = reg_num & 0x1f; + break; + case ICP_SR_RD_REL: + case ICP_SR_WR_REL: + case ICP_SR_REL: + reg_addr = 0x180 | (reg_num & 0x1f); + break; + case ICP_SR_ABS: + reg_addr = 0x140 | ((reg_num & 0x3) << 1); + break; + case ICP_DR_RD_REL: + case ICP_DR_WR_REL: + case ICP_DR_REL: + reg_addr = 0x1c0 | (reg_num & 0x1f); + break; + case ICP_DR_ABS: + reg_addr = 0x100 | ((reg_num & 0x3) << 1); + break; + case ICP_NEIGH_REL: + reg_addr = 0x280 | (reg_num & 0x1f); + break; + case ICP_LMEM0: + reg_addr = 0x200; + break; + case ICP_LMEM1: + reg_addr = 0x220; + break; + case ICP_NO_DEST: + reg_addr = 0x300 | (reg_num & 0xff); + break; + default: + reg_addr = BAD_REGADDR; + break; + } + return reg_addr; +} + +void qat_hal_reset(struct icp_qat_fw_loader_handle *handle) +{ + unsigned int ae_reset_csr; + + ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET); + ae_reset_csr |= handle->hal_handle->ae_mask << RST_CSR_AE_LSB; + ae_reset_csr |= handle->hal_handle->slice_mask << RST_CSR_QAT_LSB; + SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr); +} + +static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask, + unsigned int ae_csr, unsigned int csr_val) +{ + unsigned int ctx, cur_ctx; + + qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); + + for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { + if (!(ctx_mask & (1 << ctx))) + continue; + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); + qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val); + } + + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); +} + +static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + unsigned int ae_csr, unsigned int *csr_val) +{ + unsigned int cur_ctx; + + 
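	/*
	 * Per-context ("indirect") CSRs are reached by steering CSR_CTX_POINTER
	 * at the wanted context first: save the current pointer, retarget it,
	 * do the access, then put the pointer back so callers never observe a
	 * changed context selection.
	 */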
qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); + qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val); + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); +} + +static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask, + unsigned int events) +{ + unsigned int ctx, cur_ctx; + + qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); + for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { + if (!(ctx_mask & (1 << ctx))) + continue; + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); + qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events); + } + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); +} + +static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask, + unsigned int events) +{ + unsigned int ctx, cur_ctx; + + qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); + for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { + if (!(ctx_mask & (1 << ctx))) + continue; + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); + qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, + events); + } + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); +} + +static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle) +{ + unsigned int base_cnt, cur_cnt; + unsigned char ae; + unsigned int times = MAX_RETRY_TIMES; + + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { + if (!(handle->hal_handle->ae_mask & (1 << ae))) + continue; + + qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, + (unsigned int *)&base_cnt); + base_cnt &= 0xffff; + + do { + qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, + (unsigned int *)&cur_cnt); + cur_cnt &= 0xffff; + } while (times-- && (cur_cnt == base_cnt)); + + if (!times) { + pr_err("QAT: AE%d is inactive!!\n", ae); + return -EFAULT; + } + } + + return 0; +} + +static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) +{ + unsigned int misc_ctl; + unsigned char ae; + + /* stop the timestamp timers */ + misc_ctl = GET_GLB_CSR(handle, MISC_CONTROL); + if (misc_ctl & MC_TIMESTAMP_ENABLE) + SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl & + (~MC_TIMESTAMP_ENABLE)); + + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { + if (!(handle->hal_handle->ae_mask & (1 << ae))) + continue; + qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0); + qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0); + } + /* start timestamp timers */ + SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl | MC_TIMESTAMP_ENABLE); +} + +#define ESRAM_AUTO_TINIT BIT(2) +#define ESRAM_AUTO_TINIT_DONE BIT(3) +#define ESRAM_AUTO_INIT_USED_CYCLES (1640) +#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C +static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle) +{ + void __iomem *csr_addr = handle->hal_ep_csr_addr_v + + ESRAM_AUTO_INIT_CSR_OFFSET; + unsigned int csr_val, times = 30; + + csr_val = ADF_CSR_RD(csr_addr, 0); + if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE)) + return 0; + + csr_val = ADF_CSR_RD(csr_addr, 0); + csr_val |= ESRAM_AUTO_TINIT; + ADF_CSR_WR(csr_addr, 0, csr_val); + + do { + qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0); + csr_val = ADF_CSR_RD(csr_addr, 0); + } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--); + if ((!times)) { + pr_err("QAT: Fail to init eSram!\n"); + return -EFAULT; + } + return 0; +} + +#define SHRAM_INIT_CYCLES 2060 +int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle) +{ + unsigned 
int ae_reset_csr; + unsigned char ae; + unsigned int clk_csr; + unsigned int times = 100; + unsigned int csr; + + /* write to the reset csr */ + ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET); + ae_reset_csr &= ~(handle->hal_handle->ae_mask << RST_CSR_AE_LSB); + ae_reset_csr &= ~(handle->hal_handle->slice_mask << RST_CSR_QAT_LSB); + do { + SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr); + if (!(times--)) + goto out_err; + csr = GET_GLB_CSR(handle, ICP_RESET); + } while ((handle->hal_handle->ae_mask | + (handle->hal_handle->slice_mask << RST_CSR_QAT_LSB)) & csr); + /* enable clock */ + clk_csr = GET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE); + clk_csr |= handle->hal_handle->ae_mask << 0; + clk_csr |= handle->hal_handle->slice_mask << 20; + SET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE, clk_csr); + if (qat_hal_check_ae_alive(handle)) + goto out_err; + + /* Set undefined power-up/reset states to reasonable default values */ + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { + if (!(handle->hal_handle->ae_mask & (1 << ae))) + continue; + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, + INIT_CTX_ENABLE_VALUE); + qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX, + CTX_STS_INDIRECT, + handle->hal_handle->upc_mask & + INIT_PC_VALUE); + qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE); + qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE); + qat_hal_put_wakeup_event(handle, ae, + ICP_QAT_UCLO_AE_ALL_CTX, + INIT_WAKEUP_EVENTS_VALUE); + qat_hal_put_sig_event(handle, ae, + ICP_QAT_UCLO_AE_ALL_CTX, + INIT_SIG_EVENTS_VALUE); + } + if (qat_hal_init_esram(handle)) + goto out_err; + if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0)) + goto out_err; + qat_hal_reset_timestamp(handle); + + return 0; +out_err: + pr_err("QAT: failed to get device out of reset\n"); + return -EFAULT; +} + +static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask) +{ + unsigned int ctx; + + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx); + ctx &= IGNORE_W1C_MASK & + (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS)); + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); +} + +static uint64_t qat_hal_parity_64bit(uint64_t word) +{ + word ^= word >> 1; + word ^= word >> 2; + word ^= word >> 4; + word ^= word >> 8; + word ^= word >> 16; + word ^= word >> 32; + return word & 1; +} + +static uint64_t qat_hal_set_uword_ecc(uint64_t uword) +{ + uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL, + bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL, + bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL, + bit6_mask = 0xdaf69a46910ULL; + + /* clear the ecc bits */ + uword &= ~(0x7fULL << 0x2C); + uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C; + uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D; + uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E; + uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F; + uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30; + uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31; + uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32; + return uword; +} + +void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int uaddr, + unsigned int words_num, uint64_t *uword) +{ + unsigned int ustore_addr; + unsigned int i; + + qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); + uaddr |= UA_ECS; + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); + for (i = 0; i < words_num; i++) { + unsigned int 
uwrd_lo, uwrd_hi; + uint64_t tmp; + + tmp = qat_hal_set_uword_ecc(uword[i]); + uwrd_lo = (unsigned int)(tmp & 0xffffffff); + uwrd_hi = (unsigned int)(tmp >> 0x20); + qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); + qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); + } + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); +} + +static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask) +{ + unsigned int ctx; + + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx); + ctx &= IGNORE_W1C_MASK; + ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF; + ctx |= (ctx_mask << CE_ENABLE_BITPOS); + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); +} + +static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) +{ + unsigned char ae; + unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX; + int times = MAX_RETRY_TIMES; + unsigned int csr_val = 0; + unsigned short reg; + unsigned int savctx = 0; + int ret = 0; + + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { + if (!(handle->hal_handle->ae_mask & (1 << ae))) + continue; + for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) { + qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS, + reg, 0); + qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS, + reg, 0); + } + qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); + csr_val &= ~(1 << MMC_SHARE_CS_BITPOS); + qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val); + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val); + csr_val &= IGNORE_W1C_MASK; + csr_val |= CE_NN_MODE; + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val); + qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst), + (uint64_t *)inst); + qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, + handle->hal_handle->upc_mask & + INIT_PC_VALUE); + qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); + qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0); + qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY); + qat_hal_wr_indr_csr(handle, ae, ctx_mask, + CTX_SIG_EVENTS_INDIRECT, 0); + qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0); + qat_hal_enable_ctx(handle, ae, ctx_mask); + } + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { + if (!(handle->hal_handle->ae_mask & (1 << ae))) + continue; + /* wait for AE to finish */ + do { + ret = qat_hal_wait_cycles(handle, ae, 20, 1); + } while (ret && times--); + + if (!times) { + pr_err("QAT: clear GPR of AE %d failed", ae); + return -EINVAL; + } + qat_hal_disable_ctx(handle, ae, ctx_mask); + qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, + savctx & ACS_ACNO); + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, + INIT_CTX_ENABLE_VALUE); + qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, + handle->hal_handle->upc_mask & + INIT_PC_VALUE); + qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE); + qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE); + qat_hal_put_wakeup_event(handle, ae, ctx_mask, + INIT_WAKEUP_EVENTS_VALUE); + qat_hal_put_sig_event(handle, ae, ctx_mask, + INIT_SIG_EVENTS_VALUE); + } + return 0; +} + +#define ICP_DH895XCC_AE_OFFSET 0x20000 +#define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000) +#define LOCAL_TO_XFER_REG_OFFSET 0x800 +#define ICP_DH895XCC_EP_OFFSET 0x3a000 +#define ICP_DH895XCC_PMISC_BAR 1 +int qat_hal_init(struct adf_accel_dev *accel_dev) +{ + unsigned char ae; + unsigned int max_en_ae_id = 0; + struct icp_qat_fw_loader_handle *handle; + struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; + 
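	/*
	 * The ICP_DH895XCC_* offsets above are fixed window locations inside the
	 * DH895xCC PMISC BAR; every CSR base pointer set up below is derived from
	 * that single BAR's virt_addr plus one of these constants.
	 */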
struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_bar *bar = + &pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)]; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr + + ICP_DH895XCC_CAP_OFFSET; + handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr + + ICP_DH895XCC_AE_OFFSET; + handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET; + handle->hal_cap_ae_local_csr_addr_v = + handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET; + + handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL); + if (!handle->hal_handle) + goto out_hal_handle; + handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid; + handle->hal_handle->ae_mask = hw_data->ae_mask; + handle->hal_handle->slice_mask = hw_data->accel_mask; + /* create AE objects */ + handle->hal_handle->upc_mask = 0x1ffff; + handle->hal_handle->max_ustore = 0x4000; + for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) { + if (!(hw_data->ae_mask & (1 << ae))) + continue; + handle->hal_handle->aes[ae].free_addr = 0; + handle->hal_handle->aes[ae].free_size = + handle->hal_handle->max_ustore; + handle->hal_handle->aes[ae].ustore_size = + handle->hal_handle->max_ustore; + handle->hal_handle->aes[ae].live_ctx_mask = + ICP_QAT_UCLO_AE_ALL_CTX; + max_en_ae_id = ae; + } + handle->hal_handle->ae_max_num = max_en_ae_id + 1; + /* take all AEs out of reset */ + if (qat_hal_clr_reset(handle)) { + dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n"); + goto out_err; + } + if (qat_hal_clear_gpr(handle)) + goto out_err; + /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */ + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { + unsigned int csr_val = 0; + + if (!(hw_data->ae_mask & (1 << ae))) + continue; + qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val); + csr_val |= 0x1; + qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val); + } + accel_dev->fw_loader->fw_loader = handle; + return 0; + +out_err: + kfree(handle->hal_handle); +out_hal_handle: + kfree(handle); + return -EFAULT; +} + +void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle) +{ + if (!handle) + return; + kfree(handle->hal_handle); + kfree(handle); +} + +void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae, + unsigned int ctx_mask) +{ + qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) & + ICP_QAT_UCLO_AE_ALL_CTX, 0x10000); + qat_hal_enable_ctx(handle, ae, ctx_mask); +} + +void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae, + unsigned int ctx_mask) +{ + qat_hal_disable_ctx(handle, ae, ctx_mask); +} + +void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask, unsigned int upc) +{ + qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, + handle->hal_handle->upc_mask & upc); +} + +static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int uaddr, + unsigned int words_num, uint64_t *uword) +{ + unsigned int i, uwrd_lo, uwrd_hi; + unsigned int ustore_addr, misc_control; + + qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control); + qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, + misc_control & 0xfffffffb); + qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); + uaddr |= UA_ECS; + for (i = 0; i < words_num; i++) { + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); + uaddr++; + qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo); + 
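		/* USTORE_DATA_LOWER/UPPER each hold half of the 64-bit
		 * microword; the two 32-bit reads are recombined below. */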
qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi); + uword[i] = uwrd_hi; + uword[i] = (uword[i] << 0x20) | uwrd_lo; + } + qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control); + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); +} + +void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int uaddr, + unsigned int words_num, unsigned int *data) +{ + unsigned int i, ustore_addr; + + qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); + uaddr |= UA_ECS; + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); + for (i = 0; i < words_num; i++) { + unsigned int uwrd_lo, uwrd_hi, tmp; + + uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) | + ((data[i] & 0xff00) << 2) | + (0x3 << 8) | (data[i] & 0xff); + uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28); + uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8; + tmp = ((data[i] >> 0x10) & 0xffff); + uwrd_hi |= (hweight32(tmp) & 0x1) << 9; + qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); + qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); + } + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); +} + +#define MAX_EXEC_INST 100 +static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + uint64_t *micro_inst, unsigned int inst_num, + int code_off, unsigned int max_cycle, + unsigned int *endpc) +{ + uint64_t savuwords[MAX_EXEC_INST]; + unsigned int ind_lm_addr0, ind_lm_addr1; + unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1; + unsigned int ind_cnt_sig; + unsigned int ind_sig, act_sig; + unsigned int csr_val = 0, newcsr_val; + unsigned int savctx; + unsigned int savcc, wakeup_events, savpc; + unsigned int ctxarb_ctl, ctx_enables; + + if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) { + pr_err("QAT: invalid instruction num %d\n", inst_num); + return -EINVAL; + } + /* save current context */ + qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0); + qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1); + qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX, + &ind_lm_addr_byte0); + qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX, + &ind_lm_addr_byte1); + if (inst_num <= MAX_EXEC_INST) + qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords); + qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events); + qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc); + savpc = (savpc & handle->hal_handle->upc_mask) >> 0; + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); + ctx_enables &= IGNORE_W1C_MASK; + qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc); + qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); + qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl); + qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT, + &ind_cnt_sig); + qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig); + qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig); + /* execute micro codes */ + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); + qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0); + qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO); + if (code_off) + qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff); + qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0); + 
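	/*
	 * The borrowed context is about to run the injected snippet; once
	 * qat_hal_wait_cycles() sees it go idle, the state captured above
	 * (ustore words, PC, CC, LM addresses, wakeup/signal events, arb
	 * control) is written back so execution is transparent to the
	 * firmware context.
	 */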
qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0); + qat_hal_enable_ctx(handle, ae, (1 << ctx)); + /* wait for micro codes to finish */ + if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0) + return -EFAULT; + if (endpc) { + unsigned int ctx_status; + + qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, + &ctx_status); + *endpc = ctx_status & handle->hal_handle->upc_mask; + } + /* retore to saved context */ + qat_hal_disable_ctx(handle, ae, (1 << ctx)); + if (inst_num <= MAX_EXEC_INST) + qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords); + qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, + handle->hal_handle->upc_mask & savpc); + qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); + newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS); + qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val); + qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc); + qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO); + qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), + LM_ADDR_0_INDIRECT, ind_lm_addr0); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), + LM_ADDR_1_INDIRECT, ind_lm_addr1); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), + INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), + INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), + FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), + CTX_SIG_EVENTS_INDIRECT, ind_sig); + qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig); + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); + + return 0; +} + +static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int *data) +{ + unsigned int savctx, uaddr, uwrd_lo, uwrd_hi; + unsigned int ctxarb_cntl, ustore_addr, ctx_enables; + unsigned short reg_addr; + int status = 0; + uint64_t insts, savuword; + + reg_addr = qat_hal_get_reg_addr(reg_type, reg_num); + if (reg_addr == BAD_REGADDR) { + pr_err("QAT: bad regaddr=0x%x\n", reg_addr); + return -EINVAL; + } + switch (reg_type) { + case ICP_GPA_REL: + insts = 0xA070000000ull | (reg_addr & 0x3ff); + break; + default: + insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10); + break; + } + qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); + qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl); + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); + ctx_enables &= IGNORE_W1C_MASK; + if (ctx != (savctx & ACS_ACNO)) + qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, + ctx & ACS_ACNO); + qat_hal_get_uwords(handle, ae, 0, 1, &savuword); + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); + qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); + uaddr = UA_ECS; + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); + insts = qat_hal_set_uword_ecc(insts); + uwrd_lo = (unsigned int)(insts & 0xffffffff); + uwrd_hi = (unsigned int)(insts >> 0x20); + qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); + qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); + /* delay for at least 8 cycles */ + qat_hal_wait_cycles(handle, ae, 0x8, 0); + /* + * read ALU output + * the instruction should have been executed + * prior to clearing the ECS in 
putUwords + */ + qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data); + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); + qat_hal_wr_uwords(handle, ae, 0, 1, &savuword); + if (ctx != (savctx & ACS_ACNO)) + qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, + savctx & ACS_ACNO); + qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl); + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); + + return status; +} + +static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int data) +{ + unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo; + uint64_t insts[] = { + 0x0F440000000ull, + 0x0F040000000ull, + 0x0F0000C0300ull, + 0x0E000010000ull + }; + const int num_inst = ARRAY_SIZE(insts), code_off = 1; + const int imm_w1 = 0, imm_w0 = 1; + + dest_addr = qat_hal_get_reg_addr(reg_type, reg_num); + if (dest_addr == BAD_REGADDR) { + pr_err("QAT: bad destAddr=0x%x\n", dest_addr); + return -EINVAL; + } + + data16lo = 0xffff & data; + data16hi = 0xffff & (data >> 0x10); + src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short) + (0xff & data16hi)); + src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short) + (0xff & data16lo)); + switch (reg_type) { + case ICP_GPA_REL: + insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) | + ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff); + insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) | + ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff); + break; + default: + insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) | + ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff); + + insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) | + ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff); + break; + } + + return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst, + code_off, num_inst * 0x5, NULL); +} + +int qat_hal_get_ins_num(void) +{ + return ARRAY_SIZE(inst_4b); +} + +static int qat_hal_concat_micro_code(uint64_t *micro_inst, + unsigned int inst_num, unsigned int size, + unsigned int addr, unsigned int *value) +{ + int i, val_indx; + unsigned int cur_value; + const uint64_t *inst_arr; + int fixup_offset; + int usize = 0; + int orig_num; + + orig_num = inst_num; + val_indx = 0; + cur_value = value[val_indx++]; + inst_arr = inst_4b; + usize = ARRAY_SIZE(inst_4b); + fixup_offset = inst_num; + for (i = 0; i < usize; i++) + micro_inst[inst_num++] = inst_arr[i]; + INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr)); + fixup_offset++; + INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0); + fixup_offset++; + INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0)); + fixup_offset++; + INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10)); + + return inst_num - orig_num; +} + +static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + int *pfirst_exec, uint64_t *micro_inst, + unsigned int inst_num) +{ + int stat = 0; + unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0; + unsigned int gprb0 = 0, gprb1 = 0; + + if (*pfirst_exec) { + qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0); + qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1); + qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2); + qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0); + qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1); + *pfirst_exec = 0; + } + stat = 
qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1, + inst_num * 0x5, NULL); + if (stat != 0) + return -EFAULT; + qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0); + qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1); + qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2); + qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0); + qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1); + + return 0; +} + +int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, + struct icp_qat_uof_batch_init *lm_init_header) +{ + struct icp_qat_uof_batch_init *plm_init; + uint64_t *micro_inst_arry; + int micro_inst_num; + int alloc_inst_size; + int first_exec = 1; + int stat = 0; + + plm_init = lm_init_header->next; + alloc_inst_size = lm_init_header->size; + if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore) + alloc_inst_size = handle->hal_handle->max_ustore; + micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t), + GFP_KERNEL); + if (!micro_inst_arry) + return -ENOMEM; + micro_inst_num = 0; + while (plm_init) { + unsigned int addr, *value, size; + + ae = plm_init->ae; + addr = plm_init->addr; + value = plm_init->value; + size = plm_init->size; + micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry, + micro_inst_num, + size, addr, value); + plm_init = plm_init->next; + } + /* exec micro codes */ + if (micro_inst_arry && (micro_inst_num > 0)) { + micro_inst_arry[micro_inst_num++] = 0x0E000010000ull; + stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec, + micro_inst_arry, + micro_inst_num); + } + kfree(micro_inst_arry); + return stat; +} + +static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int val) +{ + int status = 0; + unsigned int reg_addr; + unsigned int ctx_enables; + unsigned short mask; + unsigned short dr_offset = 0x10; + + status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); + if (CE_INUSE_CONTEXTS & ctx_enables) { + if (ctx & 0x1) { + pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx); + return -EINVAL; + } + mask = 0x1f; + dr_offset = 0x20; + } else { + mask = 0x0f; + } + if (reg_num & ~mask) + return -EINVAL; + reg_addr = reg_num + (ctx << 0x5); + switch (reg_type) { + case ICP_SR_RD_REL: + case ICP_SR_REL: + SET_AE_XFER(handle, ae, reg_addr, val); + break; + case ICP_DR_RD_REL: + case ICP_DR_REL: + SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val); + break; + default: + status = -EINVAL; + break; + } + return status; +} + +static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int data) +{ + unsigned int gprval, ctx_enables; + unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi, + data16low; + unsigned short reg_mask; + int status = 0; + uint64_t micro_inst[] = { + 0x0F440000000ull, + 0x0F040000000ull, + 0x0A000000000ull, + 0x0F0000C0300ull, + 0x0E000010000ull + }; + const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1; + const unsigned short gprnum = 0, dly = num_inst * 0x5; + + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); + if (CE_INUSE_CONTEXTS & ctx_enables) { + if (ctx & 0x1) { + pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx); + return -EINVAL; + } + reg_mask = (unsigned short)~0x1f; + } else { + reg_mask = (unsigned short)~0xf; + } + if (reg_num & reg_mask) 
+ return -EINVAL; + xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num); + if (xfr_addr == BAD_REGADDR) { + pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr); + return -EINVAL; + } + qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval); + gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum); + data16low = 0xffff & data; + data16hi = 0xffff & (data >> 0x10); + src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, + (unsigned short)(0xff & data16hi)); + src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, + (unsigned short)(0xff & data16low)); + micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) | + ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff); + micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) | + ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff); + micro_inst[0x2] = micro_inst[0x2] | + ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10); + status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst, + code_off, dly, NULL); + qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval); + return status; +} + +static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + unsigned short nn, unsigned int val) +{ + unsigned int ctx_enables; + int stat = 0; + + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); + ctx_enables &= IGNORE_W1C_MASK; + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE); + + stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val); + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); + return stat; +} + +static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle + *handle, unsigned char ae, + unsigned short absreg_num, + unsigned short *relreg, + unsigned char *ctx) +{ + unsigned int ctx_enables; + + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); + if (ctx_enables & CE_INUSE_CONTEXTS) { + /* 4-ctx mode */ + *relreg = absreg_num & 0x1F; + *ctx = (absreg_num >> 0x4) & 0x6; + } else { + /* 8-ctx mode */ + *relreg = absreg_num & 0x0F; + *ctx = (absreg_num >> 0x4) & 0x7; + } + return 0; +} + +int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int regdata) +{ + int stat = 0; + unsigned short reg; + unsigned char ctx = 0; + enum icp_qat_uof_regtype type; + + if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG) + return -EINVAL; + + do { + if (ctx_mask == 0) { + qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®, + &ctx); + type = reg_type - 1; + } else { + reg = reg_num; + type = reg_type; + if (!test_bit(ctx, (unsigned long *)&ctx_mask)) + continue; + } + stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata); + if (stat) { + pr_err("QAT: write gpr fail\n"); + return -EINVAL; + } + } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX)); + + return 0; +} + +int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int regdata) +{ + int stat = 0; + unsigned short reg; + unsigned char ctx = 0; + enum icp_qat_uof_regtype type; + + if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG) + return -EINVAL; + + do { + if (ctx_mask == 0) { + qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®, + &ctx); + type = reg_type - 3; + } else { + reg = reg_num; + type = reg_type; + if (!test_bit(ctx, (unsigned long *)&ctx_mask)) + continue; + } + stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg, + regdata); + 
if (stat) { + pr_err("QAT: write wr xfer fail\n"); + return -EINVAL; + } + } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX)); + + return 0; +} + +int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int regdata) +{ + int stat = 0; + unsigned short reg; + unsigned char ctx = 0; + enum icp_qat_uof_regtype type; + + if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG) + return -EINVAL; + + do { + if (ctx_mask == 0) { + qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®, + &ctx); + type = reg_type - 3; + } else { + reg = reg_num; + type = reg_type; + if (!test_bit(ctx, (unsigned long *)&ctx_mask)) + continue; + } + stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg, + regdata); + if (stat) { + pr_err("QAT: write rd xfer fail\n"); + return -EINVAL; + } + } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX)); + + return 0; +} + +int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + unsigned short reg_num, unsigned int regdata) +{ + int stat = 0; + unsigned char ctx; + + if (ctx_mask == 0) + return -EINVAL; + + for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { + if (!test_bit(ctx, (unsigned long *)&ctx_mask)) + continue; + stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata); + if (stat) { + pr_err("QAT: write neigh error\n"); + return -EINVAL; + } + } + + return 0; +} diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c new file mode 100644 index 000000000..1e27f9f7f --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -0,0 +1,1181 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include <linux/slab.h> +#include <linux/ctype.h> +#include <linux/kernel.h> + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "icp_qat_uclo.h" +#include "icp_qat_hal.h" +#include "icp_qat_fw_loader_handle.h" + +#define UWORD_CPYBUF_SIZE 1024 +#define INVLD_UWORD 0xffffffffffull +#define PID_MINOR_REV 0xf +#define PID_MAJOR_REV (0xf << 4) + +static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle, + unsigned int ae, unsigned int image_num) +{ + struct icp_qat_uclo_aedata *ae_data; + struct icp_qat_uclo_encapme *encap_image; + struct icp_qat_uclo_page *page = NULL; + struct icp_qat_uclo_aeslice *ae_slice = NULL; + + ae_data = &obj_handle->ae_data[ae]; + encap_image = &obj_handle->ae_uimage[image_num]; + ae_slice = &ae_data->ae_slices[ae_data->slice_num]; + ae_slice->encap_image = encap_image; + + if (encap_image->img_ptr) { + ae_slice->ctx_mask_assigned = + encap_image->img_ptr->ctx_assigned; + ae_data->eff_ustore_size = obj_handle->ustore_phy_size; + } else { + ae_slice->ctx_mask_assigned = 0; + } + ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL); + if (!ae_slice->region) + return -ENOMEM; + ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL); + if (!ae_slice->page) + goto out_err; + page = ae_slice->page; + page->encap_page = encap_image->page; + ae_slice->page->region = ae_slice->region; + ae_data->slice_num++; + return 0; +out_err: + kfree(ae_slice->region); + ae_slice->region = NULL; + return -ENOMEM; +} + +static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data) +{ + unsigned int i; + + if (!ae_data) { + pr_err("QAT: bad argument, ae_data is NULL\n "); + return -EINVAL; + } + + for (i = 0; i < ae_data->slice_num; i++) { + kfree(ae_data->ae_slices[i].region); + ae_data->ae_slices[i].region = NULL; + kfree(ae_data->ae_slices[i].page); + ae_data->ae_slices[i].page = NULL; + } + return 0; +} + +static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table, + unsigned int str_offset) +{ + if ((!str_table->table_len) || (str_offset > str_table->table_len)) + return NULL; + return (char *)(((unsigned long)(str_table->strings)) + str_offset); +} + +static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr) +{ + int maj = hdr->maj_ver & 0xff; + int min = hdr->min_ver & 0xff; + + if (hdr->file_id != ICP_QAT_UOF_FID) { + pr_err("QAT: Invalid header 0x%x\n", hdr->file_id); + return -EINVAL; + } + if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) { + pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n", + maj, min); + return -EINVAL; + } + return 0; +} + +static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle, + unsigned int addr, unsigned int *val, + unsigned int num_in_bytes) +{ + unsigned int outval; + unsigned char *ptr = (unsigned char *)val; + + while (num_in_bytes) { + memcpy(&outval, ptr, 4); + SRAM_WRITE(handle, addr, outval); + num_in_bytes -= 4; + ptr += 4; + addr += 4; + } +} + +static void qat_uclo_wr_umem_by_words(struct 
icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int addr, + unsigned int *val, + unsigned int num_in_bytes) +{ + unsigned int outval; + unsigned char *ptr = (unsigned char *)val; + + addr >>= 0x2; /* convert to uword address */ + + while (num_in_bytes) { + memcpy(&outval, ptr, 4); + qat_hal_wr_umem(handle, ae, addr++, 1, &outval); + num_in_bytes -= 4; + ptr += 4; + } +} + +static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, + struct icp_qat_uof_batch_init + *umem_init_header) +{ + struct icp_qat_uof_batch_init *umem_init; + + if (!umem_init_header) + return; + umem_init = umem_init_header->next; + while (umem_init) { + unsigned int addr, *value, size; + + ae = umem_init->ae; + addr = umem_init->addr; + value = umem_init->value; + size = umem_init->size; + qat_uclo_wr_umem_by_words(handle, ae, addr, value, size); + umem_init = umem_init->next; + } +} + +static void +qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uof_batch_init **base) +{ + struct icp_qat_uof_batch_init *umem_init; + + umem_init = *base; + while (umem_init) { + struct icp_qat_uof_batch_init *pre; + + pre = umem_init; + umem_init = umem_init->next; + kfree(pre); + } + *base = NULL; +} + +static int qat_uclo_parse_num(char *str, unsigned int *num) +{ + char buf[16] = {0}; + unsigned long ae = 0; + int i; + + strncpy(buf, str, 15); + for (i = 0; i < 16; i++) { + if (!isdigit(buf[i])) { + buf[i] = '\0'; + break; + } + } + if ((kstrtoul(buf, 10, &ae))) + return -EFAULT; + + *num = (unsigned int)ae; + return 0; +} + +static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uof_initmem *init_mem, + unsigned int size_range, unsigned int *ae) +{ + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + char *str; + + if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) { + pr_err("QAT: initmem is out of range"); + return -EINVAL; + } + if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) { + pr_err("QAT: Memory scope for init_mem error\n"); + return -EINVAL; + } + str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name); + if (!str) { + pr_err("QAT: AE name assigned in UOF init table is NULL\n"); + return -EINVAL; + } + if (qat_uclo_parse_num(str, ae)) { + pr_err("QAT: Parse num for AE number failed\n"); + return -EINVAL; + } + if (*ae >= ICP_QAT_UCLO_MAX_AE) { + pr_err("QAT: ae %d out of range\n", *ae); + return -EINVAL; + } + return 0; +} + +static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle + *handle, struct icp_qat_uof_initmem + *init_mem, unsigned int ae, + struct icp_qat_uof_batch_init + **init_tab_base) +{ + struct icp_qat_uof_batch_init *init_header, *tail; + struct icp_qat_uof_batch_init *mem_init, *tail_old; + struct icp_qat_uof_memvar_attr *mem_val_attr; + unsigned int i, flag = 0; + + mem_val_attr = + (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem + + sizeof(struct icp_qat_uof_initmem)); + + init_header = *init_tab_base; + if (!init_header) { + init_header = kzalloc(sizeof(*init_header), GFP_KERNEL); + if (!init_header) + return -ENOMEM; + init_header->size = 1; + *init_tab_base = init_header; + flag = 1; + } + tail_old = init_header; + while (tail_old->next) + tail_old = tail_old->next; + tail = tail_old; + for (i = 0; i < init_mem->val_attr_num; i++) { + mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL); + if (!mem_init) + goto out_err; + mem_init->ae = ae; + mem_init->addr = init_mem->addr + 
mem_val_attr->offset_in_byte; + mem_init->value = &mem_val_attr->value; + mem_init->size = 4; + mem_init->next = NULL; + tail->next = mem_init; + tail = mem_init; + init_header->size += qat_hal_get_ins_num(); + mem_val_attr++; + } + return 0; +out_err: + while (tail_old) { + mem_init = tail_old->next; + kfree(tail_old); + tail_old = mem_init; + } + if (flag) + kfree(*init_tab_base); + return -ENOMEM; +} + +static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uof_initmem *init_mem) +{ + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + unsigned int ae; + + if (qat_uclo_fetch_initmem_ae(handle, init_mem, + ICP_QAT_UCLO_MAX_LMEM_REG, &ae)) + return -EINVAL; + if (qat_uclo_create_batch_init_list(handle, init_mem, ae, + &obj_handle->lm_init_tab[ae])) + return -EINVAL; + return 0; +} + +static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uof_initmem *init_mem) +{ + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + unsigned int ae, ustore_size, uaddr, i; + + ustore_size = obj_handle->ustore_phy_size; + if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae)) + return -EINVAL; + if (qat_uclo_create_batch_init_list(handle, init_mem, ae, + &obj_handle->umem_init_tab[ae])) + return -EINVAL; + /* set the highest ustore address referenced */ + uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2; + for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) { + if (obj_handle->ae_data[ae].ae_slices[i]. + encap_image->uwords_num < uaddr) + obj_handle->ae_data[ae].ae_slices[i]. + encap_image->uwords_num = uaddr; + } + return 0; +} + +#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000 +static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uof_initmem *init_mem) +{ + unsigned int i; + struct icp_qat_uof_memvar_attr *mem_val_attr; + + mem_val_attr = + (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem + + sizeof(struct icp_qat_uof_initmem)); + + switch (init_mem->region) { + case ICP_QAT_UOF_SRAM_REGION: + if ((init_mem->addr + init_mem->num_in_bytes) > + ICP_DH895XCC_PESRAM_BAR_SIZE) { + pr_err("QAT: initmem on SRAM is out of range"); + return -EINVAL; + } + for (i = 0; i < init_mem->val_attr_num; i++) { + qat_uclo_wr_sram_by_words(handle, + init_mem->addr + + mem_val_attr->offset_in_byte, + &mem_val_attr->value, 4); + mem_val_attr++; + } + break; + case ICP_QAT_UOF_LMEM_REGION: + if (qat_uclo_init_lmem_seg(handle, init_mem)) + return -EINVAL; + break; + case ICP_QAT_UOF_UMEM_REGION: + if (qat_uclo_init_umem_seg(handle, init_mem)) + return -EINVAL; + break; + default: + pr_err("QAT: initmem region error. 
region type=0x%x\n", + init_mem->region); + return -EINVAL; + } + return 0; +} + +static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uclo_encapme *image) +{ + unsigned int i; + struct icp_qat_uclo_encap_page *page; + struct icp_qat_uof_image *uof_image; + unsigned char ae; + unsigned int ustore_size; + unsigned int patt_pos; + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + uint64_t *fill_data; + + uof_image = image->img_ptr; + fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t), + GFP_KERNEL); + if (!fill_data) + return -ENOMEM; + for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++) + memcpy(&fill_data[i], &uof_image->fill_pattern, + sizeof(uint64_t)); + page = image->page; + + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { + if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned)) + continue; + ustore_size = obj_handle->ae_data[ae].eff_ustore_size; + patt_pos = page->beg_addr_p + page->micro_words_num; + + qat_hal_wr_uwords(handle, (unsigned char)ae, 0, + page->beg_addr_p, &fill_data[0]); + qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos, + ustore_size - patt_pos + 1, + &fill_data[page->beg_addr_p]); + } + kfree(fill_data); + return 0; +} + +static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle) +{ + int i, ae; + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem; + + for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) { + if (initmem->num_in_bytes) { + if (qat_uclo_init_ae_memory(handle, initmem)) + return -EINVAL; + } + initmem = (struct icp_qat_uof_initmem *)((unsigned long)( + (unsigned long)initmem + + sizeof(struct icp_qat_uof_initmem)) + + (sizeof(struct icp_qat_uof_memvar_attr) * + initmem->val_attr_num)); + } + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { + if (qat_hal_batch_wr_lm(handle, ae, + obj_handle->lm_init_tab[ae])) { + pr_err("QAT: fail to batch init lmem for AE %d\n", ae); + return -EINVAL; + } + qat_uclo_cleanup_batch_init_list(handle, + &obj_handle->lm_init_tab[ae]); + qat_uclo_batch_wr_umem(handle, ae, + obj_handle->umem_init_tab[ae]); + qat_uclo_cleanup_batch_init_list(handle, + &obj_handle-> + umem_init_tab[ae]); + } + return 0; +} + +static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr, + char *chunk_id, void *cur) +{ + int i; + struct icp_qat_uof_chunkhdr *chunk_hdr = + (struct icp_qat_uof_chunkhdr *) + ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr)); + + for (i = 0; i < obj_hdr->num_chunks; i++) { + if ((cur < (void *)&chunk_hdr[i]) && + !strncmp(chunk_hdr[i].chunk_id, chunk_id, + ICP_QAT_UOF_OBJID_LEN)) { + return &chunk_hdr[i]; + } + } + return NULL; +} + +static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch) +{ + int i; + unsigned int topbit = 1 << 0xF; + unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch); + + reg ^= inbyte << 0x8; + for (i = 0; i < 0x8; i++) { + if (reg & topbit) + reg = (reg << 1) ^ 0x1021; + else + reg <<= 1; + } + return reg & 0xFFFF; +} + +static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num) +{ + unsigned int chksum = 0; + + if (ptr) + while (num--) + chksum = qat_uclo_calc_checksum(chksum, *ptr++); + return chksum; +} + +static struct icp_qat_uclo_objhdr * +qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr, + char *chunk_id) +{ + struct icp_qat_uof_filechunkhdr *file_chunk; + struct icp_qat_uclo_objhdr *obj_hdr; + char *chunk; + int i; + + file_chunk = 
(struct icp_qat_uof_filechunkhdr *) + (buf + sizeof(struct icp_qat_uof_filehdr)); + for (i = 0; i < file_hdr->num_chunks; i++) { + if (!strncmp(file_chunk->chunk_id, chunk_id, + ICP_QAT_UOF_OBJID_LEN)) { + chunk = buf + file_chunk->offset; + if (file_chunk->checksum != qat_uclo_calc_str_checksum( + chunk, file_chunk->size)) + break; + obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL); + if (!obj_hdr) + break; + obj_hdr->file_buff = chunk; + obj_hdr->checksum = file_chunk->checksum; + obj_hdr->size = file_chunk->size; + return obj_hdr; + } + file_chunk++; + } + return NULL; +} + +static unsigned int +qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj, + struct icp_qat_uof_image *image) +{ + struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab; + struct icp_qat_uof_objtable *neigh_reg_tab; + struct icp_qat_uof_code_page *code_page; + + code_page = (struct icp_qat_uof_code_page *) + ((char *)image + sizeof(struct icp_qat_uof_image)); + uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + + code_page->uc_var_tab_offset); + imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + + code_page->imp_var_tab_offset); + imp_expr_tab = (struct icp_qat_uof_objtable *) + (encap_uof_obj->beg_uof + + code_page->imp_expr_tab_offset); + if (uc_var_tab->entry_num || imp_var_tab->entry_num || + imp_expr_tab->entry_num) { + pr_err("QAT: UOF can't contain imported variable to be parsed"); + return -EINVAL; + } + neigh_reg_tab = (struct icp_qat_uof_objtable *) + (encap_uof_obj->beg_uof + + code_page->neigh_reg_tab_offset); + if (neigh_reg_tab->entry_num) { + pr_err("QAT: UOF can't contain shared control store feature"); + return -EINVAL; + } + if (image->numpages > 1) { + pr_err("QAT: UOF can't contain multiple pages"); + return -EINVAL; + } + if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) { + pr_err("QAT: UOF can't use shared control store feature"); + return -EFAULT; + } + if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) { + pr_err("QAT: UOF can't use reloadable feature"); + return -EFAULT; + } + return 0; +} + +static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj + *encap_uof_obj, + struct icp_qat_uof_image *img, + struct icp_qat_uclo_encap_page *page) +{ + struct icp_qat_uof_code_page *code_page; + struct icp_qat_uof_code_area *code_area; + struct icp_qat_uof_objtable *uword_block_tab; + struct icp_qat_uof_uword_block *uwblock; + int i; + + code_page = (struct icp_qat_uof_code_page *) + ((char *)img + sizeof(struct icp_qat_uof_image)); + page->def_page = code_page->def_page; + page->page_region = code_page->page_region; + page->beg_addr_v = code_page->beg_addr_v; + page->beg_addr_p = code_page->beg_addr_p; + code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof + + code_page->code_area_offset); + page->micro_words_num = code_area->micro_words_num; + uword_block_tab = (struct icp_qat_uof_objtable *) + (encap_uof_obj->beg_uof + + code_area->uword_block_tab); + page->uwblock_num = uword_block_tab->entry_num; + uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab + + sizeof(struct icp_qat_uof_objtable)); + page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock; + for (i = 0; i < uword_block_tab->entry_num; i++) + page->uwblock[i].micro_words = + (unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset; +} + +static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle, + struct icp_qat_uclo_encapme *ae_uimage, + int max_image) +{ + int i, j; + struct icp_qat_uof_chunkhdr 
*chunk_hdr = NULL; + struct icp_qat_uof_image *image; + struct icp_qat_uof_objtable *ae_regtab; + struct icp_qat_uof_objtable *init_reg_sym_tab; + struct icp_qat_uof_objtable *sbreak_tab; + struct icp_qat_uof_encap_obj *encap_uof_obj = + &obj_handle->encap_uof_obj; + + for (j = 0; j < max_image; j++) { + chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, + ICP_QAT_UOF_IMAG, chunk_hdr); + if (!chunk_hdr) + break; + image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof + + chunk_hdr->offset); + ae_regtab = (struct icp_qat_uof_objtable *) + (image->reg_tab_offset + + obj_handle->obj_hdr->file_buff); + ae_uimage[j].ae_reg_num = ae_regtab->entry_num; + ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *) + (((char *)ae_regtab) + + sizeof(struct icp_qat_uof_objtable)); + init_reg_sym_tab = (struct icp_qat_uof_objtable *) + (image->init_reg_sym_tab + + obj_handle->obj_hdr->file_buff); + ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num; + ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *) + (((char *)init_reg_sym_tab) + + sizeof(struct icp_qat_uof_objtable)); + sbreak_tab = (struct icp_qat_uof_objtable *) + (image->sbreak_tab + obj_handle->obj_hdr->file_buff); + ae_uimage[j].sbreak_num = sbreak_tab->entry_num; + ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *) + (((char *)sbreak_tab) + + sizeof(struct icp_qat_uof_objtable)); + ae_uimage[j].img_ptr = image; + if (qat_uclo_check_image_compat(encap_uof_obj, image)) + goto out_err; + ae_uimage[j].page = + kzalloc(sizeof(struct icp_qat_uclo_encap_page), + GFP_KERNEL); + if (!ae_uimage[j].page) + goto out_err; + qat_uclo_map_image_page(encap_uof_obj, image, + ae_uimage[j].page); + } + return j; +out_err: + for (i = 0; i < j; i++) + kfree(ae_uimage[i].page); + return 0; +} + +static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae) +{ + int i, ae; + int mflag = 0; + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + + for (ae = 0; ae <= max_ae; ae++) { + if (!test_bit(ae, + (unsigned long *)&handle->hal_handle->ae_mask)) + continue; + for (i = 0; i < obj_handle->uimage_num; i++) { + if (!test_bit(ae, (unsigned long *) + &obj_handle->ae_uimage[i].img_ptr->ae_assigned)) + continue; + mflag = 1; + if (qat_uclo_init_ae_data(obj_handle, ae, i)) + return -EINVAL; + } + } + if (!mflag) { + pr_err("QAT: uimage uses AE not set"); + return -EINVAL; + } + return 0; +} + +static struct icp_qat_uof_strtable * +qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr, + char *tab_name, struct icp_qat_uof_strtable *str_table) +{ + struct icp_qat_uof_chunkhdr *chunk_hdr; + + chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *) + obj_hdr->file_buff, tab_name, NULL); + if (chunk_hdr) { + int hdr_size; + + memcpy(&str_table->table_len, obj_hdr->file_buff + + chunk_hdr->offset, sizeof(str_table->table_len)); + hdr_size = (char *)&str_table->strings - (char *)str_table; + str_table->strings = (unsigned long)obj_hdr->file_buff + + chunk_hdr->offset + hdr_size; + return str_table; + } + return NULL; +} + +static void +qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj, + struct icp_qat_uclo_init_mem_table *init_mem_tab) +{ + struct icp_qat_uof_chunkhdr *chunk_hdr; + + chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, + ICP_QAT_UOF_IMEM, NULL); + if (chunk_hdr) { + memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof + + chunk_hdr->offset, sizeof(unsigned int)); + init_mem_tab->init_mem = (struct icp_qat_uof_initmem *) + (encap_uof_obj->beg_uof + chunk_hdr->offset + 
+ sizeof(unsigned int)); + } +} + +static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) +{ + unsigned int maj_ver, prod_type = obj_handle->prod_type; + + if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) { + pr_err("QAT: UOF type 0x%x not match with cur platform 0x%x\n", + obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type); + return -EINVAL; + } + maj_ver = obj_handle->prod_rev & 0xff; + if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) || + (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) { + pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver); + return -EINVAL; + } + return 0; +} + +static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_addr, unsigned int value) +{ + switch (reg_type) { + case ICP_GPA_ABS: + case ICP_GPB_ABS: + ctx_mask = 0; + case ICP_GPA_REL: + case ICP_GPB_REL: + return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type, + reg_addr, value); + case ICP_SR_ABS: + case ICP_DR_ABS: + case ICP_SR_RD_ABS: + case ICP_DR_RD_ABS: + ctx_mask = 0; + case ICP_SR_REL: + case ICP_DR_REL: + case ICP_SR_RD_REL: + case ICP_DR_RD_REL: + return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type, + reg_addr, value); + case ICP_SR_WR_ABS: + case ICP_DR_WR_ABS: + ctx_mask = 0; + case ICP_SR_WR_REL: + case ICP_DR_WR_REL: + return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type, + reg_addr, value); + case ICP_NEIGH_REL: + return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value); + default: + pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type); + return -EFAULT; + } + return 0; +} + +static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle, + unsigned int ae, + struct icp_qat_uclo_encapme *encap_ae) +{ + unsigned int i; + unsigned char ctx_mask; + struct icp_qat_uof_init_regsym *init_regsym; + + if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) == + ICP_QAT_UCLO_MAX_CTX) + ctx_mask = 0xff; + else + ctx_mask = 0x55; + + for (i = 0; i < encap_ae->init_regsym_num; i++) { + unsigned int exp_res; + + init_regsym = &encap_ae->init_regsym[i]; + exp_res = init_regsym->value; + switch (init_regsym->init_type) { + case ICP_QAT_UOF_INIT_REG: + qat_uclo_init_reg(handle, ae, ctx_mask, + (enum icp_qat_uof_regtype) + init_regsym->reg_type, + (unsigned short)init_regsym->reg_addr, + exp_res); + break; + case ICP_QAT_UOF_INIT_REG_CTX: + /* check if ctx is appropriate for the ctxMode */ + if (!((1 << init_regsym->ctx) & ctx_mask)) { + pr_err("QAT: invalid ctx num = 0x%x\n", + init_regsym->ctx); + return -EINVAL; + } + qat_uclo_init_reg(handle, ae, + (unsigned char) + (1 << init_regsym->ctx), + (enum icp_qat_uof_regtype) + init_regsym->reg_type, + (unsigned short)init_regsym->reg_addr, + exp_res); + break; + case ICP_QAT_UOF_INIT_EXPR: + pr_err("QAT: INIT_EXPR feature not supported\n"); + return -EINVAL; + case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP: + pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n"); + return -EINVAL; + default: + break; + } + } + return 0; +} + +static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle) +{ + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + unsigned int s, ae; + + if (obj_handle->global_inited) + return 0; + if (obj_handle->init_mem_tab.entry_num) { + if (qat_uclo_init_memory(handle)) { + pr_err("QAT: initialize memory failed\n"); + return -EINVAL; + } + } + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { + 
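+ /* Walk each slice on this AE and apply the register-symbol initializers carried by its assigned image. */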
for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) { + if (!obj_handle->ae_data[ae].ae_slices[s].encap_image) + continue; + if (qat_uclo_init_reg_sym(handle, ae, + obj_handle->ae_data[ae]. + ae_slices[s].encap_image)) + return -EINVAL; + } + } + obj_handle->global_inited = 1; + return 0; +} + +static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle) +{ + unsigned char ae, nn_mode, s; + struct icp_qat_uof_image *uof_image; + struct icp_qat_uclo_aedata *ae_data; + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { + if (!test_bit(ae, + (unsigned long *)&handle->hal_handle->ae_mask)) + continue; + ae_data = &obj_handle->ae_data[ae]; + for (s = 0; s < min_t(unsigned int, ae_data->slice_num, + ICP_QAT_UCLO_MAX_CTX); s++) { + if (!obj_handle->ae_data[ae].ae_slices[s].encap_image) + continue; + uof_image = ae_data->ae_slices[s].encap_image->img_ptr; + if (qat_hal_set_ae_ctx_mode(handle, ae, + (char)ICP_QAT_CTX_MODE + (uof_image->ae_mode))) { + pr_err("QAT: qat_hal_set_ae_ctx_mode error\n"); + return -EFAULT; + } + nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode); + if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) { + pr_err("QAT: qat_hal_set_ae_nn_mode error\n"); + return -EFAULT; + } + if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, + (char)ICP_QAT_LOC_MEM0_MODE + (uof_image->ae_mode))) { + pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n"); + return -EFAULT; + } + if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, + (char)ICP_QAT_LOC_MEM1_MODE + (uof_image->ae_mode))) { + pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n"); + return -EFAULT; + } + } + } + return 0; +} + +static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle) +{ + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + struct icp_qat_uclo_encapme *image; + int a; + + for (a = 0; a < obj_handle->uimage_num; a++) { + image = &obj_handle->ae_uimage[a]; + image->uwords_num = image->page->beg_addr_p + + image->page->micro_words_num; + } +} + +static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) +{ + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + unsigned int ae; + + obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t), + GFP_KERNEL); + if (!obj_handle->uword_buf) + return -ENOMEM; + obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff; + obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *) + obj_handle->obj_hdr->file_buff; + obj_handle->uword_in_bytes = 6; + obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE; + obj_handle->prod_rev = PID_MAJOR_REV | + (PID_MINOR_REV & handle->hal_handle->revision_id); + if (qat_uclo_check_uof_compat(obj_handle)) { + pr_err("QAT: UOF incompatible\n"); + return -EINVAL; + } + obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE; + if (!obj_handle->obj_hdr->file_buff || + !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT, + &obj_handle->str_table)) { + pr_err("QAT: UOF doesn't have effective images\n"); + goto out_err; + } + obj_handle->uimage_num = + qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage, + ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX); + if (!obj_handle->uimage_num) + goto out_err; + if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) { + pr_err("QAT: Bad object\n"); + goto out_check_uof_aemask_err; + } + qat_uclo_init_uword_num(handle); + qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj, + &obj_handle->init_mem_tab); + if (qat_uclo_set_ae_mode(handle)) + goto 
out_check_uof_aemask_err; + return 0; +out_check_uof_aemask_err: + for (ae = 0; ae < obj_handle->uimage_num; ae++) + kfree(obj_handle->ae_uimage[ae].page); +out_err: + kfree(obj_handle->uword_buf); + return -EFAULT; +} + +int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, + void *addr_ptr, int mem_size) +{ + struct icp_qat_uof_filehdr *filehdr; + struct icp_qat_uclo_objhandle *objhdl; + + BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >= + (sizeof(handle->hal_handle->ae_mask) * 8)); + + if (!handle || !addr_ptr || mem_size < 24) + return -EINVAL; + objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL); + if (!objhdl) + return -ENOMEM; + objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL); + if (!objhdl->obj_buf) + goto out_objbuf_err; + filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf; + if (qat_uclo_check_format(filehdr)) + goto out_objhdr_err; + objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr, + ICP_QAT_UOF_OBJS); + if (!objhdl->obj_hdr) { + pr_err("QAT: object file chunk is null\n"); + goto out_objhdr_err; + } + handle->obj_handle = objhdl; + if (qat_uclo_parse_uof_obj(handle)) + goto out_overlay_obj_err; + return 0; + +out_overlay_obj_err: + handle->obj_handle = NULL; + kfree(objhdl->obj_hdr); +out_objhdr_err: + kfree(objhdl->obj_buf); +out_objbuf_err: + kfree(objhdl); + return -ENOMEM; +} + +void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle) +{ + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + unsigned int a; + + if (!obj_handle) + return; + + kfree(obj_handle->uword_buf); + for (a = 0; a < obj_handle->uimage_num; a++) + kfree(obj_handle->ae_uimage[a].page); + + for (a = 0; a < handle->hal_handle->ae_max_num; a++) + qat_uclo_free_ae_data(&obj_handle->ae_data[a]); + + kfree(obj_handle->obj_hdr); + kfree(obj_handle->obj_buf); + kfree(obj_handle); + handle->obj_handle = NULL; +} + +static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, + struct icp_qat_uclo_encap_page *encap_page, + uint64_t *uword, unsigned int addr_p, + unsigned int raddr, uint64_t fill) +{ + uint64_t uwrd = 0; + unsigned int i; + + if (!encap_page) { + *uword = fill; + return; + } + for (i = 0; i < encap_page->uwblock_num; i++) { + if (raddr >= encap_page->uwblock[i].start_addr && + raddr <= encap_page->uwblock[i].start_addr + + encap_page->uwblock[i].words_num - 1) { + raddr -= encap_page->uwblock[i].start_addr; + raddr *= obj_handle->uword_in_bytes; + memcpy(&uwrd, (void *)(((unsigned long) + encap_page->uwblock[i].micro_words) + raddr), + obj_handle->uword_in_bytes); + uwrd = uwrd & 0xbffffffffffull; + } + } + *uword = uwrd; + if (*uword == INVLD_UWORD) + *uword = fill; +} + +static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uclo_encap_page + *encap_page, unsigned int ae) +{ + unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen; + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + uint64_t fill_pat; + + /* load the page starting at appropriate ustore address */ + /* get fill-pattern from an image -- they are all the same */ + memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern, + sizeof(uint64_t)); + uw_physical_addr = encap_page->beg_addr_p; + uw_relative_addr = 0; + words_num = encap_page->micro_words_num; + while (words_num) { + if (words_num < UWORD_CPYBUF_SIZE) + cpylen = words_num; + else + cpylen = UWORD_CPYBUF_SIZE; + + /* load the buffer */ + for (i = 0; i < cpylen; i++) + qat_uclo_fill_uwords(obj_handle, encap_page, + 
&obj_handle->uword_buf[i], + uw_physical_addr + i, + uw_relative_addr + i, fill_pat); + + /* copy the buffer to ustore */ + qat_hal_wr_uwords(handle, (unsigned char)ae, + uw_physical_addr, cpylen, + obj_handle->uword_buf); + + uw_physical_addr += cpylen; + uw_relative_addr += cpylen; + words_num -= cpylen; + } +} + +static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uof_image *image) +{ + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + unsigned int ctx_mask, s; + struct icp_qat_uclo_page *page; + unsigned char ae; + int ctx; + + if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX) + ctx_mask = 0xff; + else + ctx_mask = 0x55; + /* load the default page and set assigned CTX PC + * to the entrypoint address */ + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { + if (!test_bit(ae, (unsigned long *)&image->ae_assigned)) + continue; + /* find the slice to which this image is assigned */ + for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) { + if (image->ctx_assigned & obj_handle->ae_data[ae]. + ae_slices[s].ctx_mask_assigned) + break; + } + if (s >= obj_handle->ae_data[ae].slice_num) + continue; + page = obj_handle->ae_data[ae].ae_slices[s].page; + if (!page->encap_page->def_page) + continue; + qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae); + + page = obj_handle->ae_data[ae].ae_slices[s].page; + for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) + obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] = + (ctx_mask & (1 << ctx)) ? page : NULL; + qat_hal_set_live_ctx(handle, (unsigned char)ae, + image->ctx_assigned); + qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned, + image->entry_address); + } +} + +int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) +{ + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + unsigned int i; + + if (qat_uclo_init_globals(handle)) + return -EINVAL; + for (i = 0; i < obj_handle->uimage_num; i++) { + if (!obj_handle->ae_uimage[i].img_ptr) + return -EINVAL; + if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i])) + return -EINVAL; + qat_uclo_wr_uimage_page(handle, + obj_handle->ae_uimage[i].img_ptr); + } + return 0; +} diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile new file mode 100644 index 000000000..25171c557 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/Makefile @@ -0,0 +1,8 @@ +ccflags-y := -I$(src)/../qat_common +obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o +qat_dh895xcc-objs := adf_drv.o \ + adf_isr.o \ + adf_dh895xcc_hw_data.o \ + adf_hw_arbiter.o \ + qat_admin.o \ + adf_admin.o diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c new file mode 100644 index 000000000..e4666065c --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c @@ -0,0 +1,145 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include <linux/types.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <adf_accel_devices.h> +#include "adf_drv.h" +#include "adf_dh895xcc_hw_data.h" + +#define ADF_ADMINMSG_LEN 32 + +struct adf_admin_comms { + dma_addr_t phy_addr; + void *virt_addr; + void __iomem *mailbox_addr; + struct mutex lock; /* protects adf_admin_comms struct */ +}; + +int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, + uint32_t ae, void *in, void *out) +{ + struct adf_admin_comms *admin = accel_dev->admin; + int offset = ae * ADF_ADMINMSG_LEN * 2; + void __iomem *mailbox = admin->mailbox_addr; + int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE; + int times, received; + + mutex_lock(&admin->lock); + + if (ADF_CSR_RD(mailbox, mb_offset) == 1) { + mutex_unlock(&admin->lock); + return -EAGAIN; + } + + memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN); + ADF_CSR_WR(mailbox, mb_offset, 1); + received = 0; + for (times = 0; times < 50; times++) { + msleep(20); + if (ADF_CSR_RD(mailbox, mb_offset) == 0) { + received = 1; + break; + } + } + if (received) + memcpy(out, admin->virt_addr + offset + + ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN); + else + dev_err(&GET_DEV(accel_dev), + "Failed to send admin msg to accelerator\n"); + + mutex_unlock(&admin->lock); + return received ? 
0 : -EFAULT; +} + +int adf_init_admin_comms(struct adf_accel_dev *accel_dev) +{ + struct adf_admin_comms *admin; + struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR]; + void __iomem *csr = pmisc->virt_addr; + void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET; + uint64_t reg_val; + + admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, + dev_to_node(&GET_DEV(accel_dev))); + if (!admin) + return -ENOMEM; + admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, + &admin->phy_addr, GFP_KERNEL); + if (!admin->virt_addr) { + dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); + kfree(admin); + return -ENOMEM; + } + reg_val = (uint64_t)admin->phy_addr; + ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32); + ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val); + mutex_init(&admin->lock); + admin->mailbox_addr = mailbox; + accel_dev->admin = admin; + return 0; +} + +void adf_exit_admin_comms(struct adf_accel_dev *accel_dev) +{ + struct adf_admin_comms *admin = accel_dev->admin; + + if (!admin) + return; + + if (admin->virt_addr) + dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, + admin->virt_addr, admin->phy_addr); + + mutex_destroy(&admin->lock); + kfree(admin); + accel_dev->admin = NULL; +} diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c new file mode 100644 index 000000000..b1386922d --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -0,0 +1,234 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include <adf_accel_devices.h> +#include "adf_dh895xcc_hw_data.h" +#include "adf_common_drv.h" +#include "adf_drv.h" + +/* Worker thread to service arbiter mappings based on dev SKUs */ +static const uint32_t thrd_to_arb_map_sku4[] = { + 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666, + 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, + 0x00000000, 0x00000000, 0x00000000, 0x00000000 +}; + +static const uint32_t thrd_to_arb_map_sku6[] = { + 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666, + 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, + 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222 +}; + +static struct adf_hw_device_class dh895xcc_class = { + .name = ADF_DH895XCC_DEVICE_NAME, + .type = DEV_DH895XCC, + .instances = 0 +}; + +static uint32_t get_accel_mask(uint32_t fuse) +{ + return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET & + ADF_DH895XCC_ACCELERATORS_MASK; +} + +static uint32_t get_ae_mask(uint32_t fuse) +{ + return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK; +} + +static uint32_t get_num_accels(struct adf_hw_device_data *self) +{ + uint32_t i, ctr = 0; + + if (!self || !self->accel_mask) + return 0; + + for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) { + if (self->accel_mask & (1 << i)) + ctr++; + } + return ctr; +} + +static uint32_t get_num_aes(struct adf_hw_device_data *self) +{ + uint32_t i, ctr = 0; + + if (!self || !self->ae_mask) + return 0; + + for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) { + if (self->ae_mask & (1 << i)) + ctr++; + } + return ctr; +} + +static uint32_t get_misc_bar_id(struct adf_hw_device_data *self) +{ + return ADF_DH895XCC_PMISC_BAR; +} + +static uint32_t get_etr_bar_id(struct adf_hw_device_data *self) +{ + return ADF_DH895XCC_ETR_BAR; +} + +static enum dev_sku_info get_sku(struct adf_hw_device_data *self) +{ + int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK) + >> ADF_DH895XCC_FUSECTL_SKU_SHIFT; + + switch (sku) { + case ADF_DH895XCC_FUSECTL_SKU_1: + return DEV_SKU_1; + case ADF_DH895XCC_FUSECTL_SKU_2: + return DEV_SKU_2; + case ADF_DH895XCC_FUSECTL_SKU_3: + return DEV_SKU_3; + case ADF_DH895XCC_FUSECTL_SKU_4: + return DEV_SKU_4; + default: + return DEV_SKU_UNKNOWN; + } + return DEV_SKU_UNKNOWN; +} + +void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, + uint32_t const **arb_map_config) +{ + switch (accel_dev->accel_pci_dev.sku) { + case DEV_SKU_1: + *arb_map_config = thrd_to_arb_map_sku4; + break; + + case DEV_SKU_2: + case DEV_SKU_4: + *arb_map_config = thrd_to_arb_map_sku6; + break; + default: + dev_err(&GET_DEV(accel_dev), + "The configuration doesn't match any SKU"); + *arb_map_config = NULL; + } +} + +static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR]; + void __iomem *csr = misc_bar->virt_addr; + unsigned int val, i; + + /* Enable Accel Engine error detection & correction */ + for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { + val = 
ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i)); + val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR; + ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val); + val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i)); + val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR; + ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val); + } + + /* Enable shared memory error detection & correction */ + for (i = 0; i < hw_device->get_num_accels(hw_device); i++) { + val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i)); + val |= ADF_DH895XCC_ERRSSMSH_EN; + ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val); + val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i)); + val |= ADF_DH895XCC_ERRSSMSH_EN; + ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val); + } +} + +static void adf_enable_ints(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr; + + addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr; + + /* Enable bundle and misc interrupts */ + ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET, + ADF_DH895XCC_SMIA0_MASK); + ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET, + ADF_DH895XCC_SMIA1_MASK); +} + +void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class = &dh895xcc_class; + hw_data->instance_id = dh895xcc_class.instances++; + hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS; + hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS; + hw_data->pci_dev_id = ADF_DH895XCC_PCI_DEVICE_ID; + hw_data->num_logical_accel = 1; + hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES; + hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK; + hw_data->alloc_irq = adf_isr_resource_alloc; + hw_data->free_irq = adf_isr_resource_free; + hw_data->enable_error_correction = adf_enable_error_correction; + hw_data->hw_arb_ring_enable = adf_update_ring_arb_enable; + hw_data->hw_arb_ring_disable = adf_update_ring_arb_enable; + hw_data->get_accel_mask = get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = get_num_accels; + hw_data->get_num_aes = get_num_aes; + hw_data->get_etr_bar_id = get_etr_bar_id; + hw_data->get_misc_bar_id = get_misc_bar_id; + hw_data->get_sku = get_sku; + hw_data->fw_name = ADF_DH895XCC_FW; + hw_data->init_admin_comms = adf_init_admin_comms; + hw_data->exit_admin_comms = adf_exit_admin_comms; + hw_data->init_arb = adf_init_arb; + hw_data->exit_arb = adf_exit_arb; + hw_data->enable_ints = adf_enable_ints; +} + +void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class->instances--; +} diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h new file mode 100644 index 000000000..d5cb7beaa --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h @@ -0,0 +1,88 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. 
+ Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_DH895x_HW_DATA_H_ +#define ADF_DH895x_HW_DATA_H_ + +/* PCIe configuration space */ +#define ADF_DH895XCC_PMISC_BAR 1 +#define ADF_DH895XCC_ETR_BAR 2 +#define ADF_DH895XCC_RX_RINGS_OFFSET 8 +#define ADF_DH895XCC_TX_RINGS_MASK 0xFF +#define ADF_DH895XCC_FUSECTL_OFFSET 0x40 +#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000 +#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20 +#define ADF_DH895XCC_FUSECTL_SKU_1 0x0 +#define ADF_DH895XCC_FUSECTL_SKU_2 0x1 +#define ADF_DH895XCC_FUSECTL_SKU_3 0x2 +#define ADF_DH895XCC_FUSECTL_SKU_4 0x3 +#define ADF_DH895XCC_MAX_ACCELERATORS 6 +#define ADF_DH895XCC_MAX_ACCELENGINES 12 +#define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13 +#define ADF_DH895XCC_ACCELERATORS_MASK 0x3F +#define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF +#define ADF_DH895XCC_LEGFUSE_OFFSET 0x4C +#define ADF_DH895XCC_ETR_MAX_BANKS 32 +#define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) +#define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) +#define ADF_DH895XCC_SMIA0_MASK 0xFFFFFFFF +#define ADF_DH895XCC_SMIA1_MASK 0x1 +/* Error detection and correction */ +#define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818) +#define ADF_DH895XCC_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960) +#define ADF_DH895XCC_ENABLE_AE_ECC_ERR BIT(28) +#define ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12)) +#define ADF_DH895XCC_UERRSSMSH(i) (i * 0x4000 + 0x18) +#define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10) +#define ADF_DH895XCC_ERRSSMSH_EN BIT(3) + +/* Admin Messages Registers */ +#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574) +#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578) +#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970 +#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000 +#define ADF_DH895XCC_FW "/*(DEBLOBBED)*/" +#endif diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c new file mode 100644 index 000000000..87bd36a3e --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -0,0 +1,421 @@ +/* + This file is provided under a dual BSD/GPLv2 license. 
When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/workqueue.h> +#include <linux/io.h> +#include <adf_accel_devices.h> +#include <adf_common_drv.h> +#include <adf_cfg.h> +#include <adf_transport_access_macros.h> +#include "adf_dh895xcc_hw_data.h" +#include "adf_drv.h" + +static const char adf_driver_name[] = ADF_DH895XCC_DEVICE_NAME; + +#define ADF_SYSTEM_DEVICE(device_id) \ + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +static const struct pci_device_id adf_pci_tbl[] = { + ADF_SYSTEM_DEVICE(ADF_DH895XCC_PCI_DEVICE_ID), + {0,} +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); +static void adf_remove(struct pci_dev *dev); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = adf_driver_name, + .probe = adf_probe, + .remove = adf_remove +}; + +static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; + int i; + + adf_dev_shutdown(accel_dev); + + for (i = 0; i < ADF_PCI_MAX_BARS; i++) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; + + if (bar->virt_addr) + pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr); + } + + if (accel_dev->hw_device) { + switch (accel_dev->hw_device->pci_dev_id) { + case ADF_DH895XCC_PCI_DEVICE_ID: + adf_clean_hw_data_dh895xcc(accel_dev->hw_device); + break; + default: + break; + } + kfree(accel_dev->hw_device); + } + adf_cfg_dev_remove(accel_dev); + debugfs_remove(accel_dev->debugfs_dir); + adf_devmgr_rm_dev(accel_dev); + pci_release_regions(accel_pci_dev->pci_dev); + pci_disable_device(accel_pci_dev->pci_dev); + kfree(accel_dev); +} + +static int adf_dev_configure(struct adf_accel_dev *accel_dev) +{ + int cpus = num_online_cpus(); + int banks = GET_MAX_BANKS(accel_dev); + int instances = min(cpus, banks); + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + int i; + unsigned long val; + + if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) + goto err; + if (adf_cfg_section_add(accel_dev, "Accelerator0")) + goto err; + for (i = 0; i < instances; i++) { + val = i; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, + i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); + val = 128; + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 512; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 0; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 2; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 4; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i); + if 
(adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 8; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 10; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 12; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = ADF_COALESCING_DEF_TIME; + snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); + if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", + key, (void *)&val, ADF_DEC)) + goto err; + } + + val = i; + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + ADF_NUM_CY, (void *)&val, ADF_DEC)) + goto err; + + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + return 0; +err: + dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n"); + return -EINVAL; +} + +static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct adf_accel_dev *accel_dev; + struct adf_accel_pci *accel_pci_dev; + struct adf_hw_device_data *hw_data; + char name[ADF_DEVICE_NAME_LENGTH]; + unsigned int i, bar_nr; + int ret; + + switch (ent->device) { + case ADF_DH895XCC_PCI_DEVICE_ID: + break; + default: + dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device); + return -ENODEV; + } + + if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { + /* If the accelerator is connected to a node with no memory + * there is no point in using the accelerator since the remote + * memory transaction will be very slow. */ + dev_err(&pdev->dev, "Invalid NUMA configuration.\n"); + return -EINVAL; + } + + accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!accel_dev) + return -ENOMEM; + + INIT_LIST_HEAD(&accel_dev->crypto_list); + + /* Add accel device to accel table. + * This should be called before adf_cleanup_accel is called */ + if (adf_devmgr_add_dev(accel_dev)) { + dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); + kfree(accel_dev); + return -EFAULT; + } + + accel_dev->owner = THIS_MODULE; + /* Allocate and configure device configuration structure */ + hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!hw_data) { + ret = -ENOMEM; + goto out_err; + } + + accel_dev->hw_device = hw_data; + switch (ent->device) { + case ADF_DH895XCC_PCI_DEVICE_ID: + adf_init_hw_data_dh895xcc(accel_dev->hw_device); + break; + default: + return -ENODEV; + } + accel_pci_dev = &accel_dev->accel_pci_dev; + pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); + pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET, + &hw_data->fuses); + + /* Get Accelerators and Accelerators Engines masks */ + hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); + hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); + accel_pci_dev->sku = hw_data->get_sku(hw_data); + accel_pci_dev->pci_dev = pdev; + /* If the device has no acceleration engines then ignore it. 
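+ * The test below also requires acceleration engine 0 to be enabled in ae_mask.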
*/ + if (!hw_data->accel_mask || !hw_data->ae_mask || + ((~hw_data->ae_mask) & 0x01)) { + dev_err(&pdev->dev, "No acceleration units found"); + ret = -EFAULT; + goto out_err; + } + + /* Create dev top level debugfs entry */ + snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX, + hw_data->dev_class->name, hw_data->instance_id); + accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); + if (!accel_dev->debugfs_dir) { + dev_err(&pdev->dev, "Could not create debugfs dir\n"); + ret = -EINVAL; + goto out_err; + } + + /* Create device configuration table */ + ret = adf_cfg_dev_add(accel_dev); + if (ret) + goto out_err; + + /* enable PCI device */ + if (pci_enable_device(pdev)) { + ret = -EFAULT; + goto out_err; + } + + /* set dma identifier */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + dev_err(&pdev->dev, "No usable DMA configuration\n"); + ret = -EFAULT; + goto out_err; + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } + + if (pci_request_regions(pdev, adf_driver_name)) { + ret = -EFAULT; + goto out_err; + } + + /* Read accelerator capabilities mask */ + pci_read_config_dword(pdev, ADF_DH895XCC_LEGFUSE_OFFSET, + &hw_data->accel_capabilities_mask); + + /* Find and map all the device's BARS */ + for (i = 0; i < ADF_PCI_MAX_BARS; i++) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; + + bar_nr = i * 2; + bar->base_addr = pci_resource_start(pdev, bar_nr); + if (!bar->base_addr) + break; + bar->size = pci_resource_len(pdev, bar_nr); + bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); + if (!bar->virt_addr) { + dev_err(&pdev->dev, "Failed to map BAR %d\n", i); + ret = -EFAULT; + goto out_err; + } + } + pci_set_master(pdev); + + if (adf_enable_aer(accel_dev, &adf_driver)) { + dev_err(&pdev->dev, "Failed to enable aer\n"); + ret = -EFAULT; + goto out_err; + } + + if (pci_save_state(pdev)) { + dev_err(&pdev->dev, "Failed to save pci state\n"); + ret = -ENOMEM; + goto out_err; + } + + ret = adf_dev_configure(accel_dev); + if (ret) + goto out_err; + + ret = adf_dev_init(accel_dev); + if (ret) + goto out_err; + + ret = adf_dev_start(accel_dev); + if (ret) { + adf_dev_stop(accel_dev); + goto out_err; + } + + return 0; +out_err: + adf_cleanup_accel(accel_dev); + return ret; +} + +static void adf_remove(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + if (!accel_dev) { + pr_err("QAT: Driver removal failed\n"); + return; + } + if (adf_dev_stop(accel_dev)) + dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); + adf_disable_aer(accel_dev); + adf_cleanup_accel(accel_dev); +} + +static int __init adfdrv_init(void) +{ + request_module("intel_qat"); + if (qat_admin_register()) + return -EFAULT; + + if (pci_register_driver(&adf_driver)) { + pr_err("QAT: Driver initialization failed\n"); + return -EFAULT; + } + return 0; +} + +static void __exit adfdrv_release(void) +{ + pci_unregister_driver(&adf_driver); + qat_admin_unregister(); +} + +module_init(adfdrv_init); +module_exit(adfdrv_release); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel"); +/*(DEBLOBBED)*/ +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h new file mode 100644 index 000000000..a2fbb6ce7 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h @@ -0,0 +1,67 @@ +/* + This file is provided 
under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_DH895x_DRV_H_ +#define ADF_DH895x_DRV_H_ +#include <adf_accel_devices.h> +#include <adf_transport.h> + +void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); +int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev); +void adf_isr_resource_free(struct adf_accel_dev *accel_dev); +void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring); +void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, + uint32_t const **arb_map_config); +int adf_init_admin_comms(struct adf_accel_dev *accel_dev); +void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); +int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, + uint32_t ae, void *in, void *out); +int qat_admin_register(void); +int qat_admin_unregister(void); +int adf_init_arb(struct adf_accel_dev *accel_dev); +void adf_exit_arb(struct adf_accel_dev *accel_dev); +#endif diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c new file mode 100644 index 000000000..1864bdb36 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c @@ -0,0 +1,159 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. 
+ + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include <adf_accel_devices.h> +#include <adf_transport_internal.h> +#include "adf_drv.h" + +#define ADF_ARB_NUM 4 +#define ADF_ARB_REQ_RING_NUM 8 +#define ADF_ARB_REG_SIZE 0x4 +#define ADF_ARB_WTR_SIZE 0x20 +#define ADF_ARB_OFFSET 0x30000 +#define ADF_ARB_REG_SLOT 0x1000 +#define ADF_ARB_WTR_OFFSET 0x010 +#define ADF_ARB_RO_EN_OFFSET 0x090 +#define ADF_ARB_WQCFG_OFFSET 0x100 +#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180 +#define ADF_ARB_WRK_2_SER_MAP 10 +#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C + +#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ + (ADF_ARB_REG_SLOT * index), value) + +#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ + ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value) + +#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \ + ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ + ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \ + (ADF_ARB_REG_SIZE * index), value) + +#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \ + (ADF_ARB_REG_SIZE * index), value) + +#define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ + ADF_ARB_WRK_2_SER_MAP_OFFSET) + \ + (ADF_ARB_REG_SIZE * index), value) + +#define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ + ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value) + +int adf_init_arb(struct adf_accel_dev *accel_dev) +{ + void __iomem *csr = accel_dev->transport->banks[0].csr_addr; + uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1; + uint32_t arb, i; + const uint32_t *thd_2_arb_cfg; + + /* Service arb configured for 32 bytes responses and + * ring flow control check enabled. 
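+ * The same arb_cfg value is written to the SARCONFIG register of each of the ADF_ARB_NUM arbiters in the loop below.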
*/ + for (arb = 0; arb < ADF_ARB_NUM; arb++) + WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg); + + /* Setup service weighting */ + for (arb = 0; arb < ADF_ARB_NUM; arb++) + for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++) + WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF); + + /* Setup ring response ordering */ + for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++) + WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF); + + /* Setup worker queue registers */ + for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) + WRITE_CSR_ARB_WQCFG(csr, i, i); + + /* Map worker threads to service arbiters */ + adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg); + + if (!thd_2_arb_cfg) + return -EFAULT; + + for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) + WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i)); + + return 0; +} + +void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring) +{ + WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr, + ring->bank->bank_number, + ring->bank->ring_mask & 0xFF); +} + +void adf_exit_arb(struct adf_accel_dev *accel_dev) +{ + void __iomem *csr; + unsigned int i; + + if (!accel_dev->transport) + return; + + csr = accel_dev->transport->banks[0].csr_addr; + + /* Reset arbiter configuration */ + for (i = 0; i < ADF_ARB_NUM; i++) + WRITE_CSR_ARB_SARCONFIG(csr, i, 0); + + /* Shutdown work queue */ + for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) + WRITE_CSR_ARB_WQCFG(csr, i, 0); + + /* Unmap worker threads to service arbiters */ + for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) + WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0); + + /* Disable arbitration on all rings */ + for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) + WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0); +} diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c new file mode 100644 index 000000000..0d03c109c --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c @@ -0,0 +1,265 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <adf_accel_devices.h> +#include <adf_common_drv.h> +#include <adf_cfg.h> +#include <adf_cfg_strings.h> +#include <adf_cfg_common.h> +#include <adf_transport_access_macros.h> +#include <adf_transport_internal.h> +#include "adf_drv.h" + +static int adf_enable_msix(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + uint32_t msix_num_entries = hw_data->num_banks + 1; + int i; + + for (i = 0; i < msix_num_entries; i++) + pci_dev_info->msix_entries.entries[i].entry = i; + + if (pci_enable_msix_exact(pci_dev_info->pci_dev, + pci_dev_info->msix_entries.entries, + msix_num_entries)) { + dev_err(&GET_DEV(accel_dev), "Failed to enable MSIX IRQ\n"); + return -EFAULT; + } + return 0; +} + +static void adf_disable_msix(struct adf_accel_pci *pci_dev_info) +{ + pci_disable_msix(pci_dev_info->pci_dev); +} + +static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr) +{ + struct adf_etr_bank_data *bank = bank_ptr; + + WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0); + tasklet_hi_schedule(&bank->resp_handler); + return IRQ_HANDLED; +} + +static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) +{ + struct adf_accel_dev *accel_dev = dev_ptr; + + dev_info(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n", + accel_dev->accel_id); + return IRQ_HANDLED; +} + +static int adf_request_irqs(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct msix_entry *msixe = pci_dev_info->msix_entries.entries; + struct adf_etr_data *etr_data = accel_dev->transport; + int ret, i; + char *name; + + /* Request msix irq for all banks */ + for (i = 0; i < hw_data->num_banks; i++) { + struct adf_etr_bank_data *bank = &etr_data->banks[i]; + unsigned int cpu, cpus = num_online_cpus(); + + name = *(pci_dev_info->msix_entries.names + i); + snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, + "qat%d-bundle%d", accel_dev->accel_id, i); + ret = request_irq(msixe[i].vector, + adf_msix_isr_bundle, 0, name, bank); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "failed to enable irq %d for %s\n", + msixe[i].vector, name); + return ret; + } + + cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus; + irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu)); + } + + /* Request msix irq for AE */ + name = *(pci_dev_info->msix_entries.names + i); + snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, + "qat%d-ae-cluster", accel_dev->accel_id); + ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "failed to enable irq %d, for %s\n", + msixe[i].vector, name); + return ret; + } + return ret; +} + +static void adf_free_irqs(struct 
adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct msix_entry *msixe = pci_dev_info->msix_entries.entries; + struct adf_etr_data *etr_data = accel_dev->transport; + int i; + + for (i = 0; i < hw_data->num_banks; i++) { + irq_set_affinity_hint(msixe[i].vector, NULL); + free_irq(msixe[i].vector, &etr_data->banks[i]); + } + irq_set_affinity_hint(msixe[i].vector, NULL); + free_irq(msixe[i].vector, accel_dev); +} + +static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) +{ + int i; + char **names; + struct msix_entry *entries; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + uint32_t msix_num_entries = hw_data->num_banks + 1; + + entries = kzalloc_node(msix_num_entries * sizeof(*entries), + GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); + if (!entries) + return -ENOMEM; + + names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL); + if (!names) { + kfree(entries); + return -ENOMEM; + } + for (i = 0; i < msix_num_entries; i++) { + *(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL); + if (!(*(names + i))) + goto err; + } + accel_dev->accel_pci_dev.msix_entries.entries = entries; + accel_dev->accel_pci_dev.msix_entries.names = names; + return 0; +err: + for (i = 0; i < msix_num_entries; i++) + kfree(*(names + i)); + kfree(entries); + kfree(names); + return -ENOMEM; +} + +static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + uint32_t msix_num_entries = hw_data->num_banks + 1; + char **names = accel_dev->accel_pci_dev.msix_entries.names; + int i; + + kfree(accel_dev->accel_pci_dev.msix_entries.entries); + for (i = 0; i < msix_num_entries; i++) + kfree(*(names + i)); + kfree(names); +} + +static int adf_setup_bh(struct adf_accel_dev *accel_dev) +{ + struct adf_etr_data *priv_data = accel_dev->transport; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + int i; + + for (i = 0; i < hw_data->num_banks; i++) + tasklet_init(&priv_data->banks[i].resp_handler, + adf_response_handler, + (unsigned long)&priv_data->banks[i]); + return 0; +} + +static void adf_cleanup_bh(struct adf_accel_dev *accel_dev) +{ + struct adf_etr_data *priv_data = accel_dev->transport; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + int i; + + for (i = 0; i < hw_data->num_banks; i++) { + tasklet_disable(&priv_data->banks[i].resp_handler); + tasklet_kill(&priv_data->banks[i].resp_handler); + } +} + +void adf_isr_resource_free(struct adf_accel_dev *accel_dev) +{ + adf_free_irqs(accel_dev); + adf_cleanup_bh(accel_dev); + adf_disable_msix(&accel_dev->accel_pci_dev); + adf_isr_free_msix_entry_table(accel_dev); +} + +int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev) +{ + int ret; + + ret = adf_isr_alloc_msix_entry_table(accel_dev); + if (ret) + return ret; + if (adf_enable_msix(accel_dev)) + goto err_out; + + if (adf_setup_bh(accel_dev)) + goto err_out; + + if (adf_request_irqs(accel_dev)) + goto err_out; + + return 0; +err_out: + adf_isr_resource_free(accel_dev); + return -EFAULT; +} diff --git a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c b/drivers/crypto/qat/qat_dh895xcc/qat_admin.c new file mode 100644 index 000000000..55b7a8e48 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/qat_admin.c @@ -0,0 +1,107 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. 
+ + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include <icp_qat_fw_init_admin.h> +#include <adf_accel_devices.h> +#include <adf_common_drv.h> +#include "adf_drv.h" + +static struct service_hndl qat_admin; + +static int qat_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd) +{ + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + struct icp_qat_fw_init_admin_req req; + struct icp_qat_fw_init_admin_resp resp; + int i; + + memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req)); + req.init_admin_cmd_id = cmd; + for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { + memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp)); + if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) || + resp.init_resp_hdr.status) + return -EFAULT; + } + return 0; +} + +static int qat_admin_start(struct adf_accel_dev *accel_dev) +{ + return qat_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME); +} + +static int qat_admin_event_handler(struct adf_accel_dev *accel_dev, + enum adf_event event) +{ + int ret; + + switch (event) { + case ADF_EVENT_START: + ret = qat_admin_start(accel_dev); + break; + case ADF_EVENT_STOP: + case ADF_EVENT_INIT: + case ADF_EVENT_SHUTDOWN: + default: + ret = 0; + } + return ret; +} + +int qat_admin_register(void) +{ + memset(&qat_admin, 0, sizeof(struct service_hndl)); + qat_admin.event_hld = qat_admin_event_handler; + qat_admin.name = "qat_admin"; + qat_admin.admin = 1; + return adf_service_register(&qat_admin); +} + +int qat_admin_unregister(void) +{ + return adf_service_unregister(&qat_admin); +} diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile new file mode 100644 index 000000000..348dc3173 --- /dev/null +++ b/drivers/crypto/qce/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o +qcrypto-objs := core.o \ + common.o \ + dma.o \ + sha.o \ + ablkcipher.o diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c new file mode 100644 index 000000000..ad592de47 --- /dev/null +++ b/drivers/crypto/qce/ablkcipher.c @@ -0,0 +1,431 @@ +/* + * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/types.h> +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <crypto/des.h> + +#include "cipher.h" + +static LIST_HEAD(ablkcipher_algs); + +static void qce_ablkcipher_done(void *data) +{ + struct crypto_async_request *async_req = data; + struct ablkcipher_request *req = ablkcipher_request_cast(async_req); + struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); + struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); + struct qce_device *qce = tmpl->qce; + enum dma_data_direction dir_src, dir_dst; + u32 status; + int error; + bool diff_dst; + + diff_dst = (req->src != req->dst) ? true : false; + dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; + dir_dst = diff_dst ? 
DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; + + error = qce_dma_terminate_all(&qce->dma); + if (error) + dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n", + error); + + if (diff_dst) + qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src, + rctx->dst_chained); + qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, + rctx->dst_chained); + + sg_free_table(&rctx->dst_tbl); + + error = qce_check_status(qce, &status); + if (error < 0) + dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status); + + qce->async_req_done(tmpl->qce, error); +} + +static int +qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req) +{ + struct ablkcipher_request *req = ablkcipher_request_cast(async_req); + struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); + struct qce_device *qce = tmpl->qce; + enum dma_data_direction dir_src, dir_dst; + struct scatterlist *sg; + bool diff_dst; + gfp_t gfp; + int ret; + + rctx->iv = req->info; + rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher); + rctx->cryptlen = req->nbytes; + + diff_dst = (req->src != req->dst) ? true : false; + dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; + dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; + + rctx->src_nents = qce_countsg(req->src, req->nbytes, + &rctx->src_chained); + if (diff_dst) { + rctx->dst_nents = qce_countsg(req->dst, req->nbytes, + &rctx->dst_chained); + } else { + rctx->dst_nents = rctx->src_nents; + rctx->dst_chained = rctx->src_chained; + } + + rctx->dst_nents += 1; + + gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + + ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp); + if (ret) + return ret; + + sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); + + sg = qce_sgtable_add(&rctx->dst_tbl, req->dst); + if (IS_ERR(sg)) { + ret = PTR_ERR(sg); + goto error_free; + } + + sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg); + if (IS_ERR(sg)) { + ret = PTR_ERR(sg); + goto error_free; + } + + sg_mark_end(sg); + rctx->dst_sg = rctx->dst_tbl.sgl; + + ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, + rctx->dst_chained); + if (ret < 0) + goto error_free; + + if (diff_dst) { + ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src, + rctx->src_chained); + if (ret < 0) + goto error_unmap_dst; + rctx->src_sg = req->src; + } else { + rctx->src_sg = rctx->dst_sg; + } + + ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents, + rctx->dst_sg, rctx->dst_nents, + qce_ablkcipher_done, async_req); + if (ret) + goto error_unmap_src; + + qce_dma_issue_pending(&qce->dma); + + ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0); + if (ret) + goto error_terminate; + + return 0; + +error_terminate: + qce_dma_terminate_all(&qce->dma); +error_unmap_src: + if (diff_dst) + qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src, + rctx->src_chained); +error_unmap_dst: + qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, + rctx->dst_chained); +error_free: + sg_free_table(&rctx->dst_tbl); + return ret; +} + +static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key, + unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk); + struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + unsigned long flags = to_cipher_tmpl(tfm)->alg_flags; + int ret; + + if (!key || !keylen) + return -EINVAL; + + if 
(IS_AES(flags)) { + switch (keylen) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_256: + break; + default: + goto fallback; + } + } else if (IS_DES(flags)) { + u32 tmp[DES_EXPKEY_WORDS]; + + ret = des_ekey(tmp, key); + if (!ret && crypto_ablkcipher_get_flags(ablk) & + CRYPTO_TFM_REQ_WEAK_KEY) + goto weakkey; + } + + ctx->enc_keylen = keylen; + memcpy(ctx->enc_key, key, keylen); + return 0; +fallback: + ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen); + if (!ret) + ctx->enc_keylen = keylen; + return ret; +weakkey: + crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY); + return -EINVAL; +} + +static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt) +{ + struct crypto_tfm *tfm = + crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); + struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); + struct qce_alg_template *tmpl = to_cipher_tmpl(tfm); + int ret; + + rctx->flags = tmpl->alg_flags; + rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT; + + if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 && + ctx->enc_keylen != AES_KEYSIZE_256) { + ablkcipher_request_set_tfm(req, ctx->fallback); + ret = encrypt ? crypto_ablkcipher_encrypt(req) : + crypto_ablkcipher_decrypt(req); + ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); + return ret; + } + + return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base); +} + +static int qce_ablkcipher_encrypt(struct ablkcipher_request *req) +{ + return qce_ablkcipher_crypt(req, 1); +} + +static int qce_ablkcipher_decrypt(struct ablkcipher_request *req) +{ + return qce_ablkcipher_crypt(req, 0); +} + +static int qce_ablkcipher_init(struct crypto_tfm *tfm) +{ + struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + memset(ctx, 0, sizeof(*ctx)); + tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx); + + ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), + CRYPTO_ALG_TYPE_ABLKCIPHER, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback)) + return PTR_ERR(ctx->fallback); + + return 0; +} + +static void qce_ablkcipher_exit(struct crypto_tfm *tfm) +{ + struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_ablkcipher(ctx->fallback); +} + +struct qce_ablkcipher_def { + unsigned long flags; + const char *name; + const char *drv_name; + unsigned int blocksize; + unsigned int ivsize; + unsigned int min_keysize; + unsigned int max_keysize; +}; + +static const struct qce_ablkcipher_def ablkcipher_def[] = { + { + .flags = QCE_ALG_AES | QCE_MODE_ECB, + .name = "ecb(aes)", + .drv_name = "ecb-aes-qce", + .blocksize = AES_BLOCK_SIZE, + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + }, + { + .flags = QCE_ALG_AES | QCE_MODE_CBC, + .name = "cbc(aes)", + .drv_name = "cbc-aes-qce", + .blocksize = AES_BLOCK_SIZE, + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + }, + { + .flags = QCE_ALG_AES | QCE_MODE_CTR, + .name = "ctr(aes)", + .drv_name = "ctr-aes-qce", + .blocksize = AES_BLOCK_SIZE, + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + }, + { + .flags = QCE_ALG_AES | QCE_MODE_XTS, + .name = "xts(aes)", + .drv_name = "xts-aes-qce", + .blocksize = AES_BLOCK_SIZE, + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + }, + { + .flags = QCE_ALG_DES | QCE_MODE_ECB, + .name = "ecb(des)", + .drv_name = "ecb-des-qce", + 
.blocksize = DES_BLOCK_SIZE, + .ivsize = 0, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + }, + { + .flags = QCE_ALG_DES | QCE_MODE_CBC, + .name = "cbc(des)", + .drv_name = "cbc-des-qce", + .blocksize = DES_BLOCK_SIZE, + .ivsize = DES_BLOCK_SIZE, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + }, + { + .flags = QCE_ALG_3DES | QCE_MODE_ECB, + .name = "ecb(des3_ede)", + .drv_name = "ecb-3des-qce", + .blocksize = DES3_EDE_BLOCK_SIZE, + .ivsize = 0, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + }, + { + .flags = QCE_ALG_3DES | QCE_MODE_CBC, + .name = "cbc(des3_ede)", + .drv_name = "cbc-3des-qce", + .blocksize = DES3_EDE_BLOCK_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + }, +}; + +static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def, + struct qce_device *qce) +{ + struct qce_alg_template *tmpl; + struct crypto_alg *alg; + int ret; + + tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); + if (!tmpl) + return -ENOMEM; + + alg = &tmpl->alg.crypto; + + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->drv_name); + + alg->cra_blocksize = def->blocksize; + alg->cra_ablkcipher.ivsize = def->ivsize; + alg->cra_ablkcipher.min_keysize = def->min_keysize; + alg->cra_ablkcipher.max_keysize = def->max_keysize; + alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey; + alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt; + alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt; + + alg->cra_priority = 300; + alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK; + alg->cra_ctxsize = sizeof(struct qce_cipher_ctx); + alg->cra_alignmask = 0; + alg->cra_type = &crypto_ablkcipher_type; + alg->cra_module = THIS_MODULE; + alg->cra_init = qce_ablkcipher_init; + alg->cra_exit = qce_ablkcipher_exit; + INIT_LIST_HEAD(&alg->cra_list); + + INIT_LIST_HEAD(&tmpl->entry); + tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER; + tmpl->alg_flags = def->flags; + tmpl->qce = qce; + + ret = crypto_register_alg(alg); + if (ret) { + kfree(tmpl); + dev_err(qce->dev, "%s registration failed\n", alg->cra_name); + return ret; + } + + list_add_tail(&tmpl->entry, &ablkcipher_algs); + dev_dbg(qce->dev, "%s is registered\n", alg->cra_name); + return 0; +} + +static void qce_ablkcipher_unregister(struct qce_device *qce) +{ + struct qce_alg_template *tmpl, *n; + + list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) { + crypto_unregister_alg(&tmpl->alg.crypto); + list_del(&tmpl->entry); + kfree(tmpl); + } +} + +static int qce_ablkcipher_register(struct qce_device *qce) +{ + int ret, i; + + for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) { + ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce); + if (ret) + goto err; + } + + return 0; +err: + qce_ablkcipher_unregister(qce); + return ret; +} + +const struct qce_algo_ops ablkcipher_ops = { + .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .register_algs = qce_ablkcipher_register, + .unregister_algs = qce_ablkcipher_unregister, + .async_req_handle = qce_ablkcipher_async_req_handle, +}; diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h new file mode 100644 index 000000000..d5757cfcd --- /dev/null +++ b/drivers/crypto/qce/cipher.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CIPHER_H_ +#define _CIPHER_H_ + +#include "common.h" +#include "core.h" + +#define QCE_MAX_KEY_SIZE 64 + +struct qce_cipher_ctx { + u8 enc_key[QCE_MAX_KEY_SIZE]; + unsigned int enc_keylen; + struct crypto_ablkcipher *fallback; +}; + +/** + * struct qce_cipher_reqctx - holds private cipher objects per request + * @flags: operation flags + * @iv: pointer to the IV + * @ivsize: IV size + * @src_nents: source entries + * @dst_nents: destination entries + * @src_chained: is source chained + * @dst_chained: is destination chained + * @result_sg: scatterlist used for result buffer + * @dst_tbl: destination sg table + * @dst_sg: destination sg pointer table beginning + * @src_tbl: source sg table + * @src_sg: source sg pointer table beginning; + * @cryptlen: crypto length + */ +struct qce_cipher_reqctx { + unsigned long flags; + u8 *iv; + unsigned int ivsize; + int src_nents; + int dst_nents; + bool src_chained; + bool dst_chained; + struct scatterlist result_sg; + struct sg_table dst_tbl; + struct scatterlist *dst_sg; + struct sg_table src_tbl; + struct scatterlist *src_sg; + unsigned int cryptlen; +}; + +static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm) +{ + struct crypto_alg *alg = tfm->__crt_alg; + return container_of(alg, struct qce_alg_template, alg.crypto); +} + +extern const struct qce_algo_ops ablkcipher_ops; + +#endif /* _CIPHER_H_ */ diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c new file mode 100644 index 000000000..1fb5fde7f --- /dev/null +++ b/drivers/crypto/qce/common.c @@ -0,0 +1,438 @@ +/* + * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/types.h> +#include <crypto/scatterwalk.h> +#include <crypto/sha.h> + +#include "cipher.h" +#include "common.h" +#include "core.h" +#include "regs-v5.h" +#include "sha.h" + +#define QCE_SECTOR_SIZE 512 + +static inline u32 qce_read(struct qce_device *qce, u32 offset) +{ + return readl(qce->base + offset); +} + +static inline void qce_write(struct qce_device *qce, u32 offset, u32 val) +{ + writel(val, qce->base + offset); +} + +static inline void qce_write_array(struct qce_device *qce, u32 offset, + const u32 *val, unsigned int len) +{ + int i; + + for (i = 0; i < len; i++) + qce_write(qce, offset + i * sizeof(u32), val[i]); +} + +static inline void +qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len) +{ + int i; + + for (i = 0; i < len; i++) + qce_write(qce, offset + i * sizeof(u32), 0); +} + +static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size) +{ + u32 cfg = 0; + + if (IS_AES(flags)) { + if (aes_key_size == AES_KEYSIZE_128) + cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT; + else if (aes_key_size == AES_KEYSIZE_256) + cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT; + } + + if (IS_AES(flags)) + cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT; + else if (IS_DES(flags) || IS_3DES(flags)) + cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT; + + if (IS_DES(flags)) + cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT; + + if (IS_3DES(flags)) + cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT; + + switch (flags & QCE_MODE_MASK) { + case QCE_MODE_ECB: + cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT; + break; + case QCE_MODE_CBC: + cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT; + break; + case QCE_MODE_CTR: + cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT; + break; + case QCE_MODE_XTS: + cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT; + break; + case QCE_MODE_CCM: + cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT; + cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT; + break; + default: + return ~0; + } + + return cfg; +} + +static u32 qce_auth_cfg(unsigned long flags, u32 key_size) +{ + u32 cfg = 0; + + if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags))) + cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT; + else + cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT; + + if (IS_CCM(flags) || IS_CMAC(flags)) { + if (key_size == AES_KEYSIZE_128) + cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT; + else if (key_size == AES_KEYSIZE_256) + cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT; + } + + if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) + cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT; + else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) + cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT; + else if (IS_CMAC(flags)) + cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT; + + if (IS_SHA1(flags) || IS_SHA256(flags)) + cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT; + else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) || + IS_CBC(flags) || IS_CTR(flags)) + cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT; + else if (IS_AES(flags) && IS_CCM(flags)) + cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT; + else if (IS_AES(flags) && IS_CMAC(flags)) + cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT; + + if (IS_SHA(flags) || IS_SHA_HMAC(flags)) + cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT; + + if (IS_CCM(flags)) + cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT; + + if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) || + IS_CMAC(flags)) + cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT); + + return cfg; +} + +static u32 qce_config_reg(struct qce_device *qce, int little) +{ + u32 beats = (qce->burst_size >> 3) - 1; + u32 pipe_pair = 
qce->pipe_pair_id; + u32 config; + + config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK; + config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) | + BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT); + config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK; + config &= ~HIGH_SPD_EN_N_SHIFT; + + if (little) + config |= BIT(LITTLE_ENDIAN_MODE_SHIFT); + + return config; +} + +void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len) +{ + __be32 *d = dst; + const u8 *s = src; + unsigned int n; + + n = len / sizeof(u32); + for (; n > 0; n--) { + *d = cpu_to_be32p((const __u32 *) s); + s += sizeof(__u32); + d++; + } +} + +static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize) +{ + u8 swap[QCE_AES_IV_LENGTH]; + u32 i, j; + + if (ivsize > QCE_AES_IV_LENGTH) + return; + + memset(swap, 0, QCE_AES_IV_LENGTH); + + for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1; + i < QCE_AES_IV_LENGTH; i++, j--) + swap[i] = src[j]; + + qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH); +} + +static void qce_xtskey(struct qce_device *qce, const u8 *enckey, + unsigned int enckeylen, unsigned int cryptlen) +{ + u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0}; + unsigned int xtsklen = enckeylen / (2 * sizeof(u32)); + unsigned int xtsdusize; + + qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2, + enckeylen / 2); + qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen); + + /* xts du size 512B */ + xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen); + qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize); +} + +static void qce_setup_config(struct qce_device *qce) +{ + u32 config; + + /* get big endianness */ + config = qce_config_reg(qce, 0); + + /* clear status */ + qce_write(qce, REG_STATUS, 0); + qce_write(qce, REG_CONFIG, config); +} + +static inline void qce_crypto_go(struct qce_device *qce) +{ + qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT)); +} + +static int qce_setup_regs_ahash(struct crypto_async_request *async_req, + u32 totallen, u32 offset) +{ + struct ahash_request *req = ahash_request_cast(async_req); + struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm); + struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); + struct qce_device *qce = tmpl->qce; + unsigned int digestsize = crypto_ahash_digestsize(ahash); + unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm); + __be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0}; + __be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0}; + u32 auth_cfg = 0, config; + unsigned int iv_words; + + /* if not the last, the size has to be on the block boundary */ + if (!rctx->last_blk && req->nbytes % blocksize) + return -EINVAL; + + qce_setup_config(qce); + + if (IS_CMAC(rctx->flags)) { + qce_write(qce, REG_AUTH_SEG_CFG, 0); + qce_write(qce, REG_ENCR_SEG_CFG, 0); + qce_write(qce, REG_ENCR_SEG_SIZE, 0); + qce_clear_array(qce, REG_AUTH_IV0, 16); + qce_clear_array(qce, REG_AUTH_KEY0, 16); + qce_clear_array(qce, REG_AUTH_BYTECNT0, 4); + + auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen); + } + + if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) { + u32 authkey_words = rctx->authklen / sizeof(u32); + + qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen); + qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey, + authkey_words); + } + + if (IS_CMAC(rctx->flags)) + goto go_proc; + + if (rctx->first_blk) + memcpy(auth, rctx->digest, digestsize); + else + 
qce_cpu_to_be32p_array(auth, rctx->digest, digestsize); + + iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8; + qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words); + + if (rctx->first_blk) + qce_clear_array(qce, REG_AUTH_BYTECNT0, 4); + else + qce_write_array(qce, REG_AUTH_BYTECNT0, + (u32 *)rctx->byte_count, 2); + + auth_cfg = qce_auth_cfg(rctx->flags, 0); + + if (rctx->last_blk) + auth_cfg |= BIT(AUTH_LAST_SHIFT); + else + auth_cfg &= ~BIT(AUTH_LAST_SHIFT); + + if (rctx->first_blk) + auth_cfg |= BIT(AUTH_FIRST_SHIFT); + else + auth_cfg &= ~BIT(AUTH_FIRST_SHIFT); + +go_proc: + qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg); + qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes); + qce_write(qce, REG_AUTH_SEG_START, 0); + qce_write(qce, REG_ENCR_SEG_CFG, 0); + qce_write(qce, REG_SEG_SIZE, req->nbytes); + + /* get little endianness */ + config = qce_config_reg(qce, 1); + qce_write(qce, REG_CONFIG, config); + + qce_crypto_go(qce); + + return 0; +} + +static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req, + u32 totallen, u32 offset) +{ + struct ablkcipher_request *req = ablkcipher_request_cast(async_req); + struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); + struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm); + struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); + struct qce_device *qce = tmpl->qce; + __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0}; + __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0}; + unsigned int enckey_words, enciv_words; + unsigned int keylen; + u32 encr_cfg = 0, auth_cfg = 0, config; + unsigned int ivsize = rctx->ivsize; + unsigned long flags = rctx->flags; + + qce_setup_config(qce); + + if (IS_XTS(flags)) + keylen = ctx->enc_keylen / 2; + else + keylen = ctx->enc_keylen; + + qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen); + enckey_words = keylen / sizeof(u32); + + qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg); + + encr_cfg = qce_encr_cfg(flags, keylen); + + if (IS_DES(flags)) { + enciv_words = 2; + enckey_words = 2; + } else if (IS_3DES(flags)) { + enciv_words = 2; + enckey_words = 6; + } else if (IS_AES(flags)) { + if (IS_XTS(flags)) + qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen, + rctx->cryptlen); + enciv_words = 4; + } else { + return -EINVAL; + } + + qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words); + + if (!IS_ECB(flags)) { + if (IS_XTS(flags)) + qce_xts_swapiv(enciv, rctx->iv, ivsize); + else + qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize); + + qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words); + } + + if (IS_ENCRYPT(flags)) + encr_cfg |= BIT(ENCODE_SHIFT); + + qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg); + qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen); + qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff); + + if (IS_CTR(flags)) { + qce_write(qce, REG_CNTR_MASK, ~0); + qce_write(qce, REG_CNTR_MASK0, ~0); + qce_write(qce, REG_CNTR_MASK1, ~0); + qce_write(qce, REG_CNTR_MASK2, ~0); + } + + qce_write(qce, REG_SEG_SIZE, totallen); + + /* get little endianness */ + config = qce_config_reg(qce, 1); + qce_write(qce, REG_CONFIG, config); + + qce_crypto_go(qce); + + return 0; +} + +int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, + u32 offset) +{ + switch (type) { + case CRYPTO_ALG_TYPE_ABLKCIPHER: + return qce_setup_regs_ablkcipher(async_req, totallen, offset); + case CRYPTO_ALG_TYPE_AHASH: + return qce_setup_regs_ahash(async_req, totallen, offset); + default: + return -EINVAL; + } +} + +#define STATUS_ERRORS \ + 
(BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT)) + +int qce_check_status(struct qce_device *qce, u32 *status) +{ + int ret = 0; + + *status = qce_read(qce, REG_STATUS); + + /* + * Don't use result dump status. The operation may not be complete. + * Instead, use the status we just read from device. In case, we need to + * use result_status from result dump the result_status needs to be byte + * swapped, since we set the device to little endian. + */ + if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT))) + ret = -ENXIO; + + return ret; +} + +void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step) +{ + u32 val; + + val = qce_read(qce, REG_VERSION); + *major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT; + *minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT; + *step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT; +} diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h new file mode 100644 index 000000000..a4addd4f7 --- /dev/null +++ b/drivers/crypto/qce/common.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _COMMON_H_ +#define _COMMON_H_ + +#include <linux/crypto.h> +#include <linux/types.h> +#include <crypto/aes.h> +#include <crypto/hash.h> + +/* key size in bytes */ +#define QCE_SHA_HMAC_KEY_SIZE 64 +#define QCE_MAX_CIPHER_KEY_SIZE AES_KEYSIZE_256 + +/* IV length in bytes */ +#define QCE_AES_IV_LENGTH AES_BLOCK_SIZE +/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ +#define QCE_MAX_IV_SIZE AES_BLOCK_SIZE + +/* maximum nonce bytes */ +#define QCE_MAX_NONCE 16 +#define QCE_MAX_NONCE_WORDS (QCE_MAX_NONCE / sizeof(u32)) + +/* burst size alignment requirement */ +#define QCE_MAX_ALIGN_SIZE 64 + +/* cipher algorithms */ +#define QCE_ALG_DES BIT(0) +#define QCE_ALG_3DES BIT(1) +#define QCE_ALG_AES BIT(2) + +/* hash and hmac algorithms */ +#define QCE_HASH_SHA1 BIT(3) +#define QCE_HASH_SHA256 BIT(4) +#define QCE_HASH_SHA1_HMAC BIT(5) +#define QCE_HASH_SHA256_HMAC BIT(6) +#define QCE_HASH_AES_CMAC BIT(7) + +/* cipher modes */ +#define QCE_MODE_CBC BIT(8) +#define QCE_MODE_ECB BIT(9) +#define QCE_MODE_CTR BIT(10) +#define QCE_MODE_XTS BIT(11) +#define QCE_MODE_CCM BIT(12) +#define QCE_MODE_MASK GENMASK(12, 8) + +/* cipher encryption/decryption operations */ +#define QCE_ENCRYPT BIT(13) +#define QCE_DECRYPT BIT(14) + +#define IS_DES(flags) (flags & QCE_ALG_DES) +#define IS_3DES(flags) (flags & QCE_ALG_3DES) +#define IS_AES(flags) (flags & QCE_ALG_AES) + +#define IS_SHA1(flags) (flags & QCE_HASH_SHA1) +#define IS_SHA256(flags) (flags & QCE_HASH_SHA256) +#define IS_SHA1_HMAC(flags) (flags & QCE_HASH_SHA1_HMAC) +#define IS_SHA256_HMAC(flags) (flags & QCE_HASH_SHA256_HMAC) +#define IS_CMAC(flags) (flags & QCE_HASH_AES_CMAC) +#define IS_SHA(flags) (IS_SHA1(flags) || IS_SHA256(flags)) +#define IS_SHA_HMAC(flags) \ + (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags)) + +#define IS_CBC(mode) (mode & QCE_MODE_CBC) +#define IS_ECB(mode) (mode & QCE_MODE_ECB) +#define IS_CTR(mode) (mode & QCE_MODE_CTR) 
+#define IS_XTS(mode) (mode & QCE_MODE_XTS) +#define IS_CCM(mode) (mode & QCE_MODE_CCM) + +#define IS_ENCRYPT(dir) (dir & QCE_ENCRYPT) +#define IS_DECRYPT(dir) (dir & QCE_DECRYPT) + +struct qce_alg_template { + struct list_head entry; + u32 crypto_alg_type; + unsigned long alg_flags; + const u32 *std_iv; + union { + struct crypto_alg crypto; + struct ahash_alg ahash; + } alg; + struct qce_device *qce; +}; + +void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len); +int qce_check_status(struct qce_device *qce, u32 *status); +void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step); +int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, + u32 offset); + +#endif /* _COMMON_H_ */ diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c new file mode 100644 index 000000000..718b32a31 --- /dev/null +++ b/drivers/crypto/qce/core.c @@ -0,0 +1,285 @@ +/* + * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <crypto/algapi.h> +#include <crypto/internal/hash.h> +#include <crypto/sha.h> + +#include "core.h" +#include "cipher.h" +#include "sha.h" + +#define QCE_MAJOR_VERSION5 0x05 +#define QCE_QUEUE_LENGTH 1 + +static const struct qce_algo_ops *qce_ops[] = { + &ablkcipher_ops, + &ahash_ops, +}; + +static void qce_unregister_algs(struct qce_device *qce) +{ + const struct qce_algo_ops *ops; + int i; + + for (i = 0; i < ARRAY_SIZE(qce_ops); i++) { + ops = qce_ops[i]; + ops->unregister_algs(qce); + } +} + +static int qce_register_algs(struct qce_device *qce) +{ + const struct qce_algo_ops *ops; + int i, ret = -ENODEV; + + for (i = 0; i < ARRAY_SIZE(qce_ops); i++) { + ops = qce_ops[i]; + ret = ops->register_algs(qce); + if (ret) + break; + } + + return ret; +} + +static int qce_handle_request(struct crypto_async_request *async_req) +{ + int ret = -EINVAL, i; + const struct qce_algo_ops *ops; + u32 type = crypto_tfm_alg_type(async_req->tfm); + + for (i = 0; i < ARRAY_SIZE(qce_ops); i++) { + ops = qce_ops[i]; + if (type != ops->type) + continue; + ret = ops->async_req_handle(async_req); + break; + } + + return ret; +} + +static int qce_handle_queue(struct qce_device *qce, + struct crypto_async_request *req) +{ + struct crypto_async_request *async_req, *backlog; + unsigned long flags; + int ret = 0, err; + + spin_lock_irqsave(&qce->lock, flags); + + if (req) + ret = crypto_enqueue_request(&qce->queue, req); + + /* busy, do not dequeue request */ + if (qce->req) { + spin_unlock_irqrestore(&qce->lock, flags); + return ret; + } + + backlog = crypto_get_backlog(&qce->queue); + async_req = crypto_dequeue_request(&qce->queue); + if (async_req) + qce->req = async_req; + + spin_unlock_irqrestore(&qce->lock, flags); + + if (!async_req) + return ret; + + if (backlog) { + spin_lock_bh(&qce->lock); + backlog->complete(backlog, -EINPROGRESS); + spin_unlock_bh(&qce->lock); + } + + err = 
qce_handle_request(async_req); + if (err) { + qce->result = err; + tasklet_schedule(&qce->done_tasklet); + } + + return ret; +} + +static void qce_tasklet_req_done(unsigned long data) +{ + struct qce_device *qce = (struct qce_device *)data; + struct crypto_async_request *req; + unsigned long flags; + + spin_lock_irqsave(&qce->lock, flags); + req = qce->req; + qce->req = NULL; + spin_unlock_irqrestore(&qce->lock, flags); + + if (req) + req->complete(req, qce->result); + + qce_handle_queue(qce, NULL); +} + +static int qce_async_request_enqueue(struct qce_device *qce, + struct crypto_async_request *req) +{ + return qce_handle_queue(qce, req); +} + +static void qce_async_request_done(struct qce_device *qce, int ret) +{ + qce->result = ret; + tasklet_schedule(&qce->done_tasklet); +} + +static int qce_check_version(struct qce_device *qce) +{ + u32 major, minor, step; + + qce_get_version(qce, &major, &minor, &step); + + /* + * the driver does not support v5 with minor 0 because it has special + * alignment requirements. + */ + if (major != QCE_MAJOR_VERSION5 || minor == 0) + return -ENODEV; + + qce->burst_size = QCE_BAM_BURST_SIZE; + qce->pipe_pair_id = 1; + + dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n", + major, minor, step); + + return 0; +} + +static int qce_crypto_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct qce_device *qce; + struct resource *res; + int ret; + + qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL); + if (!qce) + return -ENOMEM; + + qce->dev = dev; + platform_set_drvdata(pdev, qce); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + qce->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(qce->base)) + return PTR_ERR(qce->base); + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret < 0) + return ret; + + qce->core = devm_clk_get(qce->dev, "core"); + if (IS_ERR(qce->core)) + return PTR_ERR(qce->core); + + qce->iface = devm_clk_get(qce->dev, "iface"); + if (IS_ERR(qce->iface)) + return PTR_ERR(qce->iface); + + qce->bus = devm_clk_get(qce->dev, "bus"); + if (IS_ERR(qce->bus)) + return PTR_ERR(qce->bus); + + ret = clk_prepare_enable(qce->core); + if (ret) + return ret; + + ret = clk_prepare_enable(qce->iface); + if (ret) + goto err_clks_core; + + ret = clk_prepare_enable(qce->bus); + if (ret) + goto err_clks_iface; + + ret = qce_dma_request(qce->dev, &qce->dma); + if (ret) + goto err_clks; + + ret = qce_check_version(qce); + if (ret) + goto err_clks; + + spin_lock_init(&qce->lock); + tasklet_init(&qce->done_tasklet, qce_tasklet_req_done, + (unsigned long)qce); + crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH); + + qce->async_req_enqueue = qce_async_request_enqueue; + qce->async_req_done = qce_async_request_done; + + ret = qce_register_algs(qce); + if (ret) + goto err_dma; + + return 0; + +err_dma: + qce_dma_release(&qce->dma); +err_clks: + clk_disable_unprepare(qce->bus); +err_clks_iface: + clk_disable_unprepare(qce->iface); +err_clks_core: + clk_disable_unprepare(qce->core); + return ret; +} + +static int qce_crypto_remove(struct platform_device *pdev) +{ + struct qce_device *qce = platform_get_drvdata(pdev); + + tasklet_kill(&qce->done_tasklet); + qce_unregister_algs(qce); + qce_dma_release(&qce->dma); + clk_disable_unprepare(qce->bus); + clk_disable_unprepare(qce->iface); + clk_disable_unprepare(qce->core); + return 0; +} + +static const struct of_device_id qce_crypto_of_match[] = { + { .compatible = "qcom,crypto-v5.1", }, + {} +}; +MODULE_DEVICE_TABLE(of, qce_crypto_of_match); + +static 
struct platform_driver qce_crypto_driver = { + .probe = qce_crypto_probe, + .remove = qce_crypto_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = qce_crypto_of_match, + }, +}; +module_platform_driver(qce_crypto_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Qualcomm crypto engine driver"); +MODULE_ALIAS("platform:" KBUILD_MODNAME); +MODULE_AUTHOR("The Linux Foundation"); diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h new file mode 100644 index 000000000..549965d4d --- /dev/null +++ b/drivers/crypto/qce/core.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CORE_H_ +#define _CORE_H_ + +#include "dma.h" + +/** + * struct qce_device - crypto engine device structure + * @queue: crypto request queue + * @lock: the lock protects queue and req + * @done_tasklet: done tasklet object + * @req: current active request + * @result: result of current transform + * @base: virtual IO base + * @dev: pointer to device structure + * @core: core device clock + * @iface: interface clock + * @bus: bus clock + * @dma: pointer to dma data + * @burst_size: the crypto burst size + * @pipe_pair_id: which pipe pair id the device using + * @async_req_enqueue: invoked by every algorithm to enqueue a request + * @async_req_done: invoked by every algorithm to finish its request + */ +struct qce_device { + struct crypto_queue queue; + spinlock_t lock; + struct tasklet_struct done_tasklet; + struct crypto_async_request *req; + int result; + void __iomem *base; + struct device *dev; + struct clk *core, *iface, *bus; + struct qce_dma_data dma; + int burst_size; + unsigned int pipe_pair_id; + int (*async_req_enqueue)(struct qce_device *qce, + struct crypto_async_request *req); + void (*async_req_done)(struct qce_device *qce, int ret); +}; + +/** + * struct qce_algo_ops - algorithm operations per crypto type + * @type: should be CRYPTO_ALG_TYPE_XXX + * @register_algs: invoked by core to register the algorithms + * @unregister_algs: invoked by core to unregister the algorithms + * @async_req_handle: invoked by core to handle enqueued request + */ +struct qce_algo_ops { + u32 type; + int (*register_algs)(struct qce_device *qce); + void (*unregister_algs)(struct qce_device *qce); + int (*async_req_handle)(struct crypto_async_request *async_req); +}; + +#endif /* _CORE_H_ */ diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c new file mode 100644 index 000000000..378cb7686 --- /dev/null +++ b/drivers/crypto/qce/dma.c @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include <linux/dmaengine.h> +#include <crypto/scatterwalk.h> + +#include "dma.h" + +int qce_dma_request(struct device *dev, struct qce_dma_data *dma) +{ + int ret; + + dma->txchan = dma_request_slave_channel_reason(dev, "tx"); + if (IS_ERR(dma->txchan)) + return PTR_ERR(dma->txchan); + + dma->rxchan = dma_request_slave_channel_reason(dev, "rx"); + if (IS_ERR(dma->rxchan)) { + ret = PTR_ERR(dma->rxchan); + goto error_rx; + } + + dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ, + GFP_KERNEL); + if (!dma->result_buf) { + ret = -ENOMEM; + goto error_nomem; + } + + dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ; + + return 0; +error_nomem: + dma_release_channel(dma->rxchan); +error_rx: + dma_release_channel(dma->txchan); + return ret; +} + +void qce_dma_release(struct qce_dma_data *dma) +{ + dma_release_channel(dma->txchan); + dma_release_channel(dma->rxchan); + kfree(dma->result_buf); +} + +int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, bool chained) +{ + int err; + + if (chained) { + while (sg) { + err = dma_map_sg(dev, sg, 1, dir); + if (!err) + return -EFAULT; + sg = sg_next(sg); + } + } else { + err = dma_map_sg(dev, sg, nents, dir); + if (!err) + return -EFAULT; + } + + return nents; +} + +void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, bool chained) +{ + if (chained) + while (sg) { + dma_unmap_sg(dev, sg, 1, dir); + sg = sg_next(sg); + } + else + dma_unmap_sg(dev, sg, nents, dir); +} + +int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained) +{ + struct scatterlist *sg = sglist; + int nents = 0; + + if (chained) + *chained = false; + + while (nbytes > 0 && sg) { + nents++; + nbytes -= sg->length; + if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained) + *chained = true; + sg = sg_next(sg); + } + + return nents; +} + +struct scatterlist * +qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl) +{ + struct scatterlist *sg = sgt->sgl, *sg_last = NULL; + + while (sg) { + if (!sg_page(sg)) + break; + sg = sg_next(sg); + } + + if (!sg) + return ERR_PTR(-EINVAL); + + while (new_sgl && sg) { + sg_set_page(sg, sg_page(new_sgl), new_sgl->length, + new_sgl->offset); + sg_last = sg; + sg = sg_next(sg); + new_sgl = sg_next(new_sgl); + } + + return sg_last; +} + +static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg, + int nents, unsigned long flags, + enum dma_transfer_direction dir, + dma_async_tx_callback cb, void *cb_param) +{ + struct dma_async_tx_descriptor *desc; + dma_cookie_t cookie; + + if (!sg || !nents) + return -EINVAL; + + desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags); + if (!desc) + return -EINVAL; + + desc->callback = cb; + desc->callback_param = cb_param; + cookie = dmaengine_submit(desc); + + return dma_submit_error(cookie); +} + +int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg, + int rx_nents, struct scatterlist *tx_sg, int tx_nents, + dma_async_tx_callback cb, void *cb_param) +{ + struct dma_chan *rxchan = dma->rxchan; + struct dma_chan *txchan = dma->txchan; + unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK; + int ret; + + ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV, + NULL, NULL); + if (ret) + return ret; + + return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM, + cb, cb_param); +} + +void qce_dma_issue_pending(struct qce_dma_data *dma) +{ + 
dma_async_issue_pending(dma->rxchan); + dma_async_issue_pending(dma->txchan); +} + +int qce_dma_terminate_all(struct qce_dma_data *dma) +{ + int ret; + + ret = dmaengine_terminate_all(dma->rxchan); + return ret ?: dmaengine_terminate_all(dma->txchan); +} diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h new file mode 100644 index 000000000..65bedb81d --- /dev/null +++ b/drivers/crypto/qce/dma.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DMA_H_ +#define _DMA_H_ + +#include <linux/dmaengine.h> + +/* maximum data transfer block size between BAM and CE */ +#define QCE_BAM_BURST_SIZE 64 + +#define QCE_AUTHIV_REGS_CNT 16 +#define QCE_AUTH_BYTECOUNT_REGS_CNT 4 +#define QCE_CNTRIV_REGS_CNT 4 + +struct qce_result_dump { + u32 auth_iv[QCE_AUTHIV_REGS_CNT]; + u32 auth_byte_count[QCE_AUTH_BYTECOUNT_REGS_CNT]; + u32 encr_cntr_iv[QCE_CNTRIV_REGS_CNT]; + u32 status; + u32 status2; +}; + +#define QCE_IGNORE_BUF_SZ (2 * QCE_BAM_BURST_SIZE) +#define QCE_RESULT_BUF_SZ \ + ALIGN(sizeof(struct qce_result_dump), QCE_BAM_BURST_SIZE) + +struct qce_dma_data { + struct dma_chan *txchan; + struct dma_chan *rxchan; + struct qce_result_dump *result_buf; + void *ignore_buf; +}; + +int qce_dma_request(struct device *dev, struct qce_dma_data *dma); +void qce_dma_release(struct qce_dma_data *dma); +int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in, + int in_ents, struct scatterlist *sg_out, int out_ents, + dma_async_tx_callback cb, void *cb_param); +void qce_dma_issue_pending(struct qce_dma_data *dma); +int qce_dma_terminate_all(struct qce_dma_data *dma); +int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained); +void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, bool chained); +int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, bool chained); +struct scatterlist * +qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add); + +#endif /* _DMA_H_ */ diff --git a/drivers/crypto/qce/regs-v5.h b/drivers/crypto/qce/regs-v5.h new file mode 100644 index 000000000..f0e19e356 --- /dev/null +++ b/drivers/crypto/qce/regs-v5.h @@ -0,0 +1,334 @@ +/* + * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _REGS_V5_H_ +#define _REGS_V5_H_ + +#include <linux/bitops.h> + +#define REG_VERSION 0x000 +#define REG_STATUS 0x100 +#define REG_STATUS2 0x104 +#define REG_ENGINES_AVAIL 0x108 +#define REG_FIFO_SIZES 0x10c +#define REG_SEG_SIZE 0x110 +#define REG_GOPROC 0x120 +#define REG_ENCR_SEG_CFG 0x200 +#define REG_ENCR_SEG_SIZE 0x204 +#define REG_ENCR_SEG_START 0x208 +#define REG_CNTR0_IV0 0x20c +#define REG_CNTR1_IV1 0x210 +#define REG_CNTR2_IV2 0x214 +#define REG_CNTR3_IV3 0x218 +#define REG_CNTR_MASK 0x21C +#define REG_ENCR_CCM_INT_CNTR0 0x220 +#define REG_ENCR_CCM_INT_CNTR1 0x224 +#define REG_ENCR_CCM_INT_CNTR2 0x228 +#define REG_ENCR_CCM_INT_CNTR3 0x22c +#define REG_ENCR_XTS_DU_SIZE 0x230 +#define REG_CNTR_MASK2 0x234 +#define REG_CNTR_MASK1 0x238 +#define REG_CNTR_MASK0 0x23c +#define REG_AUTH_SEG_CFG 0x300 +#define REG_AUTH_SEG_SIZE 0x304 +#define REG_AUTH_SEG_START 0x308 +#define REG_AUTH_IV0 0x310 +#define REG_AUTH_IV1 0x314 +#define REG_AUTH_IV2 0x318 +#define REG_AUTH_IV3 0x31c +#define REG_AUTH_IV4 0x320 +#define REG_AUTH_IV5 0x324 +#define REG_AUTH_IV6 0x328 +#define REG_AUTH_IV7 0x32c +#define REG_AUTH_IV8 0x330 +#define REG_AUTH_IV9 0x334 +#define REG_AUTH_IV10 0x338 +#define REG_AUTH_IV11 0x33c +#define REG_AUTH_IV12 0x340 +#define REG_AUTH_IV13 0x344 +#define REG_AUTH_IV14 0x348 +#define REG_AUTH_IV15 0x34c +#define REG_AUTH_INFO_NONCE0 0x350 +#define REG_AUTH_INFO_NONCE1 0x354 +#define REG_AUTH_INFO_NONCE2 0x358 +#define REG_AUTH_INFO_NONCE3 0x35c +#define REG_AUTH_BYTECNT0 0x390 +#define REG_AUTH_BYTECNT1 0x394 +#define REG_AUTH_BYTECNT2 0x398 +#define REG_AUTH_BYTECNT3 0x39c +#define REG_AUTH_EXP_MAC0 0x3a0 +#define REG_AUTH_EXP_MAC1 0x3a4 +#define REG_AUTH_EXP_MAC2 0x3a8 +#define REG_AUTH_EXP_MAC3 0x3ac +#define REG_AUTH_EXP_MAC4 0x3b0 +#define REG_AUTH_EXP_MAC5 0x3b4 +#define REG_AUTH_EXP_MAC6 0x3b8 +#define REG_AUTH_EXP_MAC7 0x3bc +#define REG_CONFIG 0x400 +#define REG_GOPROC_QC_KEY 0x1000 +#define REG_GOPROC_OEM_KEY 0x2000 +#define REG_ENCR_KEY0 0x3000 +#define REG_ENCR_KEY1 0x3004 +#define REG_ENCR_KEY2 0x3008 +#define REG_ENCR_KEY3 0x300c +#define REG_ENCR_KEY4 0x3010 +#define REG_ENCR_KEY5 0x3014 +#define REG_ENCR_KEY6 0x3018 +#define REG_ENCR_KEY7 0x301c +#define REG_ENCR_XTS_KEY0 0x3020 +#define REG_ENCR_XTS_KEY1 0x3024 +#define REG_ENCR_XTS_KEY2 0x3028 +#define REG_ENCR_XTS_KEY3 0x302c +#define REG_ENCR_XTS_KEY4 0x3030 +#define REG_ENCR_XTS_KEY5 0x3034 +#define REG_ENCR_XTS_KEY6 0x3038 +#define REG_ENCR_XTS_KEY7 0x303c +#define REG_AUTH_KEY0 0x3040 +#define REG_AUTH_KEY1 0x3044 +#define REG_AUTH_KEY2 0x3048 +#define REG_AUTH_KEY3 0x304c +#define REG_AUTH_KEY4 0x3050 +#define REG_AUTH_KEY5 0x3054 +#define REG_AUTH_KEY6 0x3058 +#define REG_AUTH_KEY7 0x305c +#define REG_AUTH_KEY8 0x3060 +#define REG_AUTH_KEY9 0x3064 +#define REG_AUTH_KEY10 0x3068 +#define REG_AUTH_KEY11 0x306c +#define REG_AUTH_KEY12 0x3070 +#define REG_AUTH_KEY13 0x3074 +#define REG_AUTH_KEY14 0x3078 +#define REG_AUTH_KEY15 0x307c + +/* Register bits - REG_VERSION */ +#define CORE_STEP_REV_SHIFT 0 +#define CORE_STEP_REV_MASK GENMASK(15, 0) +#define CORE_MINOR_REV_SHIFT 16 +#define CORE_MINOR_REV_MASK GENMASK(23, 16) +#define CORE_MAJOR_REV_SHIFT 24 +#define CORE_MAJOR_REV_MASK GENMASK(31, 24) + +/* Register bits - REG_STATUS */ +#define MAC_FAILED_SHIFT 31 +#define DOUT_SIZE_AVAIL_SHIFT 26 +#define DOUT_SIZE_AVAIL_MASK GENMASK(30, 26) +#define DIN_SIZE_AVAIL_SHIFT 21 +#define DIN_SIZE_AVAIL_MASK GENMASK(25, 21) +#define HSD_ERR_SHIFT 20 +#define ACCESS_VIOL_SHIFT 19 +#define 
PIPE_ACTIVE_ERR_SHIFT 18 +#define CFG_CHNG_ERR_SHIFT 17 +#define DOUT_ERR_SHIFT 16 +#define DIN_ERR_SHIFT 15 +#define AXI_ERR_SHIFT 14 +#define CRYPTO_STATE_SHIFT 10 +#define CRYPTO_STATE_MASK GENMASK(13, 10) +#define ENCR_BUSY_SHIFT 9 +#define AUTH_BUSY_SHIFT 8 +#define DOUT_INTR_SHIFT 7 +#define DIN_INTR_SHIFT 6 +#define OP_DONE_INTR_SHIFT 5 +#define ERR_INTR_SHIFT 4 +#define DOUT_RDY_SHIFT 3 +#define DIN_RDY_SHIFT 2 +#define OPERATION_DONE_SHIFT 1 +#define SW_ERR_SHIFT 0 + +/* Register bits - REG_STATUS2 */ +#define AXI_EXTRA_SHIFT 1 +#define LOCKED_SHIFT 2 + +/* Register bits - REG_CONFIG */ +#define REQ_SIZE_SHIFT 17 +#define REQ_SIZE_MASK GENMASK(20, 17) +#define REQ_SIZE_ENUM_1_BEAT 0 +#define REQ_SIZE_ENUM_2_BEAT 1 +#define REQ_SIZE_ENUM_3_BEAT 2 +#define REQ_SIZE_ENUM_4_BEAT 3 +#define REQ_SIZE_ENUM_5_BEAT 4 +#define REQ_SIZE_ENUM_6_BEAT 5 +#define REQ_SIZE_ENUM_7_BEAT 6 +#define REQ_SIZE_ENUM_8_BEAT 7 +#define REQ_SIZE_ENUM_9_BEAT 8 +#define REQ_SIZE_ENUM_10_BEAT 9 +#define REQ_SIZE_ENUM_11_BEAT 10 +#define REQ_SIZE_ENUM_12_BEAT 11 +#define REQ_SIZE_ENUM_13_BEAT 12 +#define REQ_SIZE_ENUM_14_BEAT 13 +#define REQ_SIZE_ENUM_15_BEAT 14 +#define REQ_SIZE_ENUM_16_BEAT 15 + +#define MAX_QUEUED_REQ_SHIFT 14 +#define MAX_QUEUED_REQ_MASK GENMASK(24, 16) +#define ENUM_1_QUEUED_REQS 0 +#define ENUM_2_QUEUED_REQS 1 +#define ENUM_3_QUEUED_REQS 2 + +#define IRQ_ENABLES_SHIFT 10 +#define IRQ_ENABLES_MASK GENMASK(13, 10) + +#define LITTLE_ENDIAN_MODE_SHIFT 9 +#define PIPE_SET_SELECT_SHIFT 5 +#define PIPE_SET_SELECT_MASK GENMASK(8, 5) + +#define HIGH_SPD_EN_N_SHIFT 4 +#define MASK_DOUT_INTR_SHIFT 3 +#define MASK_DIN_INTR_SHIFT 2 +#define MASK_OP_DONE_INTR_SHIFT 1 +#define MASK_ERR_INTR_SHIFT 0 + +/* Register bits - REG_AUTH_SEG_CFG */ +#define COMP_EXP_MAC_SHIFT 24 +#define COMP_EXP_MAC_DISABLED 0 +#define COMP_EXP_MAC_ENABLED 1 + +#define F9_DIRECTION_SHIFT 23 +#define F9_DIRECTION_UPLINK 0 +#define F9_DIRECTION_DOWNLINK 1 + +#define AUTH_NONCE_NUM_WORDS_SHIFT 20 +#define AUTH_NONCE_NUM_WORDS_MASK GENMASK(22, 20) + +#define USE_PIPE_KEY_AUTH_SHIFT 19 +#define USE_HW_KEY_AUTH_SHIFT 18 +#define AUTH_FIRST_SHIFT 17 +#define AUTH_LAST_SHIFT 16 + +#define AUTH_POS_SHIFT 14 +#define AUTH_POS_MASK GENMASK(15, 14) +#define AUTH_POS_BEFORE 0 +#define AUTH_POS_AFTER 1 + +#define AUTH_SIZE_SHIFT 9 +#define AUTH_SIZE_MASK GENMASK(13, 9) +#define AUTH_SIZE_SHA1 0 +#define AUTH_SIZE_SHA256 1 +#define AUTH_SIZE_ENUM_1_BYTES 0 +#define AUTH_SIZE_ENUM_2_BYTES 1 +#define AUTH_SIZE_ENUM_3_BYTES 2 +#define AUTH_SIZE_ENUM_4_BYTES 3 +#define AUTH_SIZE_ENUM_5_BYTES 4 +#define AUTH_SIZE_ENUM_6_BYTES 5 +#define AUTH_SIZE_ENUM_7_BYTES 6 +#define AUTH_SIZE_ENUM_8_BYTES 7 +#define AUTH_SIZE_ENUM_9_BYTES 8 +#define AUTH_SIZE_ENUM_10_BYTES 9 +#define AUTH_SIZE_ENUM_11_BYTES 10 +#define AUTH_SIZE_ENUM_12_BYTES 11 +#define AUTH_SIZE_ENUM_13_BYTES 12 +#define AUTH_SIZE_ENUM_14_BYTES 13 +#define AUTH_SIZE_ENUM_15_BYTES 14 +#define AUTH_SIZE_ENUM_16_BYTES 15 + +#define AUTH_MODE_SHIFT 6 +#define AUTH_MODE_MASK GENMASK(8, 6) +#define AUTH_MODE_HASH 0 +#define AUTH_MODE_HMAC 1 +#define AUTH_MODE_CCM 0 +#define AUTH_MODE_CMAC 1 + +#define AUTH_KEY_SIZE_SHIFT 3 +#define AUTH_KEY_SIZE_MASK GENMASK(5, 3) +#define AUTH_KEY_SZ_AES128 0 +#define AUTH_KEY_SZ_AES256 2 + +#define AUTH_ALG_SHIFT 0 +#define AUTH_ALG_MASK GENMASK(2, 0) +#define AUTH_ALG_NONE 0 +#define AUTH_ALG_SHA 1 +#define AUTH_ALG_AES 2 +#define AUTH_ALG_KASUMI 3 +#define AUTH_ALG_SNOW3G 4 +#define AUTH_ALG_ZUC 5 + +/* Register bits - REG_ENCR_XTS_DU_SIZE */ +#define 
ENCR_XTS_DU_SIZE_SHIFT 0 +#define ENCR_XTS_DU_SIZE_MASK GENMASK(19, 0) + +/* Register bits - REG_ENCR_SEG_CFG */ +#define F8_KEYSTREAM_ENABLE_SHIFT 17 +#define F8_KEYSTREAM_DISABLED 0 +#define F8_KEYSTREAM_ENABLED 1 + +#define F8_DIRECTION_SHIFT 16 +#define F8_DIRECTION_UPLINK 0 +#define F8_DIRECTION_DOWNLINK 1 + +#define USE_PIPE_KEY_ENCR_SHIFT 15 +#define USE_PIPE_KEY_ENCR_ENABLED 1 +#define USE_KEY_REGISTERS 0 + +#define USE_HW_KEY_ENCR_SHIFT 14 +#define USE_KEY_REG 0 +#define USE_HW_KEY 1 + +#define LAST_CCM_SHIFT 13 +#define LAST_CCM_XFR 1 +#define INTERM_CCM_XFR 0 + +#define CNTR_ALG_SHIFT 11 +#define CNTR_ALG_MASK GENMASK(12, 11) +#define CNTR_ALG_NIST 0 + +#define ENCODE_SHIFT 10 + +#define ENCR_MODE_SHIFT 6 +#define ENCR_MODE_MASK GENMASK(9, 6) +#define ENCR_MODE_ECB 0 +#define ENCR_MODE_CBC 1 +#define ENCR_MODE_CTR 2 +#define ENCR_MODE_XTS 3 +#define ENCR_MODE_CCM 4 + +#define ENCR_KEY_SZ_SHIFT 3 +#define ENCR_KEY_SZ_MASK GENMASK(5, 3) +#define ENCR_KEY_SZ_DES 0 +#define ENCR_KEY_SZ_3DES 1 +#define ENCR_KEY_SZ_AES128 0 +#define ENCR_KEY_SZ_AES256 2 + +#define ENCR_ALG_SHIFT 0 +#define ENCR_ALG_MASK GENMASK(2, 0) +#define ENCR_ALG_NONE 0 +#define ENCR_ALG_DES 1 +#define ENCR_ALG_AES 2 +#define ENCR_ALG_KASUMI 4 +#define ENCR_ALG_SNOW_3G 5 +#define ENCR_ALG_ZUC 6 + +/* Register bits - REG_GOPROC */ +#define GO_SHIFT 0 +#define CLR_CNTXT_SHIFT 1 +#define RESULTS_DUMP_SHIFT 2 + +/* Register bits - REG_ENGINES_AVAIL */ +#define ENCR_AES_SEL_SHIFT 0 +#define DES_SEL_SHIFT 1 +#define ENCR_SNOW3G_SEL_SHIFT 2 +#define ENCR_KASUMI_SEL_SHIFT 3 +#define SHA_SEL_SHIFT 4 +#define SHA512_SEL_SHIFT 5 +#define AUTH_AES_SEL_SHIFT 6 +#define AUTH_SNOW3G_SEL_SHIFT 7 +#define AUTH_KASUMI_SEL_SHIFT 8 +#define BAM_PIPE_SETS_SHIFT 9 +#define BAM_PIPE_SETS_MASK GENMASK(12, 9) +#define AXI_WR_BEATS_SHIFT 13 +#define AXI_WR_BEATS_MASK GENMASK(18, 13) +#define AXI_RD_BEATS_SHIFT 19 +#define AXI_RD_BEATS_MASK GENMASK(24, 19) +#define ENCR_ZUC_SEL_SHIFT 26 +#define AUTH_ZUC_SEL_SHIFT 27 +#define ZUC_ENABLE_SHIFT 28 + +#endif /* _REGS_V5_H_ */ diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c new file mode 100644 index 000000000..5c5df1d17 --- /dev/null +++ b/drivers/crypto/qce/sha.c @@ -0,0 +1,588 @@ +/* + * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/device.h> +#include <linux/interrupt.h> +#include <crypto/internal/hash.h> + +#include "common.h" +#include "core.h" +#include "sha.h" + +/* crypto hw padding constant for first operation */ +#define SHA_PADDING 64 +#define SHA_PADDING_MASK (SHA_PADDING - 1) + +static LIST_HEAD(ahash_algs); + +static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = { + SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0 +}; + +static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = { + SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, + SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 +}; + +static void qce_ahash_done(void *data) +{ + struct crypto_async_request *async_req = data; + struct ahash_request *req = ahash_request_cast(async_req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); + struct qce_device *qce = tmpl->qce; + struct qce_result_dump *result = qce->dma.result_buf; + unsigned int digestsize = crypto_ahash_digestsize(ahash); + int error; + u32 status; + + error = qce_dma_terminate_all(&qce->dma); + if (error) + dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error); + + qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, + rctx->src_chained); + qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); + + memcpy(rctx->digest, result->auth_iv, digestsize); + if (req->result) + memcpy(req->result, result->auth_iv, digestsize); + + rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]); + rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]); + + error = qce_check_status(qce, &status); + if (error < 0) + dev_dbg(qce->dev, "ahash operation error (%x)\n", status); + + req->src = rctx->src_orig; + req->nbytes = rctx->nbytes_orig; + rctx->last_blk = false; + rctx->first_blk = false; + + qce->async_req_done(tmpl->qce, error); +} + +static int qce_ahash_async_req_handle(struct crypto_async_request *async_req) +{ + struct ahash_request *req = ahash_request_cast(async_req); + struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm); + struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); + struct qce_device *qce = tmpl->qce; + unsigned long flags = rctx->flags; + int ret; + + if (IS_SHA_HMAC(flags)) { + rctx->authkey = ctx->authkey; + rctx->authklen = QCE_SHA_HMAC_KEY_SIZE; + } else if (IS_CMAC(flags)) { + rctx->authkey = ctx->authkey; + rctx->authklen = AES_KEYSIZE_128; + } + + rctx->src_nents = qce_countsg(req->src, req->nbytes, + &rctx->src_chained); + ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, + rctx->src_chained); + if (ret < 0) + return ret; + + sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); + + ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); + if (ret < 0) + goto error_unmap_src; + + ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents, + &rctx->result_sg, 1, qce_ahash_done, async_req); + if (ret) + goto error_unmap_dst; + + qce_dma_issue_pending(&qce->dma); + + ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0); + if (ret) + goto error_terminate; + + return 0; + +error_terminate: + qce_dma_terminate_all(&qce->dma); +error_unmap_dst: + qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); +error_unmap_src: + qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, + rctx->src_chained); + return ret; +} + +static int 
qce_ahash_init(struct ahash_request *req)
+{
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+ const u32 *std_iv = tmpl->std_iv;
+
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->first_blk = true;
+ rctx->last_blk = false;
+ rctx->flags = tmpl->alg_flags;
+ memcpy(rctx->digest, std_iv, sizeof(rctx->digest));
+
+ return 0;
+}
+
+static int qce_ahash_export(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ unsigned long flags = rctx->flags;
+ unsigned int digestsize = crypto_ahash_digestsize(ahash);
+ unsigned int blocksize =
+ crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
+
+ if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
+ struct sha1_state *out_state = out;
+
+ out_state->count = rctx->count;
+ qce_cpu_to_be32p_array((__be32 *)out_state->state,
+ rctx->digest, digestsize);
+ memcpy(out_state->buffer, rctx->buf, blocksize);
+ } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
+ struct sha256_state *out_state = out;
+
+ out_state->count = rctx->count;
+ qce_cpu_to_be32p_array((__be32 *)out_state->state,
+ rctx->digest, digestsize);
+ memcpy(out_state->buf, rctx->buf, blocksize);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qce_import_common(struct ahash_request *req, u64 in_count,
+ const u32 *state, const u8 *buffer, bool hmac)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ unsigned int digestsize = crypto_ahash_digestsize(ahash);
+ unsigned int blocksize;
+ u64 count = in_count;
+
+ blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
+ rctx->count = in_count;
+ memcpy(rctx->buf, buffer, blocksize);
+
+ if (in_count <= blocksize) {
+ rctx->first_blk = 1;
+ } else {
+ rctx->first_blk = 0;
+ /*
+ * For HMAC, hardware padding is applied when the first block
+ * flag is set, so the byte_count must be incremented by 64
+ * after the first block operation. 
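+ *
+ * Worked example (illustrative values only): importing an HMAC state
+ * with in_count == 192, i.e. three 64-byte blocks already consumed,
+ * gives count == 256 after the adjustment below, so byte_count[0] is
+ * programmed with 256 & ~SHA_PADDING_MASK == 256 and byte_count[1]
+ * with 0.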
+ */
+ if (hmac)
+ count += SHA_PADDING;
+ }
+
+ rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
+ rctx->byte_count[1] = (__force __be32)(count >> 32);
+ qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
+ digestsize);
+ rctx->buflen = (unsigned int)(in_count & (blocksize - 1));
+
+ return 0;
+}
+
+static int qce_ahash_import(struct ahash_request *req, const void *in)
+{
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ unsigned long flags = rctx->flags;
+ bool hmac = IS_SHA_HMAC(flags);
+ int ret = -EINVAL;
+
+ if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
+ const struct sha1_state *state = in;
+
+ ret = qce_import_common(req, state->count, state->state,
+ state->buffer, hmac);
+ } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
+ const struct sha256_state *state = in;
+
+ ret = qce_import_common(req, state->count, state->state,
+ state->buf, hmac);
+ }
+
+ return ret;
+}
+
+static int qce_ahash_update(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+ struct qce_device *qce = tmpl->qce;
+ struct scatterlist *sg_last, *sg;
+ unsigned int total, len;
+ unsigned int hash_later;
+ unsigned int nbytes;
+ unsigned int blocksize;
+
+ blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ rctx->count += req->nbytes;
+
+ /* check for buffer from previous updates and append it */
+ total = req->nbytes + rctx->buflen;
+
+ if (total <= blocksize) {
+ scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
+ 0, req->nbytes, 0);
+ rctx->buflen += req->nbytes;
+ return 0;
+ }
+
+ /* save the original req structure fields */
+ rctx->src_orig = req->src;
+ rctx->nbytes_orig = req->nbytes;
+
+ /*
+ * If there is buffered data from a previous update, copy it into the
+ * temporary buffer; it will be combined with the bytes of the current
+ * request. 
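+ *
+ * Worked example (illustrative values only): with 20 bytes carried
+ * over in buf and req->nbytes == 100 against a 64-byte block size,
+ * total == 120 and hash_later == 120 % 64 == 56, so 64 bytes are
+ * hashed now and the trailing 56 bytes of the request are saved in
+ * buf for a later update or final call.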
+ */ + if (rctx->buflen) + memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen); + + /* calculate how many bytes will be hashed later */ + hash_later = total % blocksize; + if (hash_later) { + unsigned int src_offset = req->nbytes - hash_later; + scatterwalk_map_and_copy(rctx->buf, req->src, src_offset, + hash_later, 0); + } + + /* here nbytes is multiple of blocksize */ + nbytes = total - hash_later; + + len = rctx->buflen; + sg = sg_last = req->src; + + while (len < nbytes && sg) { + if (len + sg_dma_len(sg) > nbytes) + break; + len += sg_dma_len(sg); + sg_last = sg; + sg = sg_next(sg); + } + + if (!sg_last) + return -EINVAL; + + sg_mark_end(sg_last); + + if (rctx->buflen) { + sg_init_table(rctx->sg, 2); + sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen); + scatterwalk_sg_chain(rctx->sg, 2, req->src); + req->src = rctx->sg; + } + + req->nbytes = nbytes; + rctx->buflen = hash_later; + + return qce->async_req_enqueue(tmpl->qce, &req->base); +} + +static int qce_ahash_final(struct ahash_request *req) +{ + struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); + struct qce_device *qce = tmpl->qce; + + if (!rctx->buflen) + return 0; + + rctx->last_blk = true; + + rctx->src_orig = req->src; + rctx->nbytes_orig = req->nbytes; + + memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen); + sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen); + + req->src = rctx->sg; + req->nbytes = rctx->buflen; + + return qce->async_req_enqueue(tmpl->qce, &req->base); +} + +static int qce_ahash_digest(struct ahash_request *req) +{ + struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); + struct qce_device *qce = tmpl->qce; + int ret; + + ret = qce_ahash_init(req); + if (ret) + return ret; + + rctx->src_orig = req->src; + rctx->nbytes_orig = req->nbytes; + rctx->first_blk = true; + rctx->last_blk = true; + + return qce->async_req_enqueue(tmpl->qce, &req->base); +} + +struct qce_ahash_result { + struct completion completion; + int error; +}; + +static void qce_digest_complete(struct crypto_async_request *req, int error) +{ + struct qce_ahash_result *result = req->data; + + if (error == -EINPROGRESS) + return; + + result->error = error; + complete(&result->completion); +} + +static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + unsigned int digestsize = crypto_ahash_digestsize(tfm); + struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base); + struct qce_ahash_result result; + struct ahash_request *req; + struct scatterlist sg; + unsigned int blocksize; + struct crypto_ahash *ahash_tfm; + u8 *buf; + int ret; + const char *alg_name; + + blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); + memset(ctx->authkey, 0, sizeof(ctx->authkey)); + + if (keylen <= blocksize) { + memcpy(ctx->authkey, key, keylen); + return 0; + } + + if (digestsize == SHA1_DIGEST_SIZE) + alg_name = "sha1-qce"; + else if (digestsize == SHA256_DIGEST_SIZE) + alg_name = "sha256-qce"; + else + return -EINVAL; + + ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH, + CRYPTO_ALG_TYPE_AHASH_MASK); + if (IS_ERR(ahash_tfm)) + return PTR_ERR(ahash_tfm); + + req = ahash_request_alloc(ahash_tfm, GFP_KERNEL); + if (!req) { + ret = -ENOMEM; + goto err_free_ahash; + } + + init_completion(&result.completion); + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + qce_digest_complete, &result); + crypto_ahash_clear_flags(ahash_tfm, ~0); + + buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, 
GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto err_free_req; + } + + memcpy(buf, key, keylen); + sg_init_one(&sg, buf, keylen); + ahash_request_set_crypt(req, &sg, ctx->authkey, keylen); + + ret = crypto_ahash_digest(req); + if (ret == -EINPROGRESS || ret == -EBUSY) { + ret = wait_for_completion_interruptible(&result.completion); + if (!ret) + ret = result.error; + } + + if (ret) + crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + + kfree(buf); +err_free_req: + ahash_request_free(req); +err_free_ahash: + crypto_free_ahash(ahash_tfm); + return ret; +} + +static int qce_ahash_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx)); + memset(ctx, 0, sizeof(*ctx)); + return 0; +} + +struct qce_ahash_def { + unsigned long flags; + const char *name; + const char *drv_name; + unsigned int digestsize; + unsigned int blocksize; + unsigned int statesize; + const u32 *std_iv; +}; + +static const struct qce_ahash_def ahash_def[] = { + { + .flags = QCE_HASH_SHA1, + .name = "sha1", + .drv_name = "sha1-qce", + .digestsize = SHA1_DIGEST_SIZE, + .blocksize = SHA1_BLOCK_SIZE, + .statesize = sizeof(struct sha1_state), + .std_iv = std_iv_sha1, + }, + { + .flags = QCE_HASH_SHA256, + .name = "sha256", + .drv_name = "sha256-qce", + .digestsize = SHA256_DIGEST_SIZE, + .blocksize = SHA256_BLOCK_SIZE, + .statesize = sizeof(struct sha256_state), + .std_iv = std_iv_sha256, + }, + { + .flags = QCE_HASH_SHA1_HMAC, + .name = "hmac(sha1)", + .drv_name = "hmac-sha1-qce", + .digestsize = SHA1_DIGEST_SIZE, + .blocksize = SHA1_BLOCK_SIZE, + .statesize = sizeof(struct sha1_state), + .std_iv = std_iv_sha1, + }, + { + .flags = QCE_HASH_SHA256_HMAC, + .name = "hmac(sha256)", + .drv_name = "hmac-sha256-qce", + .digestsize = SHA256_DIGEST_SIZE, + .blocksize = SHA256_BLOCK_SIZE, + .statesize = sizeof(struct sha256_state), + .std_iv = std_iv_sha256, + }, +}; + +static int qce_ahash_register_one(const struct qce_ahash_def *def, + struct qce_device *qce) +{ + struct qce_alg_template *tmpl; + struct ahash_alg *alg; + struct crypto_alg *base; + int ret; + + tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); + if (!tmpl) + return -ENOMEM; + + tmpl->std_iv = def->std_iv; + + alg = &tmpl->alg.ahash; + alg->init = qce_ahash_init; + alg->update = qce_ahash_update; + alg->final = qce_ahash_final; + alg->digest = qce_ahash_digest; + alg->export = qce_ahash_export; + alg->import = qce_ahash_import; + if (IS_SHA_HMAC(def->flags)) + alg->setkey = qce_ahash_hmac_setkey; + alg->halg.digestsize = def->digestsize; + alg->halg.statesize = def->statesize; + + base = &alg->halg.base; + base->cra_blocksize = def->blocksize; + base->cra_priority = 300; + base->cra_flags = CRYPTO_ALG_ASYNC; + base->cra_ctxsize = sizeof(struct qce_sha_ctx); + base->cra_alignmask = 0; + base->cra_module = THIS_MODULE; + base->cra_init = qce_ahash_cra_init; + INIT_LIST_HEAD(&base->cra_list); + + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->drv_name); + + INIT_LIST_HEAD(&tmpl->entry); + tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH; + tmpl->alg_flags = def->flags; + tmpl->qce = qce; + + ret = crypto_register_ahash(alg); + if (ret) { + kfree(tmpl); + dev_err(qce->dev, "%s registration failed\n", base->cra_name); + return ret; + } + + list_add_tail(&tmpl->entry, &ahash_algs); + dev_dbg(qce->dev, "%s is registered\n", base->cra_name); + 
return 0; +} + +static void qce_ahash_unregister(struct qce_device *qce) +{ + struct qce_alg_template *tmpl, *n; + + list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) { + crypto_unregister_ahash(&tmpl->alg.ahash); + list_del(&tmpl->entry); + kfree(tmpl); + } +} + +static int qce_ahash_register(struct qce_device *qce) +{ + int ret, i; + + for (i = 0; i < ARRAY_SIZE(ahash_def); i++) { + ret = qce_ahash_register_one(&ahash_def[i], qce); + if (ret) + goto err; + } + + return 0; +err: + qce_ahash_unregister(qce); + return ret; +} + +const struct qce_algo_ops ahash_ops = { + .type = CRYPTO_ALG_TYPE_AHASH, + .register_algs = qce_ahash_register, + .unregister_algs = qce_ahash_unregister, + .async_req_handle = qce_ahash_async_req_handle, +}; diff --git a/drivers/crypto/qce/sha.h b/drivers/crypto/qce/sha.h new file mode 100644 index 000000000..286f0d539 --- /dev/null +++ b/drivers/crypto/qce/sha.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _SHA_H_ +#define _SHA_H_ + +#include <crypto/scatterwalk.h> +#include <crypto/sha.h> + +#include "common.h" +#include "core.h" + +#define QCE_SHA_MAX_BLOCKSIZE SHA256_BLOCK_SIZE +#define QCE_SHA_MAX_DIGESTSIZE SHA256_DIGEST_SIZE + +struct qce_sha_ctx { + u8 authkey[QCE_SHA_MAX_BLOCKSIZE]; +}; + +/** + * struct qce_sha_reqctx - holds private ahash objects per request + * @buf: used during update, import and export + * @tmpbuf: buffer for internal use + * @digest: calculated digest buffer + * @buflen: length of the buffer + * @flags: operation flags + * @src_orig: original request sg list + * @nbytes_orig: original request number of bytes + * @src_chained: is source scatterlist chained + * @src_nents: source number of entries + * @byte_count: byte count + * @count: save count in states during update, import and export + * @first_blk: is it the first block + * @last_blk: is it the last block + * @sg: used to chain sg lists + * @authkey: pointer to auth key in sha ctx + * @authklen: auth key length + * @result_sg: scatterlist used for result buffer + */ +struct qce_sha_reqctx { + u8 buf[QCE_SHA_MAX_BLOCKSIZE]; + u8 tmpbuf[QCE_SHA_MAX_BLOCKSIZE]; + u8 digest[QCE_SHA_MAX_DIGESTSIZE]; + unsigned int buflen; + unsigned long flags; + struct scatterlist *src_orig; + unsigned int nbytes_orig; + bool src_chained; + int src_nents; + __be32 byte_count[2]; + u64 count; + bool first_blk; + bool last_blk; + struct scatterlist sg[2]; + u8 *authkey; + unsigned int authklen; + struct scatterlist result_sg; +}; + +static inline struct qce_alg_template *to_ahash_tmpl(struct crypto_tfm *tfm) +{ + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash), + struct ahash_alg, halg); + + return container_of(alg, struct qce_alg_template, alg.ahash); +} + +extern const struct qce_algo_ops ahash_ops; + +#endif /* _SHA_H_ */ diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c new file mode 100644 index 000000000..f214a8755 --- /dev/null +++ b/drivers/crypto/s5p-sss.c @@ -0,0 +1,757 @@ +/* + 
* Cryptographic API. + * + * Support for Samsung S5PV210 HW acceleration. + * + * Copyright (C) 2011 NetUP Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + */ + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/clk.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/crypto.h> +#include <linux/interrupt.h> + +#include <crypto/algapi.h> +#include <crypto/aes.h> +#include <crypto/ctr.h> + +#define _SBF(s, v) ((v) << (s)) +#define _BIT(b) _SBF(b, 1) + +/* Feed control registers */ +#define SSS_REG_FCINTSTAT 0x0000 +#define SSS_FCINTSTAT_BRDMAINT _BIT(3) +#define SSS_FCINTSTAT_BTDMAINT _BIT(2) +#define SSS_FCINTSTAT_HRDMAINT _BIT(1) +#define SSS_FCINTSTAT_PKDMAINT _BIT(0) + +#define SSS_REG_FCINTENSET 0x0004 +#define SSS_FCINTENSET_BRDMAINTENSET _BIT(3) +#define SSS_FCINTENSET_BTDMAINTENSET _BIT(2) +#define SSS_FCINTENSET_HRDMAINTENSET _BIT(1) +#define SSS_FCINTENSET_PKDMAINTENSET _BIT(0) + +#define SSS_REG_FCINTENCLR 0x0008 +#define SSS_FCINTENCLR_BRDMAINTENCLR _BIT(3) +#define SSS_FCINTENCLR_BTDMAINTENCLR _BIT(2) +#define SSS_FCINTENCLR_HRDMAINTENCLR _BIT(1) +#define SSS_FCINTENCLR_PKDMAINTENCLR _BIT(0) + +#define SSS_REG_FCINTPEND 0x000C +#define SSS_FCINTPEND_BRDMAINTP _BIT(3) +#define SSS_FCINTPEND_BTDMAINTP _BIT(2) +#define SSS_FCINTPEND_HRDMAINTP _BIT(1) +#define SSS_FCINTPEND_PKDMAINTP _BIT(0) + +#define SSS_REG_FCFIFOSTAT 0x0010 +#define SSS_FCFIFOSTAT_BRFIFOFUL _BIT(7) +#define SSS_FCFIFOSTAT_BRFIFOEMP _BIT(6) +#define SSS_FCFIFOSTAT_BTFIFOFUL _BIT(5) +#define SSS_FCFIFOSTAT_BTFIFOEMP _BIT(4) +#define SSS_FCFIFOSTAT_HRFIFOFUL _BIT(3) +#define SSS_FCFIFOSTAT_HRFIFOEMP _BIT(2) +#define SSS_FCFIFOSTAT_PKFIFOFUL _BIT(1) +#define SSS_FCFIFOSTAT_PKFIFOEMP _BIT(0) + +#define SSS_REG_FCFIFOCTRL 0x0014 +#define SSS_FCFIFOCTRL_DESSEL _BIT(2) +#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00) +#define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01) +#define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02) + +#define SSS_REG_FCBRDMAS 0x0020 +#define SSS_REG_FCBRDMAL 0x0024 +#define SSS_REG_FCBRDMAC 0x0028 +#define SSS_FCBRDMAC_BYTESWAP _BIT(1) +#define SSS_FCBRDMAC_FLUSH _BIT(0) + +#define SSS_REG_FCBTDMAS 0x0030 +#define SSS_REG_FCBTDMAL 0x0034 +#define SSS_REG_FCBTDMAC 0x0038 +#define SSS_FCBTDMAC_BYTESWAP _BIT(1) +#define SSS_FCBTDMAC_FLUSH _BIT(0) + +#define SSS_REG_FCHRDMAS 0x0040 +#define SSS_REG_FCHRDMAL 0x0044 +#define SSS_REG_FCHRDMAC 0x0048 +#define SSS_FCHRDMAC_BYTESWAP _BIT(1) +#define SSS_FCHRDMAC_FLUSH _BIT(0) + +#define SSS_REG_FCPKDMAS 0x0050 +#define SSS_REG_FCPKDMAL 0x0054 +#define SSS_REG_FCPKDMAC 0x0058 +#define SSS_FCPKDMAC_BYTESWAP _BIT(3) +#define SSS_FCPKDMAC_DESCEND _BIT(2) +#define SSS_FCPKDMAC_TRANSMIT _BIT(1) +#define SSS_FCPKDMAC_FLUSH _BIT(0) + +#define SSS_REG_FCPKDMAO 0x005C + +/* AES registers */ +#define SSS_REG_AES_CONTROL 0x00 +#define SSS_AES_BYTESWAP_DI _BIT(11) +#define SSS_AES_BYTESWAP_DO _BIT(10) +#define SSS_AES_BYTESWAP_IV _BIT(9) +#define SSS_AES_BYTESWAP_CNT _BIT(8) +#define SSS_AES_BYTESWAP_KEY _BIT(7) +#define SSS_AES_KEY_CHANGE_MODE _BIT(6) +#define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00) +#define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01) +#define SSS_AES_KEY_SIZE_256 _SBF(4, 
0x02) +#define SSS_AES_FIFO_MODE _BIT(3) +#define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00) +#define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01) +#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02) +#define SSS_AES_MODE_DECRYPT _BIT(0) + +#define SSS_REG_AES_STATUS 0x04 +#define SSS_AES_BUSY _BIT(2) +#define SSS_AES_INPUT_READY _BIT(1) +#define SSS_AES_OUTPUT_READY _BIT(0) + +#define SSS_REG_AES_IN_DATA(s) (0x10 + (s << 2)) +#define SSS_REG_AES_OUT_DATA(s) (0x20 + (s << 2)) +#define SSS_REG_AES_IV_DATA(s) (0x30 + (s << 2)) +#define SSS_REG_AES_CNT_DATA(s) (0x40 + (s << 2)) +#define SSS_REG_AES_KEY_DATA(s) (0x80 + (s << 2)) + +#define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg)) +#define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg)) +#define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg)) + +#define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg) +#define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \ + SSS_AES_REG(dev, reg)) + +/* HW engine modes */ +#define FLAGS_AES_DECRYPT _BIT(0) +#define FLAGS_AES_MODE_MASK _SBF(1, 0x03) +#define FLAGS_AES_CBC _SBF(1, 0x01) +#define FLAGS_AES_CTR _SBF(1, 0x02) + +#define AES_KEY_LEN 16 +#define CRYPTO_QUEUE_LEN 1 + +/** + * struct samsung_aes_variant - platform specific SSS driver data + * @has_hash_irq: true if SSS module uses hash interrupt, false otherwise + * @aes_offset: AES register offset from SSS module's base. + * + * Specifies platform specific configuration of SSS module. + * Note: A structure for driver specific platform data is used for future + * expansion of its usage. + */ +struct samsung_aes_variant { + bool has_hash_irq; + unsigned int aes_offset; +}; + +struct s5p_aes_reqctx { + unsigned long mode; +}; + +struct s5p_aes_ctx { + struct s5p_aes_dev *dev; + + uint8_t aes_key[AES_MAX_KEY_SIZE]; + uint8_t nonce[CTR_RFC3686_NONCE_SIZE]; + int keylen; +}; + +struct s5p_aes_dev { + struct device *dev; + struct clk *clk; + void __iomem *ioaddr; + void __iomem *aes_ioaddr; + int irq_hash; + int irq_fc; + + struct ablkcipher_request *req; + struct s5p_aes_ctx *ctx; + struct scatterlist *sg_src; + struct scatterlist *sg_dst; + + struct tasklet_struct tasklet; + struct crypto_queue queue; + bool busy; + spinlock_t lock; + + struct samsung_aes_variant *variant; +}; + +static struct s5p_aes_dev *s5p_dev; + +static const struct samsung_aes_variant s5p_aes_data = { + .has_hash_irq = true, + .aes_offset = 0x4000, +}; + +static const struct samsung_aes_variant exynos_aes_data = { + .has_hash_irq = false, + .aes_offset = 0x200, +}; + +static const struct of_device_id s5p_sss_dt_match[] = { + { + .compatible = "samsung,s5pv210-secss", + .data = &s5p_aes_data, + }, + { + .compatible = "samsung,exynos4210-secss", + .data = &exynos_aes_data, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, s5p_sss_dt_match); + +static inline struct samsung_aes_variant *find_s5p_sss_version + (struct platform_device *pdev) +{ + if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) { + const struct of_device_id *match; + match = of_match_node(s5p_sss_dt_match, + pdev->dev.of_node); + return (struct samsung_aes_variant *)match->data; + } + return (struct samsung_aes_variant *) + platform_get_device_id(pdev)->driver_data; +} + +static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg) +{ + SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg)); + SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg)); +} + +static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg) +{ + SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg)); + SSS_WRITE(dev, FCBTDMAL, 
sg_dma_len(sg)); +} + +static void s5p_aes_complete(struct s5p_aes_dev *dev, int err) +{ + /* holding a lock outside */ + dev->req->base.complete(&dev->req->base, err); + dev->busy = false; +} + +static void s5p_unset_outdata(struct s5p_aes_dev *dev) +{ + dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE); +} + +static void s5p_unset_indata(struct s5p_aes_dev *dev) +{ + dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE); +} + +static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg) +{ + int err; + + if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) { + err = -EINVAL; + goto exit; + } + if (!sg_dma_len(sg)) { + err = -EINVAL; + goto exit; + } + + err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE); + if (!err) { + err = -ENOMEM; + goto exit; + } + + dev->sg_dst = sg; + err = 0; + + exit: + return err; +} + +static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg) +{ + int err; + + if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) { + err = -EINVAL; + goto exit; + } + if (!sg_dma_len(sg)) { + err = -EINVAL; + goto exit; + } + + err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE); + if (!err) { + err = -ENOMEM; + goto exit; + } + + dev->sg_src = sg; + err = 0; + + exit: + return err; +} + +static void s5p_aes_tx(struct s5p_aes_dev *dev) +{ + int err = 0; + + s5p_unset_outdata(dev); + + if (!sg_is_last(dev->sg_dst)) { + err = s5p_set_outdata(dev, sg_next(dev->sg_dst)); + if (err) { + s5p_aes_complete(dev, err); + return; + } + + s5p_set_dma_outdata(dev, dev->sg_dst); + } else { + s5p_aes_complete(dev, err); + + dev->busy = true; + tasklet_schedule(&dev->tasklet); + } +} + +static void s5p_aes_rx(struct s5p_aes_dev *dev) +{ + int err; + + s5p_unset_indata(dev); + + if (!sg_is_last(dev->sg_src)) { + err = s5p_set_indata(dev, sg_next(dev->sg_src)); + if (err) { + s5p_aes_complete(dev, err); + return; + } + + s5p_set_dma_indata(dev, dev->sg_src); + } +} + +static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id) +{ + struct platform_device *pdev = dev_id; + struct s5p_aes_dev *dev = platform_get_drvdata(pdev); + uint32_t status; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + + if (irq == dev->irq_fc) { + status = SSS_READ(dev, FCINTSTAT); + if (status & SSS_FCINTSTAT_BRDMAINT) + s5p_aes_rx(dev); + if (status & SSS_FCINTSTAT_BTDMAINT) + s5p_aes_tx(dev); + + SSS_WRITE(dev, FCINTPEND, status); + } + + spin_unlock_irqrestore(&dev->lock, flags); + + return IRQ_HANDLED; +} + +static void s5p_set_aes(struct s5p_aes_dev *dev, + uint8_t *key, uint8_t *iv, unsigned int keylen) +{ + void __iomem *keystart; + + if (iv) + memcpy(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10); + + if (keylen == AES_KEYSIZE_256) + keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0); + else if (keylen == AES_KEYSIZE_192) + keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2); + else + keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4); + + memcpy(keystart, key, keylen); +} + +static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode) +{ + struct ablkcipher_request *req = dev->req; + + uint32_t aes_control; + int err; + unsigned long flags; + + aes_control = SSS_AES_KEY_CHANGE_MODE; + if (mode & FLAGS_AES_DECRYPT) + aes_control |= SSS_AES_MODE_DECRYPT; + + if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) + aes_control |= SSS_AES_CHAIN_MODE_CBC; + else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) + aes_control |= SSS_AES_CHAIN_MODE_CTR; + + if (dev->ctx->keylen == AES_KEYSIZE_192) + aes_control |= SSS_AES_KEY_SIZE_192; + else if 
(dev->ctx->keylen == AES_KEYSIZE_256) + aes_control |= SSS_AES_KEY_SIZE_256; + + aes_control |= SSS_AES_FIFO_MODE; + + /* as a variant it is possible to use byte swapping on DMA side */ + aes_control |= SSS_AES_BYTESWAP_DI + | SSS_AES_BYTESWAP_DO + | SSS_AES_BYTESWAP_IV + | SSS_AES_BYTESWAP_KEY + | SSS_AES_BYTESWAP_CNT; + + spin_lock_irqsave(&dev->lock, flags); + + SSS_WRITE(dev, FCINTENCLR, + SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR); + SSS_WRITE(dev, FCFIFOCTRL, 0x00); + + err = s5p_set_indata(dev, req->src); + if (err) + goto indata_error; + + err = s5p_set_outdata(dev, req->dst); + if (err) + goto outdata_error; + + SSS_AES_WRITE(dev, AES_CONTROL, aes_control); + s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen); + + s5p_set_dma_indata(dev, req->src); + s5p_set_dma_outdata(dev, req->dst); + + SSS_WRITE(dev, FCINTENSET, + SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET); + + spin_unlock_irqrestore(&dev->lock, flags); + + return; + + outdata_error: + s5p_unset_indata(dev); + + indata_error: + s5p_aes_complete(dev, err); + spin_unlock_irqrestore(&dev->lock, flags); +} + +static void s5p_tasklet_cb(unsigned long data) +{ + struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data; + struct crypto_async_request *async_req, *backlog; + struct s5p_aes_reqctx *reqctx; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + backlog = crypto_get_backlog(&dev->queue); + async_req = crypto_dequeue_request(&dev->queue); + + if (!async_req) { + dev->busy = false; + spin_unlock_irqrestore(&dev->lock, flags); + return; + } + spin_unlock_irqrestore(&dev->lock, flags); + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + + dev->req = ablkcipher_request_cast(async_req); + dev->ctx = crypto_tfm_ctx(dev->req->base.tfm); + reqctx = ablkcipher_request_ctx(dev->req); + + s5p_aes_crypt_start(dev, reqctx->mode); +} + +static int s5p_aes_handle_req(struct s5p_aes_dev *dev, + struct ablkcipher_request *req) +{ + unsigned long flags; + int err; + + spin_lock_irqsave(&dev->lock, flags); + err = ablkcipher_enqueue_request(&dev->queue, req); + if (dev->busy) { + spin_unlock_irqrestore(&dev->lock, flags); + goto exit; + } + dev->busy = true; + + spin_unlock_irqrestore(&dev->lock, flags); + + tasklet_schedule(&dev->tasklet); + + exit: + return err; +} + +static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req); + struct s5p_aes_dev *dev = ctx->dev; + + if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { + pr_err("request size is not exact amount of AES blocks\n"); + return -EINVAL; + } + + reqctx->mode = mode; + + return s5p_aes_handle_req(dev, req); +} + +static int s5p_aes_setkey(struct crypto_ablkcipher *cipher, + const uint8_t *key, unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + if (keylen != AES_KEYSIZE_128 && + keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_256) + return -EINVAL; + + memcpy(ctx->aes_key, key, keylen); + ctx->keylen = keylen; + + return 0; +} + +static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req) +{ + return s5p_aes_crypt(req, 0); +} + +static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req) +{ + return s5p_aes_crypt(req, FLAGS_AES_DECRYPT); +} + +static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req) +{ + return 
s5p_aes_crypt(req, FLAGS_AES_CBC); +} + +static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req) +{ + return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC); +} + +static int s5p_aes_cra_init(struct crypto_tfm *tfm) +{ + struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->dev = s5p_dev; + tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx); + + return 0; +} + +static struct crypto_alg algs[] = { + { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-s5p", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct s5p_aes_ctx), + .cra_alignmask = 0x0f, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = s5p_aes_cra_init, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = s5p_aes_setkey, + .encrypt = s5p_aes_ecb_encrypt, + .decrypt = s5p_aes_ecb_decrypt, + } + }, + { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-s5p", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct s5p_aes_ctx), + .cra_alignmask = 0x0f, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = s5p_aes_cra_init, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = s5p_aes_setkey, + .encrypt = s5p_aes_cbc_encrypt, + .decrypt = s5p_aes_cbc_decrypt, + } + }, +}; + +static int s5p_aes_probe(struct platform_device *pdev) +{ + int i, j, err = -ENODEV; + struct s5p_aes_dev *pdata; + struct device *dev = &pdev->dev; + struct resource *res; + struct samsung_aes_variant *variant; + + if (s5p_dev) + return -EEXIST; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pdata->ioaddr)) + return PTR_ERR(pdata->ioaddr); + + variant = find_s5p_sss_version(pdev); + + pdata->clk = devm_clk_get(dev, "secss"); + if (IS_ERR(pdata->clk)) { + dev_err(dev, "failed to find secss clock source\n"); + return -ENOENT; + } + + err = clk_prepare_enable(pdata->clk); + if (err < 0) { + dev_err(dev, "Enabling SSS clk failed, err %d\n", err); + return err; + } + + spin_lock_init(&pdata->lock); + + pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset; + + pdata->irq_fc = platform_get_irq(pdev, 0); + if (pdata->irq_fc < 0) { + err = pdata->irq_fc; + dev_warn(dev, "feed control interrupt is not available.\n"); + goto err_irq; + } + err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt, + IRQF_SHARED, pdev->name, pdev); + if (err < 0) { + dev_warn(dev, "feed control interrupt is not available.\n"); + goto err_irq; + } + + if (variant->has_hash_irq) { + pdata->irq_hash = platform_get_irq(pdev, 1); + if (pdata->irq_hash < 0) { + err = pdata->irq_hash; + dev_warn(dev, "hash interrupt is not available.\n"); + goto err_irq; + } + err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt, + IRQF_SHARED, pdev->name, pdev); + if (err < 0) { + dev_warn(dev, "hash interrupt is not available.\n"); + goto err_irq; + } + } + + pdata->busy = false; + pdata->variant = variant; + pdata->dev = dev; + platform_set_drvdata(pdev, pdata); + s5p_dev = pdata; + + tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned 
long)pdata); + crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN); + + for (i = 0; i < ARRAY_SIZE(algs); i++) { + err = crypto_register_alg(&algs[i]); + if (err) + goto err_algs; + } + + pr_info("s5p-sss driver registered\n"); + + return 0; + + err_algs: + dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err); + + for (j = 0; j < i; j++) + crypto_unregister_alg(&algs[j]); + + tasklet_kill(&pdata->tasklet); + + err_irq: + clk_disable_unprepare(pdata->clk); + + s5p_dev = NULL; + + return err; +} + +static int s5p_aes_remove(struct platform_device *pdev) +{ + struct s5p_aes_dev *pdata = platform_get_drvdata(pdev); + int i; + + if (!pdata) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(algs); i++) + crypto_unregister_alg(&algs[i]); + + tasklet_kill(&pdata->tasklet); + + clk_disable_unprepare(pdata->clk); + + s5p_dev = NULL; + + return 0; +} + +static struct platform_driver s5p_aes_crypto = { + .probe = s5p_aes_probe, + .remove = s5p_aes_remove, + .driver = { + .name = "s5p-secss", + .of_match_table = s5p_sss_dt_match, + }, +}; + +module_platform_driver(s5p_aes_crypto); + +MODULE_DESCRIPTION("S5PV210 AES hw acceleration support."); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>"); diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c new file mode 100644 index 000000000..6be377f6b --- /dev/null +++ b/drivers/crypto/sahara.c @@ -0,0 +1,1682 @@ +/* + * Cryptographic API. + * + * Support for SAHARA cryptographic accelerator. + * + * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de> + * Copyright (c) 2013 Vista Silicon S.L. + * Author: Javier Martin <javier.martin@vista-silicon.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Based on omap-aes.c and tegra-aes.c + */ + +#include <crypto/algapi.h> +#include <crypto/aes.h> +#include <crypto/hash.h> +#include <crypto/internal/hash.h> +#include <crypto/scatterwalk.h> +#include <crypto/sha.h> + +#include <linux/clk.h> +#include <linux/crypto.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> + +#define SHA_BUFFER_LEN PAGE_SIZE +#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE + +#define SAHARA_NAME "sahara" +#define SAHARA_VERSION_3 3 +#define SAHARA_VERSION_4 4 +#define SAHARA_TIMEOUT_MS 1000 +#define SAHARA_MAX_HW_DESC 2 +#define SAHARA_MAX_HW_LINK 20 + +#define FLAGS_MODE_MASK 0x000f +#define FLAGS_ENCRYPT BIT(0) +#define FLAGS_CBC BIT(1) +#define FLAGS_NEW_KEY BIT(3) + +#define SAHARA_HDR_BASE 0x00800000 +#define SAHARA_HDR_SKHA_ALG_AES 0 +#define SAHARA_HDR_SKHA_OP_ENC (1 << 2) +#define SAHARA_HDR_SKHA_MODE_ECB (0 << 3) +#define SAHARA_HDR_SKHA_MODE_CBC (1 << 3) +#define SAHARA_HDR_FORM_DATA (5 << 16) +#define SAHARA_HDR_FORM_KEY (8 << 16) +#define SAHARA_HDR_LLO (1 << 24) +#define SAHARA_HDR_CHA_SKHA (1 << 28) +#define SAHARA_HDR_CHA_MDHA (2 << 28) +#define SAHARA_HDR_PARITY_BIT (1 << 31) + +#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY 0x20880000 +#define SAHARA_HDR_MDHA_SET_MODE_HASH 0x208D0000 +#define SAHARA_HDR_MDHA_HASH 0xA0850000 +#define SAHARA_HDR_MDHA_STORE_DIGEST 0x20820000 +#define SAHARA_HDR_MDHA_ALG_SHA1 0 +#define SAHARA_HDR_MDHA_ALG_MD5 1 +#define SAHARA_HDR_MDHA_ALG_SHA256 2 +#define SAHARA_HDR_MDHA_ALG_SHA224 3 +#define SAHARA_HDR_MDHA_PDATA (1 << 2) +#define SAHARA_HDR_MDHA_HMAC (1 << 3) +#define SAHARA_HDR_MDHA_INIT (1 << 5) +#define SAHARA_HDR_MDHA_IPAD (1 << 6) +#define SAHARA_HDR_MDHA_OPAD (1 << 7) +#define SAHARA_HDR_MDHA_SWAP (1 << 8) +#define SAHARA_HDR_MDHA_MAC_FULL (1 << 9) +#define SAHARA_HDR_MDHA_SSL (1 << 10) + +/* SAHARA can only process one request at a time */ +#define SAHARA_QUEUE_LENGTH 1 + +#define SAHARA_REG_VERSION 0x00 +#define SAHARA_REG_DAR 0x04 +#define SAHARA_REG_CONTROL 0x08 +#define SAHARA_CONTROL_SET_THROTTLE(x) (((x) & 0xff) << 24) +#define SAHARA_CONTROL_SET_MAXBURST(x) (((x) & 0xff) << 16) +#define SAHARA_CONTROL_RNG_AUTORSD (1 << 7) +#define SAHARA_CONTROL_ENABLE_INT (1 << 4) +#define SAHARA_REG_CMD 0x0C +#define SAHARA_CMD_RESET (1 << 0) +#define SAHARA_CMD_CLEAR_INT (1 << 8) +#define SAHARA_CMD_CLEAR_ERR (1 << 9) +#define SAHARA_CMD_SINGLE_STEP (1 << 10) +#define SAHARA_CMD_MODE_BATCH (1 << 16) +#define SAHARA_CMD_MODE_DEBUG (1 << 18) +#define SAHARA_REG_STATUS 0x10 +#define SAHARA_STATUS_GET_STATE(x) ((x) & 0x7) +#define SAHARA_STATE_IDLE 0 +#define SAHARA_STATE_BUSY 1 +#define SAHARA_STATE_ERR 2 +#define SAHARA_STATE_FAULT 3 +#define SAHARA_STATE_COMPLETE 4 +#define SAHARA_STATE_COMP_FLAG (1 << 2) +#define SAHARA_STATUS_DAR_FULL (1 << 3) +#define SAHARA_STATUS_ERROR (1 << 4) +#define SAHARA_STATUS_SECURE (1 << 5) +#define SAHARA_STATUS_FAIL (1 << 6) +#define SAHARA_STATUS_INIT (1 << 7) +#define SAHARA_STATUS_RNG_RESEED (1 << 8) +#define SAHARA_STATUS_ACTIVE_RNG (1 << 9) +#define SAHARA_STATUS_ACTIVE_MDHA (1 << 10) +#define SAHARA_STATUS_ACTIVE_SKHA (1 << 11) +#define SAHARA_STATUS_MODE_BATCH (1 << 16) +#define SAHARA_STATUS_MODE_DEDICATED (1 << 17) +#define SAHARA_STATUS_MODE_DEBUG (1 << 18) +#define SAHARA_STATUS_GET_ISTATE(x) (((x) >> 24) & 0xff) +#define SAHARA_REG_ERRSTATUS 
0x14 +#define SAHARA_ERRSTATUS_GET_SOURCE(x) ((x) & 0xf) +#define SAHARA_ERRSOURCE_CHA 14 +#define SAHARA_ERRSOURCE_DMA 15 +#define SAHARA_ERRSTATUS_DMA_DIR (1 << 8) +#define SAHARA_ERRSTATUS_GET_DMASZ(x)(((x) >> 9) & 0x3) +#define SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7) +#define SAHARA_ERRSTATUS_GET_CHASRC(x) (((x) >> 16) & 0xfff) +#define SAHARA_ERRSTATUS_GET_CHAERR(x) (((x) >> 28) & 0x3) +#define SAHARA_REG_FADDR 0x18 +#define SAHARA_REG_CDAR 0x1C +#define SAHARA_REG_IDAR 0x20 + +struct sahara_hw_desc { + u32 hdr; + u32 len1; + dma_addr_t p1; + u32 len2; + dma_addr_t p2; + dma_addr_t next; +}; + +struct sahara_hw_link { + u32 len; + dma_addr_t p; + dma_addr_t next; +}; + +struct sahara_ctx { + unsigned long flags; + + /* AES-specific context */ + int keylen; + u8 key[AES_KEYSIZE_128]; + struct crypto_ablkcipher *fallback; + + /* SHA-specific context */ + struct crypto_shash *shash_fallback; +}; + +struct sahara_aes_reqctx { + unsigned long mode; +}; + +/* + * struct sahara_sha_reqctx - private data per request + * @buf: holds data for requests smaller than block_size + * @rembuf: used to prepare one block_size-aligned request + * @context: hw-specific context for request. Digest is extracted from this + * @mode: specifies what type of hw-descriptor needs to be built + * @digest_size: length of digest for this request + * @context_size: length of hw-context for this request. + * Always digest_size + 4 + * @buf_cnt: number of bytes saved in buf + * @sg_in_idx: number of hw links + * @in_sg: scatterlist for input data + * @in_sg_chain: scatterlists for chained input data + * @in_sg_chained: specifies if chained scatterlists are used or not + * @total: total number of bytes for transfer + * @last: is this the last block + * @first: is this the first block + * @active: inside a transfer + */ +struct sahara_sha_reqctx { + u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE]; + u8 rembuf[SAHARA_MAX_SHA_BLOCK_SIZE]; + u8 context[SHA256_DIGEST_SIZE + 4]; + struct mutex mutex; + unsigned int mode; + unsigned int digest_size; + unsigned int context_size; + unsigned int buf_cnt; + unsigned int sg_in_idx; + struct scatterlist *in_sg; + struct scatterlist in_sg_chain[2]; + bool in_sg_chained; + size_t total; + unsigned int last; + unsigned int first; + unsigned int active; +}; + +struct sahara_dev { + struct device *device; + unsigned int version; + void __iomem *regs_base; + struct clk *clk_ipg; + struct clk *clk_ahb; + struct mutex queue_mutex; + struct task_struct *kthread; + struct completion dma_completion; + + struct sahara_ctx *ctx; + spinlock_t lock; + struct crypto_queue queue; + unsigned long flags; + + struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC]; + dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC]; + + u8 *key_base; + dma_addr_t key_phys_base; + + u8 *iv_base; + dma_addr_t iv_phys_base; + + u8 *context_base; + dma_addr_t context_phys_base; + + struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK]; + dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK]; + + size_t total; + struct scatterlist *in_sg; + unsigned int nb_in_sg; + struct scatterlist *out_sg; + unsigned int nb_out_sg; + + u32 error; +}; + +static struct sahara_dev *dev_ptr; + +static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg) +{ + writel(data, dev->regs_base + reg); +} + +static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg) +{ + return readl(dev->regs_base + reg); +} + +static u32 sahara_aes_key_hdr(struct sahara_dev *dev) +{ + u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES | + SAHARA_HDR_FORM_KEY | 
SAHARA_HDR_LLO | + SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT; + + if (dev->flags & FLAGS_CBC) { + hdr |= SAHARA_HDR_SKHA_MODE_CBC; + hdr ^= SAHARA_HDR_PARITY_BIT; + } + + if (dev->flags & FLAGS_ENCRYPT) { + hdr |= SAHARA_HDR_SKHA_OP_ENC; + hdr ^= SAHARA_HDR_PARITY_BIT; + } + + return hdr; +} + +static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev) +{ + return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA | + SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT; +} + +static int sahara_sg_length(struct scatterlist *sg, + unsigned int total) +{ + int sg_nb; + unsigned int len; + struct scatterlist *sg_list; + + sg_nb = 0; + sg_list = sg; + + while (total) { + len = min(sg_list->length, total); + + sg_nb++; + total -= len; + + sg_list = sg_next(sg_list); + if (!sg_list) + total = 0; + } + + return sg_nb; +} + +static char *sahara_err_src[16] = { + "No error", + "Header error", + "Descriptor length error", + "Descriptor length or pointer error", + "Link length error", + "Link pointer error", + "Input buffer error", + "Output buffer error", + "Output buffer starvation", + "Internal state fault", + "General descriptor problem", + "Reserved", + "Descriptor address error", + "Link address error", + "CHA error", + "DMA error" +}; + +static char *sahara_err_dmasize[4] = { + "Byte transfer", + "Half-word transfer", + "Word transfer", + "Reserved" +}; + +static char *sahara_err_dmasrc[8] = { + "No error", + "AHB bus error", + "Internal IP bus error", + "Parity error", + "DMA crosses 256 byte boundary", + "DMA is busy", + "Reserved", + "DMA HW error" +}; + +static char *sahara_cha_errsrc[12] = { + "Input buffer non-empty", + "Illegal address", + "Illegal mode", + "Illegal data size", + "Illegal key size", + "Write during processing", + "CTX read during processing", + "HW error", + "Input buffer disabled/underflow", + "Output buffer disabled/overflow", + "DES key parity error", + "Reserved" +}; + +static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" }; + +static void sahara_decode_error(struct sahara_dev *dev, unsigned int error) +{ + u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error); + u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error)); + + dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error); + + dev_err(dev->device, " - %s.\n", sahara_err_src[source]); + + if (source == SAHARA_ERRSOURCE_DMA) { + if (error & SAHARA_ERRSTATUS_DMA_DIR) + dev_err(dev->device, " * DMA read.\n"); + else + dev_err(dev->device, " * DMA write.\n"); + + dev_err(dev->device, " * %s.\n", + sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]); + dev_err(dev->device, " * %s.\n", + sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]); + } else if (source == SAHARA_ERRSOURCE_CHA) { + dev_err(dev->device, " * %s.\n", + sahara_cha_errsrc[chasrc]); + dev_err(dev->device, " * %s.\n", + sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]); + } + dev_err(dev->device, "\n"); +} + +static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" }; + +static void sahara_decode_status(struct sahara_dev *dev, unsigned int status) +{ + u8 state; + + if (!IS_ENABLED(DEBUG)) + return; + + state = SAHARA_STATUS_GET_STATE(status); + + dev_dbg(dev->device, "%s: Status Register = 0x%08x\n", + __func__, status); + + dev_dbg(dev->device, " - State = %d:\n", state); + if (state & SAHARA_STATE_COMP_FLAG) + dev_dbg(dev->device, " * Descriptor completed. 
IRQ pending.\n"); + + dev_dbg(dev->device, " * %s.\n", + sahara_state[state & ~SAHARA_STATE_COMP_FLAG]); + + if (status & SAHARA_STATUS_DAR_FULL) + dev_dbg(dev->device, " - DAR Full.\n"); + if (status & SAHARA_STATUS_ERROR) + dev_dbg(dev->device, " - Error.\n"); + if (status & SAHARA_STATUS_SECURE) + dev_dbg(dev->device, " - Secure.\n"); + if (status & SAHARA_STATUS_FAIL) + dev_dbg(dev->device, " - Fail.\n"); + if (status & SAHARA_STATUS_RNG_RESEED) + dev_dbg(dev->device, " - RNG Reseed Request.\n"); + if (status & SAHARA_STATUS_ACTIVE_RNG) + dev_dbg(dev->device, " - RNG Active.\n"); + if (status & SAHARA_STATUS_ACTIVE_MDHA) + dev_dbg(dev->device, " - MDHA Active.\n"); + if (status & SAHARA_STATUS_ACTIVE_SKHA) + dev_dbg(dev->device, " - SKHA Active.\n"); + + if (status & SAHARA_STATUS_MODE_BATCH) + dev_dbg(dev->device, " - Batch Mode.\n"); + else if (status & SAHARA_STATUS_MODE_DEDICATED) + dev_dbg(dev->device, " - Decidated Mode.\n"); + else if (status & SAHARA_STATUS_MODE_DEBUG) + dev_dbg(dev->device, " - Debug Mode.\n"); + + dev_dbg(dev->device, " - Internal state = 0x%02x\n", + SAHARA_STATUS_GET_ISTATE(status)); + + dev_dbg(dev->device, "Current DAR: 0x%08x\n", + sahara_read(dev, SAHARA_REG_CDAR)); + dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n", + sahara_read(dev, SAHARA_REG_IDAR)); +} + +static void sahara_dump_descriptors(struct sahara_dev *dev) +{ + int i; + + if (!IS_ENABLED(DEBUG)) + return; + + for (i = 0; i < SAHARA_MAX_HW_DESC; i++) { + dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n", + i, dev->hw_phys_desc[i]); + dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr); + dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1); + dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1); + dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2); + dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2); + dev_dbg(dev->device, "\tnext = 0x%08x\n", + dev->hw_desc[i]->next); + } + dev_dbg(dev->device, "\n"); +} + +static void sahara_dump_links(struct sahara_dev *dev) +{ + int i; + + if (!IS_ENABLED(DEBUG)) + return; + + for (i = 0; i < SAHARA_MAX_HW_LINK; i++) { + dev_dbg(dev->device, "Link (%d) (0x%08x):\n", + i, dev->hw_phys_link[i]); + dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len); + dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p); + dev_dbg(dev->device, "\tnext = 0x%08x\n", + dev->hw_link[i]->next); + } + dev_dbg(dev->device, "\n"); +} + +static int sahara_hw_descriptor_create(struct sahara_dev *dev) +{ + struct sahara_ctx *ctx = dev->ctx; + struct scatterlist *sg; + int ret; + int i, j; + int idx = 0; + + /* Copy new key if necessary */ + if (ctx->flags & FLAGS_NEW_KEY) { + memcpy(dev->key_base, ctx->key, ctx->keylen); + ctx->flags &= ~FLAGS_NEW_KEY; + + if (dev->flags & FLAGS_CBC) { + dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE; + dev->hw_desc[idx]->p1 = dev->iv_phys_base; + } else { + dev->hw_desc[idx]->len1 = 0; + dev->hw_desc[idx]->p1 = 0; + } + dev->hw_desc[idx]->len2 = ctx->keylen; + dev->hw_desc[idx]->p2 = dev->key_phys_base; + dev->hw_desc[idx]->next = dev->hw_phys_desc[1]; + + dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev); + + idx++; + } + + dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total); + dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total); + if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) { + dev_err(dev->device, "not enough hw links (%d)\n", + dev->nb_in_sg + dev->nb_out_sg); + return -EINVAL; + } + + ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, + DMA_TO_DEVICE); + if 
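/* dma_map_sg() may return fewer entries than requested (or zero on
+ * failure); anything other than the expected count is treated as a
+ * mapping failure here */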
(ret != dev->nb_in_sg) { + dev_err(dev->device, "couldn't map in sg\n"); + goto unmap_in; + } + ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg, + DMA_FROM_DEVICE); + if (ret != dev->nb_out_sg) { + dev_err(dev->device, "couldn't map out sg\n"); + goto unmap_out; + } + + /* Create input links */ + dev->hw_desc[idx]->p1 = dev->hw_phys_link[0]; + sg = dev->in_sg; + for (i = 0; i < dev->nb_in_sg; i++) { + dev->hw_link[i]->len = sg->length; + dev->hw_link[i]->p = sg->dma_address; + if (i == (dev->nb_in_sg - 1)) { + dev->hw_link[i]->next = 0; + } else { + dev->hw_link[i]->next = dev->hw_phys_link[i + 1]; + sg = sg_next(sg); + } + } + + /* Create output links */ + dev->hw_desc[idx]->p2 = dev->hw_phys_link[i]; + sg = dev->out_sg; + for (j = i; j < dev->nb_out_sg + i; j++) { + dev->hw_link[j]->len = sg->length; + dev->hw_link[j]->p = sg->dma_address; + if (j == (dev->nb_out_sg + i - 1)) { + dev->hw_link[j]->next = 0; + } else { + dev->hw_link[j]->next = dev->hw_phys_link[j + 1]; + sg = sg_next(sg); + } + } + + /* Fill remaining fields of hw_desc[1] */ + dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev); + dev->hw_desc[idx]->len1 = dev->total; + dev->hw_desc[idx]->len2 = dev->total; + dev->hw_desc[idx]->next = 0; + + sahara_dump_descriptors(dev); + sahara_dump_links(dev); + + sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR); + + return 0; + +unmap_out: + dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg, + DMA_TO_DEVICE); +unmap_in: + dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, + DMA_FROM_DEVICE); + + return -EINVAL; +} + +static int sahara_aes_process(struct ablkcipher_request *req) +{ + struct sahara_dev *dev = dev_ptr; + struct sahara_ctx *ctx; + struct sahara_aes_reqctx *rctx; + int ret; + unsigned long timeout; + + /* Request is ready to be dispatched by the device */ + dev_dbg(dev->device, + "dispatch request (nbytes=%d, src=%p, dst=%p)\n", + req->nbytes, req->src, req->dst); + + /* assign new request to device */ + dev->total = req->nbytes; + dev->in_sg = req->src; + dev->out_sg = req->dst; + + rctx = ablkcipher_request_ctx(req); + ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); + rctx->mode &= FLAGS_MODE_MASK; + dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode; + + if ((dev->flags & FLAGS_CBC) && req->info) + memcpy(dev->iv_base, req->info, AES_KEYSIZE_128); + + /* assign new context to device */ + dev->ctx = ctx; + + reinit_completion(&dev->dma_completion); + + ret = sahara_hw_descriptor_create(dev); + if (ret) + return -EINVAL; + + timeout = wait_for_completion_timeout(&dev->dma_completion, + msecs_to_jiffies(SAHARA_TIMEOUT_MS)); + if (!timeout) { + dev_err(dev->device, "AES timeout\n"); + return -ETIMEDOUT; + } + + dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg, + DMA_TO_DEVICE); + dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, + DMA_FROM_DEVICE); + + return 0; +} + +static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm); + int ret; + + ctx->keylen = keylen; + + /* SAHARA only supports 128bit keys */ + if (keylen == AES_KEYSIZE_128) { + memcpy(ctx->key, key, keylen); + ctx->flags |= FLAGS_NEW_KEY; + return 0; + } + + if (keylen != AES_KEYSIZE_128 && + keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256) + return -EINVAL; + + /* + * The requested key size is not supported by HW, do a fallback. 
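The fallback tfm is the software ablkcipher allocated in
+ * sahara_aes_cra_init(); only the CRYPTO_TFM_REQ flags are propagated to
+ * it here, and its CRYPTO_TFM_RES flags are copied back if its setkey
+ * fails.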
+ */ + ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; + ctx->fallback->base.crt_flags |= + (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + + ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen); + if (ret) { + struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm); + + tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK; + tfm_aux->crt_flags |= + (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK); + } + return ret; +} + +static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode) +{ + struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req); + struct sahara_dev *dev = dev_ptr; + int err = 0; + + dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n", + req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC)); + + if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { + dev_err(dev->device, + "request size is not exact amount of AES blocks\n"); + return -EINVAL; + } + + rctx->mode = mode; + + mutex_lock(&dev->queue_mutex); + err = ablkcipher_enqueue_request(&dev->queue, req); + mutex_unlock(&dev->queue_mutex); + + wake_up_process(dev->kthread); + + return err; +} + +static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req) +{ + struct crypto_tfm *tfm = + crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); + struct sahara_ctx *ctx = crypto_ablkcipher_ctx( + crypto_ablkcipher_reqtfm(req)); + int err; + + if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { + ablkcipher_request_set_tfm(req, ctx->fallback); + err = crypto_ablkcipher_encrypt(req); + ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); + return err; + } + + return sahara_aes_crypt(req, FLAGS_ENCRYPT); +} + +static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req) +{ + struct crypto_tfm *tfm = + crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); + struct sahara_ctx *ctx = crypto_ablkcipher_ctx( + crypto_ablkcipher_reqtfm(req)); + int err; + + if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { + ablkcipher_request_set_tfm(req, ctx->fallback); + err = crypto_ablkcipher_decrypt(req); + ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); + return err; + } + + return sahara_aes_crypt(req, 0); +} + +static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req) +{ + struct crypto_tfm *tfm = + crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); + struct sahara_ctx *ctx = crypto_ablkcipher_ctx( + crypto_ablkcipher_reqtfm(req)); + int err; + + if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { + ablkcipher_request_set_tfm(req, ctx->fallback); + err = crypto_ablkcipher_encrypt(req); + ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); + return err; + } + + return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC); +} + +static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req) +{ + struct crypto_tfm *tfm = + crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); + struct sahara_ctx *ctx = crypto_ablkcipher_ctx( + crypto_ablkcipher_reqtfm(req)); + int err; + + if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { + ablkcipher_request_set_tfm(req, ctx->fallback); + err = crypto_ablkcipher_decrypt(req); + ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); + return err; + } + + return sahara_aes_crypt(req, FLAGS_CBC); +} + +static int sahara_aes_cra_init(struct crypto_tfm *tfm) +{ + const char *name = crypto_tfm_alg_name(tfm); + struct sahara_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->fallback = crypto_alloc_ablkcipher(name, 0, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback)) { + pr_err("Error allocating fallback algo %s\n", 
name); + return PTR_ERR(ctx->fallback); + } + + tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx); + + return 0; +} + +static void sahara_aes_cra_exit(struct crypto_tfm *tfm) +{ + struct sahara_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->fallback) + crypto_free_ablkcipher(ctx->fallback); + ctx->fallback = NULL; +} + +static u32 sahara_sha_init_hdr(struct sahara_dev *dev, + struct sahara_sha_reqctx *rctx) +{ + u32 hdr = 0; + + hdr = rctx->mode; + + if (rctx->first) { + hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH; + hdr |= SAHARA_HDR_MDHA_INIT; + } else { + hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY; + } + + if (rctx->last) + hdr |= SAHARA_HDR_MDHA_PDATA; + + if (hweight_long(hdr) % 2 == 0) + hdr |= SAHARA_HDR_PARITY_BIT; + + return hdr; +} + +static int sahara_sha_hw_links_create(struct sahara_dev *dev, + struct sahara_sha_reqctx *rctx, + int start) +{ + struct scatterlist *sg; + unsigned int i; + int ret; + + dev->in_sg = rctx->in_sg; + + dev->nb_in_sg = sahara_sg_length(dev->in_sg, rctx->total); + if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) { + dev_err(dev->device, "not enough hw links (%d)\n", + dev->nb_in_sg + dev->nb_out_sg); + return -EINVAL; + } + + if (rctx->in_sg_chained) { + i = start; + sg = dev->in_sg; + while (sg) { + ret = dma_map_sg(dev->device, sg, 1, + DMA_TO_DEVICE); + if (!ret) + return -EFAULT; + + dev->hw_link[i]->len = sg->length; + dev->hw_link[i]->p = sg->dma_address; + dev->hw_link[i]->next = dev->hw_phys_link[i + 1]; + sg = sg_next(sg); + i += 1; + } + dev->hw_link[i-1]->next = 0; + } else { + sg = dev->in_sg; + ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, + DMA_TO_DEVICE); + if (!ret) + return -EFAULT; + + for (i = start; i < dev->nb_in_sg + start; i++) { + dev->hw_link[i]->len = sg->length; + dev->hw_link[i]->p = sg->dma_address; + if (i == (dev->nb_in_sg + start - 1)) { + dev->hw_link[i]->next = 0; + } else { + dev->hw_link[i]->next = dev->hw_phys_link[i + 1]; + sg = sg_next(sg); + } + } + } + + return i; +} + +static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev, + struct sahara_sha_reqctx *rctx, + struct ahash_request *req, + int index) +{ + unsigned result_len; + int i = index; + + if (rctx->first) + /* Create initial descriptor: #8*/ + dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx); + else + /* Create hash descriptor: #10. Must follow #6. 
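Descriptor #6 is the context-load descriptor built by
+ * sahara_sha_hw_context_descriptor_create() below; it restores the
+ * context saved by the previous operation before this descriptor hashes
+ * the new data.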
*/ + dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH; + + dev->hw_desc[index]->len1 = rctx->total; + if (dev->hw_desc[index]->len1 == 0) { + /* if len1 is 0, p1 must be 0, too */ + dev->hw_desc[index]->p1 = 0; + rctx->sg_in_idx = 0; + } else { + /* Create input links */ + dev->hw_desc[index]->p1 = dev->hw_phys_link[index]; + i = sahara_sha_hw_links_create(dev, rctx, index); + + rctx->sg_in_idx = index; + if (i < 0) + return i; + } + + dev->hw_desc[index]->p2 = dev->hw_phys_link[i]; + + /* Save the context for the next operation */ + result_len = rctx->context_size; + dev->hw_link[i]->p = dev->context_phys_base; + + dev->hw_link[i]->len = result_len; + dev->hw_desc[index]->len2 = result_len; + + dev->hw_link[i]->next = 0; + + return 0; +} + +/* + * Load descriptor aka #6 + * + * To load a previously saved context back to the MDHA unit + * + * p1: Saved Context + * p2: NULL + * + */ +static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev, + struct sahara_sha_reqctx *rctx, + struct ahash_request *req, + int index) +{ + dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx); + + dev->hw_desc[index]->len1 = rctx->context_size; + dev->hw_desc[index]->p1 = dev->hw_phys_link[index]; + dev->hw_desc[index]->len2 = 0; + dev->hw_desc[index]->p2 = 0; + + dev->hw_link[index]->len = rctx->context_size; + dev->hw_link[index]->p = dev->context_phys_base; + dev->hw_link[index]->next = 0; + + return 0; +} + +static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes) +{ + if (!sg || !sg->length) + return nbytes; + + while (nbytes && sg) { + if (nbytes <= sg->length) { + sg->length = nbytes; + sg_mark_end(sg); + break; + } + nbytes -= sg->length; + sg = sg_next(sg); + } + + return nbytes; +} + +static int sahara_sha_prepare_request(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sahara_sha_reqctx *rctx = ahash_request_ctx(req); + unsigned int hash_later; + unsigned int block_size; + unsigned int len; + + block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); + + /* append bytes from previous operation */ + len = rctx->buf_cnt + req->nbytes; + + /* only the last transfer can be padded in hardware */ + if (!rctx->last && (len < block_size)) { + /* to few data, save for next operation */ + scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src, + 0, req->nbytes, 0); + rctx->buf_cnt += req->nbytes; + + return 0; + } + + /* add data from previous operation first */ + if (rctx->buf_cnt) + memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt); + + /* data must always be a multiple of block_size */ + hash_later = rctx->last ? 
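/* for the final transfer everything is sent and the h/w pads it;
+ * otherwise the tail that is not a multiple of block_size is held back
+ * in rctx->buf for the next update */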
0 : len & (block_size - 1); + if (hash_later) { + unsigned int offset = req->nbytes - hash_later; + /* Save remaining bytes for later use */ + scatterwalk_map_and_copy(rctx->buf, req->src, offset, + hash_later, 0); + } + + /* nbytes should now be multiple of blocksize */ + req->nbytes = req->nbytes - hash_later; + + sahara_walk_and_recalc(req->src, req->nbytes); + + /* have data from previous operation and current */ + if (rctx->buf_cnt && req->nbytes) { + sg_init_table(rctx->in_sg_chain, 2); + sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt); + + scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src); + + rctx->total = req->nbytes + rctx->buf_cnt; + rctx->in_sg = rctx->in_sg_chain; + + rctx->in_sg_chained = true; + req->src = rctx->in_sg_chain; + /* only data from previous operation */ + } else if (rctx->buf_cnt) { + if (req->src) + rctx->in_sg = req->src; + else + rctx->in_sg = rctx->in_sg_chain; + /* buf was copied into rembuf above */ + sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt); + rctx->total = rctx->buf_cnt; + rctx->in_sg_chained = false; + /* no data from previous operation */ + } else { + rctx->in_sg = req->src; + rctx->total = req->nbytes; + req->src = rctx->in_sg; + rctx->in_sg_chained = false; + } + + /* on next call, we only have the remaining data in the buffer */ + rctx->buf_cnt = hash_later; + + return -EINPROGRESS; +} + +static void sahara_sha_unmap_sg(struct sahara_dev *dev, + struct sahara_sha_reqctx *rctx) +{ + struct scatterlist *sg; + + if (rctx->in_sg_chained) { + sg = dev->in_sg; + while (sg) { + dma_unmap_sg(dev->device, sg, 1, DMA_TO_DEVICE); + sg = sg_next(sg); + } + } else { + dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, + DMA_TO_DEVICE); + } +} + +static int sahara_sha_process(struct ahash_request *req) +{ + struct sahara_dev *dev = dev_ptr; + struct sahara_sha_reqctx *rctx = ahash_request_ctx(req); + int ret; + unsigned long timeout; + + ret = sahara_sha_prepare_request(req); + if (!ret) + return ret; + + if (rctx->first) { + sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0); + dev->hw_desc[0]->next = 0; + rctx->first = 0; + } else { + memcpy(dev->context_base, rctx->context, rctx->context_size); + + sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0); + dev->hw_desc[0]->next = dev->hw_phys_desc[1]; + sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1); + dev->hw_desc[1]->next = 0; + } + + sahara_dump_descriptors(dev); + sahara_dump_links(dev); + + reinit_completion(&dev->dma_completion); + + sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR); + + timeout = wait_for_completion_timeout(&dev->dma_completion, + msecs_to_jiffies(SAHARA_TIMEOUT_MS)); + if (!timeout) { + dev_err(dev->device, "SHA timeout\n"); + return -ETIMEDOUT; + } + + if (rctx->sg_in_idx) + sahara_sha_unmap_sg(dev, rctx); + + memcpy(rctx->context, dev->context_base, rctx->context_size); + + if (req->result) + memcpy(req->result, rctx->context, rctx->digest_size); + + return 0; +} + +static int sahara_queue_manage(void *data) +{ + struct sahara_dev *dev = (struct sahara_dev *)data; + struct crypto_async_request *async_req; + struct crypto_async_request *backlog; + int ret = 0; + + do { + __set_current_state(TASK_INTERRUPTIBLE); + + mutex_lock(&dev->queue_mutex); + backlog = crypto_get_backlog(&dev->queue); + async_req = crypto_dequeue_request(&dev->queue); + mutex_unlock(&dev->queue_mutex); + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + + if (async_req) { + if (crypto_tfm_alg_type(async_req->tfm) == + CRYPTO_ALG_TYPE_AHASH) { + struct 
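/* one queue serves both hashes and ciphers; dispatch on the
+ * transform type */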
ahash_request *req = + ahash_request_cast(async_req); + + ret = sahara_sha_process(req); + } else { + struct ablkcipher_request *req = + ablkcipher_request_cast(async_req); + + ret = sahara_aes_process(req); + } + + async_req->complete(async_req, ret); + + continue; + } + + schedule(); + } while (!kthread_should_stop()); + + return 0; +} + +static int sahara_sha_enqueue(struct ahash_request *req, int last) +{ + struct sahara_sha_reqctx *rctx = ahash_request_ctx(req); + struct sahara_dev *dev = dev_ptr; + int ret; + + if (!req->nbytes && !last) + return 0; + + mutex_lock(&rctx->mutex); + rctx->last = last; + + if (!rctx->active) { + rctx->active = 1; + rctx->first = 1; + } + + mutex_lock(&dev->queue_mutex); + ret = crypto_enqueue_request(&dev->queue, &req->base); + mutex_unlock(&dev->queue_mutex); + + wake_up_process(dev->kthread); + mutex_unlock(&rctx->mutex); + + return ret; +} + +static int sahara_sha_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sahara_sha_reqctx *rctx = ahash_request_ctx(req); + + memset(rctx, 0, sizeof(*rctx)); + + switch (crypto_ahash_digestsize(tfm)) { + case SHA1_DIGEST_SIZE: + rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1; + rctx->digest_size = SHA1_DIGEST_SIZE; + break; + case SHA256_DIGEST_SIZE: + rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256; + rctx->digest_size = SHA256_DIGEST_SIZE; + break; + default: + return -EINVAL; + } + + rctx->context_size = rctx->digest_size + 4; + rctx->active = 0; + + mutex_init(&rctx->mutex); + + return 0; +} + +static int sahara_sha_update(struct ahash_request *req) +{ + return sahara_sha_enqueue(req, 0); +} + +static int sahara_sha_final(struct ahash_request *req) +{ + req->nbytes = 0; + return sahara_sha_enqueue(req, 1); +} + +static int sahara_sha_finup(struct ahash_request *req) +{ + return sahara_sha_enqueue(req, 1); +} + +static int sahara_sha_digest(struct ahash_request *req) +{ + sahara_sha_init(req); + + return sahara_sha_finup(req); +} + +static int sahara_sha_export(struct ahash_request *req, void *out) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct sahara_ctx *ctx = crypto_ahash_ctx(ahash); + struct sahara_sha_reqctx *rctx = ahash_request_ctx(req); + + memcpy(out, ctx, sizeof(struct sahara_ctx)); + memcpy(out + sizeof(struct sahara_sha_reqctx), rctx, + sizeof(struct sahara_sha_reqctx)); + + return 0; +} + +static int sahara_sha_import(struct ahash_request *req, const void *in) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct sahara_ctx *ctx = crypto_ahash_ctx(ahash); + struct sahara_sha_reqctx *rctx = ahash_request_ctx(req); + + memcpy(ctx, in, sizeof(struct sahara_ctx)); + memcpy(rctx, in + sizeof(struct sahara_sha_reqctx), + sizeof(struct sahara_sha_reqctx)); + + return 0; +} + +static int sahara_sha_cra_init(struct crypto_tfm *tfm) +{ + const char *name = crypto_tfm_alg_name(tfm); + struct sahara_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->shash_fallback = crypto_alloc_shash(name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->shash_fallback)) { + pr_err("Error allocating fallback algo %s\n", name); + return PTR_ERR(ctx->shash_fallback); + } + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct sahara_sha_reqctx) + + SHA_BUFFER_LEN + SHA256_BLOCK_SIZE); + + return 0; +} + +static void sahara_sha_cra_exit(struct crypto_tfm *tfm) +{ + struct sahara_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_shash(ctx->shash_fallback); + ctx->shash_fallback = NULL; +} + +static struct crypto_alg aes_algs[] = { +{ + .cra_name = 
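/* the engine handles 128-bit AES keys only; CRYPTO_ALG_NEED_FALLBACK
+ * plus the fallback tfm cover the 192/256-bit cases, see
+ * sahara_aes_setkey() */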
"ecb(aes)", + .cra_driver_name = "sahara-ecb-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sahara_ctx), + .cra_alignmask = 0x0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = sahara_aes_cra_init, + .cra_exit = sahara_aes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE , + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = sahara_aes_setkey, + .encrypt = sahara_aes_ecb_encrypt, + .decrypt = sahara_aes_ecb_decrypt, + } +}, { + .cra_name = "cbc(aes)", + .cra_driver_name = "sahara-cbc-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sahara_ctx), + .cra_alignmask = 0x0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = sahara_aes_cra_init, + .cra_exit = sahara_aes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE , + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = sahara_aes_setkey, + .encrypt = sahara_aes_cbc_encrypt, + .decrypt = sahara_aes_cbc_decrypt, + } +} +}; + +static struct ahash_alg sha_v3_algs[] = { +{ + .init = sahara_sha_init, + .update = sahara_sha_update, + .final = sahara_sha_final, + .finup = sahara_sha_finup, + .digest = sahara_sha_digest, + .export = sahara_sha_export, + .import = sahara_sha_import, + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha1", + .cra_driver_name = "sahara-sha1", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sahara_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = sahara_sha_cra_init, + .cra_exit = sahara_sha_cra_exit, + } +}, +}; + +static struct ahash_alg sha_v4_algs[] = { +{ + .init = sahara_sha_init, + .update = sahara_sha_update, + .final = sahara_sha_final, + .finup = sahara_sha_finup, + .digest = sahara_sha_digest, + .export = sahara_sha_export, + .import = sahara_sha_import, + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha256", + .cra_driver_name = "sahara-sha256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sahara_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = sahara_sha_cra_init, + .cra_exit = sahara_sha_cra_exit, + } +}, +}; + +static irqreturn_t sahara_irq_handler(int irq, void *data) +{ + struct sahara_dev *dev = (struct sahara_dev *)data; + unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS); + unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS); + + sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR, + SAHARA_REG_CMD); + + sahara_decode_status(dev, stat); + + if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) { + return IRQ_NONE; + } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) { + dev->error = 0; + } else { + sahara_decode_error(dev, err); + dev->error = -EINVAL; + } + + complete(&dev->dma_completion); + + return IRQ_HANDLED; +} + + +static int sahara_register_algs(struct sahara_dev *dev) +{ + int err; + unsigned int i, j, k, l; + + for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { + INIT_LIST_HEAD(&aes_algs[i].cra_list); + err = 
crypto_register_alg(&aes_algs[i]); + if (err) + goto err_aes_algs; + } + + for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) { + err = crypto_register_ahash(&sha_v3_algs[k]); + if (err) + goto err_sha_v3_algs; + } + + if (dev->version > SAHARA_VERSION_3) + for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) { + err = crypto_register_ahash(&sha_v4_algs[l]); + if (err) + goto err_sha_v4_algs; + } + + return 0; + +err_sha_v4_algs: + for (j = 0; j < l; j++) + crypto_unregister_ahash(&sha_v4_algs[j]); + +err_sha_v3_algs: + for (j = 0; j < k; j++) + crypto_unregister_ahash(&sha_v4_algs[j]); + +err_aes_algs: + for (j = 0; j < i; j++) + crypto_unregister_alg(&aes_algs[j]); + + return err; +} + +static void sahara_unregister_algs(struct sahara_dev *dev) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(aes_algs); i++) + crypto_unregister_alg(&aes_algs[i]); + + for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++) + crypto_unregister_ahash(&sha_v3_algs[i]); + + if (dev->version > SAHARA_VERSION_3) + for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++) + crypto_unregister_ahash(&sha_v4_algs[i]); +} + +static struct platform_device_id sahara_platform_ids[] = { + { .name = "sahara-imx27" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, sahara_platform_ids); + +static struct of_device_id sahara_dt_ids[] = { + { .compatible = "fsl,imx53-sahara" }, + { .compatible = "fsl,imx27-sahara" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sahara_dt_ids); + +static int sahara_probe(struct platform_device *pdev) +{ + struct sahara_dev *dev; + struct resource *res; + u32 version; + int irq; + int err; + int i; + + dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL); + if (dev == NULL) { + dev_err(&pdev->dev, "unable to alloc data struct.\n"); + return -ENOMEM; + } + + dev->device = &pdev->dev; + platform_set_drvdata(pdev, dev); + + /* Get the base address */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dev->regs_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(dev->regs_base)) + return PTR_ERR(dev->regs_base); + + /* Get the IRQ */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "failed to get irq resource\n"); + return irq; + } + + err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler, + 0, dev_name(&pdev->dev), dev); + if (err) { + dev_err(&pdev->dev, "failed to request irq\n"); + return err; + } + + /* clocks */ + dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(dev->clk_ipg)) { + dev_err(&pdev->dev, "Could not get ipg clock\n"); + return PTR_ERR(dev->clk_ipg); + } + + dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); + if (IS_ERR(dev->clk_ahb)) { + dev_err(&pdev->dev, "Could not get ahb clock\n"); + return PTR_ERR(dev->clk_ahb); + } + + /* Allocate HW descriptors */ + dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev, + SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), + &dev->hw_phys_desc[0], GFP_KERNEL); + if (!dev->hw_desc[0]) { + dev_err(&pdev->dev, "Could not allocate hw descriptors\n"); + return -ENOMEM; + } + dev->hw_desc[1] = dev->hw_desc[0] + 1; + dev->hw_phys_desc[1] = dev->hw_phys_desc[0] + + sizeof(struct sahara_hw_desc); + + /* Allocate space for iv and key */ + dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, + &dev->key_phys_base, GFP_KERNEL); + if (!dev->key_base) { + dev_err(&pdev->dev, "Could not allocate memory for key\n"); + err = -ENOMEM; + goto err_key; + } + dev->iv_base = dev->key_base + AES_KEYSIZE_128; + dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128; + + /* Allocate space for 
context: largest digest + message length field */ + dev->context_base = dma_alloc_coherent(&pdev->dev, + SHA256_DIGEST_SIZE + 4, + &dev->context_phys_base, GFP_KERNEL); + if (!dev->context_base) { + dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n"); + err = -ENOMEM; + goto err_key; + } + + /* Allocate space for HW links */ + dev->hw_link[0] = dma_alloc_coherent(&pdev->dev, + SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), + &dev->hw_phys_link[0], GFP_KERNEL); + if (!dev->hw_link[0]) { + dev_err(&pdev->dev, "Could not allocate hw links\n"); + err = -ENOMEM; + goto err_link; + } + for (i = 1; i < SAHARA_MAX_HW_LINK; i++) { + dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] + + sizeof(struct sahara_hw_link); + dev->hw_link[i] = dev->hw_link[i - 1] + 1; + } + + crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH); + + spin_lock_init(&dev->lock); + mutex_init(&dev->queue_mutex); + + dev_ptr = dev; + + dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto"); + if (IS_ERR(dev->kthread)) { + err = PTR_ERR(dev->kthread); + goto err_link; + } + + init_completion(&dev->dma_completion); + + clk_prepare_enable(dev->clk_ipg); + clk_prepare_enable(dev->clk_ahb); + + version = sahara_read(dev, SAHARA_REG_VERSION); + if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) { + if (version != SAHARA_VERSION_3) + err = -ENODEV; + } else if (of_device_is_compatible(pdev->dev.of_node, + "fsl,imx53-sahara")) { + if (((version >> 8) & 0xff) != SAHARA_VERSION_4) + err = -ENODEV; + version = (version >> 8) & 0xff; + } + if (err == -ENODEV) { + dev_err(&pdev->dev, "SAHARA version %d not supported\n", + version); + goto err_algs; + } + + dev->version = version; + + sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH, + SAHARA_REG_CMD); + sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) | + SAHARA_CONTROL_SET_MAXBURST(8) | + SAHARA_CONTROL_RNG_AUTORSD | + SAHARA_CONTROL_ENABLE_INT, + SAHARA_REG_CONTROL); + + err = sahara_register_algs(dev); + if (err) + goto err_algs; + + dev_info(&pdev->dev, "SAHARA version %d initialized\n", version); + + return 0; + +err_algs: + dma_free_coherent(&pdev->dev, + SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), + dev->hw_link[0], dev->hw_phys_link[0]); + clk_disable_unprepare(dev->clk_ipg); + clk_disable_unprepare(dev->clk_ahb); + kthread_stop(dev->kthread); + dev_ptr = NULL; +err_link: + dma_free_coherent(&pdev->dev, + 2 * AES_KEYSIZE_128, + dev->key_base, dev->key_phys_base); + dma_free_coherent(&pdev->dev, + SHA256_DIGEST_SIZE, + dev->context_base, dev->context_phys_base); +err_key: + dma_free_coherent(&pdev->dev, + SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), + dev->hw_desc[0], dev->hw_phys_desc[0]); + + return err; +} + +static int sahara_remove(struct platform_device *pdev) +{ + struct sahara_dev *dev = platform_get_drvdata(pdev); + + dma_free_coherent(&pdev->dev, + SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), + dev->hw_link[0], dev->hw_phys_link[0]); + dma_free_coherent(&pdev->dev, + 2 * AES_KEYSIZE_128, + dev->key_base, dev->key_phys_base); + dma_free_coherent(&pdev->dev, + SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), + dev->hw_desc[0], dev->hw_phys_desc[0]); + + kthread_stop(dev->kthread); + + sahara_unregister_algs(dev); + + clk_disable_unprepare(dev->clk_ipg); + clk_disable_unprepare(dev->clk_ahb); + + dev_ptr = NULL; + + return 0; +} + +static struct platform_driver sahara_driver = { + .probe = sahara_probe, + .remove = sahara_remove, + .driver = { + .name = SAHARA_NAME, + .of_match_table = 
sahara_dt_ids, + }, + .id_table = sahara_platform_ids, +}; + +module_platform_driver(sahara_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>"); +MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>"); +MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator"); diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c new file mode 100644 index 000000000..f062158d4 --- /dev/null +++ b/drivers/crypto/talitos.c @@ -0,0 +1,2819 @@ +/* + * talitos - Freescale Integrated Security Engine (SEC) device driver + * + * Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * + * Scatterlist Crypto API glue code copied from files with the following: + * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au> + * + * Crypto algorithm registration code copied from hifn driver: + * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/crypto.h> +#include <linux/hw_random.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/spinlock.h> +#include <linux/rtnetlink.h> +#include <linux/slab.h> + +#include <crypto/algapi.h> +#include <crypto/aes.h> +#include <crypto/des.h> +#include <crypto/sha.h> +#include <crypto/md5.h> +#include <crypto/aead.h> +#include <crypto/authenc.h> +#include <crypto/skcipher.h> +#include <crypto/hash.h> +#include <crypto/internal/hash.h> +#include <crypto/scatterwalk.h> + +#include "talitos.h" + +static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) +{ + talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); + talitos_ptr->eptr = upper_32_bits(dma_addr); +} + +/* + * map virtual single (contiguous) pointer to h/w descriptor pointer + */ +static void map_single_talitos_ptr(struct device *dev, + struct talitos_ptr *talitos_ptr, + unsigned short len, void *data, + unsigned char extent, + enum dma_data_direction dir) +{ + dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); + + talitos_ptr->len = cpu_to_be16(len); + to_talitos_ptr(talitos_ptr, dma_addr); + talitos_ptr->j_extent = extent; +} + +/* + * unmap bus single (contiguous) h/w descriptor pointer + */ +static void unmap_single_talitos_ptr(struct device *dev, + struct talitos_ptr *talitos_ptr, + enum dma_data_direction dir) +{ + dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr), + be16_to_cpu(talitos_ptr->len), dir); +} + +static int reset_channel(struct device *dev, int ch) +{ + struct talitos_private *priv = dev_get_drvdata(dev); + unsigned int timeout = TALITOS_TIMEOUT; + + setbits32(priv->chan[ch].reg + 
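/* each channel has its own register block; CCCR_RESET is polled
+ * below until it clears or TALITOS_TIMEOUT expires */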
TALITOS_CCCR, TALITOS_CCCR_RESET); + + while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET) + && --timeout) + cpu_relax(); + + if (timeout == 0) { + dev_err(dev, "failed to reset channel %d\n", ch); + return -EIO; + } + + /* set 36-bit addressing, done writeback enable and done IRQ enable */ + setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE | + TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE); + + /* and ICCR writeback, if available */ + if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) + setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, + TALITOS_CCCR_LO_IWSE); + + return 0; +} + +static int reset_device(struct device *dev) +{ + struct talitos_private *priv = dev_get_drvdata(dev); + unsigned int timeout = TALITOS_TIMEOUT; + u32 mcr = TALITOS_MCR_SWR; + + setbits32(priv->reg + TALITOS_MCR, mcr); + + while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR) + && --timeout) + cpu_relax(); + + if (priv->irq[1]) { + mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3; + setbits32(priv->reg + TALITOS_MCR, mcr); + } + + if (timeout == 0) { + dev_err(dev, "failed to reset device\n"); + return -EIO; + } + + return 0; +} + +/* + * Reset and initialize the device + */ +static int init_device(struct device *dev) +{ + struct talitos_private *priv = dev_get_drvdata(dev); + int ch, err; + + /* + * Master reset + * errata documentation: warning: certain SEC interrupts + * are not fully cleared by writing the MCR:SWR bit, + * set bit twice to completely reset + */ + err = reset_device(dev); + if (err) + return err; + + err = reset_device(dev); + if (err) + return err; + + /* reset channels */ + for (ch = 0; ch < priv->num_channels; ch++) { + err = reset_channel(dev, ch); + if (err) + return err; + } + + /* enable channel done and error interrupts */ + setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT); + setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); + + /* disable integrity check error interrupts (use writeback instead) */ + if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) + setbits32(priv->reg + TALITOS_MDEUICR_LO, + TALITOS_MDEUICR_LO_ICE); + + return 0; +} + +/** + * talitos_submit - submits a descriptor to the device for processing + * @dev: the SEC device to be used + * @ch: the SEC device channel to be used + * @desc: the descriptor to be processed by the device + * @callback: whom to call when processing is complete + * @context: a handle for use by caller (optional) + * + * desc must contain valid dma-mapped (bus physical) address pointers. + * callback must check err and feedback in descriptor header + * for device processing status. 
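Returns -EINPROGRESS
+ * once the descriptor has been queued on the channel, or -EAGAIN if the
+ * channel fifo is full.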
+ */ +int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, + void (*callback)(struct device *dev, + struct talitos_desc *desc, + void *context, int error), + void *context) +{ + struct talitos_private *priv = dev_get_drvdata(dev); + struct talitos_request *request; + unsigned long flags; + int head; + + spin_lock_irqsave(&priv->chan[ch].head_lock, flags); + + if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) { + /* h/w fifo is full */ + spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags); + return -EAGAIN; + } + + head = priv->chan[ch].head; + request = &priv->chan[ch].fifo[head]; + + /* map descriptor and save caller data */ + request->dma_desc = dma_map_single(dev, desc, sizeof(*desc), + DMA_BIDIRECTIONAL); + request->callback = callback; + request->context = context; + + /* increment fifo head */ + priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1); + + smp_wmb(); + request->desc = desc; + + /* GO! */ + wmb(); + out_be32(priv->chan[ch].reg + TALITOS_FF, + upper_32_bits(request->dma_desc)); + out_be32(priv->chan[ch].reg + TALITOS_FF_LO, + lower_32_bits(request->dma_desc)); + + spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags); + + return -EINPROGRESS; +} +EXPORT_SYMBOL(talitos_submit); + +/* + * process what was done, notify callback of error if not + */ +static void flush_channel(struct device *dev, int ch, int error, int reset_ch) +{ + struct talitos_private *priv = dev_get_drvdata(dev); + struct talitos_request *request, saved_req; + unsigned long flags; + int tail, status; + + spin_lock_irqsave(&priv->chan[ch].tail_lock, flags); + + tail = priv->chan[ch].tail; + while (priv->chan[ch].fifo[tail].desc) { + request = &priv->chan[ch].fifo[tail]; + + /* descriptors with their done bits set don't get the error */ + rmb(); + if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE) + status = 0; + else + if (!error) + break; + else + status = error; + + dma_unmap_single(dev, request->dma_desc, + sizeof(struct talitos_desc), + DMA_BIDIRECTIONAL); + + /* copy entries so we can call callback outside lock */ + saved_req.desc = request->desc; + saved_req.callback = request->callback; + saved_req.context = request->context; + + /* release request entry in fifo */ + smp_wmb(); + request->desc = NULL; + + /* increment fifo tail */ + priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1); + + spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags); + + atomic_dec(&priv->chan[ch].submit_count); + + saved_req.callback(dev, saved_req.desc, saved_req.context, + status); + /* channel may resume processing in single desc error case */ + if (error && !reset_ch && status == error) + return; + spin_lock_irqsave(&priv->chan[ch].tail_lock, flags); + tail = priv->chan[ch].tail; + } + + spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags); +} + +/* + * process completed requests for channels that have done status + */ +#define DEF_TALITOS_DONE(name, ch_done_mask) \ +static void talitos_done_##name(unsigned long data) \ +{ \ + struct device *dev = (struct device *)data; \ + struct talitos_private *priv = dev_get_drvdata(dev); \ + unsigned long flags; \ + \ + if (ch_done_mask & 1) \ + flush_channel(dev, 0, 0, 0); \ + if (priv->num_channels == 1) \ + goto out; \ + if (ch_done_mask & (1 << 2)) \ + flush_channel(dev, 1, 0, 0); \ + if (ch_done_mask & (1 << 4)) \ + flush_channel(dev, 2, 0, 0); \ + if (ch_done_mask & (1 << 6)) \ + flush_channel(dev, 3, 0, 0); \ + \ +out: \ + /* At this point, all completed channels have been processed */ \ + /* Unmask 
done interrupts for channels completed later on. */ \ + spin_lock_irqsave(&priv->reg_lock, flags); \ + setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ + setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \ + spin_unlock_irqrestore(&priv->reg_lock, flags); \ +} +DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE) +DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE) +DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE) + +/* + * locate current (offending) descriptor + */ +static u32 current_desc_hdr(struct device *dev, int ch) +{ + struct talitos_private *priv = dev_get_drvdata(dev); + int tail, iter; + dma_addr_t cur_desc; + + cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32; + cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO); + + if (!cur_desc) { + dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n"); + return 0; + } + + tail = priv->chan[ch].tail; + + iter = tail; + while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) { + iter = (iter + 1) & (priv->fifo_len - 1); + if (iter == tail) { + dev_err(dev, "couldn't locate current descriptor\n"); + return 0; + } + } + + return priv->chan[ch].fifo[iter].desc->hdr; +} + +/* + * user diagnostics; report root cause of error based on execution unit status + */ +static void report_eu_error(struct device *dev, int ch, u32 desc_hdr) +{ + struct talitos_private *priv = dev_get_drvdata(dev); + int i; + + if (!desc_hdr) + desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF); + + switch (desc_hdr & DESC_HDR_SEL0_MASK) { + case DESC_HDR_SEL0_AFEU: + dev_err(dev, "AFEUISR 0x%08x_%08x\n", + in_be32(priv->reg + TALITOS_AFEUISR), + in_be32(priv->reg + TALITOS_AFEUISR_LO)); + break; + case DESC_HDR_SEL0_DEU: + dev_err(dev, "DEUISR 0x%08x_%08x\n", + in_be32(priv->reg + TALITOS_DEUISR), + in_be32(priv->reg + TALITOS_DEUISR_LO)); + break; + case DESC_HDR_SEL0_MDEUA: + case DESC_HDR_SEL0_MDEUB: + dev_err(dev, "MDEUISR 0x%08x_%08x\n", + in_be32(priv->reg + TALITOS_MDEUISR), + in_be32(priv->reg + TALITOS_MDEUISR_LO)); + break; + case DESC_HDR_SEL0_RNG: + dev_err(dev, "RNGUISR 0x%08x_%08x\n", + in_be32(priv->reg + TALITOS_RNGUISR), + in_be32(priv->reg + TALITOS_RNGUISR_LO)); + break; + case DESC_HDR_SEL0_PKEU: + dev_err(dev, "PKEUISR 0x%08x_%08x\n", + in_be32(priv->reg + TALITOS_PKEUISR), + in_be32(priv->reg + TALITOS_PKEUISR_LO)); + break; + case DESC_HDR_SEL0_AESU: + dev_err(dev, "AESUISR 0x%08x_%08x\n", + in_be32(priv->reg + TALITOS_AESUISR), + in_be32(priv->reg + TALITOS_AESUISR_LO)); + break; + case DESC_HDR_SEL0_CRCU: + dev_err(dev, "CRCUISR 0x%08x_%08x\n", + in_be32(priv->reg + TALITOS_CRCUISR), + in_be32(priv->reg + TALITOS_CRCUISR_LO)); + break; + case DESC_HDR_SEL0_KEU: + dev_err(dev, "KEUISR 0x%08x_%08x\n", + in_be32(priv->reg + TALITOS_KEUISR), + in_be32(priv->reg + TALITOS_KEUISR_LO)); + break; + } + + switch (desc_hdr & DESC_HDR_SEL1_MASK) { + case DESC_HDR_SEL1_MDEUA: + case DESC_HDR_SEL1_MDEUB: + dev_err(dev, "MDEUISR 0x%08x_%08x\n", + in_be32(priv->reg + TALITOS_MDEUISR), + in_be32(priv->reg + TALITOS_MDEUISR_LO)); + break; + case DESC_HDR_SEL1_CRCU: + dev_err(dev, "CRCUISR 0x%08x_%08x\n", + in_be32(priv->reg + TALITOS_CRCUISR), + in_be32(priv->reg + TALITOS_CRCUISR_LO)); + break; + } + + for (i = 0; i < 8; i++) + dev_err(dev, "DESCBUF 0x%08x_%08x\n", + in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i), + in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i)); +} + +/* + * recover from error interrupts + */ +static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) +{ + struct 
talitos_private *priv = dev_get_drvdata(dev); + unsigned int timeout = TALITOS_TIMEOUT; + int ch, error, reset_dev = 0, reset_ch = 0; + u32 v, v_lo; + + for (ch = 0; ch < priv->num_channels; ch++) { + /* skip channels without errors */ + if (!(isr & (1 << (ch * 2 + 1)))) + continue; + + error = -EINVAL; + + v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR); + v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO); + + if (v_lo & TALITOS_CCPSR_LO_DOF) { + dev_err(dev, "double fetch fifo overflow error\n"); + error = -EAGAIN; + reset_ch = 1; + } + if (v_lo & TALITOS_CCPSR_LO_SOF) { + /* h/w dropped descriptor */ + dev_err(dev, "single fetch fifo overflow error\n"); + error = -EAGAIN; + } + if (v_lo & TALITOS_CCPSR_LO_MDTE) + dev_err(dev, "master data transfer error\n"); + if (v_lo & TALITOS_CCPSR_LO_SGDLZ) + dev_err(dev, "s/g data length zero error\n"); + if (v_lo & TALITOS_CCPSR_LO_FPZ) + dev_err(dev, "fetch pointer zero error\n"); + if (v_lo & TALITOS_CCPSR_LO_IDH) + dev_err(dev, "illegal descriptor header error\n"); + if (v_lo & TALITOS_CCPSR_LO_IEU) + dev_err(dev, "invalid execution unit error\n"); + if (v_lo & TALITOS_CCPSR_LO_EU) + report_eu_error(dev, ch, current_desc_hdr(dev, ch)); + if (v_lo & TALITOS_CCPSR_LO_GB) + dev_err(dev, "gather boundary error\n"); + if (v_lo & TALITOS_CCPSR_LO_GRL) + dev_err(dev, "gather return/length error\n"); + if (v_lo & TALITOS_CCPSR_LO_SB) + dev_err(dev, "scatter boundary error\n"); + if (v_lo & TALITOS_CCPSR_LO_SRL) + dev_err(dev, "scatter return/length error\n"); + + flush_channel(dev, ch, error, reset_ch); + + if (reset_ch) { + reset_channel(dev, ch); + } else { + setbits32(priv->chan[ch].reg + TALITOS_CCCR, + TALITOS_CCCR_CONT); + setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0); + while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & + TALITOS_CCCR_CONT) && --timeout) + cpu_relax(); + if (timeout == 0) { + dev_err(dev, "failed to restart channel %d\n", + ch); + reset_dev = 1; + } + } + } + if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) { + dev_err(dev, "done overflow, internal time out, or rngu error: " + "ISR 0x%08x_%08x\n", isr, isr_lo); + + /* purge request queues */ + for (ch = 0; ch < priv->num_channels; ch++) + flush_channel(dev, ch, -EIO, 1); + + /* reset and reinitialize the device */ + init_device(dev); + } +} + +#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \ +static irqreturn_t talitos_interrupt_##name(int irq, void *data) \ +{ \ + struct device *dev = data; \ + struct talitos_private *priv = dev_get_drvdata(dev); \ + u32 isr, isr_lo; \ + unsigned long flags; \ + \ + spin_lock_irqsave(&priv->reg_lock, flags); \ + isr = in_be32(priv->reg + TALITOS_ISR); \ + isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \ + /* Acknowledge interrupt */ \ + out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \ + out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \ + \ + if (unlikely(isr & ch_err_mask || isr_lo)) { \ + spin_unlock_irqrestore(&priv->reg_lock, flags); \ + talitos_error(dev, isr & ch_err_mask, isr_lo); \ + } \ + else { \ + if (likely(isr & ch_done_mask)) { \ + /* mask further done interrupts. */ \ + clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ + /* done_task will unmask done interrupts at exit */ \ + tasklet_schedule(&priv->done_task[tlet]); \ + } \ + spin_unlock_irqrestore(&priv->reg_lock, flags); \ + } \ + \ + return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? 
IRQ_HANDLED : \ + IRQ_NONE; \ +} +DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0) +DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0) +DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1) + +/* + * hwrng + */ +static int talitos_rng_data_present(struct hwrng *rng, int wait) +{ + struct device *dev = (struct device *)rng->priv; + struct talitos_private *priv = dev_get_drvdata(dev); + u32 ofl; + int i; + + for (i = 0; i < 20; i++) { + ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) & + TALITOS_RNGUSR_LO_OFL; + if (ofl || !wait) + break; + udelay(10); + } + + return !!ofl; +} + +static int talitos_rng_data_read(struct hwrng *rng, u32 *data) +{ + struct device *dev = (struct device *)rng->priv; + struct talitos_private *priv = dev_get_drvdata(dev); + + /* rng fifo requires 64-bit accesses */ + *data = in_be32(priv->reg + TALITOS_RNGU_FIFO); + *data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO); + + return sizeof(u32); +} + +static int talitos_rng_init(struct hwrng *rng) +{ + struct device *dev = (struct device *)rng->priv; + struct talitos_private *priv = dev_get_drvdata(dev); + unsigned int timeout = TALITOS_TIMEOUT; + + setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR); + while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD) + && --timeout) + cpu_relax(); + if (timeout == 0) { + dev_err(dev, "failed to reset rng hw\n"); + return -ENODEV; + } + + /* start generating */ + setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0); + + return 0; +} + +static int talitos_register_rng(struct device *dev) +{ + struct talitos_private *priv = dev_get_drvdata(dev); + + priv->rng.name = dev_driver_string(dev), + priv->rng.init = talitos_rng_init, + priv->rng.data_present = talitos_rng_data_present, + priv->rng.data_read = talitos_rng_data_read, + priv->rng.priv = (unsigned long)dev; + + return hwrng_register(&priv->rng); +} + +static void talitos_unregister_rng(struct device *dev) +{ + struct talitos_private *priv = dev_get_drvdata(dev); + + hwrng_unregister(&priv->rng); +} + +/* + * crypto alg + */ +#define TALITOS_CRA_PRIORITY 3000 +#define TALITOS_MAX_KEY_SIZE 96 +#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ + +struct talitos_ctx { + struct device *dev; + int ch; + __be32 desc_hdr_template; + u8 key[TALITOS_MAX_KEY_SIZE]; + u8 iv[TALITOS_MAX_IV_LENGTH]; + unsigned int keylen; + unsigned int enckeylen; + unsigned int authkeylen; + unsigned int authsize; +}; + +#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE +#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 + +struct talitos_ahash_req_ctx { + u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)]; + unsigned int hw_context_size; + u8 buf[HASH_MAX_BLOCK_SIZE]; + u8 bufnext[HASH_MAX_BLOCK_SIZE]; + unsigned int swinit; + unsigned int first; + unsigned int last; + unsigned int to_hash_later; + u64 nbuf; + struct scatterlist bufsl[2]; + struct scatterlist *psrc; +}; + +static int aead_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct talitos_ctx *ctx = crypto_aead_ctx(authenc); + + ctx->authsize = authsize; + + return 0; +} + +static int aead_setkey(struct crypto_aead *authenc, + const u8 *key, unsigned int keylen) +{ + struct talitos_ctx *ctx = crypto_aead_ctx(authenc); + struct crypto_authenc_keys keys; + + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) + goto badkey; + + if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE) + goto badkey; + + 
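/* the authentication key and the encryption key are stored
+ * back-to-back in ctx->key; the saved authkeylen/enckeylen are used
+ * later to point the descriptor at each part */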
memcpy(ctx->key, keys.authkey, keys.authkeylen); + memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen); + + ctx->keylen = keys.authkeylen + keys.enckeylen; + ctx->enckeylen = keys.enckeylen; + ctx->authkeylen = keys.authkeylen; + + return 0; + +badkey: + crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; +} + +/* + * talitos_edesc - s/w-extended descriptor + * @assoc_nents: number of segments in associated data scatterlist + * @src_nents: number of segments in input scatterlist + * @dst_nents: number of segments in output scatterlist + * @assoc_chained: whether assoc is chained or not + * @src_chained: whether src is chained or not + * @dst_chained: whether dst is chained or not + * @iv_dma: dma address of iv for checking continuity and link table + * @dma_len: length of dma mapped link_tbl space + * @dma_link_tbl: bus physical address of link_tbl + * @desc: h/w descriptor + * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) + * + * if decrypting (with authcheck), or either one of src_nents or dst_nents + * is greater than 1, an integrity check value is concatenated to the end + * of link_tbl data + */ +struct talitos_edesc { + int assoc_nents; + int src_nents; + int dst_nents; + bool assoc_chained; + bool src_chained; + bool dst_chained; + dma_addr_t iv_dma; + int dma_len; + dma_addr_t dma_link_tbl; + struct talitos_desc desc; + struct talitos_ptr link_tbl[0]; +}; + +static int talitos_map_sg(struct device *dev, struct scatterlist *sg, + unsigned int nents, enum dma_data_direction dir, + bool chained) +{ + if (unlikely(chained)) + while (sg) { + dma_map_sg(dev, sg, 1, dir); + sg = sg_next(sg); + } + else + dma_map_sg(dev, sg, nents, dir); + return nents; +} + +static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg, + enum dma_data_direction dir) +{ + while (sg) { + dma_unmap_sg(dev, sg, 1, dir); + sg = sg_next(sg); + } +} + +static void talitos_sg_unmap(struct device *dev, + struct talitos_edesc *edesc, + struct scatterlist *src, + struct scatterlist *dst) +{ + unsigned int src_nents = edesc->src_nents ? : 1; + unsigned int dst_nents = edesc->dst_nents ? : 1; + + if (src != dst) { + if (edesc->src_chained) + talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE); + else + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); + + if (dst) { + if (edesc->dst_chained) + talitos_unmap_sg_chain(dev, dst, + DMA_FROM_DEVICE); + else + dma_unmap_sg(dev, dst, dst_nents, + DMA_FROM_DEVICE); + } + } else + if (edesc->src_chained) + talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL); + else + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); +} + +static void ipsec_esp_unmap(struct device *dev, + struct talitos_edesc *edesc, + struct aead_request *areq) +{ + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE); + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE); + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE); + + if (edesc->assoc_chained) + talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE); + else if (areq->assoclen) + /* assoc_nents counts also for IV in non-contiguous cases */ + dma_unmap_sg(dev, areq->assoc, + edesc->assoc_nents ? 
edesc->assoc_nents - 1 : 1, + DMA_TO_DEVICE); + + talitos_sg_unmap(dev, edesc, areq->src, areq->dst); + + if (edesc->dma_len) + dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, + DMA_BIDIRECTIONAL); +} + +/* + * ipsec_esp descriptor callbacks + */ +static void ipsec_esp_encrypt_done(struct device *dev, + struct talitos_desc *desc, void *context, + int err) +{ + struct aead_request *areq = context; + struct crypto_aead *authenc = crypto_aead_reqtfm(areq); + struct talitos_ctx *ctx = crypto_aead_ctx(authenc); + struct talitos_edesc *edesc; + struct scatterlist *sg; + void *icvdata; + + edesc = container_of(desc, struct talitos_edesc, desc); + + ipsec_esp_unmap(dev, edesc, areq); + + /* copy the generated ICV to dst */ + if (edesc->dst_nents) { + icvdata = &edesc->link_tbl[edesc->src_nents + + edesc->dst_nents + 2 + + edesc->assoc_nents]; + sg = sg_last(areq->dst, edesc->dst_nents); + memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize, + icvdata, ctx->authsize); + } + + kfree(edesc); + + aead_request_complete(areq, err); +} + +static void ipsec_esp_decrypt_swauth_done(struct device *dev, + struct talitos_desc *desc, + void *context, int err) +{ + struct aead_request *req = context; + struct crypto_aead *authenc = crypto_aead_reqtfm(req); + struct talitos_ctx *ctx = crypto_aead_ctx(authenc); + struct talitos_edesc *edesc; + struct scatterlist *sg; + void *icvdata; + + edesc = container_of(desc, struct talitos_edesc, desc); + + ipsec_esp_unmap(dev, edesc, req); + + if (!err) { + /* auth check */ + if (edesc->dma_len) + icvdata = &edesc->link_tbl[edesc->src_nents + + edesc->dst_nents + 2 + + edesc->assoc_nents]; + else + icvdata = &edesc->link_tbl[0]; + + sg = sg_last(req->dst, edesc->dst_nents ? : 1); + err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length - + ctx->authsize, ctx->authsize) ? 
-EBADMSG : 0; + } + + kfree(edesc); + + aead_request_complete(req, err); +} + +static void ipsec_esp_decrypt_hwauth_done(struct device *dev, + struct talitos_desc *desc, + void *context, int err) +{ + struct aead_request *req = context; + struct talitos_edesc *edesc; + + edesc = container_of(desc, struct talitos_edesc, desc); + + ipsec_esp_unmap(dev, edesc, req); + + /* check ICV auth status */ + if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != + DESC_HDR_LO_ICCR1_PASS)) + err = -EBADMSG; + + kfree(edesc); + + aead_request_complete(req, err); +} + +/* + * convert scatterlist to SEC h/w link table format + * stop at cryptlen bytes + */ +static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, + int cryptlen, struct talitos_ptr *link_tbl_ptr) +{ + int n_sg = sg_count; + + while (n_sg--) { + to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg)); + link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg)); + link_tbl_ptr->j_extent = 0; + link_tbl_ptr++; + cryptlen -= sg_dma_len(sg); + sg = sg_next(sg); + } + + /* adjust (decrease) last one (or two) entry's len to cryptlen */ + link_tbl_ptr--; + while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) { + /* Empty this entry, and move to previous one */ + cryptlen += be16_to_cpu(link_tbl_ptr->len); + link_tbl_ptr->len = 0; + sg_count--; + link_tbl_ptr--; + } + link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len) + + cryptlen); + + /* tag end of link table */ + link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; + + return sg_count; +} + +/* + * fill in and submit ipsec_esp descriptor + */ +static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, + u64 seq, void (*callback) (struct device *dev, + struct talitos_desc *desc, + void *context, int error)) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(areq); + struct talitos_ctx *ctx = crypto_aead_ctx(aead); + struct device *dev = ctx->dev; + struct talitos_desc *desc = &edesc->desc; + unsigned int cryptlen = areq->cryptlen; + unsigned int authsize = ctx->authsize; + unsigned int ivsize = crypto_aead_ivsize(aead); + int sg_count, ret; + int sg_link_tbl_len; + + /* hmac key */ + map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, + 0, DMA_TO_DEVICE); + + /* hmac data */ + desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize); + if (edesc->assoc_nents) { + int tbl_off = edesc->src_nents + edesc->dst_nents + 2; + struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; + + to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * + sizeof(struct talitos_ptr)); + desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP; + + /* assoc_nents - 1 entries for assoc, 1 for IV */ + sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1, + areq->assoclen, tbl_ptr); + + /* add IV to link table */ + tbl_ptr += sg_count - 1; + tbl_ptr->j_extent = 0; + tbl_ptr++; + to_talitos_ptr(tbl_ptr, edesc->iv_dma); + tbl_ptr->len = cpu_to_be16(ivsize); + tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; + + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + edesc->dma_len, DMA_BIDIRECTIONAL); + } else { + if (areq->assoclen) + to_talitos_ptr(&desc->ptr[1], + sg_dma_address(areq->assoc)); + else + to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); + desc->ptr[1].j_extent = 0; + } + + /* cipher iv */ + to_talitos_ptr(&desc->ptr[2], edesc->iv_dma); + desc->ptr[2].len = cpu_to_be16(ivsize); + desc->ptr[2].j_extent = 0; + /* Sync needed for the aead_givencrypt case */ + dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE); + + /* cipher key */ + map_single_talitos_ptr(dev, 
&desc->ptr[3], ctx->enckeylen, + (char *)&ctx->key + ctx->authkeylen, 0, + DMA_TO_DEVICE); + + /* + * cipher in + * map and adjust cipher len to aead request cryptlen. + * extent is bytes of HMAC postpended to ciphertext, + * typically 12 for ipsec + */ + desc->ptr[4].len = cpu_to_be16(cryptlen); + desc->ptr[4].j_extent = authsize; + + sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL + : DMA_TO_DEVICE, + edesc->src_chained); + + if (sg_count == 1) { + to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src)); + } else { + sg_link_tbl_len = cryptlen; + + if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) + sg_link_tbl_len = cryptlen + authsize; + + sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len, + &edesc->link_tbl[0]); + if (sg_count > 1) { + desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; + to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl); + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + edesc->dma_len, + DMA_BIDIRECTIONAL); + } else { + /* Only one segment now, so no link tbl needed */ + to_talitos_ptr(&desc->ptr[4], + sg_dma_address(areq->src)); + } + } + + /* cipher out */ + desc->ptr[5].len = cpu_to_be16(cryptlen); + desc->ptr[5].j_extent = authsize; + + if (areq->src != areq->dst) + sg_count = talitos_map_sg(dev, areq->dst, + edesc->dst_nents ? : 1, + DMA_FROM_DEVICE, edesc->dst_chained); + + if (sg_count == 1) { + to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst)); + } else { + int tbl_off = edesc->src_nents + 1; + struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; + + to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + + tbl_off * sizeof(struct talitos_ptr)); + sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, + tbl_ptr); + + /* Add an entry to the link table for ICV data */ + tbl_ptr += sg_count - 1; + tbl_ptr->j_extent = 0; + tbl_ptr++; + tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; + tbl_ptr->len = cpu_to_be16(authsize); + + /* icv data follows link tables */ + to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + + (tbl_off + edesc->dst_nents + 1 + + edesc->assoc_nents) * + sizeof(struct talitos_ptr)); + desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; + dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, + edesc->dma_len, DMA_BIDIRECTIONAL); + } + + /* iv out */ + map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0, + DMA_FROM_DEVICE); + + ret = talitos_submit(dev, ctx->ch, desc, callback, areq); + if (ret != -EINPROGRESS) { + ipsec_esp_unmap(dev, edesc, areq); + kfree(edesc); + } + return ret; +} + +/* + * derive number of elements in scatterlist + */ +static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained) +{ + struct scatterlist *sg = sg_list; + int sg_nents = 0; + + *chained = false; + while (nbytes > 0) { + sg_nents++; + nbytes -= sg->length; + if (!sg_is_last(sg) && (sg + 1)->length == 0) + *chained = true; + sg = sg_next(sg); + } + + return sg_nents; +} + +/* + * allocate and map the extended descriptor + */ +static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, + struct scatterlist *assoc, + struct scatterlist *src, + struct scatterlist *dst, + u8 *iv, + unsigned int assoclen, + unsigned int cryptlen, + unsigned int authsize, + unsigned int ivsize, + int icv_stashing, + u32 cryptoflags, + bool encrypt) +{ + struct talitos_edesc *edesc; + int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len; + bool assoc_chained = false, src_chained = false, dst_chained = false; + dma_addr_t iv_dma = 0; + gfp_t flags = cryptoflags & 
CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : + GFP_ATOMIC; + + if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) { + dev_err(dev, "length exceeds h/w max limit\n"); + return ERR_PTR(-EINVAL); + } + + if (ivsize) + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); + + if (assoclen) { + /* + * Currently it is assumed that iv is provided whenever assoc + * is. + */ + BUG_ON(!iv); + + assoc_nents = sg_count(assoc, assoclen, &assoc_chained); + talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE, + assoc_chained); + assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents; + + if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma) + assoc_nents = assoc_nents ? assoc_nents + 1 : 2; + } + + if (!dst || dst == src) { + src_nents = sg_count(src, cryptlen + authsize, &src_chained); + src_nents = (src_nents == 1) ? 0 : src_nents; + dst_nents = dst ? src_nents : 0; + } else { /* dst && dst != src*/ + src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize), + &src_chained); + src_nents = (src_nents == 1) ? 0 : src_nents; + dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0), + &dst_chained); + dst_nents = (dst_nents == 1) ? 0 : dst_nents; + } + + /* + * allocate space for base edesc plus the link tables, + * allowing for two separate entries for ICV and generated ICV (+ 2), + * and the ICV data itself + */ + alloc_len = sizeof(struct talitos_edesc); + if (assoc_nents || src_nents || dst_nents) { + dma_len = (src_nents + dst_nents + 2 + assoc_nents) * + sizeof(struct talitos_ptr) + authsize; + alloc_len += dma_len; + } else { + dma_len = 0; + alloc_len += icv_stashing ? authsize : 0; + } + + edesc = kmalloc(alloc_len, GFP_DMA | flags); + if (!edesc) { + if (assoc_chained) + talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE); + else if (assoclen) + dma_unmap_sg(dev, assoc, + assoc_nents ? 
assoc_nents - 1 : 1, + DMA_TO_DEVICE); + + if (iv_dma) + dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); + + dev_err(dev, "could not allocate edescriptor\n"); + return ERR_PTR(-ENOMEM); + } + + edesc->assoc_nents = assoc_nents; + edesc->src_nents = src_nents; + edesc->dst_nents = dst_nents; + edesc->assoc_chained = assoc_chained; + edesc->src_chained = src_chained; + edesc->dst_chained = dst_chained; + edesc->iv_dma = iv_dma; + edesc->dma_len = dma_len; + if (dma_len) + edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], + edesc->dma_len, + DMA_BIDIRECTIONAL); + + return edesc; +} + +static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, + int icv_stashing, bool encrypt) +{ + struct crypto_aead *authenc = crypto_aead_reqtfm(areq); + struct talitos_ctx *ctx = crypto_aead_ctx(authenc); + unsigned int ivsize = crypto_aead_ivsize(authenc); + + return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst, + iv, areq->assoclen, areq->cryptlen, + ctx->authsize, ivsize, icv_stashing, + areq->base.flags, encrypt); +} + +static int aead_encrypt(struct aead_request *req) +{ + struct crypto_aead *authenc = crypto_aead_reqtfm(req); + struct talitos_ctx *ctx = crypto_aead_ctx(authenc); + struct talitos_edesc *edesc; + + /* allocate extended descriptor */ + edesc = aead_edesc_alloc(req, req->iv, 0, true); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* set encrypt */ + edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; + + return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done); +} + +static int aead_decrypt(struct aead_request *req) +{ + struct crypto_aead *authenc = crypto_aead_reqtfm(req); + struct talitos_ctx *ctx = crypto_aead_ctx(authenc); + unsigned int authsize = ctx->authsize; + struct talitos_private *priv = dev_get_drvdata(ctx->dev); + struct talitos_edesc *edesc; + struct scatterlist *sg; + void *icvdata; + + req->cryptlen -= authsize; + + /* allocate extended descriptor */ + edesc = aead_edesc_alloc(req, req->iv, 1, false); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) && + ((!edesc->src_nents && !edesc->dst_nents) || + priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) { + + /* decrypt and check the ICV */ + edesc->desc.hdr = ctx->desc_hdr_template | + DESC_HDR_DIR_INBOUND | + DESC_HDR_MODE1_MDEU_CICV; + + /* reset integrity check result bits */ + edesc->desc.hdr_lo = 0; + + return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done); + } + + /* Have to check the ICV with software */ + edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; + + /* stash incoming ICV for later cmp with ICV generated by the h/w */ + if (edesc->dma_len) + icvdata = &edesc->link_tbl[edesc->src_nents + + edesc->dst_nents + 2 + + edesc->assoc_nents]; + else + icvdata = &edesc->link_tbl[0]; + + sg = sg_last(req->src, edesc->src_nents ? 
: 1); + + memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize, + ctx->authsize); + + return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done); +} + +static int aead_givencrypt(struct aead_givcrypt_request *req) +{ + struct aead_request *areq = &req->areq; + struct crypto_aead *authenc = crypto_aead_reqtfm(areq); + struct talitos_ctx *ctx = crypto_aead_ctx(authenc); + struct talitos_edesc *edesc; + + /* allocate extended descriptor */ + edesc = aead_edesc_alloc(areq, req->giv, 0, true); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* set encrypt */ + edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; + + memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc)); + /* avoid consecutive packets going out with same IV */ + *(__be64 *)req->giv ^= cpu_to_be64(req->seq); + + return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done); +} + +static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + + memcpy(&ctx->key, key, keylen); + ctx->keylen = keylen; + + return 0; +} + +static void common_nonsnoop_unmap(struct device *dev, + struct talitos_edesc *edesc, + struct ablkcipher_request *areq) +{ + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); + + talitos_sg_unmap(dev, edesc, areq->src, areq->dst); + + if (edesc->dma_len) + dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, + DMA_BIDIRECTIONAL); +} + +static void ablkcipher_done(struct device *dev, + struct talitos_desc *desc, void *context, + int err) +{ + struct ablkcipher_request *areq = context; + struct talitos_edesc *edesc; + + edesc = container_of(desc, struct talitos_edesc, desc); + + common_nonsnoop_unmap(dev, edesc, areq); + + kfree(edesc); + + areq->base.complete(&areq->base, err); +} + +static int common_nonsnoop(struct talitos_edesc *edesc, + struct ablkcipher_request *areq, + void (*callback) (struct device *dev, + struct talitos_desc *desc, + void *context, int error)) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + struct device *dev = ctx->dev; + struct talitos_desc *desc = &edesc->desc; + unsigned int cryptlen = areq->nbytes; + unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); + int sg_count, ret; + + /* first DWORD empty */ + desc->ptr[0].len = 0; + to_talitos_ptr(&desc->ptr[0], 0); + desc->ptr[0].j_extent = 0; + + /* cipher iv */ + to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); + desc->ptr[1].len = cpu_to_be16(ivsize); + desc->ptr[1].j_extent = 0; + + /* cipher key */ + map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, + (char *)&ctx->key, 0, DMA_TO_DEVICE); + + /* + * cipher in + */ + desc->ptr[3].len = cpu_to_be16(cryptlen); + desc->ptr[3].j_extent = 0; + + sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, + (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL + : DMA_TO_DEVICE, + edesc->src_chained); + + if (sg_count == 1) { + to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src)); + } else { + sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, + &edesc->link_tbl[0]); + if (sg_count > 1) { + to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl); + desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + edesc->dma_len, + DMA_BIDIRECTIONAL); + } else { + /* Only one segment now, so no link tbl needed */ + to_talitos_ptr(&desc->ptr[3], + sg_dma_address(areq->src)); + } + } + + /* cipher out */ + desc->ptr[4].len = cpu_to_be16(cryptlen); + desc->ptr[4].j_extent = 0; + + if (areq->src != areq->dst) + sg_count = talitos_map_sg(dev, areq->dst, + edesc->dst_nents ? : 1, + DMA_FROM_DEVICE, edesc->dst_chained); + + if (sg_count == 1) { + to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst)); + } else { + struct talitos_ptr *link_tbl_ptr = + &edesc->link_tbl[edesc->src_nents + 1]; + + to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + + (edesc->src_nents + 1) * + sizeof(struct talitos_ptr)); + desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; + sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, + link_tbl_ptr); + dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, + edesc->dma_len, DMA_BIDIRECTIONAL); + } + + /* iv out */ + map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0, + DMA_FROM_DEVICE); + + /* last DWORD empty */ + desc->ptr[6].len = 0; + to_talitos_ptr(&desc->ptr[6], 0); + desc->ptr[6].j_extent = 0; + + ret = talitos_submit(dev, ctx->ch, desc, callback, areq); + if (ret != -EINPROGRESS) { + common_nonsnoop_unmap(dev, edesc, areq); + kfree(edesc); + } + return ret; +} + +static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request * + areq, bool encrypt) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); + + return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst, + areq->info, 0, areq->nbytes, 0, ivsize, 0, + areq->base.flags, encrypt); +} + +static int ablkcipher_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + struct talitos_edesc *edesc; + + /* allocate extended descriptor */ + edesc = ablkcipher_edesc_alloc(areq, true); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* set encrypt */ + edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; + + return common_nonsnoop(edesc, areq, ablkcipher_done); +} + +static int ablkcipher_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + struct talitos_edesc *edesc; + + /* allocate extended descriptor */ + edesc = ablkcipher_edesc_alloc(areq, false); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; + + return common_nonsnoop(edesc, areq, ablkcipher_done); +} + +static void common_nonsnoop_hash_unmap(struct device *dev, + struct talitos_edesc *edesc, + struct ahash_request *areq) +{ + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); + + /* When using hashctx-in, must unmap it. 
*/ + if (edesc->desc.ptr[1].len) + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], + DMA_TO_DEVICE); + + if (edesc->desc.ptr[2].len) + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], + DMA_TO_DEVICE); + + talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL); + + if (edesc->dma_len) + dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, + DMA_BIDIRECTIONAL); + +} + +static void ahash_done(struct device *dev, + struct talitos_desc *desc, void *context, + int err) +{ + struct ahash_request *areq = context; + struct talitos_edesc *edesc = + container_of(desc, struct talitos_edesc, desc); + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + + if (!req_ctx->last && req_ctx->to_hash_later) { + /* Position any partial block for next update/final/finup */ + memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later); + req_ctx->nbuf = req_ctx->to_hash_later; + } + common_nonsnoop_hash_unmap(dev, edesc, areq); + + kfree(edesc); + + areq->base.complete(&areq->base, err); +} + +static int common_nonsnoop_hash(struct talitos_edesc *edesc, + struct ahash_request *areq, unsigned int length, + void (*callback) (struct device *dev, + struct talitos_desc *desc, + void *context, int error)) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct device *dev = ctx->dev; + struct talitos_desc *desc = &edesc->desc; + int sg_count, ret; + + /* first DWORD empty */ + desc->ptr[0] = zero_entry; + + /* hash context in */ + if (!req_ctx->first || req_ctx->swinit) { + map_single_talitos_ptr(dev, &desc->ptr[1], + req_ctx->hw_context_size, + (char *)req_ctx->hw_context, 0, + DMA_TO_DEVICE); + req_ctx->swinit = 0; + } else { + desc->ptr[1] = zero_entry; + /* Indicate next op is not the first. */ + req_ctx->first = 0; + } + + /* HMAC key */ + if (ctx->keylen) + map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, + (char *)&ctx->key, 0, DMA_TO_DEVICE); + else + desc->ptr[2] = zero_entry; + + /* + * data in + */ + desc->ptr[3].len = cpu_to_be16(length); + desc->ptr[3].j_extent = 0; + + sg_count = talitos_map_sg(dev, req_ctx->psrc, + edesc->src_nents ? 
: 1, + DMA_TO_DEVICE, edesc->src_chained); + + if (sg_count == 1) { + to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc)); + } else { + sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length, + &edesc->link_tbl[0]); + if (sg_count > 1) { + desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; + to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl); + dma_sync_single_for_device(ctx->dev, + edesc->dma_link_tbl, + edesc->dma_len, + DMA_BIDIRECTIONAL); + } else { + /* Only one segment now, so no link tbl needed */ + to_talitos_ptr(&desc->ptr[3], + sg_dma_address(req_ctx->psrc)); + } + } + + /* fifth DWORD empty */ + desc->ptr[4] = zero_entry; + + /* hash/HMAC out -or- hash context out */ + if (req_ctx->last) + map_single_talitos_ptr(dev, &desc->ptr[5], + crypto_ahash_digestsize(tfm), + areq->result, 0, DMA_FROM_DEVICE); + else + map_single_talitos_ptr(dev, &desc->ptr[5], + req_ctx->hw_context_size, + req_ctx->hw_context, 0, DMA_FROM_DEVICE); + + /* last DWORD empty */ + desc->ptr[6] = zero_entry; + + ret = talitos_submit(dev, ctx->ch, desc, callback, areq); + if (ret != -EINPROGRESS) { + common_nonsnoop_hash_unmap(dev, edesc, areq); + kfree(edesc); + } + return ret; +} + +static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq, + unsigned int nbytes) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + + return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0, + nbytes, 0, 0, 0, areq->base.flags, false); +} + +static int ahash_init(struct ahash_request *areq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + + /* Initialize the context */ + req_ctx->nbuf = 0; + req_ctx->first = 1; /* first indicates h/w must init its context */ + req_ctx->swinit = 0; /* assume h/w init of context */ + req_ctx->hw_context_size = + (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) + ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 + : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; + + return 0; +} + +/* + * on h/w without explicit sha224 support, we initialize h/w context + * manually with sha224 constants, and tell it to run sha256. 
+ */ +static int ahash_init_sha224_swinit(struct ahash_request *areq) +{ + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + + ahash_init(areq); + req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/ + + req_ctx->hw_context[0] = SHA224_H0; + req_ctx->hw_context[1] = SHA224_H1; + req_ctx->hw_context[2] = SHA224_H2; + req_ctx->hw_context[3] = SHA224_H3; + req_ctx->hw_context[4] = SHA224_H4; + req_ctx->hw_context[5] = SHA224_H5; + req_ctx->hw_context[6] = SHA224_H6; + req_ctx->hw_context[7] = SHA224_H7; + + /* init 64-bit count */ + req_ctx->hw_context[8] = 0; + req_ctx->hw_context[9] = 0; + + return 0; +} + +static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct talitos_edesc *edesc; + unsigned int blocksize = + crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); + unsigned int nbytes_to_hash; + unsigned int to_hash_later; + unsigned int nsg; + bool chained; + + if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { + /* Buffer up to one whole block */ + sg_copy_to_buffer(areq->src, + sg_count(areq->src, nbytes, &chained), + req_ctx->buf + req_ctx->nbuf, nbytes); + req_ctx->nbuf += nbytes; + return 0; + } + + /* At least (blocksize + 1) bytes are available to hash */ + nbytes_to_hash = nbytes + req_ctx->nbuf; + to_hash_later = nbytes_to_hash & (blocksize - 1); + + if (req_ctx->last) + to_hash_later = 0; + else if (to_hash_later) + /* There is a partial block. Hash the full block(s) now */ + nbytes_to_hash -= to_hash_later; + else { + /* Keep one block buffered */ + nbytes_to_hash -= blocksize; + to_hash_later = blocksize; + } + + /* Chain in any previously buffered data */ + if (req_ctx->nbuf) { + nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1; + sg_init_table(req_ctx->bufsl, nsg); + sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf); + if (nsg > 1) + scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src); + req_ctx->psrc = req_ctx->bufsl; + } else + req_ctx->psrc = areq->src; + + if (to_hash_later) { + int nents = sg_count(areq->src, nbytes, &chained); + sg_pcopy_to_buffer(areq->src, nents, + req_ctx->bufnext, + to_hash_later, + nbytes - to_hash_later); + } + req_ctx->to_hash_later = to_hash_later; + + /* Allocate extended descriptor */ + edesc = ahash_edesc_alloc(areq, nbytes_to_hash); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + edesc->desc.hdr = ctx->desc_hdr_template; + + /* On last one, request SEC to pad; otherwise continue */ + if (req_ctx->last) + edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD; + else + edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT; + + /* request SEC to INIT hash. */ + if (req_ctx->first && !req_ctx->swinit) + edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT; + + /* When the tfm context has a keylen, it's an HMAC. + * A first or last (ie. not middle) descriptor must request HMAC. 
+ */ + if (ctx->keylen && (req_ctx->first || req_ctx->last)) + edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; + + return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, + ahash_done); +} + +static int ahash_update(struct ahash_request *areq) +{ + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + + req_ctx->last = 0; + + return ahash_process_req(areq, areq->nbytes); +} + +static int ahash_final(struct ahash_request *areq) +{ + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + + req_ctx->last = 1; + + return ahash_process_req(areq, 0); +} + +static int ahash_finup(struct ahash_request *areq) +{ + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + + req_ctx->last = 1; + + return ahash_process_req(areq, areq->nbytes); +} + +static int ahash_digest(struct ahash_request *areq) +{ + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); + + ahash->init(areq); + req_ctx->last = 1; + + return ahash_process_req(areq, areq->nbytes); +} + +struct keyhash_result { + struct completion completion; + int err; +}; + +static void keyhash_complete(struct crypto_async_request *req, int err) +{ + struct keyhash_result *res = req->data; + + if (err == -EINPROGRESS) + return; + + res->err = err; + complete(&res->completion); +} + +static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen, + u8 *hash) +{ + struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + + struct scatterlist sg[1]; + struct ahash_request *req; + struct keyhash_result hresult; + int ret; + + init_completion(&hresult.completion); + + req = ahash_request_alloc(tfm, GFP_KERNEL); + if (!req) + return -ENOMEM; + + /* Keep tfm keylen == 0 during hash of the long key */ + ctx->keylen = 0; + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + keyhash_complete, &hresult); + + sg_init_one(&sg[0], key, keylen); + + ahash_request_set_crypt(req, sg, hash, keylen); + ret = crypto_ahash_digest(req); + switch (ret) { + case 0: + break; + case -EINPROGRESS: + case -EBUSY: + ret = wait_for_completion_interruptible( + &hresult.completion); + if (!ret) + ret = hresult.err; + break; + default: + break; + } + ahash_request_free(req); + + return ret; +} + +static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + unsigned int blocksize = + crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); + unsigned int digestsize = crypto_ahash_digestsize(tfm); + unsigned int keysize = keylen; + u8 hash[SHA512_DIGEST_SIZE]; + int ret; + + if (keylen <= blocksize) + memcpy(ctx->key, key, keysize); + else { + /* Must get the hash of the long key */ + ret = keyhash(tfm, key, keylen, hash); + + if (ret) { + crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + keysize = digestsize; + memcpy(ctx->key, hash, digestsize); + } + + ctx->keylen = keysize; + + return 0; +} + + +struct talitos_alg_template { + u32 type; + union { + struct crypto_alg crypto; + struct ahash_alg hash; + } alg; + __be32 desc_hdr_template; +}; + +static struct talitos_alg_template driver_algs[] = { + /* AEAD algorithms. 
These use a single-pass ipsec_esp descriptor */ + { .type = CRYPTO_ALG_TYPE_AEAD, + .alg.crypto = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_aead = { + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | + DESC_HDR_SEL0_AESU | + DESC_HDR_MODE0_AESU_CBC | + DESC_HDR_SEL1_MDEUA | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEU_SHA1_HMAC, + }, + { .type = CRYPTO_ALG_TYPE_AEAD, + .alg.crypto = { + .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_aead = { + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | + DESC_HDR_SEL0_DEU | + DESC_HDR_MODE0_DEU_CBC | + DESC_HDR_MODE0_DEU_3DES | + DESC_HDR_SEL1_MDEUA | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEU_SHA1_HMAC, + }, + { .type = CRYPTO_ALG_TYPE_AEAD, + .alg.crypto = { + .cra_name = "authenc(hmac(sha224),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_aead = { + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | + DESC_HDR_SEL0_AESU | + DESC_HDR_MODE0_AESU_CBC | + DESC_HDR_SEL1_MDEUA | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEU_SHA224_HMAC, + }, + { .type = CRYPTO_ALG_TYPE_AEAD, + .alg.crypto = { + .cra_name = "authenc(hmac(sha224),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_aead = { + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | + DESC_HDR_SEL0_DEU | + DESC_HDR_MODE0_DEU_CBC | + DESC_HDR_MODE0_DEU_3DES | + DESC_HDR_SEL1_MDEUA | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEU_SHA224_HMAC, + }, + { .type = CRYPTO_ALG_TYPE_AEAD, + .alg.crypto = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_aead = { + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | + DESC_HDR_SEL0_AESU | + DESC_HDR_MODE0_AESU_CBC | + DESC_HDR_SEL1_MDEUA | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEU_SHA256_HMAC, + }, + { .type = CRYPTO_ALG_TYPE_AEAD, + .alg.crypto = { + .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_aead = { + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | + DESC_HDR_SEL0_DEU | + DESC_HDR_MODE0_DEU_CBC | + DESC_HDR_MODE0_DEU_3DES | + DESC_HDR_SEL1_MDEUA | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEU_SHA256_HMAC, + }, + { .type = 
CRYPTO_ALG_TYPE_AEAD, + .alg.crypto = { + .cra_name = "authenc(hmac(sha384),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_aead = { + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | + DESC_HDR_SEL0_AESU | + DESC_HDR_MODE0_AESU_CBC | + DESC_HDR_SEL1_MDEUB | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEUB_SHA384_HMAC, + }, + { .type = CRYPTO_ALG_TYPE_AEAD, + .alg.crypto = { + .cra_name = "authenc(hmac(sha384),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_aead = { + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | + DESC_HDR_SEL0_DEU | + DESC_HDR_MODE0_DEU_CBC | + DESC_HDR_MODE0_DEU_3DES | + DESC_HDR_SEL1_MDEUB | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEUB_SHA384_HMAC, + }, + { .type = CRYPTO_ALG_TYPE_AEAD, + .alg.crypto = { + .cra_name = "authenc(hmac(sha512),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_aead = { + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | + DESC_HDR_SEL0_AESU | + DESC_HDR_MODE0_AESU_CBC | + DESC_HDR_SEL1_MDEUB | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEUB_SHA512_HMAC, + }, + { .type = CRYPTO_ALG_TYPE_AEAD, + .alg.crypto = { + .cra_name = "authenc(hmac(sha512),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_aead = { + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | + DESC_HDR_SEL0_DEU | + DESC_HDR_MODE0_DEU_CBC | + DESC_HDR_MODE0_DEU_3DES | + DESC_HDR_SEL1_MDEUB | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEUB_SHA512_HMAC, + }, + { .type = CRYPTO_ALG_TYPE_AEAD, + .alg.crypto = { + .cra_name = "authenc(hmac(md5),cbc(aes))", + .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_aead = { + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | + DESC_HDR_SEL0_AESU | + DESC_HDR_MODE0_AESU_CBC | + DESC_HDR_SEL1_MDEUA | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEU_MD5_HMAC, + }, + { .type = CRYPTO_ALG_TYPE_AEAD, + .alg.crypto = { + .cra_name = "authenc(hmac(md5),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_aead = { + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | + DESC_HDR_SEL0_DEU | + DESC_HDR_MODE0_DEU_CBC | + DESC_HDR_MODE0_DEU_3DES | + DESC_HDR_SEL1_MDEUA | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEU_MD5_HMAC, + }, + /* ABLKCIPHER algorithms. 
*/ + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .alg.crypto = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_AESU | + DESC_HDR_MODE0_AESU_CBC, + }, + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .alg.crypto = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_DEU | + DESC_HDR_MODE0_DEU_CBC | + DESC_HDR_MODE0_DEU_3DES, + }, + /* AHASH algorithms. */ + { .type = CRYPTO_ALG_TYPE_AHASH, + .alg.hash = { + .halg.digestsize = MD5_DIGEST_SIZE, + .halg.base = { + .cra_name = "md5", + .cra_driver_name = "md5-talitos", + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_MDEUA | + DESC_HDR_MODE0_MDEU_MD5, + }, + { .type = CRYPTO_ALG_TYPE_AHASH, + .alg.hash = { + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-talitos", + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_MDEUA | + DESC_HDR_MODE0_MDEU_SHA1, + }, + { .type = CRYPTO_ALG_TYPE_AHASH, + .alg.hash = { + .halg.digestsize = SHA224_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha224", + .cra_driver_name = "sha224-talitos", + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_MDEUA | + DESC_HDR_MODE0_MDEU_SHA224, + }, + { .type = CRYPTO_ALG_TYPE_AHASH, + .alg.hash = { + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-talitos", + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_MDEUA | + DESC_HDR_MODE0_MDEU_SHA256, + }, + { .type = CRYPTO_ALG_TYPE_AHASH, + .alg.hash = { + .halg.digestsize = SHA384_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha384", + .cra_driver_name = "sha384-talitos", + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_MDEUB | + DESC_HDR_MODE0_MDEUB_SHA384, + }, + { .type = CRYPTO_ALG_TYPE_AHASH, + .alg.hash = { + .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha512", + .cra_driver_name = "sha512-talitos", + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_MDEUB | + DESC_HDR_MODE0_MDEUB_SHA512, + }, + { .type = CRYPTO_ALG_TYPE_AHASH, + .alg.hash = { + .halg.digestsize = MD5_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(md5)", + .cra_driver_name = 
"hmac-md5-talitos", + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_MDEUA | + DESC_HDR_MODE0_MDEU_MD5, + }, + { .type = CRYPTO_ALG_TYPE_AHASH, + .alg.hash = { + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "hmac-sha1-talitos", + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_MDEUA | + DESC_HDR_MODE0_MDEU_SHA1, + }, + { .type = CRYPTO_ALG_TYPE_AHASH, + .alg.hash = { + .halg.digestsize = SHA224_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha224)", + .cra_driver_name = "hmac-sha224-talitos", + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_MDEUA | + DESC_HDR_MODE0_MDEU_SHA224, + }, + { .type = CRYPTO_ALG_TYPE_AHASH, + .alg.hash = { + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "hmac-sha256-talitos", + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_MDEUA | + DESC_HDR_MODE0_MDEU_SHA256, + }, + { .type = CRYPTO_ALG_TYPE_AHASH, + .alg.hash = { + .halg.digestsize = SHA384_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha384)", + .cra_driver_name = "hmac-sha384-talitos", + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_MDEUB | + DESC_HDR_MODE0_MDEUB_SHA384, + }, + { .type = CRYPTO_ALG_TYPE_AHASH, + .alg.hash = { + .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha512)", + .cra_driver_name = "hmac-sha512-talitos", + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_MDEUB | + DESC_HDR_MODE0_MDEUB_SHA512, + } +}; + +struct talitos_crypto_alg { + struct list_head entry; + struct device *dev; + struct talitos_alg_template algt; +}; + +static int talitos_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_alg *alg = tfm->__crt_alg; + struct talitos_crypto_alg *talitos_alg; + struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); + struct talitos_private *priv; + + if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH) + talitos_alg = container_of(__crypto_ahash_alg(alg), + struct talitos_crypto_alg, + algt.alg.hash); + else + talitos_alg = container_of(alg, struct talitos_crypto_alg, + algt.alg.crypto); + + /* update context with ptr to dev */ + ctx->dev = talitos_alg->dev; + + /* assign SEC channel to tfm in round-robin fashion */ + priv = dev_get_drvdata(ctx->dev); + ctx->ch = atomic_inc_return(&priv->last_chan) & + (priv->num_channels - 1); + + /* copy descriptor header template value */ + ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template; + + /* select done notification */ + ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY; + + return 0; +} + +static int talitos_cra_init_aead(struct crypto_tfm *tfm) +{ + struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); + + talitos_cra_init(tfm); + + /* random first IV */ + get_random_bytes(ctx->iv, 
TALITOS_MAX_IV_LENGTH); + + return 0; +} + +static int talitos_cra_init_ahash(struct crypto_tfm *tfm) +{ + struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); + + talitos_cra_init(tfm); + + ctx->keylen = 0; + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct talitos_ahash_req_ctx)); + + return 0; +} + +/* + * given the alg's descriptor header template, determine whether descriptor + * type and primary/secondary execution units required match the hw + * capabilities description provided in the device tree node. + */ +static int hw_supports(struct device *dev, __be32 desc_hdr_template) +{ + struct talitos_private *priv = dev_get_drvdata(dev); + int ret; + + ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) && + (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units); + + if (SECONDARY_EU(desc_hdr_template)) + ret = ret && (1 << SECONDARY_EU(desc_hdr_template) + & priv->exec_units); + + return ret; +} + +static int talitos_remove(struct platform_device *ofdev) +{ + struct device *dev = &ofdev->dev; + struct talitos_private *priv = dev_get_drvdata(dev); + struct talitos_crypto_alg *t_alg, *n; + int i; + + list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { + switch (t_alg->algt.type) { + case CRYPTO_ALG_TYPE_ABLKCIPHER: + case CRYPTO_ALG_TYPE_AEAD: + crypto_unregister_alg(&t_alg->algt.alg.crypto); + break; + case CRYPTO_ALG_TYPE_AHASH: + crypto_unregister_ahash(&t_alg->algt.alg.hash); + break; + } + list_del(&t_alg->entry); + kfree(t_alg); + } + + if (hw_supports(dev, DESC_HDR_SEL0_RNG)) + talitos_unregister_rng(dev); + + for (i = 0; i < priv->num_channels; i++) + kfree(priv->chan[i].fifo); + + kfree(priv->chan); + + for (i = 0; i < 2; i++) + if (priv->irq[i]) { + free_irq(priv->irq[i], dev); + irq_dispose_mapping(priv->irq[i]); + } + + tasklet_kill(&priv->done_task[0]); + if (priv->irq[1]) + tasklet_kill(&priv->done_task[1]); + + iounmap(priv->reg); + + kfree(priv); + + return 0; +} + +static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, + struct talitos_alg_template + *template) +{ + struct talitos_private *priv = dev_get_drvdata(dev); + struct talitos_crypto_alg *t_alg; + struct crypto_alg *alg; + + t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL); + if (!t_alg) + return ERR_PTR(-ENOMEM); + + t_alg->algt = *template; + + switch (t_alg->algt.type) { + case CRYPTO_ALG_TYPE_ABLKCIPHER: + alg = &t_alg->algt.alg.crypto; + alg->cra_init = talitos_cra_init; + alg->cra_type = &crypto_ablkcipher_type; + alg->cra_ablkcipher.setkey = ablkcipher_setkey; + alg->cra_ablkcipher.encrypt = ablkcipher_encrypt; + alg->cra_ablkcipher.decrypt = ablkcipher_decrypt; + alg->cra_ablkcipher.geniv = "eseqiv"; + break; + case CRYPTO_ALG_TYPE_AEAD: + alg = &t_alg->algt.alg.crypto; + alg->cra_init = talitos_cra_init_aead; + alg->cra_type = &crypto_aead_type; + alg->cra_aead.setkey = aead_setkey; + alg->cra_aead.setauthsize = aead_setauthsize; + alg->cra_aead.encrypt = aead_encrypt; + alg->cra_aead.decrypt = aead_decrypt; + alg->cra_aead.givencrypt = aead_givencrypt; + alg->cra_aead.geniv = "<built-in>"; + break; + case CRYPTO_ALG_TYPE_AHASH: + alg = &t_alg->algt.alg.hash.halg.base; + alg->cra_init = talitos_cra_init_ahash; + alg->cra_type = &crypto_ahash_type; + t_alg->algt.alg.hash.init = ahash_init; + t_alg->algt.alg.hash.update = ahash_update; + t_alg->algt.alg.hash.final = ahash_final; + t_alg->algt.alg.hash.finup = ahash_finup; + t_alg->algt.alg.hash.digest = ahash_digest; + t_alg->algt.alg.hash.setkey = ahash_setkey; + + if (!(priv->features & 
TALITOS_FTR_HMAC_OK) && + !strncmp(alg->cra_name, "hmac", 4)) { + kfree(t_alg); + return ERR_PTR(-ENOTSUPP); + } + if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) && + (!strcmp(alg->cra_name, "sha224") || + !strcmp(alg->cra_name, "hmac(sha224)"))) { + t_alg->algt.alg.hash.init = ahash_init_sha224_swinit; + t_alg->algt.desc_hdr_template = + DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_MDEUA | + DESC_HDR_MODE0_MDEU_SHA256; + } + break; + default: + dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type); + kfree(t_alg); + return ERR_PTR(-EINVAL); + } + + alg->cra_module = THIS_MODULE; + alg->cra_priority = TALITOS_CRA_PRIORITY; + alg->cra_alignmask = 0; + alg->cra_ctxsize = sizeof(struct talitos_ctx); + alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY; + + t_alg->dev = dev; + + return t_alg; +} + +static int talitos_probe_irq(struct platform_device *ofdev) +{ + struct device *dev = &ofdev->dev; + struct device_node *np = ofdev->dev.of_node; + struct talitos_private *priv = dev_get_drvdata(dev); + int err; + + priv->irq[0] = irq_of_parse_and_map(np, 0); + if (!priv->irq[0]) { + dev_err(dev, "failed to map irq\n"); + return -EINVAL; + } + + priv->irq[1] = irq_of_parse_and_map(np, 1); + + /* get the primary irq line */ + if (!priv->irq[1]) { + err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0, + dev_driver_string(dev), dev); + goto primary_out; + } + + err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0, + dev_driver_string(dev), dev); + if (err) + goto primary_out; + + /* get the secondary irq line */ + err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0, + dev_driver_string(dev), dev); + if (err) { + dev_err(dev, "failed to request secondary irq\n"); + irq_dispose_mapping(priv->irq[1]); + priv->irq[1] = 0; + } + + return err; + +primary_out: + if (err) { + dev_err(dev, "failed to request primary irq\n"); + irq_dispose_mapping(priv->irq[0]); + priv->irq[0] = 0; + } + + return err; +} + +static int talitos_probe(struct platform_device *ofdev) +{ + struct device *dev = &ofdev->dev; + struct device_node *np = ofdev->dev.of_node; + struct talitos_private *priv; + const unsigned int *prop; + int i, err; + + priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + INIT_LIST_HEAD(&priv->alg_list); + + dev_set_drvdata(dev, priv); + + priv->ofdev = ofdev; + + spin_lock_init(&priv->reg_lock); + + err = talitos_probe_irq(ofdev); + if (err) + goto err_out; + + if (!priv->irq[1]) { + tasklet_init(&priv->done_task[0], talitos_done_4ch, + (unsigned long)dev); + } else { + tasklet_init(&priv->done_task[0], talitos_done_ch0_2, + (unsigned long)dev); + tasklet_init(&priv->done_task[1], talitos_done_ch1_3, + (unsigned long)dev); + } + + priv->reg = of_iomap(np, 0); + if (!priv->reg) { + dev_err(dev, "failed to of_iomap\n"); + err = -ENOMEM; + goto err_out; + } + + /* get SEC version capabilities from device tree */ + prop = of_get_property(np, "fsl,num-channels", NULL); + if (prop) + priv->num_channels = *prop; + + prop = of_get_property(np, "fsl,channel-fifo-len", NULL); + if (prop) + priv->chfifo_len = *prop; + + prop = of_get_property(np, "fsl,exec-units-mask", NULL); + if (prop) + priv->exec_units = *prop; + + prop = of_get_property(np, "fsl,descriptor-types-mask", NULL); + if (prop) + priv->desc_types = *prop; + + if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len || + !priv->exec_units || !priv->desc_types) { + dev_err(dev, "invalid property data in device tree node\n"); + err = -EINVAL; + goto err_out; + } + + if 
(of_device_is_compatible(np, "fsl,sec3.0")) + priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT; + + if (of_device_is_compatible(np, "fsl,sec2.1")) + priv->features |= TALITOS_FTR_HW_AUTH_CHECK | + TALITOS_FTR_SHA224_HWINIT | + TALITOS_FTR_HMAC_OK; + + priv->chan = kzalloc(sizeof(struct talitos_channel) * + priv->num_channels, GFP_KERNEL); + if (!priv->chan) { + dev_err(dev, "failed to allocate channel management space\n"); + err = -ENOMEM; + goto err_out; + } + + priv->fifo_len = roundup_pow_of_two(priv->chfifo_len); + + for (i = 0; i < priv->num_channels; i++) { + priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1); + if (!priv->irq[1] || !(i & 1)) + priv->chan[i].reg += TALITOS_CH_BASE_OFFSET; + + spin_lock_init(&priv->chan[i].head_lock); + spin_lock_init(&priv->chan[i].tail_lock); + + priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) * + priv->fifo_len, GFP_KERNEL); + if (!priv->chan[i].fifo) { + dev_err(dev, "failed to allocate request fifo %d\n", i); + err = -ENOMEM; + goto err_out; + } + + atomic_set(&priv->chan[i].submit_count, + -(priv->chfifo_len - 1)); + } + + dma_set_mask(dev, DMA_BIT_MASK(36)); + + /* reset and initialize the h/w */ + err = init_device(dev); + if (err) { + dev_err(dev, "failed to initialize device\n"); + goto err_out; + } + + /* register the RNG, if available */ + if (hw_supports(dev, DESC_HDR_SEL0_RNG)) { + err = talitos_register_rng(dev); + if (err) { + dev_err(dev, "failed to register hwrng: %d\n", err); + goto err_out; + } else + dev_info(dev, "hwrng\n"); + } + + /* register crypto algorithms the device supports */ + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { + if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { + struct talitos_crypto_alg *t_alg; + char *name = NULL; + + t_alg = talitos_alg_alloc(dev, &driver_algs[i]); + if (IS_ERR(t_alg)) { + err = PTR_ERR(t_alg); + if (err == -ENOTSUPP) + continue; + goto err_out; + } + + switch (t_alg->algt.type) { + case CRYPTO_ALG_TYPE_ABLKCIPHER: + case CRYPTO_ALG_TYPE_AEAD: + err = crypto_register_alg( + &t_alg->algt.alg.crypto); + name = t_alg->algt.alg.crypto.cra_driver_name; + break; + case CRYPTO_ALG_TYPE_AHASH: + err = crypto_register_ahash( + &t_alg->algt.alg.hash); + name = + t_alg->algt.alg.hash.halg.base.cra_driver_name; + break; + } + if (err) { + dev_err(dev, "%s alg registration failed\n", + name); + kfree(t_alg); + } else + list_add_tail(&t_alg->entry, &priv->alg_list); + } + } + if (!list_empty(&priv->alg_list)) + dev_info(dev, "%s algorithms registered in /proc/crypto\n", + (char *)of_get_property(np, "compatible", NULL)); + + return 0; + +err_out: + talitos_remove(ofdev); + + return err; +} + +static const struct of_device_id talitos_match[] = { + { + .compatible = "fsl,sec2.0", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, talitos_match); + +static struct platform_driver talitos_driver = { + .driver = { + .name = "talitos", + .of_match_table = talitos_match, + }, + .probe = talitos_probe, + .remove = talitos_remove, +}; + +module_platform_driver(talitos_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>"); +MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver"); diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h new file mode 100644 index 000000000..61a14054a --- /dev/null +++ b/drivers/crypto/talitos.h @@ -0,0 +1,351 @@ +/* + * Freescale SEC (talitos) device register and descriptor header defines + * + * Copyright (c) 2006-2011 Freescale Semiconductor, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#define TALITOS_TIMEOUT 100000 +#define TALITOS_MAX_DATA_LEN 65535 + +#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f) +#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf) +#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf) + +/* descriptor pointer entry */ +struct talitos_ptr { + __be16 len; /* length */ + u8 j_extent; /* jump to sg link table and/or extent */ + u8 eptr; /* extended address */ + __be32 ptr; /* address */ +}; + +static const struct talitos_ptr zero_entry = { + .len = 0, + .j_extent = 0, + .eptr = 0, + .ptr = 0 +}; + +/* descriptor */ +struct talitos_desc { + __be32 hdr; /* header high bits */ + __be32 hdr_lo; /* header low bits */ + struct talitos_ptr ptr[7]; /* ptr/len pair array */ +}; + +/** + * talitos_request - descriptor submission request + * @desc: descriptor pointer (kernel virtual) + * @dma_desc: descriptor's physical bus address + * @callback: whom to call when descriptor processing is done + * @context: caller context (optional) + */ +struct talitos_request { + struct talitos_desc *desc; + dma_addr_t dma_desc; + void (*callback) (struct device *dev, struct talitos_desc *desc, + void *context, int error); + void *context; +}; + +/* per-channel fifo management */ +struct talitos_channel { + void __iomem *reg; + + /* request fifo */ + struct talitos_request *fifo; + + /* number of requests pending in channel h/w fifo */ + atomic_t submit_count ____cacheline_aligned; + + /* request submission (head) lock */ + spinlock_t head_lock ____cacheline_aligned; + /* index to next free descriptor request */ + int head; + + /* request release (tail) lock */ + spinlock_t tail_lock ____cacheline_aligned; + /* index to next in-progress/done descriptor request */ + int tail; +}; + +struct talitos_private { + struct device *dev; + struct platform_device *ofdev; + void __iomem *reg; + int irq[2]; + + /* SEC global registers lock */ + spinlock_t reg_lock ____cacheline_aligned; + + /* SEC version geometry (from device tree node) */ + unsigned int num_channels; + unsigned int chfifo_len; + unsigned int exec_units; + unsigned int 
desc_types; + + /* SEC Compatibility info */ + unsigned long features; + + /* + * length of the request fifo + * fifo_len is chfifo_len rounded up to next power of 2 + * so we can use bitwise ops to wrap + */ + unsigned int fifo_len; + + struct talitos_channel *chan; + + /* next channel to be assigned next incoming descriptor */ + atomic_t last_chan ____cacheline_aligned; + + /* request callback tasklet */ + struct tasklet_struct done_task[2]; + + /* list of registered algorithms */ + struct list_head alg_list; + + /* hwrng device */ + struct hwrng rng; +}; + +extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, + void (*callback)(struct device *dev, + struct talitos_desc *desc, + void *context, int error), + void *context); + +/* .features flag */ +#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 +#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 +#define TALITOS_FTR_SHA224_HWINIT 0x00000004 +#define TALITOS_FTR_HMAC_OK 0x00000008 + +/* + * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register + */ + +/* global register offset addresses */ +#define TALITOS_MCR 0x1030 /* master control register */ +#define TALITOS_MCR_RCA0 (1 << 15) /* remap channel 0 */ +#define TALITOS_MCR_RCA1 (1 << 14) /* remap channel 1 */ +#define TALITOS_MCR_RCA2 (1 << 13) /* remap channel 2 */ +#define TALITOS_MCR_RCA3 (1 << 12) /* remap channel 3 */ +#define TALITOS_MCR_SWR 0x1 /* s/w reset */ +#define TALITOS_MCR_LO 0x1034 +#define TALITOS_IMR 0x1008 /* interrupt mask register */ +#define TALITOS_IMR_INIT 0x100ff /* enable channel IRQs */ +#define TALITOS_IMR_DONE 0x00055 /* done IRQs */ +#define TALITOS_IMR_LO 0x100C +#define TALITOS_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */ +#define TALITOS_ISR 0x1010 /* interrupt status register */ +#define TALITOS_ISR_4CHERR 0xaa /* 4 channel errors mask */ +#define TALITOS_ISR_4CHDONE 0x55 /* 4 channel done mask */ +#define TALITOS_ISR_CH_0_2_ERR 0x22 /* channels 0, 2 errors mask */ +#define TALITOS_ISR_CH_0_2_DONE 0x11 /* channels 0, 2 done mask */ +#define TALITOS_ISR_CH_1_3_ERR 0x88 /* channels 1, 3 errors mask */ +#define TALITOS_ISR_CH_1_3_DONE 0x44 /* channels 1, 3 done mask */ +#define TALITOS_ISR_LO 0x1014 +#define TALITOS_ICR 0x1018 /* interrupt clear register */ +#define TALITOS_ICR_LO 0x101C + +/* channel register address stride */ +#define TALITOS_CH_BASE_OFFSET 0x1000 /* default channel map base */ +#define TALITOS_CH_STRIDE 0x100 + +/* channel configuration register */ +#define TALITOS_CCCR 0x8 +#define TALITOS_CCCR_CONT 0x2 /* channel continue */ +#define TALITOS_CCCR_RESET 0x1 /* channel reset */ +#define TALITOS_CCCR_LO 0xc +#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ +#define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */ +#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. 
*/ +#define TALITOS_CCCR_LO_NT 0x4 /* notification type */ +#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ + +/* CCPSR: channel pointer status register */ +#define TALITOS_CCPSR 0x10 +#define TALITOS_CCPSR_LO 0x14 +#define TALITOS_CCPSR_LO_DOF 0x8000 /* double FF write oflow error */ +#define TALITOS_CCPSR_LO_SOF 0x4000 /* single FF write oflow error */ +#define TALITOS_CCPSR_LO_MDTE 0x2000 /* master data transfer error */ +#define TALITOS_CCPSR_LO_SGDLZ 0x1000 /* s/g data len zero error */ +#define TALITOS_CCPSR_LO_FPZ 0x0800 /* fetch ptr zero error */ +#define TALITOS_CCPSR_LO_IDH 0x0400 /* illegal desc hdr error */ +#define TALITOS_CCPSR_LO_IEU 0x0200 /* invalid EU error */ +#define TALITOS_CCPSR_LO_EU 0x0100 /* EU error detected */ +#define TALITOS_CCPSR_LO_GB 0x0080 /* gather boundary error */ +#define TALITOS_CCPSR_LO_GRL 0x0040 /* gather return/length error */ +#define TALITOS_CCPSR_LO_SB 0x0020 /* scatter boundary error */ +#define TALITOS_CCPSR_LO_SRL 0x0010 /* scatter return/length error */ + +/* channel fetch fifo register */ +#define TALITOS_FF 0x48 +#define TALITOS_FF_LO 0x4c + +/* current descriptor pointer register */ +#define TALITOS_CDPR 0x40 +#define TALITOS_CDPR_LO 0x44 + +/* descriptor buffer register */ +#define TALITOS_DESCBUF 0x80 +#define TALITOS_DESCBUF_LO 0x84 + +/* gather link table */ +#define TALITOS_GATHER 0xc0 +#define TALITOS_GATHER_LO 0xc4 + +/* scatter link table */ +#define TALITOS_SCATTER 0xe0 +#define TALITOS_SCATTER_LO 0xe4 + +/* execution unit interrupt status registers */ +#define TALITOS_DEUISR 0x2030 /* DES unit */ +#define TALITOS_DEUISR_LO 0x2034 +#define TALITOS_AESUISR 0x4030 /* AES unit */ +#define TALITOS_AESUISR_LO 0x4034 +#define TALITOS_MDEUISR 0x6030 /* message digest unit */ +#define TALITOS_MDEUISR_LO 0x6034 +#define TALITOS_MDEUICR 0x6038 /* interrupt control */ +#define TALITOS_MDEUICR_LO 0x603c +#define TALITOS_MDEUICR_LO_ICE 0x4000 /* integrity check IRQ enable */ +#define TALITOS_AFEUISR 0x8030 /* arc4 unit */ +#define TALITOS_AFEUISR_LO 0x8034 +#define TALITOS_RNGUISR 0xa030 /* random number unit */ +#define TALITOS_RNGUISR_LO 0xa034 +#define TALITOS_RNGUSR 0xa028 /* rng status */ +#define TALITOS_RNGUSR_LO 0xa02c +#define TALITOS_RNGUSR_LO_RD 0x1 /* reset done */ +#define TALITOS_RNGUSR_LO_OFL 0xff0000/* output FIFO length */ +#define TALITOS_RNGUDSR 0xa010 /* data size */ +#define TALITOS_RNGUDSR_LO 0xa014 +#define TALITOS_RNGU_FIFO 0xa800 /* output FIFO */ +#define TALITOS_RNGU_FIFO_LO 0xa804 /* output FIFO */ +#define TALITOS_RNGURCR 0xa018 /* reset control */ +#define TALITOS_RNGURCR_LO 0xa01c +#define TALITOS_RNGURCR_LO_SR 0x1 /* software reset */ +#define TALITOS_PKEUISR 0xc030 /* public key unit */ +#define TALITOS_PKEUISR_LO 0xc034 +#define TALITOS_KEUISR 0xe030 /* kasumi unit */ +#define TALITOS_KEUISR_LO 0xe034 +#define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/ +#define TALITOS_CRCUISR_LO 0xf034 + +#define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 0x28 +#define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 0x48 + +/* + * talitos descriptor header (hdr) bits + */ + +/* written back when done */ +#define DESC_HDR_DONE cpu_to_be32(0xff000000) +#define DESC_HDR_LO_ICCR1_MASK cpu_to_be32(0x00180000) +#define DESC_HDR_LO_ICCR1_PASS cpu_to_be32(0x00080000) +#define DESC_HDR_LO_ICCR1_FAIL cpu_to_be32(0x00100000) + +/* primary execution unit select */ +#define DESC_HDR_SEL0_MASK cpu_to_be32(0xf0000000) +#define DESC_HDR_SEL0_AFEU cpu_to_be32(0x10000000) +#define DESC_HDR_SEL0_DEU cpu_to_be32(0x20000000) 
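/*
 * Illustrative sketch only (assumed composition, not text from this commit):
 * the descriptor TYPE, SEL0 and MODE0 fields defined in this header are
 * OR-ed together to form the 32-bit descriptor header template the driver
 * programs per algorithm. For example, a CBC-mode AES encrypt header might
 * plausibly be built as:
 *
 *	__be32 hdr = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
 *		     DESC_HDR_SEL0_AESU |
 *		     DESC_HDR_MODE0_AESU_CBC |
 *		     DESC_HDR_MODE0_ENCRYPT;
 *
 * All macros referenced above are defined elsewhere in this header; the
 * exact templates actually used are those in talitos.c.
 */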
+#define DESC_HDR_SEL0_MDEUA cpu_to_be32(0x30000000) +#define DESC_HDR_SEL0_MDEUB cpu_to_be32(0xb0000000) +#define DESC_HDR_SEL0_RNG cpu_to_be32(0x40000000) +#define DESC_HDR_SEL0_PKEU cpu_to_be32(0x50000000) +#define DESC_HDR_SEL0_AESU cpu_to_be32(0x60000000) +#define DESC_HDR_SEL0_KEU cpu_to_be32(0x70000000) +#define DESC_HDR_SEL0_CRCU cpu_to_be32(0x80000000) + +/* primary execution unit mode (MODE0) and derivatives */ +#define DESC_HDR_MODE0_ENCRYPT cpu_to_be32(0x00100000) +#define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000) +#define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000) +#define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000) +#define DESC_HDR_MODE0_MDEU_CONT cpu_to_be32(0x08000000) +#define DESC_HDR_MODE0_MDEU_INIT cpu_to_be32(0x01000000) +#define DESC_HDR_MODE0_MDEU_HMAC cpu_to_be32(0x00800000) +#define DESC_HDR_MODE0_MDEU_PAD cpu_to_be32(0x00400000) +#define DESC_HDR_MODE0_MDEU_SHA224 cpu_to_be32(0x00300000) +#define DESC_HDR_MODE0_MDEU_MD5 cpu_to_be32(0x00200000) +#define DESC_HDR_MODE0_MDEU_SHA256 cpu_to_be32(0x00100000) +#define DESC_HDR_MODE0_MDEU_SHA1 cpu_to_be32(0x00000000) +#define DESC_HDR_MODE0_MDEUB_SHA384 cpu_to_be32(0x00000000) +#define DESC_HDR_MODE0_MDEUB_SHA512 cpu_to_be32(0x00200000) +#define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \ + DESC_HDR_MODE0_MDEU_HMAC) +#define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \ + DESC_HDR_MODE0_MDEU_HMAC) +#define DESC_HDR_MODE0_MDEU_SHA1_HMAC (DESC_HDR_MODE0_MDEU_SHA1 | \ + DESC_HDR_MODE0_MDEU_HMAC) + +/* secondary execution unit select (SEL1) */ +#define DESC_HDR_SEL1_MASK cpu_to_be32(0x000f0000) +#define DESC_HDR_SEL1_MDEUA cpu_to_be32(0x00030000) +#define DESC_HDR_SEL1_MDEUB cpu_to_be32(0x000b0000) +#define DESC_HDR_SEL1_CRCU cpu_to_be32(0x00080000) + +/* secondary execution unit mode (MODE1) and derivatives */ +#define DESC_HDR_MODE1_MDEU_CICV cpu_to_be32(0x00004000) +#define DESC_HDR_MODE1_MDEU_INIT cpu_to_be32(0x00001000) +#define DESC_HDR_MODE1_MDEU_HMAC cpu_to_be32(0x00000800) +#define DESC_HDR_MODE1_MDEU_PAD cpu_to_be32(0x00000400) +#define DESC_HDR_MODE1_MDEU_SHA224 cpu_to_be32(0x00000300) +#define DESC_HDR_MODE1_MDEU_MD5 cpu_to_be32(0x00000200) +#define DESC_HDR_MODE1_MDEU_SHA256 cpu_to_be32(0x00000100) +#define DESC_HDR_MODE1_MDEU_SHA1 cpu_to_be32(0x00000000) +#define DESC_HDR_MODE1_MDEUB_SHA384 cpu_to_be32(0x00000000) +#define DESC_HDR_MODE1_MDEUB_SHA512 cpu_to_be32(0x00000200) +#define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \ + DESC_HDR_MODE1_MDEU_HMAC) +#define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \ + DESC_HDR_MODE1_MDEU_HMAC) +#define DESC_HDR_MODE1_MDEU_SHA1_HMAC (DESC_HDR_MODE1_MDEU_SHA1 | \ + DESC_HDR_MODE1_MDEU_HMAC) +#define DESC_HDR_MODE1_MDEU_SHA224_HMAC (DESC_HDR_MODE1_MDEU_SHA224 | \ + DESC_HDR_MODE1_MDEU_HMAC) +#define DESC_HDR_MODE1_MDEUB_SHA384_HMAC (DESC_HDR_MODE1_MDEUB_SHA384 | \ + DESC_HDR_MODE1_MDEU_HMAC) +#define DESC_HDR_MODE1_MDEUB_SHA512_HMAC (DESC_HDR_MODE1_MDEUB_SHA512 | \ + DESC_HDR_MODE1_MDEU_HMAC) + +/* direction of overall data flow (DIR) */ +#define DESC_HDR_DIR_INBOUND cpu_to_be32(0x00000002) + +/* request done notification (DN) */ +#define DESC_HDR_DONE_NOTIFY cpu_to_be32(0x00000001) + +/* descriptor types */ +#define DESC_HDR_TYPE_AESU_CTR_NONSNOOP cpu_to_be32(0 << 3) +#define DESC_HDR_TYPE_IPSEC_ESP cpu_to_be32(1 << 3) +#define DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU cpu_to_be32(2 << 3) +#define DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU cpu_to_be32(4 << 3) + +/* link table extent field bits */ +#define 
DESC_PTR_LNKTBL_JUMP 0x80 +#define DESC_PTR_LNKTBL_RETURN 0x02 +#define DESC_PTR_LNKTBL_NEXT 0x01 diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig new file mode 100644 index 000000000..b35e5c4b0 --- /dev/null +++ b/drivers/crypto/ux500/Kconfig @@ -0,0 +1,30 @@ +# +# Copyright (C) ST-Ericsson SA 2010 +# Author: Shujuan Chen (shujuan.chen@stericsson.com) +# License terms: GNU General Public License (GPL) version 2 +# + +config CRYPTO_DEV_UX500_CRYP + tristate "UX500 crypto driver for CRYP block" + depends on CRYPTO_DEV_UX500 + select CRYPTO_DES + help + This selects the crypto driver for the UX500_CRYP hardware. It supports + AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes. + +config CRYPTO_DEV_UX500_HASH + tristate "UX500 crypto driver for HASH block" + depends on CRYPTO_DEV_UX500 + select CRYPTO_HASH + select CRYPTO_HMAC + help + This selects the hash driver for the UX500_HASH hardware. + Depends on UX500/STM DMA if running in DMA mode. + +config CRYPTO_DEV_UX500_DEBUG + bool "Activate ux500 platform debug-mode for crypto and hash block" + depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH + default n + help + Say Y if you want to add debug prints to ux500_hash and + ux500_cryp devices. diff --git a/drivers/crypto/ux500/Makefile b/drivers/crypto/ux500/Makefile new file mode 100644 index 000000000..b9a365bad --- /dev/null +++ b/drivers/crypto/ux500/Makefile @@ -0,0 +1,8 @@ +# +# Copyright (C) ST-Ericsson SA 2010 +# Author: Shujuan Chen (shujuan.chen@stericsson.com) +# License terms: GNU General Public License (GPL) version 2 +# + +obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += hash/ +obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += cryp/ diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile new file mode 100644 index 000000000..e5d362a6f --- /dev/null +++ b/drivers/crypto/ux500/cryp/Makefile @@ -0,0 +1,13 @@ +#/* +# * Copyright (C) ST-Ericsson SA 2010 +# * Author: shujuan.chen@stericsson.com for ST-Ericsson. +# * License terms: GNU General Public License (GPL) version 2 */ + +ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG +CFLAGS_cryp_core.o := -DDEBUG -O0 +CFLAGS_cryp.o := -DDEBUG -O0 +CFLAGS_cryp_irq.o := -DDEBUG -O0 +endif + +obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o +ux500_cryp-objs := cryp.o cryp_irq.o cryp_core.o diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c new file mode 100644 index 000000000..43a0c8a26 --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp.c @@ -0,0 +1,387 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson. + * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson. + * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson. + * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson. + * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2 + */ + +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/types.h> + +#include "cryp_p.h" +#include "cryp.h" + +/** + * cryp_wait_until_done - wait until the device logic is not busy + */ +void cryp_wait_until_done(struct cryp_device_data *device_data) +{ + while (cryp_is_logic_busy(device_data)) + cpu_relax(); +} + +/** + * cryp_check - This routine checks Peripheral and PCell Id + * @device_data: Pointer to the device data struct for base address. 
+ */ +int cryp_check(struct cryp_device_data *device_data) +{ + int peripheralid2 = 0; + + if (NULL == device_data) + return -EINVAL; + + peripheralid2 = readl_relaxed(&device_data->base->periphId2); + + if (peripheralid2 != CRYP_PERIPHERAL_ID2_DB8500) + return -EPERM; + + /* Check Peripheral and Pcell Id Register for CRYP */ + if ((CRYP_PERIPHERAL_ID0 == + readl_relaxed(&device_data->base->periphId0)) + && (CRYP_PERIPHERAL_ID1 == + readl_relaxed(&device_data->base->periphId1)) + && (CRYP_PERIPHERAL_ID3 == + readl_relaxed(&device_data->base->periphId3)) + && (CRYP_PCELL_ID0 == + readl_relaxed(&device_data->base->pcellId0)) + && (CRYP_PCELL_ID1 == + readl_relaxed(&device_data->base->pcellId1)) + && (CRYP_PCELL_ID2 == + readl_relaxed(&device_data->base->pcellId2)) + && (CRYP_PCELL_ID3 == + readl_relaxed(&device_data->base->pcellId3))) { + return 0; + } + + return -EPERM; +} + +/** + * cryp_activity - This routine enables/disable the cryptography function. + * @device_data: Pointer to the device data struct for base address. + * @cryp_crypen: Enable/Disable functionality + */ +void cryp_activity(struct cryp_device_data *device_data, + enum cryp_crypen cryp_crypen) +{ + CRYP_PUT_BITS(&device_data->base->cr, + cryp_crypen, + CRYP_CR_CRYPEN_POS, + CRYP_CR_CRYPEN_MASK); +} + +/** + * cryp_flush_inoutfifo - Resets both the input and the output FIFOs + * @device_data: Pointer to the device data struct for base address. + */ +void cryp_flush_inoutfifo(struct cryp_device_data *device_data) +{ + /* + * We always need to disble the hardware before trying to flush the + * FIFO. This is something that isn't written in the design + * specification, but we have been informed by the hardware designers + * that this must be done. + */ + cryp_activity(device_data, CRYP_CRYPEN_DISABLE); + cryp_wait_until_done(device_data); + + CRYP_SET_BITS(&device_data->base->cr, CRYP_CR_FFLUSH_MASK); + /* + * CRYP_SR_INFIFO_READY_MASK is the expected value on the status + * register when starting a new calculation, which means Input FIFO is + * not full and input FIFO is empty. + */ + while (readl_relaxed(&device_data->base->sr) != + CRYP_SR_INFIFO_READY_MASK) + cpu_relax(); +} + +/** + * cryp_set_configuration - This routine set the cr CRYP IP + * @device_data: Pointer to the device data struct for base address. + * @cryp_config: Pointer to the configuration parameter + * @control_register: The control register to be written later on. + */ +int cryp_set_configuration(struct cryp_device_data *device_data, + struct cryp_config *cryp_config, + u32 *control_register) +{ + u32 cr_for_kse; + + if (NULL == device_data || NULL == cryp_config) + return -EINVAL; + + *control_register |= (cryp_config->keysize << CRYP_CR_KEYSIZE_POS); + + /* Prepare key for decryption in AES_ECB and AES_CBC mode. */ + if ((CRYP_ALGORITHM_DECRYPT == cryp_config->algodir) && + ((CRYP_ALGO_AES_ECB == cryp_config->algomode) || + (CRYP_ALGO_AES_CBC == cryp_config->algomode))) { + cr_for_kse = *control_register; + /* + * This seems a bit odd, but it is indeed needed to set this to + * encrypt even though it is a decryption that we are doing. It + * also mentioned in the design spec that you need to do this. + * After the keyprepartion for decrypting is done you should set + * algodir back to decryption, which is done outside this if + * statement. + * + * According to design specification we should set mode ECB + * during key preparation even though we might be running CBC + * when enter this function. 
+ * + * Writing to KSE_ENABLED will drop CRYPEN when key preparation + * is done. Therefore we need to set CRYPEN again outside this + * if statement when running decryption. + */ + cr_for_kse |= ((CRYP_ALGORITHM_ENCRYPT << CRYP_CR_ALGODIR_POS) | + (CRYP_ALGO_AES_ECB << CRYP_CR_ALGOMODE_POS) | + (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS) | + (KSE_ENABLED << CRYP_CR_KSE_POS)); + + writel_relaxed(cr_for_kse, &device_data->base->cr); + cryp_wait_until_done(device_data); + } + + *control_register |= + ((cryp_config->algomode << CRYP_CR_ALGOMODE_POS) | + (cryp_config->algodir << CRYP_CR_ALGODIR_POS)); + + return 0; +} + +/** + * cryp_configure_protection - set the protection bits in the CRYP logic. + * @device_data: Pointer to the device data struct for base address. + * @p_protect_config: Pointer to the protection mode and + * secure mode configuration + */ +int cryp_configure_protection(struct cryp_device_data *device_data, + struct cryp_protection_config *p_protect_config) +{ + if (NULL == p_protect_config) + return -EINVAL; + + CRYP_WRITE_BIT(&device_data->base->cr, + (u32) p_protect_config->secure_access, + CRYP_CR_SECURE_MASK); + CRYP_PUT_BITS(&device_data->base->cr, + p_protect_config->privilege_access, + CRYP_CR_PRLG_POS, + CRYP_CR_PRLG_MASK); + + return 0; +} + +/** + * cryp_is_logic_busy - returns the busy status of the CRYP logic + * @device_data: Pointer to the device data struct for base address. + */ +int cryp_is_logic_busy(struct cryp_device_data *device_data) +{ + return CRYP_TEST_BITS(&device_data->base->sr, + CRYP_SR_BUSY_MASK); +} + +/** + * cryp_configure_for_dma - configures the CRYP IP for DMA operation + * @device_data: Pointer to the device data struct for base address. + * @dma_req: Specifies the DMA request type value. + */ +void cryp_configure_for_dma(struct cryp_device_data *device_data, + enum cryp_dma_req_type dma_req) +{ + CRYP_SET_BITS(&device_data->base->dmacr, + (u32) dma_req); +} + +/** + * cryp_configure_key_values - configures the key values for CRYP operations + * @device_data: Pointer to the device data struct for base address. + * @key_reg_index: Key value index register + * @key_value: The key value struct + */ +int cryp_configure_key_values(struct cryp_device_data *device_data, + enum cryp_key_reg_index key_reg_index, + struct cryp_key_value key_value) +{ + while (cryp_is_logic_busy(device_data)) + cpu_relax(); + + switch (key_reg_index) { + case CRYP_KEY_REG_1: + writel_relaxed(key_value.key_value_left, + &device_data->base->key_1_l); + writel_relaxed(key_value.key_value_right, + &device_data->base->key_1_r); + break; + case CRYP_KEY_REG_2: + writel_relaxed(key_value.key_value_left, + &device_data->base->key_2_l); + writel_relaxed(key_value.key_value_right, + &device_data->base->key_2_r); + break; + case CRYP_KEY_REG_3: + writel_relaxed(key_value.key_value_left, + &device_data->base->key_3_l); + writel_relaxed(key_value.key_value_right, + &device_data->base->key_3_r); + break; + case CRYP_KEY_REG_4: + writel_relaxed(key_value.key_value_left, + &device_data->base->key_4_l); + writel_relaxed(key_value.key_value_right, + &device_data->base->key_4_r); + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * cryp_configure_init_vector - configures the initialization vector register + * @device_data: Pointer to the device data struct for base address. + * @init_vector_index: Specifies the index of the init vector. + * @init_vector_value: Specifies the value for the init vector. 
+ */ +int cryp_configure_init_vector(struct cryp_device_data *device_data, + enum cryp_init_vector_index + init_vector_index, + struct cryp_init_vector_value + init_vector_value) +{ + while (cryp_is_logic_busy(device_data)) + cpu_relax(); + + switch (init_vector_index) { + case CRYP_INIT_VECTOR_INDEX_0: + writel_relaxed(init_vector_value.init_value_left, + &device_data->base->init_vect_0_l); + writel_relaxed(init_vector_value.init_value_right, + &device_data->base->init_vect_0_r); + break; + case CRYP_INIT_VECTOR_INDEX_1: + writel_relaxed(init_vector_value.init_value_left, + &device_data->base->init_vect_1_l); + writel_relaxed(init_vector_value.init_value_right, + &device_data->base->init_vect_1_r); + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * cryp_save_device_context - Store hardware registers and + * other device context parameter + * @device_data: Pointer to the device data struct for base address. + * @ctx: Crypto device context + */ +void cryp_save_device_context(struct cryp_device_data *device_data, + struct cryp_device_context *ctx, + int cryp_mode) +{ + enum cryp_algo_mode algomode; + struct cryp_register __iomem *src_reg = device_data->base; + struct cryp_config *config = + (struct cryp_config *)device_data->current_ctx; + + /* + * Always start by disable the hardware and wait for it to finish the + * ongoing calculations before trying to reprogram it. + */ + cryp_activity(device_data, CRYP_CRYPEN_DISABLE); + cryp_wait_until_done(device_data); + + if (cryp_mode == CRYP_MODE_DMA) + cryp_configure_for_dma(device_data, CRYP_DMA_DISABLE_BOTH); + + if (CRYP_TEST_BITS(&src_reg->sr, CRYP_SR_IFEM_MASK) == 0) + ctx->din = readl_relaxed(&src_reg->din); + + ctx->cr = readl_relaxed(&src_reg->cr) & CRYP_CR_CONTEXT_SAVE_MASK; + + switch (config->keysize) { + case CRYP_KEY_SIZE_256: + ctx->key_4_l = readl_relaxed(&src_reg->key_4_l); + ctx->key_4_r = readl_relaxed(&src_reg->key_4_r); + + case CRYP_KEY_SIZE_192: + ctx->key_3_l = readl_relaxed(&src_reg->key_3_l); + ctx->key_3_r = readl_relaxed(&src_reg->key_3_r); + + case CRYP_KEY_SIZE_128: + ctx->key_2_l = readl_relaxed(&src_reg->key_2_l); + ctx->key_2_r = readl_relaxed(&src_reg->key_2_r); + + default: + ctx->key_1_l = readl_relaxed(&src_reg->key_1_l); + ctx->key_1_r = readl_relaxed(&src_reg->key_1_r); + } + + /* Save IV for CBC mode for both AES and DES. */ + algomode = ((ctx->cr & CRYP_CR_ALGOMODE_MASK) >> CRYP_CR_ALGOMODE_POS); + if (algomode == CRYP_ALGO_TDES_CBC || + algomode == CRYP_ALGO_DES_CBC || + algomode == CRYP_ALGO_AES_CBC) { + ctx->init_vect_0_l = readl_relaxed(&src_reg->init_vect_0_l); + ctx->init_vect_0_r = readl_relaxed(&src_reg->init_vect_0_r); + ctx->init_vect_1_l = readl_relaxed(&src_reg->init_vect_1_l); + ctx->init_vect_1_r = readl_relaxed(&src_reg->init_vect_1_r); + } +} + +/** + * cryp_restore_device_context - Restore hardware registers and + * other device context parameter + * @device_data: Pointer to the device data struct for base address. + * @ctx: Crypto device context + */ +void cryp_restore_device_context(struct cryp_device_data *device_data, + struct cryp_device_context *ctx) +{ + struct cryp_register __iomem *reg = device_data->base; + struct cryp_config *config = + (struct cryp_config *)device_data->current_ctx; + + /* + * Fall through for all items in switch statement. DES is captured in + * the default. 
+ */ + switch (config->keysize) { + case CRYP_KEY_SIZE_256: + writel_relaxed(ctx->key_4_l, ®->key_4_l); + writel_relaxed(ctx->key_4_r, ®->key_4_r); + + case CRYP_KEY_SIZE_192: + writel_relaxed(ctx->key_3_l, ®->key_3_l); + writel_relaxed(ctx->key_3_r, ®->key_3_r); + + case CRYP_KEY_SIZE_128: + writel_relaxed(ctx->key_2_l, ®->key_2_l); + writel_relaxed(ctx->key_2_r, ®->key_2_r); + + default: + writel_relaxed(ctx->key_1_l, ®->key_1_l); + writel_relaxed(ctx->key_1_r, ®->key_1_r); + } + + /* Restore IV for CBC mode for AES and DES. */ + if (config->algomode == CRYP_ALGO_TDES_CBC || + config->algomode == CRYP_ALGO_DES_CBC || + config->algomode == CRYP_ALGO_AES_CBC) { + writel_relaxed(ctx->init_vect_0_l, ®->init_vect_0_l); + writel_relaxed(ctx->init_vect_0_r, ®->init_vect_0_r); + writel_relaxed(ctx->init_vect_1_l, ®->init_vect_1_l); + writel_relaxed(ctx->init_vect_1_r, ®->init_vect_1_r); + } +} diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h new file mode 100644 index 000000000..d1d6606fe --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp.h @@ -0,0 +1,313 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson. + * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson. + * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson. + * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson. + * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef _CRYP_H_ +#define _CRYP_H_ + +#include <linux/completion.h> +#include <linux/dmaengine.h> +#include <linux/klist.h> +#include <linux/mutex.h> + +#define DEV_DBG_NAME "crypX crypX:" + +/* CRYP enable/disable */ +enum cryp_crypen { + CRYP_CRYPEN_DISABLE = 0, + CRYP_CRYPEN_ENABLE = 1 +}; + +/* CRYP Start Computation enable/disable */ +enum cryp_start { + CRYP_START_DISABLE = 0, + CRYP_START_ENABLE = 1 +}; + +/* CRYP Init Signal enable/disable */ +enum cryp_init { + CRYP_INIT_DISABLE = 0, + CRYP_INIT_ENABLE = 1 +}; + +/* Cryp State enable/disable */ +enum cryp_state { + CRYP_STATE_DISABLE = 0, + CRYP_STATE_ENABLE = 1 +}; + +/* Key preparation bit enable */ +enum cryp_key_prep { + KSE_DISABLED = 0, + KSE_ENABLED = 1 +}; + +/* Key size for AES */ +#define CRYP_KEY_SIZE_128 (0) +#define CRYP_KEY_SIZE_192 (1) +#define CRYP_KEY_SIZE_256 (2) + +/* AES modes */ +enum cryp_algo_mode { + CRYP_ALGO_TDES_ECB, + CRYP_ALGO_TDES_CBC, + CRYP_ALGO_DES_ECB, + CRYP_ALGO_DES_CBC, + CRYP_ALGO_AES_ECB, + CRYP_ALGO_AES_CBC, + CRYP_ALGO_AES_CTR, + CRYP_ALGO_AES_XTS +}; + +/* Cryp Encryption or Decryption */ +enum cryp_algorithm_dir { + CRYP_ALGORITHM_ENCRYPT, + CRYP_ALGORITHM_DECRYPT +}; + +/* Hardware access method */ +enum cryp_mode { + CRYP_MODE_POLLING, + CRYP_MODE_INTERRUPT, + CRYP_MODE_DMA +}; + +/** + * struct cryp_config - + * @keysize: Key size for AES + * @algomode: AES modes + * @algodir: Cryp Encryption or Decryption + * + * CRYP configuration structure to be passed to set configuration + */ +struct cryp_config { + int keysize; + enum cryp_algo_mode algomode; + enum cryp_algorithm_dir algodir; +}; + +/** + * struct cryp_protection_config - + * @privilege_access: Privileged cryp state enable/disable + * @secure_access: Secure cryp state enable/disable + * + * Protection configuration structure for setting privilage access + */ +struct cryp_protection_config { + enum cryp_state privilege_access; + enum cryp_state secure_access; +}; + +/* Cryp status 
*/ +enum cryp_status_id { + CRYP_STATUS_BUSY = 0x10, + CRYP_STATUS_OUTPUT_FIFO_FULL = 0x08, + CRYP_STATUS_OUTPUT_FIFO_NOT_EMPTY = 0x04, + CRYP_STATUS_INPUT_FIFO_NOT_FULL = 0x02, + CRYP_STATUS_INPUT_FIFO_EMPTY = 0x01 +}; + +/* Cryp DMA interface */ +#define CRYP_DMA_TX_FIFO 0x08 +#define CRYP_DMA_RX_FIFO 0x10 + +enum cryp_dma_req_type { + CRYP_DMA_DISABLE_BOTH, + CRYP_DMA_ENABLE_IN_DATA, + CRYP_DMA_ENABLE_OUT_DATA, + CRYP_DMA_ENABLE_BOTH_DIRECTIONS +}; + +enum cryp_dma_channel { + CRYP_DMA_RX = 0, + CRYP_DMA_TX +}; + +/* Key registers */ +enum cryp_key_reg_index { + CRYP_KEY_REG_1, + CRYP_KEY_REG_2, + CRYP_KEY_REG_3, + CRYP_KEY_REG_4 +}; + +/* Key register left and right */ +struct cryp_key_value { + u32 key_value_left; + u32 key_value_right; +}; + +/* Cryp Initialization structure */ +enum cryp_init_vector_index { + CRYP_INIT_VECTOR_INDEX_0, + CRYP_INIT_VECTOR_INDEX_1 +}; + +/* struct cryp_init_vector_value - + * @init_value_left + * @init_value_right + * */ +struct cryp_init_vector_value { + u32 init_value_left; + u32 init_value_right; +}; + +/** + * struct cryp_device_context - structure for a cryp context. + * @cr: control register + * @dmacr: DMA control register + * @imsc: Interrupt mask set/clear register + * @key_1_l: Key 1l register + * @key_1_r: Key 1r register + * @key_2_l: Key 2l register + * @key_2_r: Key 2r register + * @key_3_l: Key 3l register + * @key_3_r: Key 3r register + * @key_4_l: Key 4l register + * @key_4_r: Key 4r register + * @init_vect_0_l: Initialization vector 0l register + * @init_vect_0_r: Initialization vector 0r register + * @init_vect_1_l: Initialization vector 1l register + * @init_vect_1_r: Initialization vector 0r register + * @din: Data in register + * @dout: Data out register + * + * CRYP power management specifc structure. + */ +struct cryp_device_context { + u32 cr; + u32 dmacr; + u32 imsc; + + u32 key_1_l; + u32 key_1_r; + u32 key_2_l; + u32 key_2_r; + u32 key_3_l; + u32 key_3_r; + u32 key_4_l; + u32 key_4_r; + + u32 init_vect_0_l; + u32 init_vect_0_r; + u32 init_vect_1_l; + u32 init_vect_1_r; + + u32 din; + u32 dout; +}; + +struct cryp_dma { + dma_cap_mask_t mask; + struct completion cryp_dma_complete; + struct dma_chan *chan_cryp2mem; + struct dma_chan *chan_mem2cryp; + struct stedma40_chan_cfg *cfg_cryp2mem; + struct stedma40_chan_cfg *cfg_mem2cryp; + int sg_src_len; + int sg_dst_len; + struct scatterlist *sg_src; + struct scatterlist *sg_dst; + int nents_src; + int nents_dst; +}; + +/** + * struct cryp_device_data - structure for a cryp device. + * @base: Pointer to virtual base address of the cryp device. + * @phybase: Pointer to physical memory location of the cryp device. + * @dev: Pointer to the devices dev structure. + * @clk: Pointer to the device's clock control. + * @pwr_regulator: Pointer to the device's power control. + * @power_status: Current status of the power. + * @ctx_lock: Lock for current_ctx. + * @current_ctx: Pointer to the currently allocated context. + * @list_node: For inclusion into a klist. + * @dma: The dma structure holding channel configuration. + * @power_state: TRUE = power state on, FALSE = power state off. + * @power_state_spinlock: Spinlock for power_state. + * @restore_dev_ctx: TRUE = saved ctx, FALSE = no saved ctx. 
+ */ +struct cryp_device_data { + struct cryp_register __iomem *base; + phys_addr_t phybase; + struct device *dev; + struct clk *clk; + struct regulator *pwr_regulator; + int power_status; + struct spinlock ctx_lock; + struct cryp_ctx *current_ctx; + struct klist_node list_node; + struct cryp_dma dma; + bool power_state; + struct spinlock power_state_spinlock; + bool restore_dev_ctx; +}; + +void cryp_wait_until_done(struct cryp_device_data *device_data); + +/* Initialization functions */ + +int cryp_check(struct cryp_device_data *device_data); + +void cryp_activity(struct cryp_device_data *device_data, + enum cryp_crypen cryp_crypen); + +void cryp_flush_inoutfifo(struct cryp_device_data *device_data); + +int cryp_set_configuration(struct cryp_device_data *device_data, + struct cryp_config *cryp_config, + u32 *control_register); + +void cryp_configure_for_dma(struct cryp_device_data *device_data, + enum cryp_dma_req_type dma_req); + +int cryp_configure_key_values(struct cryp_device_data *device_data, + enum cryp_key_reg_index key_reg_index, + struct cryp_key_value key_value); + +int cryp_configure_init_vector(struct cryp_device_data *device_data, + enum cryp_init_vector_index + init_vector_index, + struct cryp_init_vector_value + init_vector_value); + +int cryp_configure_protection(struct cryp_device_data *device_data, + struct cryp_protection_config *p_protect_config); + +/* Power management funtions */ +void cryp_save_device_context(struct cryp_device_data *device_data, + struct cryp_device_context *ctx, + int cryp_mode); + +void cryp_restore_device_context(struct cryp_device_data *device_data, + struct cryp_device_context *ctx); + +/* Data transfer and status bits. */ +int cryp_is_logic_busy(struct cryp_device_data *device_data); + +int cryp_get_status(struct cryp_device_data *device_data); + +/** + * cryp_write_indata - This routine writes 32 bit data into the data input + * register of the cryptography IP. + * @device_data: Pointer to the device data struct for base address. + * @write_data: Data to write. + */ +int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data); + +/** + * cryp_read_outdata - This routine reads the data from the data output + * register of the CRYP logic + * @device_data: Pointer to the device data struct for base address. + * @read_data: Read the data from the output FIFO. + */ +int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data); + +#endif /* _CRYP_H_ */ diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c new file mode 100644 index 000000000..fded0a5cf --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -0,0 +1,1817 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson. + * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson. + * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson. + * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson. + * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson. + * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson. 
+ * License terms: GNU General Public License (GPL) version 2 + */ + +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/crypto.h> +#include <linux/dmaengine.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irqreturn.h> +#include <linux/klist.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/semaphore.h> +#include <linux/platform_data/dma-ste-dma40.h> + +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <crypto/ctr.h> +#include <crypto/des.h> +#include <crypto/scatterwalk.h> + +#include <linux/platform_data/crypto-ux500.h> + +#include "cryp_p.h" +#include "cryp.h" + +#define CRYP_MAX_KEY_SIZE 32 +#define BYTES_PER_WORD 4 + +static int cryp_mode; +static atomic_t session_id; + +static struct stedma40_chan_cfg *mem_to_engine; +static struct stedma40_chan_cfg *engine_to_mem; + +/** + * struct cryp_driver_data - data specific to the driver. + * + * @device_list: A list of registered devices to choose from. + * @device_allocation: A semaphore initialized with number of devices. + */ +struct cryp_driver_data { + struct klist device_list; + struct semaphore device_allocation; +}; + +/** + * struct cryp_ctx - Crypto context + * @config: Crypto mode. + * @key[CRYP_MAX_KEY_SIZE]: Key. + * @keylen: Length of key. + * @iv: Pointer to initialization vector. + * @indata: Pointer to indata. + * @outdata: Pointer to outdata. + * @datalen: Length of indata. + * @outlen: Length of outdata. + * @blocksize: Size of blocks. + * @updated: Updated flag. + * @dev_ctx: Device dependent context. + * @device: Pointer to the device. + */ +struct cryp_ctx { + struct cryp_config config; + u8 key[CRYP_MAX_KEY_SIZE]; + u32 keylen; + u8 *iv; + const u8 *indata; + u8 *outdata; + u32 datalen; + u32 outlen; + u32 blocksize; + u8 updated; + struct cryp_device_context dev_ctx; + struct cryp_device_data *device; + u32 session_id; +}; + +static struct cryp_driver_data driver_data; + +/** + * uint8p_to_uint32_be - 4*uint8 to uint32 big endian + * @in: Data to convert. + */ +static inline u32 uint8p_to_uint32_be(u8 *in) +{ + u32 *data = (u32 *)in; + + return cpu_to_be32p(data); +} + +/** + * swap_bits_in_byte - mirror the bits in a byte + * @b: the byte to be mirrored + * + * The bits are swapped the following way: + * Byte b include bits 0-7, nibble 1 (n1) include bits 0-3 and + * nibble 2 (n2) bits 4-7. + * + * Nibble 1 (n1): + * (The "old" (moved) bit is replaced with a zero) + * 1. Move bit 6 and 7, 4 positions to the left. + * 2. Move bit 3 and 5, 2 positions to the left. + * 3. Move bit 1-4, 1 position to the left. + * + * Nibble 2 (n2): + * 1. Move bit 0 and 1, 4 positions to the right. + * 2. Move bit 2 and 4, 2 positions to the right. + * 3. Move bit 3-6, 1 position to the right. + * + * Combine the two nibbles to a complete and swapped byte. 
+ */ + +static inline u8 swap_bits_in_byte(u8 b) +{ +#define R_SHIFT_4_MASK 0xc0 /* Bits 6 and 7, right shift 4 */ +#define R_SHIFT_2_MASK 0x28 /* (After right shift 4) Bits 3 and 5, + right shift 2 */ +#define R_SHIFT_1_MASK 0x1e /* (After right shift 2) Bits 1-4, + right shift 1 */ +#define L_SHIFT_4_MASK 0x03 /* Bits 0 and 1, left shift 4 */ +#define L_SHIFT_2_MASK 0x14 /* (After left shift 4) Bits 2 and 4, + left shift 2 */ +#define L_SHIFT_1_MASK 0x78 /* (After left shift 1) Bits 3-6, + left shift 1 */ + + u8 n1; + u8 n2; + + /* Swap most significant nibble */ + /* Right shift 4, bits 6 and 7 */ + n1 = ((b & R_SHIFT_4_MASK) >> 4) | (b & ~(R_SHIFT_4_MASK >> 4)); + /* Right shift 2, bits 3 and 5 */ + n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2)); + /* Right shift 1, bits 1-4 */ + n1 = (n1 & R_SHIFT_1_MASK) >> 1; + + /* Swap least significant nibble */ + /* Left shift 4, bits 0 and 1 */ + n2 = ((b & L_SHIFT_4_MASK) << 4) | (b & ~(L_SHIFT_4_MASK << 4)); + /* Left shift 2, bits 2 and 4 */ + n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2)); + /* Left shift 1, bits 3-6 */ + n2 = (n2 & L_SHIFT_1_MASK) << 1; + + return n1 | n2; +} + +static inline void swap_words_in_key_and_bits_in_byte(const u8 *in, + u8 *out, u32 len) +{ + unsigned int i = 0; + int j; + int index = 0; + + j = len - BYTES_PER_WORD; + while (j >= 0) { + for (i = 0; i < BYTES_PER_WORD; i++) { + index = len - j - BYTES_PER_WORD + i; + out[j + i] = + swap_bits_in_byte(in[index]); + } + j -= BYTES_PER_WORD; + } +} + +static void add_session_id(struct cryp_ctx *ctx) +{ + /* + * We never want 0 to be a valid value, since this is the default value + * for the software context. + */ + if (unlikely(atomic_inc_and_test(&session_id))) + atomic_inc(&session_id); + + ctx->session_id = atomic_read(&session_id); +} + +static irqreturn_t cryp_interrupt_handler(int irq, void *param) +{ + struct cryp_ctx *ctx; + int count; + struct cryp_device_data *device_data; + + if (param == NULL) { + BUG_ON(!param); + return IRQ_HANDLED; + } + + /* The device is coming from the one found in hw_crypt_noxts. */ + device_data = (struct cryp_device_data *)param; + + ctx = device_data->current_ctx; + + if (ctx == NULL) { + BUG_ON(!ctx); + return IRQ_HANDLED; + } + + dev_dbg(ctx->device->dev, "[%s] (len: %d) %s, ", __func__, ctx->outlen, + cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO) ? 
+ "out" : "in"); + + if (cryp_pending_irq_src(device_data, + CRYP_IRQ_SRC_OUTPUT_FIFO)) { + if (ctx->outlen / ctx->blocksize > 0) { + count = ctx->blocksize / 4; + + readsl(&device_data->base->dout, ctx->outdata, count); + ctx->outdata += count; + ctx->outlen -= count; + + if (ctx->outlen == 0) { + cryp_disable_irq_src(device_data, + CRYP_IRQ_SRC_OUTPUT_FIFO); + } + } + } else if (cryp_pending_irq_src(device_data, + CRYP_IRQ_SRC_INPUT_FIFO)) { + if (ctx->datalen / ctx->blocksize > 0) { + count = ctx->blocksize / 4; + + writesl(&device_data->base->din, ctx->indata, count); + + ctx->indata += count; + ctx->datalen -= count; + + if (ctx->datalen == 0) + cryp_disable_irq_src(device_data, + CRYP_IRQ_SRC_INPUT_FIFO); + + if (ctx->config.algomode == CRYP_ALGO_AES_XTS) { + CRYP_PUT_BITS(&device_data->base->cr, + CRYP_START_ENABLE, + CRYP_CR_START_POS, + CRYP_CR_START_MASK); + + cryp_wait_until_done(device_data); + } + } + } + + return IRQ_HANDLED; +} + +static int mode_is_aes(enum cryp_algo_mode mode) +{ + return CRYP_ALGO_AES_ECB == mode || + CRYP_ALGO_AES_CBC == mode || + CRYP_ALGO_AES_CTR == mode || + CRYP_ALGO_AES_XTS == mode; +} + +static int cfg_iv(struct cryp_device_data *device_data, u32 left, u32 right, + enum cryp_init_vector_index index) +{ + struct cryp_init_vector_value vector_value; + + dev_dbg(device_data->dev, "[%s]", __func__); + + vector_value.init_value_left = left; + vector_value.init_value_right = right; + + return cryp_configure_init_vector(device_data, + index, + vector_value); +} + +static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx) +{ + int i; + int status = 0; + int num_of_regs = ctx->blocksize / 8; + u32 iv[AES_BLOCK_SIZE / 4]; + + dev_dbg(device_data->dev, "[%s]", __func__); + + /* + * Since we loop on num_of_regs we need to have a check in case + * someone provides an incorrect blocksize which would force calling + * cfg_iv with i greater than 2 which is an error. 
+ */ + if (num_of_regs > 2) { + dev_err(device_data->dev, "[%s] Incorrect blocksize %d", + __func__, ctx->blocksize); + return -EINVAL; + } + + for (i = 0; i < ctx->blocksize / 4; i++) + iv[i] = uint8p_to_uint32_be(ctx->iv + i*4); + + for (i = 0; i < num_of_regs; i++) { + status = cfg_iv(device_data, iv[i*2], iv[i*2+1], + (enum cryp_init_vector_index) i); + if (status != 0) + return status; + } + return status; +} + +static int set_key(struct cryp_device_data *device_data, + u32 left_key, + u32 right_key, + enum cryp_key_reg_index index) +{ + struct cryp_key_value key_value; + int cryp_error; + + dev_dbg(device_data->dev, "[%s]", __func__); + + key_value.key_value_left = left_key; + key_value.key_value_right = right_key; + + cryp_error = cryp_configure_key_values(device_data, + index, + key_value); + if (cryp_error != 0) + dev_err(device_data->dev, "[%s]: " + "cryp_configure_key_values() failed!", __func__); + + return cryp_error; +} + +static int cfg_keys(struct cryp_ctx *ctx) +{ + int i; + int num_of_regs = ctx->keylen / 8; + u32 swapped_key[CRYP_MAX_KEY_SIZE / 4]; + int cryp_error = 0; + + dev_dbg(ctx->device->dev, "[%s]", __func__); + + if (mode_is_aes(ctx->config.algomode)) { + swap_words_in_key_and_bits_in_byte((u8 *)ctx->key, + (u8 *)swapped_key, + ctx->keylen); + } else { + for (i = 0; i < ctx->keylen / 4; i++) + swapped_key[i] = uint8p_to_uint32_be(ctx->key + i*4); + } + + for (i = 0; i < num_of_regs; i++) { + cryp_error = set_key(ctx->device, + *(((u32 *)swapped_key)+i*2), + *(((u32 *)swapped_key)+i*2+1), + (enum cryp_key_reg_index) i); + + if (cryp_error != 0) { + dev_err(ctx->device->dev, "[%s]: set_key() failed!", + __func__); + return cryp_error; + } + } + return cryp_error; +} + +static int cryp_setup_context(struct cryp_ctx *ctx, + struct cryp_device_data *device_data) +{ + u32 control_register = CRYP_CR_DEFAULT; + + switch (cryp_mode) { + case CRYP_MODE_INTERRUPT: + writel_relaxed(CRYP_IMSC_DEFAULT, &device_data->base->imsc); + break; + + case CRYP_MODE_DMA: + writel_relaxed(CRYP_DMACR_DEFAULT, &device_data->base->dmacr); + break; + + default: + break; + } + + if (ctx->updated == 0) { + cryp_flush_inoutfifo(device_data); + if (cfg_keys(ctx) != 0) { + dev_err(ctx->device->dev, "[%s]: cfg_keys failed!", + __func__); + return -EINVAL; + } + + if (ctx->iv && + CRYP_ALGO_AES_ECB != ctx->config.algomode && + CRYP_ALGO_DES_ECB != ctx->config.algomode && + CRYP_ALGO_TDES_ECB != ctx->config.algomode) { + if (cfg_ivs(device_data, ctx) != 0) + return -EPERM; + } + + cryp_set_configuration(device_data, &ctx->config, + &control_register); + add_session_id(ctx); + } else if (ctx->updated == 1 && + ctx->session_id != atomic_read(&session_id)) { + cryp_flush_inoutfifo(device_data); + cryp_restore_device_context(device_data, &ctx->dev_ctx); + + add_session_id(ctx); + control_register = ctx->dev_ctx.cr; + } else + control_register = ctx->dev_ctx.cr; + + writel(control_register | + (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS), + &device_data->base->cr); + + return 0; +} + +static int cryp_get_device_data(struct cryp_ctx *ctx, + struct cryp_device_data **device_data) +{ + int ret; + struct klist_iter device_iterator; + struct klist_node *device_node; + struct cryp_device_data *local_device_data = NULL; + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + /* Wait until a device is available */ + ret = down_interruptible(&driver_data.device_allocation); + if (ret) + return ret; /* Interrupted */ + + /* Select a device */ + klist_iter_init(&driver_data.device_list, &device_iterator); + + device_node = 
klist_next(&device_iterator); + while (device_node) { + local_device_data = container_of(device_node, + struct cryp_device_data, list_node); + spin_lock(&local_device_data->ctx_lock); + /* current_ctx allocates a device, NULL = unallocated */ + if (local_device_data->current_ctx) { + device_node = klist_next(&device_iterator); + } else { + local_device_data->current_ctx = ctx; + ctx->device = local_device_data; + spin_unlock(&local_device_data->ctx_lock); + break; + } + spin_unlock(&local_device_data->ctx_lock); + } + klist_iter_exit(&device_iterator); + + if (!device_node) { + /** + * No free device found. + * Since we allocated a device with down_interruptible, this + * should not be able to happen. + * Number of available devices, which are contained in + * device_allocation, is therefore decremented by not doing + * an up(device_allocation). + */ + return -EBUSY; + } + + *device_data = local_device_data; + + return 0; +} + +static void cryp_dma_setup_channel(struct cryp_device_data *device_data, + struct device *dev) +{ + struct dma_slave_config mem2cryp = { + .direction = DMA_MEM_TO_DEV, + .dst_addr = device_data->phybase + CRYP_DMA_TX_FIFO, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, + .dst_maxburst = 4, + }; + struct dma_slave_config cryp2mem = { + .direction = DMA_DEV_TO_MEM, + .src_addr = device_data->phybase + CRYP_DMA_RX_FIFO, + .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, + .src_maxburst = 4, + }; + + dma_cap_zero(device_data->dma.mask); + dma_cap_set(DMA_SLAVE, device_data->dma.mask); + + device_data->dma.cfg_mem2cryp = mem_to_engine; + device_data->dma.chan_mem2cryp = + dma_request_channel(device_data->dma.mask, + stedma40_filter, + device_data->dma.cfg_mem2cryp); + + device_data->dma.cfg_cryp2mem = engine_to_mem; + device_data->dma.chan_cryp2mem = + dma_request_channel(device_data->dma.mask, + stedma40_filter, + device_data->dma.cfg_cryp2mem); + + dmaengine_slave_config(device_data->dma.chan_mem2cryp, &mem2cryp); + dmaengine_slave_config(device_data->dma.chan_cryp2mem, &cryp2mem); + + init_completion(&device_data->dma.cryp_dma_complete); +} + +static void cryp_dma_out_callback(void *data) +{ + struct cryp_ctx *ctx = (struct cryp_ctx *) data; + dev_dbg(ctx->device->dev, "[%s]: ", __func__); + + complete(&ctx->device->dma.cryp_dma_complete); +} + +static int cryp_set_dma_transfer(struct cryp_ctx *ctx, + struct scatterlist *sg, + int len, + enum dma_data_direction direction) +{ + struct dma_async_tx_descriptor *desc; + struct dma_chan *channel = NULL; + dma_cookie_t cookie; + + dev_dbg(ctx->device->dev, "[%s]: ", __func__); + + if (unlikely(!IS_ALIGNED((u32)sg, 4))) { + dev_err(ctx->device->dev, "[%s]: Data in sg list isn't " + "aligned! 
Addr: 0x%08x", __func__, (u32)sg); + return -EFAULT; + } + + switch (direction) { + case DMA_TO_DEVICE: + channel = ctx->device->dma.chan_mem2cryp; + ctx->device->dma.sg_src = sg; + ctx->device->dma.sg_src_len = dma_map_sg(channel->device->dev, + ctx->device->dma.sg_src, + ctx->device->dma.nents_src, + direction); + + if (!ctx->device->dma.sg_src_len) { + dev_dbg(ctx->device->dev, + "[%s]: Could not map the sg list (TO_DEVICE)", + __func__); + return -EFAULT; + } + + dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " + "(TO_DEVICE)", __func__); + + desc = dmaengine_prep_slave_sg(channel, + ctx->device->dma.sg_src, + ctx->device->dma.sg_src_len, + direction, DMA_CTRL_ACK); + break; + + case DMA_FROM_DEVICE: + channel = ctx->device->dma.chan_cryp2mem; + ctx->device->dma.sg_dst = sg; + ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev, + ctx->device->dma.sg_dst, + ctx->device->dma.nents_dst, + direction); + + if (!ctx->device->dma.sg_dst_len) { + dev_dbg(ctx->device->dev, + "[%s]: Could not map the sg list (FROM_DEVICE)", + __func__); + return -EFAULT; + } + + dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " + "(FROM_DEVICE)", __func__); + + desc = dmaengine_prep_slave_sg(channel, + ctx->device->dma.sg_dst, + ctx->device->dma.sg_dst_len, + direction, + DMA_CTRL_ACK | + DMA_PREP_INTERRUPT); + + desc->callback = cryp_dma_out_callback; + desc->callback_param = ctx; + break; + + default: + dev_dbg(ctx->device->dev, "[%s]: Invalid DMA direction", + __func__); + return -EFAULT; + } + + cookie = dmaengine_submit(desc); + dma_async_issue_pending(channel); + + return 0; +} + +static void cryp_dma_done(struct cryp_ctx *ctx) +{ + struct dma_chan *chan; + + dev_dbg(ctx->device->dev, "[%s]: ", __func__); + + chan = ctx->device->dma.chan_mem2cryp; + dmaengine_terminate_all(chan); + dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src, + ctx->device->dma.sg_src_len, DMA_TO_DEVICE); + + chan = ctx->device->dma.chan_cryp2mem; + dmaengine_terminate_all(chan); + dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst, + ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE); +} + +static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg, + int len) +{ + int error = cryp_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); + dev_dbg(ctx->device->dev, "[%s]: ", __func__); + + if (error) { + dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() " + "failed", __func__); + return error; + } + + return len; +} + +static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len) +{ + int error = cryp_set_dma_transfer(ctx, sg, len, DMA_FROM_DEVICE); + if (error) { + dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() " + "failed", __func__); + return error; + } + + return len; +} + +static void cryp_polling_mode(struct cryp_ctx *ctx, + struct cryp_device_data *device_data) +{ + int len = ctx->blocksize / BYTES_PER_WORD; + int remaining_length = ctx->datalen; + u32 *indata = (u32 *)ctx->indata; + u32 *outdata = (u32 *)ctx->outdata; + + while (remaining_length > 0) { + writesl(&device_data->base->din, indata, len); + indata += len; + remaining_length -= (len * BYTES_PER_WORD); + cryp_wait_until_done(device_data); + + readsl(&device_data->base->dout, outdata, len); + outdata += len; + cryp_wait_until_done(device_data); + } +} + +static int cryp_disable_power(struct device *dev, + struct cryp_device_data *device_data, + bool save_device_context) +{ + int ret = 0; + + dev_dbg(dev, "[%s]", __func__); + + spin_lock(&device_data->power_state_spinlock); + if 
(!device_data->power_state) + goto out; + + spin_lock(&device_data->ctx_lock); + if (save_device_context && device_data->current_ctx) { + cryp_save_device_context(device_data, + &device_data->current_ctx->dev_ctx, + cryp_mode); + device_data->restore_dev_ctx = true; + } + spin_unlock(&device_data->ctx_lock); + + clk_disable(device_data->clk); + ret = regulator_disable(device_data->pwr_regulator); + if (ret) + dev_err(dev, "[%s]: " + "regulator_disable() failed!", + __func__); + + device_data->power_state = false; + +out: + spin_unlock(&device_data->power_state_spinlock); + + return ret; +} + +static int cryp_enable_power( + struct device *dev, + struct cryp_device_data *device_data, + bool restore_device_context) +{ + int ret = 0; + + dev_dbg(dev, "[%s]", __func__); + + spin_lock(&device_data->power_state_spinlock); + if (!device_data->power_state) { + ret = regulator_enable(device_data->pwr_regulator); + if (ret) { + dev_err(dev, "[%s]: regulator_enable() failed!", + __func__); + goto out; + } + + ret = clk_enable(device_data->clk); + if (ret) { + dev_err(dev, "[%s]: clk_enable() failed!", + __func__); + regulator_disable(device_data->pwr_regulator); + goto out; + } + device_data->power_state = true; + } + + if (device_data->restore_dev_ctx) { + spin_lock(&device_data->ctx_lock); + if (restore_device_context && device_data->current_ctx) { + device_data->restore_dev_ctx = false; + cryp_restore_device_context(device_data, + &device_data->current_ctx->dev_ctx); + } + spin_unlock(&device_data->ctx_lock); + } +out: + spin_unlock(&device_data->power_state_spinlock); + + return ret; +} + +static int hw_crypt_noxts(struct cryp_ctx *ctx, + struct cryp_device_data *device_data) +{ + int ret = 0; + + const u8 *indata = ctx->indata; + u8 *outdata = ctx->outdata; + u32 datalen = ctx->datalen; + u32 outlen = datalen; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->outlen = ctx->datalen; + + if (unlikely(!IS_ALIGNED((u32)indata, 4))) { + pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: " + "0x%08x", __func__, (u32)indata); + return -EINVAL; + } + + ret = cryp_setup_context(ctx, device_data); + + if (ret) + goto out; + + if (cryp_mode == CRYP_MODE_INTERRUPT) { + cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO | + CRYP_IRQ_SRC_OUTPUT_FIFO); + + /* + * ctx->outlen is decremented in the cryp_interrupt_handler + * function. We had to add cpu_relax() (barrier) to make sure + * that gcc didn't optimze away this variable. + */ + while (ctx->outlen > 0) + cpu_relax(); + } else if (cryp_mode == CRYP_MODE_POLLING || + cryp_mode == CRYP_MODE_DMA) { + /* + * The reason for having DMA in this if case is that if we are + * running cryp_mode = 2, then we separate DMA routines for + * handling cipher/plaintext > blocksize, except when + * running the normal CRYPTO_ALG_TYPE_CIPHER, then we still use + * the polling mode. Overhead of doing DMA setup eats up the + * benefits using it. 
+ */ + cryp_polling_mode(ctx, device_data); + } else { + dev_err(ctx->device->dev, "[%s]: Invalid operation mode!", + __func__); + ret = -EPERM; + goto out; + } + + cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode); + ctx->updated = 1; + +out: + ctx->indata = indata; + ctx->outdata = outdata; + ctx->datalen = datalen; + ctx->outlen = outlen; + + return ret; +} + +static int get_nents(struct scatterlist *sg, int nbytes) +{ + int nents = 0; + + while (nbytes > 0) { + nbytes -= sg->length; + sg = sg_next(sg); + nents++; + } + + return nents; +} + +static int ablk_dma_crypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + struct cryp_device_data *device_data; + + int bytes_written = 0; + int bytes_read = 0; + int ret; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->datalen = areq->nbytes; + ctx->outlen = areq->nbytes; + + ret = cryp_get_device_data(ctx, &device_data); + if (ret) + return ret; + + ret = cryp_setup_context(ctx, device_data); + if (ret) + goto out; + + /* We have the device now, so store the nents in the dma struct. */ + ctx->device->dma.nents_src = get_nents(areq->src, ctx->datalen); + ctx->device->dma.nents_dst = get_nents(areq->dst, ctx->outlen); + + /* Enable DMA in- and output. */ + cryp_configure_for_dma(device_data, CRYP_DMA_ENABLE_BOTH_DIRECTIONS); + + bytes_written = cryp_dma_write(ctx, areq->src, ctx->datalen); + bytes_read = cryp_dma_read(ctx, areq->dst, bytes_written); + + wait_for_completion(&ctx->device->dma.cryp_dma_complete); + cryp_dma_done(ctx); + + cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode); + ctx->updated = 1; + +out: + spin_lock(&device_data->ctx_lock); + device_data->current_ctx = NULL; + ctx->device = NULL; + spin_unlock(&device_data->ctx_lock); + + /* + * The down_interruptible part for this semaphore is called in + * cryp_get_device_data. 
+ */ + up(&driver_data.device_allocation); + + if (unlikely(bytes_written != bytes_read)) + return -EPERM; + + return 0; +} + +static int ablk_crypt(struct ablkcipher_request *areq) +{ + struct ablkcipher_walk walk; + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + struct cryp_device_data *device_data; + unsigned long src_paddr; + unsigned long dst_paddr; + int ret; + int nbytes; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ret = cryp_get_device_data(ctx, &device_data); + if (ret) + goto out; + + ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes); + ret = ablkcipher_walk_phys(areq, &walk); + + if (ret) { + pr_err(DEV_DBG_NAME "[%s]: ablkcipher_walk_phys() failed!", + __func__); + goto out; + } + + while ((nbytes = walk.nbytes) > 0) { + ctx->iv = walk.iv; + src_paddr = (page_to_phys(walk.src.page) + walk.src.offset); + ctx->indata = phys_to_virt(src_paddr); + + dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset); + ctx->outdata = phys_to_virt(dst_paddr); + + ctx->datalen = nbytes - (nbytes % ctx->blocksize); + + ret = hw_crypt_noxts(ctx, device_data); + if (ret) + goto out; + + nbytes -= ctx->datalen; + ret = ablkcipher_walk_done(areq, &walk, nbytes); + if (ret) + goto out; + } + ablkcipher_walk_complete(&walk); + +out: + /* Release the device */ + spin_lock(&device_data->ctx_lock); + device_data->current_ctx = NULL; + ctx->device = NULL; + spin_unlock(&device_data->ctx_lock); + + /* + * The down_interruptible part for this semaphore is called in + * cryp_get_device_data. + */ + up(&driver_data.device_allocation); + + return ret; +} + +static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + switch (keylen) { + case AES_KEYSIZE_128: + ctx->config.keysize = CRYP_KEY_SIZE_128; + break; + + case AES_KEYSIZE_192: + ctx->config.keysize = CRYP_KEY_SIZE_192; + break; + + case AES_KEYSIZE_256: + ctx->config.keysize = CRYP_KEY_SIZE_256; + break; + + default: + pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__); + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + ctx->updated = 0; + + return 0; +} + +static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + u32 tmp[DES_EXPKEY_WORDS]; + int ret; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + if (keylen != DES_KEY_SIZE) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN", + __func__); + return -EINVAL; + } + + ret = des_ekey(tmp, key); + if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", + __func__); + return -EINVAL; + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + ctx->updated = 0; + return 0; +} + +static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 *flags = &cipher->base.crt_flags; + const u32 *K = (const u32 *)key; + u32 tmp[DES3_EDE_EXPKEY_WORDS]; + int i, ret; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + if (keylen != DES3_EDE_KEY_SIZE) 
{ + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN", + __func__); + return -EINVAL; + } + + /* Checking key interdependency for weak key detection. */ + if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || + !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && + (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", + __func__); + return -EINVAL; + } + for (i = 0; i < 3; i++) { + ret = des_ekey(tmp, key + i*DES_KEY_SIZE); + if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + pr_debug(DEV_DBG_NAME " [%s]: " + "CRYPTO_TFM_REQ_WEAK_KEY", __func__); + return -EINVAL; + } + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + ctx->updated = 0; + return 0; +} + +static int cryp_blk_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT; + + /* + * DMA does not work for DES due to a hw bug */ + if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode)) + return ablk_dma_crypt(areq); + + /* For everything except DMA, we run the non DMA version. */ + return ablk_crypt(areq); +} + +static int cryp_blk_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + ctx->config.algodir = CRYP_ALGORITHM_DECRYPT; + + /* DMA does not work for DES due to a hw bug */ + if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode)) + return ablk_dma_crypt(areq); + + /* For everything except DMA, we run the non DMA version. 
*/ + return ablk_crypt(areq); +} + +struct cryp_algo_template { + enum cryp_algo_mode algomode; + struct crypto_alg crypto; +}; + +static int cryp_cra_init(struct crypto_tfm *tfm) +{ + struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_alg *alg = tfm->__crt_alg; + struct cryp_algo_template *cryp_alg = container_of(alg, + struct cryp_algo_template, + crypto); + + ctx->config.algomode = cryp_alg->algomode; + ctx->blocksize = crypto_tfm_alg_blocksize(tfm); + + return 0; +} + +static struct cryp_algo_template cryp_algs[] = { + { + .algomode = CRYP_ALGO_AES_ECB, + .crypto = { + .cra_name = "aes", + .cra_driver_name = "aes-ux500", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_init = cryp_cra_init, + .cra_module = THIS_MODULE, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_ablkcipher_setkey, + .encrypt = cryp_blk_encrypt, + .decrypt = cryp_blk_decrypt + } + } + } + }, + { + .algomode = CRYP_ALGO_AES_ECB, + .crypto = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-ux500", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_init = cryp_cra_init, + .cra_module = THIS_MODULE, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_ablkcipher_setkey, + .encrypt = cryp_blk_encrypt, + .decrypt = cryp_blk_decrypt, + } + } + } + }, + { + .algomode = CRYP_ALGO_AES_CBC, + .crypto = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-ux500", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_init = cryp_cra_init, + .cra_module = THIS_MODULE, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_ablkcipher_setkey, + .encrypt = cryp_blk_encrypt, + .decrypt = cryp_blk_decrypt, + .ivsize = AES_BLOCK_SIZE, + } + } + } + }, + { + .algomode = CRYP_ALGO_AES_CTR, + .crypto = { + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr-aes-ux500", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_init = cryp_cra_init, + .cra_module = THIS_MODULE, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_ablkcipher_setkey, + .encrypt = cryp_blk_encrypt, + .decrypt = cryp_blk_decrypt, + .ivsize = AES_BLOCK_SIZE, + } + } + } + }, + { + .algomode = CRYP_ALGO_DES_ECB, + .crypto = { + .cra_name = "des", + .cra_driver_name = "des-ux500", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_init = cryp_cra_init, + .cra_module = THIS_MODULE, + .cra_u = { + .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = des_ablkcipher_setkey, + 
.encrypt = cryp_blk_encrypt, + .decrypt = cryp_blk_decrypt + } + } + } + + }, + { + .algomode = CRYP_ALGO_TDES_ECB, + .crypto = { + .cra_name = "des3_ede", + .cra_driver_name = "des3_ede-ux500", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_init = cryp_cra_init, + .cra_module = THIS_MODULE, + .cra_u = { + .ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = des_ablkcipher_setkey, + .encrypt = cryp_blk_encrypt, + .decrypt = cryp_blk_decrypt + } + } + } + }, + { + .algomode = CRYP_ALGO_DES_ECB, + .crypto = { + .cra_name = "ecb(des)", + .cra_driver_name = "ecb-des-ux500", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_init = cryp_cra_init, + .cra_module = THIS_MODULE, + .cra_u = { + .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = des_ablkcipher_setkey, + .encrypt = cryp_blk_encrypt, + .decrypt = cryp_blk_decrypt, + } + } + } + }, + { + .algomode = CRYP_ALGO_TDES_ECB, + .crypto = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3_ede-ux500", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_init = cryp_cra_init, + .cra_module = THIS_MODULE, + .cra_u = { + .ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = des3_ablkcipher_setkey, + .encrypt = cryp_blk_encrypt, + .decrypt = cryp_blk_decrypt, + } + } + } + }, + { + .algomode = CRYP_ALGO_DES_CBC, + .crypto = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc-des-ux500", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_init = cryp_cra_init, + .cra_module = THIS_MODULE, + .cra_u = { + .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = des_ablkcipher_setkey, + .encrypt = cryp_blk_encrypt, + .decrypt = cryp_blk_decrypt, + } + } + } + }, + { + .algomode = CRYP_ALGO_TDES_CBC, + .crypto = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-des3_ede-ux500", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct cryp_ctx), + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_init = cryp_cra_init, + .cra_module = THIS_MODULE, + .cra_u = { + .ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = des3_ablkcipher_setkey, + .encrypt = cryp_blk_encrypt, + .decrypt = cryp_blk_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + } + } + } + } +}; + +/** + * cryp_algs_register_all - + */ +static int cryp_algs_register_all(void) +{ + int ret; + int i; + int count; + + pr_debug("[%s]", __func__); + + for (i = 0; i < ARRAY_SIZE(cryp_algs); i++) { + ret = crypto_register_alg(&cryp_algs[i].crypto); + if (ret) { + count = i; + pr_err("[%s] alg registration failed", + 
cryp_algs[i].crypto.cra_driver_name); + goto unreg; + } + } + return 0; +unreg: + for (i = 0; i < count; i++) + crypto_unregister_alg(&cryp_algs[i].crypto); + return ret; +} + +/** + * cryp_algs_unregister_all - + */ +static void cryp_algs_unregister_all(void) +{ + int i; + + pr_debug(DEV_DBG_NAME " [%s]", __func__); + + for (i = 0; i < ARRAY_SIZE(cryp_algs); i++) + crypto_unregister_alg(&cryp_algs[i].crypto); +} + +static int ux500_cryp_probe(struct platform_device *pdev) +{ + int ret; + int cryp_error = 0; + struct resource *res = NULL; + struct resource *res_irq = NULL; + struct cryp_device_data *device_data; + struct cryp_protection_config prot = { + .privilege_access = CRYP_STATE_ENABLE + }; + struct device *dev = &pdev->dev; + + dev_dbg(dev, "[%s]", __func__); + device_data = kzalloc(sizeof(struct cryp_device_data), GFP_ATOMIC); + if (!device_data) { + dev_err(dev, "[%s]: kzalloc() failed!", __func__); + ret = -ENOMEM; + goto out; + } + + device_data->dev = dev; + device_data->current_ctx = NULL; + + /* Grab the DMA configuration from platform data. */ + mem_to_engine = &((struct cryp_platform_data *) + dev->platform_data)->mem_to_engine; + engine_to_mem = &((struct cryp_platform_data *) + dev->platform_data)->engine_to_mem; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "[%s]: platform_get_resource() failed", + __func__); + ret = -ENODEV; + goto out_kfree; + } + + res = request_mem_region(res->start, resource_size(res), pdev->name); + if (res == NULL) { + dev_err(dev, "[%s]: request_mem_region() failed", + __func__); + ret = -EBUSY; + goto out_kfree; + } + + device_data->phybase = res->start; + device_data->base = ioremap(res->start, resource_size(res)); + if (!device_data->base) { + dev_err(dev, "[%s]: ioremap failed!", __func__); + ret = -ENOMEM; + goto out_free_mem; + } + + spin_lock_init(&device_data->ctx_lock); + spin_lock_init(&device_data->power_state_spinlock); + + /* Enable power for CRYP hardware block */ + device_data->pwr_regulator = regulator_get(&pdev->dev, "v-ape"); + if (IS_ERR(device_data->pwr_regulator)) { + dev_err(dev, "[%s]: could not get cryp regulator", __func__); + ret = PTR_ERR(device_data->pwr_regulator); + device_data->pwr_regulator = NULL; + goto out_unmap; + } + + /* Enable the clk for CRYP hardware block */ + device_data->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(device_data->clk)) { + dev_err(dev, "[%s]: clk_get() failed!", __func__); + ret = PTR_ERR(device_data->clk); + goto out_regulator; + } + + ret = clk_prepare(device_data->clk); + if (ret) { + dev_err(dev, "[%s]: clk_prepare() failed!", __func__); + goto out_clk; + } + + /* Enable device power (and clock) */ + ret = cryp_enable_power(device_data->dev, device_data, false); + if (ret) { + dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__); + goto out_clk_unprepare; + } + + cryp_error = cryp_check(device_data); + if (cryp_error != 0) { + dev_err(dev, "[%s]: cryp_init() failed!", __func__); + ret = -EINVAL; + goto out_power; + } + + cryp_error = cryp_configure_protection(device_data, &prot); + if (cryp_error != 0) { + dev_err(dev, "[%s]: cryp_configure_protection() failed!", + __func__); + ret = -EINVAL; + goto out_power; + } + + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_irq) { + dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable", + __func__); + ret = -ENODEV; + goto out_power; + } + + ret = request_irq(res_irq->start, + cryp_interrupt_handler, + 0, + "cryp1", + device_data); + if (ret) { + dev_err(dev, "[%s]: Unable to 
request IRQ", __func__); + goto out_power; + } + + if (cryp_mode == CRYP_MODE_DMA) + cryp_dma_setup_channel(device_data, dev); + + platform_set_drvdata(pdev, device_data); + + /* Put the new device into the device list... */ + klist_add_tail(&device_data->list_node, &driver_data.device_list); + + /* ... and signal that a new device is available. */ + up(&driver_data.device_allocation); + + atomic_set(&session_id, 1); + + ret = cryp_algs_register_all(); + if (ret) { + dev_err(dev, "[%s]: cryp_algs_register_all() failed!", + __func__); + goto out_power; + } + + dev_info(dev, "successfully registered\n"); + + return 0; + +out_power: + cryp_disable_power(device_data->dev, device_data, false); + +out_clk_unprepare: + clk_unprepare(device_data->clk); + +out_clk: + clk_put(device_data->clk); + +out_regulator: + regulator_put(device_data->pwr_regulator); + +out_unmap: + iounmap(device_data->base); + +out_free_mem: + release_mem_region(res->start, resource_size(res)); + +out_kfree: + kfree(device_data); +out: + return ret; +} + +static int ux500_cryp_remove(struct platform_device *pdev) +{ + struct resource *res = NULL; + struct resource *res_irq = NULL; + struct cryp_device_data *device_data; + + dev_dbg(&pdev->dev, "[%s]", __func__); + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", + __func__); + return -ENOMEM; + } + + /* Try to decrease the number of available devices. */ + if (down_trylock(&driver_data.device_allocation)) + return -EBUSY; + + /* Check that the device is free */ + spin_lock(&device_data->ctx_lock); + /* current_ctx allocates a device, NULL = unallocated */ + if (device_data->current_ctx) { + /* The device is busy */ + spin_unlock(&device_data->ctx_lock); + /* Return the device to the pool. */ + up(&driver_data.device_allocation); + return -EBUSY; + } + + spin_unlock(&device_data->ctx_lock); + + /* Remove the device from the list */ + if (klist_node_attached(&device_data->list_node)) + klist_remove(&device_data->list_node); + + /* If this was the last device, remove the services */ + if (list_empty(&driver_data.device_list.k_list)) + cryp_algs_unregister_all(); + + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_irq) + dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable", + __func__); + else { + disable_irq(res_irq->start); + free_irq(res_irq->start, device_data); + } + + if (cryp_disable_power(&pdev->dev, device_data, false)) + dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed", + __func__); + + clk_unprepare(device_data->clk); + clk_put(device_data->clk); + regulator_put(device_data->pwr_regulator); + + iounmap(device_data->base); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res) + release_mem_region(res->start, resource_size(res)); + + kfree(device_data); + + return 0; +} + +static void ux500_cryp_shutdown(struct platform_device *pdev) +{ + struct resource *res_irq = NULL; + struct cryp_device_data *device_data; + + dev_dbg(&pdev->dev, "[%s]", __func__); + + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", + __func__); + return; + } + + /* Check that the device is free */ + spin_lock(&device_data->ctx_lock); + /* current_ctx allocates a device, NULL = unallocated */ + if (!device_data->current_ctx) { + if (down_trylock(&driver_data.device_allocation)) + dev_dbg(&pdev->dev, "[%s]: Cryp still in use!" 
+ "Shutting down anyway...", __func__); + /** + * (Allocate the device) + * Need to set this to non-null (dummy) value, + * to avoid usage if context switching. + */ + device_data->current_ctx++; + } + spin_unlock(&device_data->ctx_lock); + + /* Remove the device from the list */ + if (klist_node_attached(&device_data->list_node)) + klist_remove(&device_data->list_node); + + /* If this was the last device, remove the services */ + if (list_empty(&driver_data.device_list.k_list)) + cryp_algs_unregister_all(); + + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_irq) + dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable", + __func__); + else { + disable_irq(res_irq->start); + free_irq(res_irq->start, device_data); + } + + if (cryp_disable_power(&pdev->dev, device_data, false)) + dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed", + __func__); + +} + +#ifdef CONFIG_PM_SLEEP +static int ux500_cryp_suspend(struct device *dev) +{ + int ret; + struct platform_device *pdev = to_platform_device(dev); + struct cryp_device_data *device_data; + struct resource *res_irq; + struct cryp_ctx *temp_ctx = NULL; + + dev_dbg(dev, "[%s]", __func__); + + /* Handle state? */ + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__); + return -ENOMEM; + } + + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_irq) + dev_err(dev, "[%s]: IORESOURCE_IRQ, unavailable", __func__); + else + disable_irq(res_irq->start); + + spin_lock(&device_data->ctx_lock); + if (!device_data->current_ctx) + device_data->current_ctx++; + spin_unlock(&device_data->ctx_lock); + + if (device_data->current_ctx == ++temp_ctx) { + if (down_interruptible(&driver_data.device_allocation)) + dev_dbg(dev, "[%s]: down_interruptible() failed", + __func__); + ret = cryp_disable_power(dev, device_data, false); + + } else + ret = cryp_disable_power(dev, device_data, true); + + if (ret) + dev_err(dev, "[%s]: cryp_disable_power()", __func__); + + return ret; +} + +static int ux500_cryp_resume(struct device *dev) +{ + int ret = 0; + struct platform_device *pdev = to_platform_device(dev); + struct cryp_device_data *device_data; + struct resource *res_irq; + struct cryp_ctx *temp_ctx = NULL; + + dev_dbg(dev, "[%s]", __func__); + + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__); + return -ENOMEM; + } + + spin_lock(&device_data->ctx_lock); + if (device_data->current_ctx == ++temp_ctx) + device_data->current_ctx = NULL; + spin_unlock(&device_data->ctx_lock); + + + if (!device_data->current_ctx) + up(&driver_data.device_allocation); + else + ret = cryp_enable_power(dev, device_data, true); + + if (ret) + dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__); + else { + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (res_irq) + enable_irq(res_irq->start); + } + + return ret; +} +#endif + +static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume); + +static const struct of_device_id ux500_cryp_match[] = { + { .compatible = "stericsson,ux500-cryp" }, + { }, +}; + +static struct platform_driver cryp_driver = { + .probe = ux500_cryp_probe, + .remove = ux500_cryp_remove, + .shutdown = ux500_cryp_shutdown, + .driver = { + .name = "cryp1", + .of_match_table = ux500_cryp_match, + .pm = &ux500_cryp_pm, + } +}; + +static int __init ux500_cryp_mod_init(void) +{ + pr_debug("[%s] is called!", __func__); + 
klist_init(&driver_data.device_list, NULL, NULL); + /* Initialize the semaphore to 0 devices (locked state) */ + sema_init(&driver_data.device_allocation, 0); + return platform_driver_register(&cryp_driver); +} + +static void __exit ux500_cryp_mod_fini(void) +{ + pr_debug("[%s] is called!", __func__); + platform_driver_unregister(&cryp_driver); + return; +} + +module_init(ux500_cryp_mod_init); +module_exit(ux500_cryp_mod_fini); + +module_param(cryp_mode, int, 0); + +MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine."); +MODULE_ALIAS_CRYPTO("aes-all"); +MODULE_ALIAS_CRYPTO("des-all"); + +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c new file mode 100644 index 000000000..08d291cdb --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp_irq.c @@ -0,0 +1,45 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson. + * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson. + * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson. + * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson. + * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2. + */ + +#include <linux/kernel.h> +#include <linux/bitmap.h> +#include <linux/device.h> + +#include "cryp.h" +#include "cryp_p.h" +#include "cryp_irq.h" +#include "cryp_irqp.h" + +void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src) +{ + u32 i; + + dev_dbg(device_data->dev, "[%s]", __func__); + + i = readl_relaxed(&device_data->base->imsc); + i = i | irq_src; + writel_relaxed(i, &device_data->base->imsc); +} + +void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src) +{ + u32 i; + + dev_dbg(device_data->dev, "[%s]", __func__); + + i = readl_relaxed(&device_data->base->imsc); + i = i & ~irq_src; + writel_relaxed(i, &device_data->base->imsc); +} + +bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src) +{ + return (readl_relaxed(&device_data->base->mis) & irq_src) > 0; +} diff --git a/drivers/crypto/ux500/cryp/cryp_irq.h b/drivers/crypto/ux500/cryp/cryp_irq.h new file mode 100644 index 000000000..5a7837f1b --- /dev/null +++ b/drivers/crypto/ux500/cryp/cryp_irq.h @@ -0,0 +1,31 @@ +/** + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson. + * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson. + * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson. + * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson. + * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson. 
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _CRYP_IRQ_H_
+#define _CRYP_IRQ_H_
+
+#include "cryp.h"
+
+enum cryp_irq_src_id {
+	CRYP_IRQ_SRC_INPUT_FIFO = 0x1,
+	CRYP_IRQ_SRC_OUTPUT_FIFO = 0x2,
+	CRYP_IRQ_SRC_ALL = 0x3
+};
+
+/**
+ * M0 Functions
+ */
+void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
+
+void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
+
+bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src);
+
+#endif /* _CRYP_IRQ_H_ */
diff --git a/drivers/crypto/ux500/cryp/cryp_irqp.h b/drivers/crypto/ux500/cryp/cryp_irqp.h
new file mode 100644
index 000000000..8b339cc34
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irqp.h
@@ -0,0 +1,125 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __CRYP_IRQP_H_
+#define __CRYP_IRQP_H_
+
+#include "cryp_irq.h"
+
+/**
+ *
+ * CRYP Registers - Offset mapping
+ *     +-----------------+
+ * 00h | CRYP_CR         | Configuration register
+ *     +-----------------+
+ * 04h | CRYP_SR         | Status register
+ *     +-----------------+
+ * 08h | CRYP_DIN        | Data In register
+ *     +-----------------+
+ * 0ch | CRYP_DOUT       | Data out register
+ *     +-----------------+
+ * 10h | CRYP_DMACR      | DMA control register
+ *     +-----------------+
+ * 14h | CRYP_IMSC       | IMSC
+ *     +-----------------+
+ * 18h | CRYP_RIS        | Raw interrupt status
+ *     +-----------------+
+ * 1ch | CRYP_MIS        | Masked interrupt status
+ *     +-----------------+
+ *       Key registers
+ *       IVR registers
+ *       Peripheral
+ *       Cell IDs
+ *
+ * Refer to the data structure below for the rest of the register map.
+ */
+
+/**
+ * struct cryp_register
+ * @cr - Configuration register
+ * @status - Status register
+ * @din - Data input register
+ * @din_size - Data input size register
+ * @dout - Data output register
+ * @dout_size - Data output size register
+ * @dmacr - Dma control register
+ * @imsc - Interrupt mask set/clear register
+ * @ris - Raw interrupt status
+ * @mis - Masked interrupt status register
+ * @key_1_l - Key register 1 L
+ * @key_1_r - Key register 1 R
+ * @key_2_l - Key register 2 L
+ * @key_2_r - Key register 2 R
+ * @key_3_l - Key register 3 L
+ * @key_3_r - Key register 3 R
+ * @key_4_l - Key register 4 L
+ * @key_4_r - Key register 4 R
+ * @init_vect_0_l - init vector 0 L
+ * @init_vect_0_r - init vector 0 R
+ * @init_vect_1_l - init vector 1 L
+ * @init_vect_1_r - init vector 1 R
+ * @cryp_unused1 - unused registers
+ * @itcr - Integration test control register
+ * @itip - Integration test input register
+ * @itop - Integration test output register
+ * @cryp_unused2 - unused registers
+ * @periphId0 - FE0 CRYP Peripheral Identification Register
+ * @periphId1 - FE4
+ * @periphId2 - FE8
+ * @periphId3 - FEC
+ * @pcellId0 - FF0 CRYP PCell Identification Register
+ * @pcellId1 - FF4
+ * @pcellId2 - FF8
+ * @pcellId3 - FFC
+ */
+struct cryp_register {
+	u32 cr; /* Configuration register */
+	u32 sr; /* Status register */
+	u32 din; /* Data input register */
+	u32 din_size; /* Data input size register */
+	u32 dout; /* Data output register */
+	u32 dout_size; /* Data output size register */
+	u32 dmacr; /* Dma control register */
+	u32 imsc; /* Interrupt mask set/clear register */
+	u32 ris; /* Raw interrupt status */
+	u32 mis; /* Masked interrupt status register */
+
+	u32 key_1_l; /*Key register 1 L */
+	u32 key_1_r; /*Key register 1 R */
+	u32 key_2_l; /*Key register 2 L */
+	u32 key_2_r; /*Key register 2 R */
+	u32 key_3_l; /*Key register 3 L */
+	u32 key_3_r; /*Key register 3 R */
+	u32 key_4_l; /*Key register 4 L */
+	u32 key_4_r; /*Key register 4 R */
+
+	u32 init_vect_0_l; /*init vector 0 L */
+	u32 init_vect_0_r; /*init vector 0 R */
+	u32 init_vect_1_l; /*init vector 1 L */
+	u32 init_vect_1_r; /*init vector 1 R */
+
+	u32 cryp_unused1[(0x80 - 0x58) / sizeof(u32)]; /* unused registers */
+	u32 itcr; /*Integration test control register */
+	u32 itip; /*Integration test input register */
+	u32 itop; /*Integration test output register */
+	u32 cryp_unused2[(0xFE0 - 0x8C) / sizeof(u32)]; /* unused registers */
+
+	u32 periphId0; /* FE0 CRYP Peripheral Identification Register */
+	u32 periphId1; /* FE4 */
+	u32 periphId2; /* FE8 */
+	u32 periphId3; /* FEC */
+
+	u32 pcellId0; /* FF0 CRYP PCell Identification Register */
+	u32 pcellId1; /* FF4 */
+	u32 pcellId2; /* FF8 */
+	u32 pcellId3; /* FFC */
+};
+
+#endif
diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h
new file mode 100644
index 000000000..6dcffe15c
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_p.h
@@ -0,0 +1,123 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef _CRYP_P_H_ +#define _CRYP_P_H_ + +#include <linux/io.h> +#include <linux/bitops.h> + +#include "cryp.h" +#include "cryp_irqp.h" + +/** + * Generic Macros + */ +#define CRYP_SET_BITS(reg_name, mask) \ + writel_relaxed((readl_relaxed(reg_name) | mask), reg_name) + +#define CRYP_WRITE_BIT(reg_name, val, mask) \ + writel_relaxed(((readl_relaxed(reg_name) & ~(mask)) |\ + ((val) & (mask))), reg_name) + +#define CRYP_TEST_BITS(reg_name, val) \ + (readl_relaxed(reg_name) & (val)) + +#define CRYP_PUT_BITS(reg, val, shift, mask) \ + writel_relaxed(((readl_relaxed(reg) & ~(mask)) | \ + (((u32)val << shift) & (mask))), reg) + +/** + * CRYP specific Macros + */ +#define CRYP_PERIPHERAL_ID0 0xE3 +#define CRYP_PERIPHERAL_ID1 0x05 + +#define CRYP_PERIPHERAL_ID2_DB8500 0x28 +#define CRYP_PERIPHERAL_ID3 0x00 + +#define CRYP_PCELL_ID0 0x0D +#define CRYP_PCELL_ID1 0xF0 +#define CRYP_PCELL_ID2 0x05 +#define CRYP_PCELL_ID3 0xB1 + +/** + * CRYP register default values + */ +#define MAX_DEVICE_SUPPORT 2 + +/* Priv set, keyrden set and datatype 8bits swapped set as default. */ +#define CRYP_CR_DEFAULT 0x0482 +#define CRYP_DMACR_DEFAULT 0x0 +#define CRYP_IMSC_DEFAULT 0x0 +#define CRYP_DIN_DEFAULT 0x0 +#define CRYP_DOUT_DEFAULT 0x0 +#define CRYP_KEY_DEFAULT 0x0 +#define CRYP_INIT_VECT_DEFAULT 0x0 + +/** + * CRYP Control register specific mask + */ +#define CRYP_CR_SECURE_MASK BIT(0) +#define CRYP_CR_PRLG_MASK BIT(1) +#define CRYP_CR_ALGODIR_MASK BIT(2) +#define CRYP_CR_ALGOMODE_MASK (BIT(5) | BIT(4) | BIT(3)) +#define CRYP_CR_DATATYPE_MASK (BIT(7) | BIT(6)) +#define CRYP_CR_KEYSIZE_MASK (BIT(9) | BIT(8)) +#define CRYP_CR_KEYRDEN_MASK BIT(10) +#define CRYP_CR_KSE_MASK BIT(11) +#define CRYP_CR_START_MASK BIT(12) +#define CRYP_CR_INIT_MASK BIT(13) +#define CRYP_CR_FFLUSH_MASK BIT(14) +#define CRYP_CR_CRYPEN_MASK BIT(15) +#define CRYP_CR_CONTEXT_SAVE_MASK (CRYP_CR_SECURE_MASK |\ + CRYP_CR_PRLG_MASK |\ + CRYP_CR_ALGODIR_MASK |\ + CRYP_CR_ALGOMODE_MASK |\ + CRYP_CR_DATATYPE_MASK |\ + CRYP_CR_KEYSIZE_MASK |\ + CRYP_CR_KEYRDEN_MASK |\ + CRYP_CR_DATATYPE_MASK) + + +#define CRYP_SR_INFIFO_READY_MASK (BIT(0) | BIT(1)) +#define CRYP_SR_IFEM_MASK BIT(0) +#define CRYP_SR_BUSY_MASK BIT(4) + +/** + * Bit position used while setting bits in register + */ +#define CRYP_CR_PRLG_POS 1 +#define CRYP_CR_ALGODIR_POS 2 +#define CRYP_CR_ALGOMODE_POS 3 +#define CRYP_CR_DATATYPE_POS 6 +#define CRYP_CR_KEYSIZE_POS 8 +#define CRYP_CR_KEYRDEN_POS 10 +#define CRYP_CR_KSE_POS 11 +#define CRYP_CR_START_POS 12 +#define CRYP_CR_INIT_POS 13 +#define CRYP_CR_CRYPEN_POS 15 + +#define CRYP_SR_BUSY_POS 4 + +/** + * CRYP PCRs------PC_NAND control register + * BIT_MASK + */ +#define CRYP_DMA_REQ_MASK (BIT(1) | BIT(0)) +#define CRYP_DMA_REQ_MASK_POS 0 + + +struct cryp_system_context { + /* CRYP Register structure */ + struct cryp_register *p_cryp_reg[MAX_DEVICE_SUPPORT]; +}; + +#endif diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile new file mode 100644 index 000000000..b2f90d9ba --- /dev/null +++ b/drivers/crypto/ux500/hash/Makefile @@ -0,0 +1,11 @@ +# +# Copyright (C) ST-Ericsson SA 2010 +# Author: Shujuan Chen (shujuan.chen@stericsson.com) +# License terms: GNU General Public License (GPL) version 2 +# +ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG +CFLAGS_hash_core.o := -DDEBUG -O0 +endif + +obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o +ux500_hash-objs := hash_core.o diff --git a/drivers/crypto/ux500/hash/hash_alg.h 
b/drivers/crypto/ux500/hash/hash_alg.h new file mode 100644 index 000000000..be6eb54da --- /dev/null +++ b/drivers/crypto/ux500/hash/hash_alg.h @@ -0,0 +1,398 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen (shujuan.chen@stericsson.com) + * Author: Joakim Bech (joakim.xx.bech@stericsson.com) + * Author: Berne Hebark (berne.hebark@stericsson.com)) + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef _HASH_ALG_H +#define _HASH_ALG_H + +#include <linux/bitops.h> + +#define HASH_BLOCK_SIZE 64 +#define HASH_DMA_FIFO 4 +#define HASH_DMA_ALIGN_SIZE 4 +#define HASH_DMA_PERFORMANCE_MIN_SIZE 1024 +#define HASH_BYTES_PER_WORD 4 + +/* Maximum value of the length's high word */ +#define HASH_HIGH_WORD_MAX_VAL 0xFFFFFFFFUL + +/* Power on Reset values HASH registers */ +#define HASH_RESET_CR_VALUE 0x0 +#define HASH_RESET_STR_VALUE 0x0 + +/* Number of context swap registers */ +#define HASH_CSR_COUNT 52 + +#define HASH_RESET_CSRX_REG_VALUE 0x0 +#define HASH_RESET_CSFULL_REG_VALUE 0x0 +#define HASH_RESET_CSDATAIN_REG_VALUE 0x0 + +#define HASH_RESET_INDEX_VAL 0x0 +#define HASH_RESET_BIT_INDEX_VAL 0x0 +#define HASH_RESET_BUFFER_VAL 0x0 +#define HASH_RESET_LEN_HIGH_VAL 0x0 +#define HASH_RESET_LEN_LOW_VAL 0x0 + +/* Control register bitfields */ +#define HASH_CR_RESUME_MASK 0x11FCF + +#define HASH_CR_SWITCHON_POS 31 +#define HASH_CR_SWITCHON_MASK BIT(31) + +#define HASH_CR_EMPTYMSG_POS 20 +#define HASH_CR_EMPTYMSG_MASK BIT(20) + +#define HASH_CR_DINF_POS 12 +#define HASH_CR_DINF_MASK BIT(12) + +#define HASH_CR_NBW_POS 8 +#define HASH_CR_NBW_MASK 0x00000F00UL + +#define HASH_CR_LKEY_POS 16 +#define HASH_CR_LKEY_MASK BIT(16) + +#define HASH_CR_ALGO_POS 7 +#define HASH_CR_ALGO_MASK BIT(7) + +#define HASH_CR_MODE_POS 6 +#define HASH_CR_MODE_MASK BIT(6) + +#define HASH_CR_DATAFORM_POS 4 +#define HASH_CR_DATAFORM_MASK (BIT(4) | BIT(5)) + +#define HASH_CR_DMAE_POS 3 +#define HASH_CR_DMAE_MASK BIT(3) + +#define HASH_CR_INIT_POS 2 +#define HASH_CR_INIT_MASK BIT(2) + +#define HASH_CR_PRIVN_POS 1 +#define HASH_CR_PRIVN_MASK BIT(1) + +#define HASH_CR_SECN_POS 0 +#define HASH_CR_SECN_MASK BIT(0) + +/* Start register bitfields */ +#define HASH_STR_DCAL_POS 8 +#define HASH_STR_DCAL_MASK BIT(8) +#define HASH_STR_DEFAULT 0x0 + +#define HASH_STR_NBLW_POS 0 +#define HASH_STR_NBLW_MASK 0x0000001FUL + +#define HASH_NBLW_MAX_VAL 0x1F + +/* PrimeCell IDs */ +#define HASH_P_ID0 0xE0 +#define HASH_P_ID1 0x05 +#define HASH_P_ID2 0x38 +#define HASH_P_ID3 0x00 +#define HASH_CELL_ID0 0x0D +#define HASH_CELL_ID1 0xF0 +#define HASH_CELL_ID2 0x05 +#define HASH_CELL_ID3 0xB1 + +#define HASH_SET_BITS(reg_name, mask) \ + writel_relaxed((readl_relaxed(reg_name) | mask), reg_name) + +#define HASH_CLEAR_BITS(reg_name, mask) \ + writel_relaxed((readl_relaxed(reg_name) & ~mask), reg_name) + +#define HASH_PUT_BITS(reg, val, shift, mask) \ + writel_relaxed(((readl(reg) & ~(mask)) | \ + (((u32)val << shift) & (mask))), reg) + +#define HASH_SET_DIN(val, len) writesl(&device_data->base->din, (val), (len)) + +#define HASH_INITIALIZE \ + HASH_PUT_BITS( \ + &device_data->base->cr, \ + 0x01, HASH_CR_INIT_POS, \ + HASH_CR_INIT_MASK) + +#define HASH_SET_DATA_FORMAT(data_format) \ + HASH_PUT_BITS( \ + &device_data->base->cr, \ + (u32) (data_format), HASH_CR_DATAFORM_POS, \ + HASH_CR_DATAFORM_MASK) +#define HASH_SET_NBLW(val) \ + HASH_PUT_BITS( \ + &device_data->base->str, \ + (u32) (val), HASH_STR_NBLW_POS, \ + HASH_STR_NBLW_MASK) +#define HASH_SET_DCAL \ + HASH_PUT_BITS( \ + &device_data->base->str, \ + 0x01, 
HASH_STR_DCAL_POS, \ + HASH_STR_DCAL_MASK) + +/* Hardware access method */ +enum hash_mode { + HASH_MODE_CPU, + HASH_MODE_DMA +}; + +/** + * struct uint64 - Structure to handle 64 bits integers. + * @high_word: Most significant bits. + * @low_word: Least significant bits. + * + * Used to handle 64 bits integers. + */ +struct uint64 { + u32 high_word; + u32 low_word; +}; + +/** + * struct hash_register - Contains all registers in ux500 hash hardware. + * @cr: HASH control register (0x000). + * @din: HASH data input register (0x004). + * @str: HASH start register (0x008). + * @hx: HASH digest register 0..7 (0x00c-0x01C). + * @padding0: Reserved (0x02C). + * @itcr: Integration test control register (0x080). + * @itip: Integration test input register (0x084). + * @itop: Integration test output register (0x088). + * @padding1: Reserved (0x08C). + * @csfull: HASH context full register (0x0F8). + * @csdatain: HASH context swap data input register (0x0FC). + * @csrx: HASH context swap register 0..51 (0x100-0x1CC). + * @padding2: Reserved (0x1D0). + * @periphid0: HASH peripheral identification register 0 (0xFE0). + * @periphid1: HASH peripheral identification register 1 (0xFE4). + * @periphid2: HASH peripheral identification register 2 (0xFE8). + * @periphid3: HASH peripheral identification register 3 (0xFEC). + * @cellid0: HASH PCell identification register 0 (0xFF0). + * @cellid1: HASH PCell identification register 1 (0xFF4). + * @cellid2: HASH PCell identification register 2 (0xFF8). + * @cellid3: HASH PCell identification register 3 (0xFFC). + * + * The device communicates to the HASH via 32-bit-wide control registers + * accessible via the 32-bit width AMBA rev. 2.0 AHB Bus. Below is a structure + * with the registers used. + */ +struct hash_register { + u32 cr; + u32 din; + u32 str; + u32 hx[8]; + + u32 padding0[(0x080 - 0x02C) / sizeof(u32)]; + + u32 itcr; + u32 itip; + u32 itop; + + u32 padding1[(0x0F8 - 0x08C) / sizeof(u32)]; + + u32 csfull; + u32 csdatain; + u32 csrx[HASH_CSR_COUNT]; + + u32 padding2[(0xFE0 - 0x1D0) / sizeof(u32)]; + + u32 periphid0; + u32 periphid1; + u32 periphid2; + u32 periphid3; + + u32 cellid0; + u32 cellid1; + u32 cellid2; + u32 cellid3; +}; + +/** + * struct hash_state - Hash context state. + * @temp_cr: Temporary HASH Control Register. + * @str_reg: HASH Start Register. + * @din_reg: HASH Data Input Register. + * @csr[52]: HASH Context Swap Registers 0-39. + * @csfull: HASH Context Swap Registers 40 ie Status flags. + * @csdatain: HASH Context Swap Registers 41 ie Input data. + * @buffer: Working buffer for messages going to the hardware. + * @length: Length of the part of message hashed so far (floor(N/64) * 64). + * @index: Valid number of bytes in buffer (N % 64). + * @bit_index: Valid number of bits in buffer (N % 8). + * + * This structure is used between context switches, i.e. when ongoing jobs are + * interupted with new jobs. When this happens we need to store intermediate + * results in software. + * + * WARNING: "index" is the member of the structure, to be sure that "buffer" + * is aligned on a 4-bytes boundary. This is highly implementation dependent + * and MUST be checked whenever this code is ported on new platforms. + */ +struct hash_state { + u32 temp_cr; + u32 str_reg; + u32 din_reg; + u32 csr[52]; + u32 csfull; + u32 csdatain; + u32 buffer[HASH_BLOCK_SIZE / sizeof(u32)]; + struct uint64 length; + u8 index; + u8 bit_index; +}; + +/** + * enum hash_device_id - HASH device ID. 
+ * @HASH_DEVICE_ID_0: Hash hardware with ID 0 + * @HASH_DEVICE_ID_1: Hash hardware with ID 1 + */ +enum hash_device_id { + HASH_DEVICE_ID_0 = 0, + HASH_DEVICE_ID_1 = 1 +}; + +/** + * enum hash_data_format - HASH data format. + * @HASH_DATA_32_BITS: 32 bits data format + * @HASH_DATA_16_BITS: 16 bits data format + * @HASH_DATA_8_BITS: 8 bits data format. + * @HASH_DATA_1_BITS: 1 bit data format. + */ +enum hash_data_format { + HASH_DATA_32_BITS = 0x0, + HASH_DATA_16_BITS = 0x1, + HASH_DATA_8_BITS = 0x2, + HASH_DATA_1_BIT = 0x3 +}; + +/** + * enum hash_algo - Enumeration for selecting between SHA1 or SHA2 algorithm. + * @HASH_ALGO_SHA1: Indicates that SHA1 is used. + * @HASH_ALGO_SHA2: Indicates that SHA2 (SHA256) is used. + */ +enum hash_algo { + HASH_ALGO_SHA1 = 0x0, + HASH_ALGO_SHA256 = 0x1 +}; + +/** + * enum hash_op - Enumeration for selecting between HASH or HMAC mode. + * @HASH_OPER_MODE_HASH: Indicates usage of normal HASH mode. + * @HASH_OPER_MODE_HMAC: Indicates usage of HMAC. + */ +enum hash_op { + HASH_OPER_MODE_HASH = 0x0, + HASH_OPER_MODE_HMAC = 0x1 +}; + +/** + * struct hash_config - Configuration data for the hardware. + * @data_format: Format of data entered into the hash data in register. + * @algorithm: Algorithm selection bit. + * @oper_mode: Operating mode selection bit. + */ +struct hash_config { + int data_format; + int algorithm; + int oper_mode; +}; + +/** + * struct hash_dma - Structure used for dma. + * @mask: DMA capabilities bitmap mask. + * @complete: Used to maintain state for a "completion". + * @chan_mem2hash: DMA channel. + * @cfg_mem2hash: DMA channel configuration. + * @sg_len: Scatterlist length. + * @sg: Scatterlist. + * @nents: Number of sg entries. + */ +struct hash_dma { + dma_cap_mask_t mask; + struct completion complete; + struct dma_chan *chan_mem2hash; + void *cfg_mem2hash; + int sg_len; + struct scatterlist *sg; + int nents; +}; + +/** + * struct hash_ctx - The context used for hash calculations. + * @key: The key used in the operation. + * @keylen: The length of the key. + * @state: The state of the current calculations. + * @config: The current configuration. + * @digestsize: The size of current digest. + * @device: Pointer to the device structure. + */ +struct hash_ctx { + u8 *key; + u32 keylen; + struct hash_config config; + int digestsize; + struct hash_device_data *device; +}; + +/** + * struct hash_ctx - The request context used for hash calculations. + * @state: The state of the current calculations. + * @dma_mode: Used in special cases (workaround), e.g. need to change to + * cpu mode, if not supported/working in dma mode. + * @updated: Indicates if hardware is initialized for new operations. + */ +struct hash_req_ctx { + struct hash_state state; + bool dma_mode; + u8 updated; +}; + +/** + * struct hash_device_data - structure for a hash device. + * @base: Pointer to virtual base address of the hash device. + * @phybase: Pointer to physical memory location of the hash device. + * @list_node: For inclusion in klist. + * @dev: Pointer to the device dev structure. + * @ctx_lock: Spinlock for current_ctx. + * @current_ctx: Pointer to the currently allocated context. + * @power_state: TRUE = power state on, FALSE = power state off. + * @power_state_lock: Spinlock for power_state. + * @regulator: Pointer to the device's power control. + * @clk: Pointer to the device's clock control. + * @restore_dev_state: TRUE = saved state, FALSE = no saved state. + * @dma: Structure used for dma. 
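+ * @state: Hardware state saved by hash_save_state() when the device is
+ *         powered down with save_device_state set, and restored again by
+ *         hash_resume_state() on the next power-up.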
+ */ +struct hash_device_data { + struct hash_register __iomem *base; + phys_addr_t phybase; + struct klist_node list_node; + struct device *dev; + struct spinlock ctx_lock; + struct hash_ctx *current_ctx; + bool power_state; + struct spinlock power_state_lock; + struct regulator *regulator; + struct clk *clk; + bool restore_dev_state; + struct hash_state state; /* Used for saving and resuming state */ + struct hash_dma dma; +}; + +int hash_check_hw(struct hash_device_data *device_data); + +int hash_setconfiguration(struct hash_device_data *device_data, + struct hash_config *config); + +void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx); + +void hash_get_digest(struct hash_device_data *device_data, + u8 *digest, int algorithm); + +int hash_hw_update(struct ahash_request *req); + +int hash_save_state(struct hash_device_data *device_data, + struct hash_state *state); + +int hash_resume_state(struct hash_device_data *device_data, + const struct hash_state *state); + +#endif diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c new file mode 100644 index 000000000..5f5f36062 --- /dev/null +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -0,0 +1,2002 @@ +/* + * Cryptographic API. + * Support for Nomadik hardware crypto engine. + + * Copyright (C) ST-Ericsson SA 2010 + * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson + * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson + * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson. + * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson. + * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2 + */ + +#define pr_fmt(fmt) "hashX hashX: " fmt + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/klist.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/crypto.h> + +#include <linux/regulator/consumer.h> +#include <linux/dmaengine.h> +#include <linux/bitops.h> + +#include <crypto/internal/hash.h> +#include <crypto/sha.h> +#include <crypto/scatterwalk.h> +#include <crypto/algapi.h> + +#include <linux/platform_data/crypto-ux500.h> + +#include "hash_alg.h" + +static int hash_mode; +module_param(hash_mode, int, 0); +MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); + +/** + * Pre-calculated empty message digests. 
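+ *
+ * SHA-1/SHA-256 digests of the zero-length message, plus the corresponding
+ * HMAC values for an empty key; get_empty_message_digest() hands these back
+ * directly instead of running the hardware for an empty request.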
+ */ +static const u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = { + 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, + 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, + 0xaf, 0xd8, 0x07, 0x09 +}; + +static const u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, + 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, + 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, + 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 +}; + +/* HMAC-SHA1, no key */ +static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = { + 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08, + 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63, + 0x70, 0x69, 0x0e, 0x1d +}; + +/* HMAC-SHA256, no key */ +static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = { + 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec, + 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5, + 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53, + 0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad +}; + +/** + * struct hash_driver_data - data specific to the driver. + * + * @device_list: A list of registered devices to choose from. + * @device_allocation: A semaphore initialized with number of devices. + */ +struct hash_driver_data { + struct klist device_list; + struct semaphore device_allocation; +}; + +static struct hash_driver_data driver_data; + +/* Declaration of functions */ +/** + * hash_messagepad - Pads a message and write the nblw bits. + * @device_data: Structure for the hash device. + * @message: Last word of a message + * @index_bytes: The number of bytes in the last message + * + * This function manages the final part of the digest calculation, when less + * than 512 bits (64 bytes) remain in message. This means index_bytes < 64. + * + */ +static void hash_messagepad(struct hash_device_data *device_data, + const u32 *message, u8 index_bytes); + +/** + * release_hash_device - Releases a previously allocated hash device. + * @device_data: Structure for the hash device. + * + */ +static void release_hash_device(struct hash_device_data *device_data) +{ + spin_lock(&device_data->ctx_lock); + device_data->current_ctx->device = NULL; + device_data->current_ctx = NULL; + spin_unlock(&device_data->ctx_lock); + + /* + * The down_interruptible part for this semaphore is called in + * cryp_get_device_data. 
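+	 * (In the hash driver the matching down_interruptible() is done in
+	 * hash_get_device_data().)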
+ */ + up(&driver_data.device_allocation); +} + +static void hash_dma_setup_channel(struct hash_device_data *device_data, + struct device *dev) +{ + struct hash_platform_data *platform_data = dev->platform_data; + struct dma_slave_config conf = { + .direction = DMA_MEM_TO_DEV, + .dst_addr = device_data->phybase + HASH_DMA_FIFO, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, + .dst_maxburst = 16, + }; + + dma_cap_zero(device_data->dma.mask); + dma_cap_set(DMA_SLAVE, device_data->dma.mask); + + device_data->dma.cfg_mem2hash = platform_data->mem_to_engine; + device_data->dma.chan_mem2hash = + dma_request_channel(device_data->dma.mask, + platform_data->dma_filter, + device_data->dma.cfg_mem2hash); + + dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf); + + init_completion(&device_data->dma.complete); +} + +static void hash_dma_callback(void *data) +{ + struct hash_ctx *ctx = data; + + complete(&ctx->device->dma.complete); +} + +static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, + int len, enum dma_data_direction direction) +{ + struct dma_async_tx_descriptor *desc = NULL; + struct dma_chan *channel = NULL; + dma_cookie_t cookie; + + if (direction != DMA_TO_DEVICE) { + dev_err(ctx->device->dev, "%s: Invalid DMA direction\n", + __func__); + return -EFAULT; + } + + sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE); + + channel = ctx->device->dma.chan_mem2hash; + ctx->device->dma.sg = sg; + ctx->device->dma.sg_len = dma_map_sg(channel->device->dev, + ctx->device->dma.sg, ctx->device->dma.nents, + direction); + + if (!ctx->device->dma.sg_len) { + dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n", + __func__); + return -EFAULT; + } + + dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n", + __func__); + desc = dmaengine_prep_slave_sg(channel, + ctx->device->dma.sg, ctx->device->dma.sg_len, + direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(ctx->device->dev, + "%s: dmaengine_prep_slave_sg() failed!\n", __func__); + return -EFAULT; + } + + desc->callback = hash_dma_callback; + desc->callback_param = ctx; + + cookie = dmaengine_submit(desc); + dma_async_issue_pending(channel); + + return 0; +} + +static void hash_dma_done(struct hash_ctx *ctx) +{ + struct dma_chan *chan; + + chan = ctx->device->dma.chan_mem2hash; + dmaengine_terminate_all(chan); + dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, + ctx->device->dma.sg_len, DMA_TO_DEVICE); +} + +static int hash_dma_write(struct hash_ctx *ctx, + struct scatterlist *sg, int len) +{ + int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); + if (error) { + dev_dbg(ctx->device->dev, + "%s: hash_set_dma_transfer() failed\n", __func__); + return error; + } + + return len; +} + +/** + * get_empty_message_digest - Returns a pre-calculated digest for + * the empty message. + * @device_data: Structure for the hash device. + * @zero_hash: Buffer to return the empty message digest. + * @zero_hash_size: Hash size of the empty message digest. + * @zero_digest: True if zero_digest returned. + */ +static int get_empty_message_digest( + struct hash_device_data *device_data, + u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest) +{ + int ret = 0; + struct hash_ctx *ctx = device_data->current_ctx; + *zero_digest = false; + + /** + * Caller responsible for ctx != NULL. 
+ */ + + if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) { + if (HASH_ALGO_SHA1 == ctx->config.algorithm) { + memcpy(zero_hash, &zero_message_hash_sha1[0], + SHA1_DIGEST_SIZE); + *zero_hash_size = SHA1_DIGEST_SIZE; + *zero_digest = true; + } else if (HASH_ALGO_SHA256 == + ctx->config.algorithm) { + memcpy(zero_hash, &zero_message_hash_sha256[0], + SHA256_DIGEST_SIZE); + *zero_hash_size = SHA256_DIGEST_SIZE; + *zero_digest = true; + } else { + dev_err(device_data->dev, "%s: Incorrect algorithm!\n", + __func__); + ret = -EINVAL; + goto out; + } + } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) { + if (!ctx->keylen) { + if (HASH_ALGO_SHA1 == ctx->config.algorithm) { + memcpy(zero_hash, &zero_message_hmac_sha1[0], + SHA1_DIGEST_SIZE); + *zero_hash_size = SHA1_DIGEST_SIZE; + *zero_digest = true; + } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) { + memcpy(zero_hash, &zero_message_hmac_sha256[0], + SHA256_DIGEST_SIZE); + *zero_hash_size = SHA256_DIGEST_SIZE; + *zero_digest = true; + } else { + dev_err(device_data->dev, "%s: Incorrect algorithm!\n", + __func__); + ret = -EINVAL; + goto out; + } + } else { + dev_dbg(device_data->dev, + "%s: Continue hash calculation, since hmac key available\n", + __func__); + } + } +out: + + return ret; +} + +/** + * hash_disable_power - Request to disable power and clock. + * @device_data: Structure for the hash device. + * @save_device_state: If true, saves the current hw state. + * + * This function request for disabling power (regulator) and clock, + * and could also save current hw state. + */ +static int hash_disable_power(struct hash_device_data *device_data, + bool save_device_state) +{ + int ret = 0; + struct device *dev = device_data->dev; + + spin_lock(&device_data->power_state_lock); + if (!device_data->power_state) + goto out; + + if (save_device_state) { + hash_save_state(device_data, + &device_data->state); + device_data->restore_dev_state = true; + } + + clk_disable(device_data->clk); + ret = regulator_disable(device_data->regulator); + if (ret) + dev_err(dev, "%s: regulator_disable() failed!\n", __func__); + + device_data->power_state = false; + +out: + spin_unlock(&device_data->power_state_lock); + + return ret; +} + +/** + * hash_enable_power - Request to enable power and clock. + * @device_data: Structure for the hash device. + * @restore_device_state: If true, restores a previous saved hw state. + * + * This function request for enabling power (regulator) and clock, + * and could also restore a previously saved hw state. + */ +static int hash_enable_power(struct hash_device_data *device_data, + bool restore_device_state) +{ + int ret = 0; + struct device *dev = device_data->dev; + + spin_lock(&device_data->power_state_lock); + if (!device_data->power_state) { + ret = regulator_enable(device_data->regulator); + if (ret) { + dev_err(dev, "%s: regulator_enable() failed!\n", + __func__); + goto out; + } + ret = clk_enable(device_data->clk); + if (ret) { + dev_err(dev, "%s: clk_enable() failed!\n", __func__); + ret = regulator_disable( + device_data->regulator); + goto out; + } + device_data->power_state = true; + } + + if (device_data->restore_dev_state) { + if (restore_device_state) { + device_data->restore_dev_state = false; + hash_resume_state(device_data, &device_data->state); + } + } +out: + spin_unlock(&device_data->power_state_lock); + + return ret; +} + +/** + * hash_get_device_data - Checks for an available hash device and return it. + * @hash_ctx: Structure for the hash context. 
+ * @device_data: Structure for the hash device. + * + * This function check for an available hash device and return it to + * the caller. + * Note! Caller need to release the device, calling up(). + */ +static int hash_get_device_data(struct hash_ctx *ctx, + struct hash_device_data **device_data) +{ + int ret; + struct klist_iter device_iterator; + struct klist_node *device_node; + struct hash_device_data *local_device_data = NULL; + + /* Wait until a device is available */ + ret = down_interruptible(&driver_data.device_allocation); + if (ret) + return ret; /* Interrupted */ + + /* Select a device */ + klist_iter_init(&driver_data.device_list, &device_iterator); + device_node = klist_next(&device_iterator); + while (device_node) { + local_device_data = container_of(device_node, + struct hash_device_data, list_node); + spin_lock(&local_device_data->ctx_lock); + /* current_ctx allocates a device, NULL = unallocated */ + if (local_device_data->current_ctx) { + device_node = klist_next(&device_iterator); + } else { + local_device_data->current_ctx = ctx; + ctx->device = local_device_data; + spin_unlock(&local_device_data->ctx_lock); + break; + } + spin_unlock(&local_device_data->ctx_lock); + } + klist_iter_exit(&device_iterator); + + if (!device_node) { + /** + * No free device found. + * Since we allocated a device with down_interruptible, this + * should not be able to happen. + * Number of available devices, which are contained in + * device_allocation, is therefore decremented by not doing + * an up(device_allocation). + */ + return -EBUSY; + } + + *device_data = local_device_data; + + return 0; +} + +/** + * hash_hw_write_key - Writes the key to the hardware registries. + * + * @device_data: Structure for the hash device. + * @key: Key to be written. + * @keylen: The lengt of the key. + * + * Note! This function DOES NOT write to the NBLW registry, even though + * specified in the the hw design spec. Either due to incorrect info in the + * spec or due to a bug in the hw. + */ +static void hash_hw_write_key(struct hash_device_data *device_data, + const u8 *key, unsigned int keylen) +{ + u32 word = 0; + int nwords = 1; + + HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); + + while (keylen >= 4) { + u32 *key_word = (u32 *)key; + + HASH_SET_DIN(key_word, nwords); + keylen -= 4; + key += 4; + } + + /* Take care of the remaining bytes in the last word */ + if (keylen) { + word = 0; + while (keylen) { + word |= (key[keylen - 1] << (8 * (keylen - 1))); + keylen--; + } + + HASH_SET_DIN(&word, nwords); + } + + while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) + cpu_relax(); + + HASH_SET_DCAL; + + while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) + cpu_relax(); +} + +/** + * init_hash_hw - Initialise the hash hardware for a new calculation. + * @device_data: Structure for the hash device. + * @ctx: The hash context. + * + * This function will enable the bits needed to clear and start a new + * calculation. + */ +static int init_hash_hw(struct hash_device_data *device_data, + struct hash_ctx *ctx) +{ + int ret = 0; + + ret = hash_setconfiguration(device_data, &ctx->config); + if (ret) { + dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n", + __func__); + return ret; + } + + hash_begin(device_data, ctx); + + if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) + hash_hw_write_key(device_data, ctx->key, ctx->keylen); + + return ret; +} + +/** + * hash_get_nents - Return number of entries (nents) in scatterlist (sg). + * + * @sg: Scatterlist. 
+ * @size: Size in bytes. + * @aligned: True if sg data aligned to work in DMA mode. + * + */ +static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned) +{ + int nents = 0; + bool aligned_data = true; + + while (size > 0 && sg) { + nents++; + size -= sg->length; + + /* hash_set_dma_transfer will align last nent */ + if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) || + (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0)) + aligned_data = false; + + sg = sg_next(sg); + } + + if (aligned) + *aligned = aligned_data; + + if (size != 0) + return -EFAULT; + + return nents; +} + +/** + * hash_dma_valid_data - checks for dma valid sg data. + * @sg: Scatterlist. + * @datasize: Datasize in bytes. + * + * NOTE! This function checks for dma valid sg data, since dma + * only accept datasizes of even wordsize. + */ +static bool hash_dma_valid_data(struct scatterlist *sg, int datasize) +{ + bool aligned; + + /* Need to include at least one nent, else error */ + if (hash_get_nents(sg, datasize, &aligned) < 1) + return false; + + return aligned; +} + +/** + * hash_init - Common hash init function for SHA1/SHA2 (SHA256). + * @req: The hash request for the job. + * + * Initialize structures. + */ +static int hash_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct hash_req_ctx *req_ctx = ahash_request_ctx(req); + + if (!ctx->key) + ctx->keylen = 0; + + memset(&req_ctx->state, 0, sizeof(struct hash_state)); + req_ctx->updated = 0; + if (hash_mode == HASH_MODE_DMA) { + if (req->nbytes < HASH_DMA_ALIGN_SIZE) { + req_ctx->dma_mode = false; /* Don't use DMA */ + + pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n", + __func__, HASH_DMA_ALIGN_SIZE); + } else { + if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE && + hash_dma_valid_data(req->src, req->nbytes)) { + req_ctx->dma_mode = true; + } else { + req_ctx->dma_mode = false; + pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n", + __func__, + HASH_DMA_PERFORMANCE_MIN_SIZE); + } + } + } + return 0; +} + +/** + * hash_processblock - This function processes a single block of 512 bits (64 + * bytes), word aligned, starting at message. + * @device_data: Structure for the hash device. + * @message: Block (512 bits) of message to be written to + * the HASH hardware. + * + */ +static void hash_processblock(struct hash_device_data *device_data, + const u32 *message, int length) +{ + int len = length / HASH_BYTES_PER_WORD; + /* + * NBLW bits. Reset the number of bits in last word (NBLW). + */ + HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); + + /* + * Write message data to the HASH_DIN register. + */ + HASH_SET_DIN(message, len); +} + +/** + * hash_messagepad - Pads a message and write the nblw bits. + * @device_data: Structure for the hash device. + * @message: Last word of a message. + * @index_bytes: The number of bytes in the last message. + * + * This function manages the final part of the digest calculation, when less + * than 512 bits (64 bytes) remain in message. This means index_bytes < 64. + * + */ +static void hash_messagepad(struct hash_device_data *device_data, + const u32 *message, u8 index_bytes) +{ + int nwords = 1; + + /* + * Clear hash str register, only clear NBLW + * since DCAL will be reset by hardware. 
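+ *
+ * The sequence below follows from that: any remaining full 32-bit words are
+ * pushed out through HASH_SET_DIN(), a trailing partial word (if any) goes
+ * out next, and once DCAL is idle the valid-bit count is programmed with
+ * HASH_SET_NBLW() before DCAL is set to start the final digest round.
+ *
+ * Illustrative example (not from the design spec): index_bytes == 7 means
+ * one full word plus one word carrying the last three message bytes are
+ * written, and NBLW then tells the hardware how much of that last word is
+ * real message data.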
+ */ + HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); + + /* Main loop */ + while (index_bytes >= 4) { + HASH_SET_DIN(message, nwords); + index_bytes -= 4; + message++; + } + + if (index_bytes) + HASH_SET_DIN(message, nwords); + + while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) + cpu_relax(); + + /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ + HASH_SET_NBLW(index_bytes * 8); + dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n", + __func__, readl_relaxed(&device_data->base->din), + readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK); + HASH_SET_DCAL; + dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n", + __func__, readl_relaxed(&device_data->base->din), + readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK); + + while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) + cpu_relax(); +} + +/** + * hash_incrementlength - Increments the length of the current message. + * @ctx: Hash context + * @incr: Length of message processed already + * + * Overflow cannot occur, because conditions for overflow are checked in + * hash_hw_update. + */ +static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr) +{ + ctx->state.length.low_word += incr; + + /* Check for wrap-around */ + if (ctx->state.length.low_word < incr) + ctx->state.length.high_word++; +} + +/** + * hash_setconfiguration - Sets the required configuration for the hash + * hardware. + * @device_data: Structure for the hash device. + * @config: Pointer to a configuration structure. + */ +int hash_setconfiguration(struct hash_device_data *device_data, + struct hash_config *config) +{ + int ret = 0; + + if (config->algorithm != HASH_ALGO_SHA1 && + config->algorithm != HASH_ALGO_SHA256) + return -EPERM; + + /* + * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data + * to be written to HASH_DIN is considered as 32 bits. + */ + HASH_SET_DATA_FORMAT(config->data_format); + + /* + * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256 + */ + switch (config->algorithm) { + case HASH_ALGO_SHA1: + HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK); + break; + + case HASH_ALGO_SHA256: + HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK); + break; + + default: + dev_err(device_data->dev, "%s: Incorrect algorithm\n", + __func__); + return -EPERM; + } + + /* + * MODE bit. This bit selects between HASH or HMAC mode for the + * selected algorithm. 0b0 = HASH and 0b1 = HMAC. + */ + if (HASH_OPER_MODE_HASH == config->oper_mode) + HASH_CLEAR_BITS(&device_data->base->cr, + HASH_CR_MODE_MASK); + else if (HASH_OPER_MODE_HMAC == config->oper_mode) { + HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK); + if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) { + /* Truncate key to blocksize */ + dev_dbg(device_data->dev, "%s: LKEY set\n", __func__); + HASH_SET_BITS(&device_data->base->cr, + HASH_CR_LKEY_MASK); + } else { + dev_dbg(device_data->dev, "%s: LKEY cleared\n", + __func__); + HASH_CLEAR_BITS(&device_data->base->cr, + HASH_CR_LKEY_MASK); + } + } else { /* Wrong hash mode */ + ret = -EPERM; + dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", + __func__); + } + return ret; +} + +/** + * hash_begin - This routine resets some globals and initializes the hash + * hardware. + * @device_data: Structure for the hash device. + * @ctx: Hash context. 
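+ *
+ * In this driver hash_begin() is reached through init_hash_hw(), which
+ * (error handling trimmed) roughly does:
+ *
+ *     hash_setconfiguration(device_data, &ctx->config);
+ *     hash_begin(device_data, ctx);
+ *     if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
+ *             hash_hw_write_key(device_data, ctx->key, ctx->keylen);
+ *
+ * i.e. algorithm and mode are configured first, the core is reset and NBLW
+ * cleared here, and for HMAC the key is written immediately afterwards.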
+ */ +void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx) +{ + /* HW and SW initializations */ + /* Note: there is no need to initialize buffer and digest members */ + + while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) + cpu_relax(); + + /* + * INIT bit. Set this bit to 0b1 to reset the HASH processor core and + * prepare the initialize the HASH accelerator to compute the message + * digest of a new message. + */ + HASH_INITIALIZE; + + /* + * NBLW bits. Reset the number of bits in last word (NBLW). + */ + HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); +} + +static int hash_process_data(struct hash_device_data *device_data, + struct hash_ctx *ctx, struct hash_req_ctx *req_ctx, + int msg_length, u8 *data_buffer, u8 *buffer, + u8 *index) +{ + int ret = 0; + u32 count; + + do { + if ((*index + msg_length) < HASH_BLOCK_SIZE) { + for (count = 0; count < msg_length; count++) { + buffer[*index + count] = + *(data_buffer + count); + } + *index += msg_length; + msg_length = 0; + } else { + if (req_ctx->updated) { + ret = hash_resume_state(device_data, + &device_data->state); + memmove(req_ctx->state.buffer, + device_data->state.buffer, + HASH_BLOCK_SIZE / sizeof(u32)); + if (ret) { + dev_err(device_data->dev, + "%s: hash_resume_state() failed!\n", + __func__); + goto out; + } + } else { + ret = init_hash_hw(device_data, ctx); + if (ret) { + dev_err(device_data->dev, + "%s: init_hash_hw() failed!\n", + __func__); + goto out; + } + req_ctx->updated = 1; + } + /* + * If 'data_buffer' is four byte aligned and + * local buffer does not have any data, we can + * write data directly from 'data_buffer' to + * HW peripheral, otherwise we first copy data + * to a local buffer + */ + if ((0 == (((u32)data_buffer) % 4)) && + (0 == *index)) + hash_processblock(device_data, + (const u32 *)data_buffer, + HASH_BLOCK_SIZE); + else { + for (count = 0; + count < (u32)(HASH_BLOCK_SIZE - *index); + count++) { + buffer[*index + count] = + *(data_buffer + count); + } + hash_processblock(device_data, + (const u32 *)buffer, + HASH_BLOCK_SIZE); + } + hash_incrementlength(req_ctx, HASH_BLOCK_SIZE); + data_buffer += (HASH_BLOCK_SIZE - *index); + + msg_length -= (HASH_BLOCK_SIZE - *index); + *index = 0; + + ret = hash_save_state(device_data, + &device_data->state); + + memmove(device_data->state.buffer, + req_ctx->state.buffer, + HASH_BLOCK_SIZE / sizeof(u32)); + if (ret) { + dev_err(device_data->dev, "%s: hash_save_state() failed!\n", + __func__); + goto out; + } + } + } while (msg_length != 0); +out: + + return ret; +} + +/** + * hash_dma_final - The hash dma final function for SHA1/SHA256. + * @req: The hash request for the job. 
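+ *
+ * Unlike the CPU path, all data is handed over in one go here: the
+ * scatterlist is counted with hash_get_nents() and submitted through
+ * hash_dma_write(), the DMAE bit in HASH_CR is set, and NBLW is programmed
+ * up front as (req->nbytes * 8) % 32, i.e. the number of valid bits in the
+ * last 32-bit word (for example, a 45-byte request gives 360 % 32 = 8).
+ * After the DMA completion fires and DCAL goes idle, the HMAC key (if any)
+ * is written once more and the digest is read back with hash_get_digest().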
+ */ +static int hash_dma_final(struct ahash_request *req) +{ + int ret = 0; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct hash_req_ctx *req_ctx = ahash_request_ctx(req); + struct hash_device_data *device_data; + u8 digest[SHA256_DIGEST_SIZE]; + int bytes_written = 0; + + ret = hash_get_device_data(ctx, &device_data); + if (ret) + return ret; + + dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); + + if (req_ctx->updated) { + ret = hash_resume_state(device_data, &device_data->state); + + if (ret) { + dev_err(device_data->dev, "%s: hash_resume_state() failed!\n", + __func__); + goto out; + } + } + + if (!req_ctx->updated) { + ret = hash_setconfiguration(device_data, &ctx->config); + if (ret) { + dev_err(device_data->dev, + "%s: hash_setconfiguration() failed!\n", + __func__); + goto out; + } + + /* Enable DMA input */ + if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) { + HASH_CLEAR_BITS(&device_data->base->cr, + HASH_CR_DMAE_MASK); + } else { + HASH_SET_BITS(&device_data->base->cr, + HASH_CR_DMAE_MASK); + HASH_SET_BITS(&device_data->base->cr, + HASH_CR_PRIVN_MASK); + } + + HASH_INITIALIZE; + + if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) + hash_hw_write_key(device_data, ctx->key, ctx->keylen); + + /* Number of bits in last word = (nbytes * 8) % 32 */ + HASH_SET_NBLW((req->nbytes * 8) % 32); + req_ctx->updated = 1; + } + + /* Store the nents in the dma struct. */ + ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL); + if (!ctx->device->dma.nents) { + dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n", + __func__); + ret = ctx->device->dma.nents; + goto out; + } + + bytes_written = hash_dma_write(ctx, req->src, req->nbytes); + if (bytes_written != req->nbytes) { + dev_err(device_data->dev, "%s: hash_dma_write() failed!\n", + __func__); + ret = bytes_written; + goto out; + } + + wait_for_completion(&ctx->device->dma.complete); + hash_dma_done(ctx); + + while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) + cpu_relax(); + + if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { + unsigned int keylen = ctx->keylen; + u8 *key = ctx->key; + + dev_dbg(device_data->dev, "%s: keylen: %d\n", + __func__, ctx->keylen); + hash_hw_write_key(device_data, key, keylen); + } + + hash_get_digest(device_data, digest, ctx->config.algorithm); + memcpy(req->result, digest, ctx->digestsize); + +out: + release_hash_device(device_data); + + /** + * Allocated in setkey, and only used in HMAC. + */ + kfree(ctx->key); + + return ret; +} + +/** + * hash_hw_final - The final hash calculation function + * @req: The hash request for the job. 
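+ *
+ * Note the zero-length special case below: the hardware returns all zeroes
+ * for an empty message, so the driver substitutes precalculated digests via
+ * get_empty_message_digest() instead. In plain hash mode that is the
+ * standard empty-message digest (e.g. SHA-1("") =
+ * da39a3ee5e6b4b0d3255bfef95601890afd80709); in HMAC mode it is only used
+ * when no key has been set, while an empty message with a non-empty key is
+ * flagged as unsupported.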
+ */ +static int hash_hw_final(struct ahash_request *req) +{ + int ret = 0; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct hash_req_ctx *req_ctx = ahash_request_ctx(req); + struct hash_device_data *device_data; + u8 digest[SHA256_DIGEST_SIZE]; + + ret = hash_get_device_data(ctx, &device_data); + if (ret) + return ret; + + dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); + + if (req_ctx->updated) { + ret = hash_resume_state(device_data, &device_data->state); + + if (ret) { + dev_err(device_data->dev, + "%s: hash_resume_state() failed!\n", __func__); + goto out; + } + } else if (req->nbytes == 0 && ctx->keylen == 0) { + u8 zero_hash[SHA256_DIGEST_SIZE]; + u32 zero_hash_size = 0; + bool zero_digest = false; + /** + * Use a pre-calculated empty message digest + * (workaround since hw return zeroes, hw bug!?) + */ + ret = get_empty_message_digest(device_data, &zero_hash[0], + &zero_hash_size, &zero_digest); + if (!ret && likely(zero_hash_size == ctx->digestsize) && + zero_digest) { + memcpy(req->result, &zero_hash[0], ctx->digestsize); + goto out; + } else if (!ret && !zero_digest) { + dev_dbg(device_data->dev, + "%s: HMAC zero msg with key, continue...\n", + __func__); + } else { + dev_err(device_data->dev, + "%s: ret=%d, or wrong digest size? %s\n", + __func__, ret, + zero_hash_size == ctx->digestsize ? + "true" : "false"); + /* Return error */ + goto out; + } + } else if (req->nbytes == 0 && ctx->keylen > 0) { + dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n", + __func__); + goto out; + } + + if (!req_ctx->updated) { + ret = init_hash_hw(device_data, ctx); + if (ret) { + dev_err(device_data->dev, + "%s: init_hash_hw() failed!\n", __func__); + goto out; + } + } + + if (req_ctx->state.index) { + hash_messagepad(device_data, req_ctx->state.buffer, + req_ctx->state.index); + } else { + HASH_SET_DCAL; + while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) + cpu_relax(); + } + + if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { + unsigned int keylen = ctx->keylen; + u8 *key = ctx->key; + + dev_dbg(device_data->dev, "%s: keylen: %d\n", + __func__, ctx->keylen); + hash_hw_write_key(device_data, key, keylen); + } + + hash_get_digest(device_data, digest, ctx->config.algorithm); + memcpy(req->result, digest, ctx->digestsize); + +out: + release_hash_device(device_data); + + /** + * Allocated in setkey, and only used in HMAC. + */ + kfree(ctx->key); + + return ret; +} + +/** + * hash_hw_update - Updates current HASH computation hashing another part of + * the message. + * @req: Byte array containing the message to be hashed (caller + * allocated). 
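+ *
+ * Data is consumed through crypto_hash_walk_first()/crypto_hash_walk_done()
+ * and buffered in req_ctx->state.buffer until a full 64-byte block
+ * (HASH_BLOCK_SIZE) is available; hash_process_data() then feeds whole
+ * blocks to the hardware and saves the device state so several contexts can
+ * share the engine. As an illustration, two consecutive 40-byte updates
+ * leave 40 bytes buffered after the first call, while the second call
+ * pushes one 64-byte block to the hardware and keeps 16 bytes buffered for
+ * the final operation.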
+ */ +int hash_hw_update(struct ahash_request *req) +{ + int ret = 0; + u8 index = 0; + u8 *buffer; + struct hash_device_data *device_data; + u8 *data_buffer; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct hash_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_hash_walk walk; + int msg_length = crypto_hash_walk_first(req, &walk); + + /* Empty message ("") is correct indata */ + if (msg_length == 0) + return ret; + + index = req_ctx->state.index; + buffer = (u8 *)req_ctx->state.buffer; + + /* Check if ctx->state.length + msg_length + overflows */ + if (msg_length > (req_ctx->state.length.low_word + msg_length) && + HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) { + pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__); + return -EPERM; + } + + ret = hash_get_device_data(ctx, &device_data); + if (ret) + return ret; + + /* Main loop */ + while (0 != msg_length) { + data_buffer = walk.data; + ret = hash_process_data(device_data, ctx, req_ctx, msg_length, + data_buffer, buffer, &index); + + if (ret) { + dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n", + __func__); + goto out; + } + + msg_length = crypto_hash_walk_done(&walk, 0); + } + + req_ctx->state.index = index; + dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n", + __func__, req_ctx->state.index, req_ctx->state.bit_index); + +out: + release_hash_device(device_data); + + return ret; +} + +/** + * hash_resume_state - Function that resumes the state of an calculation. + * @device_data: Pointer to the device structure. + * @device_state: The state to be restored in the hash hardware + */ +int hash_resume_state(struct hash_device_data *device_data, + const struct hash_state *device_state) +{ + u32 temp_cr; + s32 count; + int hash_mode = HASH_OPER_MODE_HASH; + + if (NULL == device_state) { + dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", + __func__); + return -EPERM; + } + + /* Check correctness of index and length members */ + if (device_state->index > HASH_BLOCK_SIZE || + (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) { + dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", + __func__); + return -EPERM; + } + + /* + * INIT bit. Set this bit to 0b1 to reset the HASH processor core and + * prepare the initialize the HASH accelerator to compute the message + * digest of a new message. + */ + HASH_INITIALIZE; + + temp_cr = device_state->temp_cr; + writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr); + + if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK) + hash_mode = HASH_OPER_MODE_HMAC; + else + hash_mode = HASH_OPER_MODE_HASH; + + for (count = 0; count < HASH_CSR_COUNT; count++) { + if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH)) + break; + + writel_relaxed(device_state->csr[count], + &device_data->base->csrx[count]); + } + + writel_relaxed(device_state->csfull, &device_data->base->csfull); + writel_relaxed(device_state->csdatain, &device_data->base->csdatain); + + writel_relaxed(device_state->str_reg, &device_data->base->str); + writel_relaxed(temp_cr, &device_data->base->cr); + + return 0; +} + +/** + * hash_save_state - Function that saves the state of hardware. + * @device_data: Pointer to the device structure. + * @device_state: The strucure where the hardware state should be saved. 
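+ *
+ * This is the counterpart of hash_resume_state() above: the control, status
+ * and context registers (CR, STR, DIN, CSRx, CSFULL, CSDATAIN) are read
+ * back so the current job can be switched out between requests. As in the
+ * resume path, only the first 36 context registers are saved in plain HASH
+ * mode, while HMAC mode saves the full set.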
+ */ +int hash_save_state(struct hash_device_data *device_data, + struct hash_state *device_state) +{ + u32 temp_cr; + u32 count; + int hash_mode = HASH_OPER_MODE_HASH; + + if (NULL == device_state) { + dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", + __func__); + return -ENOTSUPP; + } + + /* Write dummy value to force digest intermediate calculation. This + * actually makes sure that there isn't any ongoing calculation in the + * hardware. + */ + while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) + cpu_relax(); + + temp_cr = readl_relaxed(&device_data->base->cr); + + device_state->str_reg = readl_relaxed(&device_data->base->str); + + device_state->din_reg = readl_relaxed(&device_data->base->din); + + if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK) + hash_mode = HASH_OPER_MODE_HMAC; + else + hash_mode = HASH_OPER_MODE_HASH; + + for (count = 0; count < HASH_CSR_COUNT; count++) { + if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH)) + break; + + device_state->csr[count] = + readl_relaxed(&device_data->base->csrx[count]); + } + + device_state->csfull = readl_relaxed(&device_data->base->csfull); + device_state->csdatain = readl_relaxed(&device_data->base->csdatain); + + device_state->temp_cr = temp_cr; + + return 0; +} + +/** + * hash_check_hw - This routine checks for peripheral Ids and PCell Ids. + * @device_data: + * + */ +int hash_check_hw(struct hash_device_data *device_data) +{ + /* Checking Peripheral Ids */ + if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) && + HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) && + HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) && + HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) && + HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) && + HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) && + HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) && + HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) { + return 0; + } + + dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__); + return -ENOTSUPP; +} + +/** + * hash_get_digest - Gets the digest. + * @device_data: Pointer to the device structure. + * @digest: User allocated byte array for the calculated digest. + * @algorithm: The algorithm in use. + */ +void hash_get_digest(struct hash_device_data *device_data, + u8 *digest, int algorithm) +{ + u32 temp_hx_val, count; + int loop_ctr; + + if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) { + dev_err(device_data->dev, "%s: Incorrect algorithm %d\n", + __func__, algorithm); + return; + } + + if (algorithm == HASH_ALGO_SHA1) + loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32); + else + loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32); + + dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n", + __func__, (u32) digest); + + /* Copy result into digest array */ + for (count = 0; count < loop_ctr; count++) { + temp_hx_val = readl_relaxed(&device_data->base->hx[count]); + digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF); + digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF); + digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF); + digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF); + } +} + +/** + * hash_update - The hash update function for SHA1/SHA2 (SHA256). + * @req: The hash request for the job. 
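+ *
+ * Update (and final, below) are reached through the generic ahash API. A
+ * minimal caller sketch, with error handling omitted, looks roughly like:
+ *
+ *     tfm = crypto_alloc_ahash("sha256", 0, 0);
+ *     req = ahash_request_alloc(tfm, GFP_KERNEL);
+ *     ahash_request_set_crypt(req, sg, result, nbytes);
+ *     crypto_ahash_digest(req);       (runs init + update + final)
+ *
+ * In DMA mode the update step is skipped here and all data is handed to the
+ * hardware in the final step instead, as the code below shows.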
+ */ +static int ahash_update(struct ahash_request *req) +{ + int ret = 0; + struct hash_req_ctx *req_ctx = ahash_request_ctx(req); + + if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) + ret = hash_hw_update(req); + /* Skip update for DMA, all data will be passed to DMA in final */ + + if (ret) { + pr_err("%s: hash_hw_update() failed!\n", __func__); + } + + return ret; +} + +/** + * hash_final - The hash final function for SHA1/SHA2 (SHA256). + * @req: The hash request for the job. + */ +static int ahash_final(struct ahash_request *req) +{ + int ret = 0; + struct hash_req_ctx *req_ctx = ahash_request_ctx(req); + + pr_debug("%s: data size: %d\n", __func__, req->nbytes); + + if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode) + ret = hash_dma_final(req); + else + ret = hash_hw_final(req); + + if (ret) { + pr_err("%s: hash_hw/dma_final() failed\n", __func__); + } + + return ret; +} + +static int hash_setkey(struct crypto_ahash *tfm, + const u8 *key, unsigned int keylen, int alg) +{ + int ret = 0; + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + + /** + * Freed in final. + */ + ctx->key = kmemdup(key, keylen, GFP_KERNEL); + if (!ctx->key) { + pr_err("%s: Failed to allocate ctx->key for %d\n", + __func__, alg); + return -ENOMEM; + } + ctx->keylen = keylen; + + return ret; +} + +static int ahash_sha1_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + + ctx->config.data_format = HASH_DATA_8_BITS; + ctx->config.algorithm = HASH_ALGO_SHA1; + ctx->config.oper_mode = HASH_OPER_MODE_HASH; + ctx->digestsize = SHA1_DIGEST_SIZE; + + return hash_init(req); +} + +static int ahash_sha256_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + + ctx->config.data_format = HASH_DATA_8_BITS; + ctx->config.algorithm = HASH_ALGO_SHA256; + ctx->config.oper_mode = HASH_OPER_MODE_HASH; + ctx->digestsize = SHA256_DIGEST_SIZE; + + return hash_init(req); +} + +static int ahash_sha1_digest(struct ahash_request *req) +{ + int ret2, ret1; + + ret1 = ahash_sha1_init(req); + if (ret1) + goto out; + + ret1 = ahash_update(req); + ret2 = ahash_final(req); + +out: + return ret1 ? ret1 : ret2; +} + +static int ahash_sha256_digest(struct ahash_request *req) +{ + int ret2, ret1; + + ret1 = ahash_sha256_init(req); + if (ret1) + goto out; + + ret1 = ahash_update(req); + ret2 = ahash_final(req); + +out: + return ret1 ? ret1 : ret2; +} + +static int hmac_sha1_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + + ctx->config.data_format = HASH_DATA_8_BITS; + ctx->config.algorithm = HASH_ALGO_SHA1; + ctx->config.oper_mode = HASH_OPER_MODE_HMAC; + ctx->digestsize = SHA1_DIGEST_SIZE; + + return hash_init(req); +} + +static int hmac_sha256_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct hash_ctx *ctx = crypto_ahash_ctx(tfm); + + ctx->config.data_format = HASH_DATA_8_BITS; + ctx->config.algorithm = HASH_ALGO_SHA256; + ctx->config.oper_mode = HASH_OPER_MODE_HMAC; + ctx->digestsize = SHA256_DIGEST_SIZE; + + return hash_init(req); +} + +static int hmac_sha1_digest(struct ahash_request *req) +{ + int ret2, ret1; + + ret1 = hmac_sha1_init(req); + if (ret1) + goto out; + + ret1 = ahash_update(req); + ret2 = ahash_final(req); + +out: + return ret1 ? 
ret1 : ret2; +} + +static int hmac_sha256_digest(struct ahash_request *req) +{ + int ret2, ret1; + + ret1 = hmac_sha256_init(req); + if (ret1) + goto out; + + ret1 = ahash_update(req); + ret2 = ahash_final(req); + +out: + return ret1 ? ret1 : ret2; +} + +static int hmac_sha1_setkey(struct crypto_ahash *tfm, + const u8 *key, unsigned int keylen) +{ + return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1); +} + +static int hmac_sha256_setkey(struct crypto_ahash *tfm, + const u8 *key, unsigned int keylen) +{ + return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256); +} + +struct hash_algo_template { + struct hash_config conf; + struct ahash_alg hash; +}; + +static int hash_cra_init(struct crypto_tfm *tfm) +{ + struct hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_alg *alg = tfm->__crt_alg; + struct hash_algo_template *hash_alg; + + hash_alg = container_of(__crypto_ahash_alg(alg), + struct hash_algo_template, + hash); + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct hash_req_ctx)); + + ctx->config.data_format = HASH_DATA_8_BITS; + ctx->config.algorithm = hash_alg->conf.algorithm; + ctx->config.oper_mode = hash_alg->conf.oper_mode; + + ctx->digestsize = hash_alg->hash.halg.digestsize; + + return 0; +} + +static struct hash_algo_template hash_algs[] = { + { + .conf.algorithm = HASH_ALGO_SHA1, + .conf.oper_mode = HASH_OPER_MODE_HASH, + .hash = { + .init = hash_init, + .update = ahash_update, + .final = ahash_final, + .digest = ahash_sha1_digest, + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.statesize = sizeof(struct hash_ctx), + .halg.base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-ux500", + .cra_flags = (CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC), + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct hash_ctx), + .cra_init = hash_cra_init, + .cra_module = THIS_MODULE, + } + } + }, + { + .conf.algorithm = HASH_ALGO_SHA256, + .conf.oper_mode = HASH_OPER_MODE_HASH, + .hash = { + .init = hash_init, + .update = ahash_update, + .final = ahash_final, + .digest = ahash_sha256_digest, + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.statesize = sizeof(struct hash_ctx), + .halg.base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-ux500", + .cra_flags = (CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC), + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct hash_ctx), + .cra_type = &crypto_ahash_type, + .cra_init = hash_cra_init, + .cra_module = THIS_MODULE, + } + } + }, + { + .conf.algorithm = HASH_ALGO_SHA1, + .conf.oper_mode = HASH_OPER_MODE_HMAC, + .hash = { + .init = hash_init, + .update = ahash_update, + .final = ahash_final, + .digest = hmac_sha1_digest, + .setkey = hmac_sha1_setkey, + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.statesize = sizeof(struct hash_ctx), + .halg.base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "hmac-sha1-ux500", + .cra_flags = (CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC), + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct hash_ctx), + .cra_type = &crypto_ahash_type, + .cra_init = hash_cra_init, + .cra_module = THIS_MODULE, + } + } + }, + { + .conf.algorithm = HASH_ALGO_SHA256, + .conf.oper_mode = HASH_OPER_MODE_HMAC, + .hash = { + .init = hash_init, + .update = ahash_update, + .final = ahash_final, + .digest = hmac_sha256_digest, + .setkey = hmac_sha256_setkey, + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.statesize = sizeof(struct hash_ctx), + .halg.base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "hmac-sha256-ux500", + .cra_flags = (CRYPTO_ALG_TYPE_AHASH | + 
CRYPTO_ALG_ASYNC), + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct hash_ctx), + .cra_type = &crypto_ahash_type, + .cra_init = hash_cra_init, + .cra_module = THIS_MODULE, + } + } + } +}; + +/** + * hash_algs_register_all - + */ +static int ahash_algs_register_all(struct hash_device_data *device_data) +{ + int ret; + int i; + int count; + + for (i = 0; i < ARRAY_SIZE(hash_algs); i++) { + ret = crypto_register_ahash(&hash_algs[i].hash); + if (ret) { + count = i; + dev_err(device_data->dev, "%s: alg registration failed\n", + hash_algs[i].hash.halg.base.cra_driver_name); + goto unreg; + } + } + return 0; +unreg: + for (i = 0; i < count; i++) + crypto_unregister_ahash(&hash_algs[i].hash); + return ret; +} + +/** + * hash_algs_unregister_all - + */ +static void ahash_algs_unregister_all(struct hash_device_data *device_data) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hash_algs); i++) + crypto_unregister_ahash(&hash_algs[i].hash); +} + +/** + * ux500_hash_probe - Function that probes the hash hardware. + * @pdev: The platform device. + */ +static int ux500_hash_probe(struct platform_device *pdev) +{ + int ret = 0; + struct resource *res = NULL; + struct hash_device_data *device_data; + struct device *dev = &pdev->dev; + + device_data = kzalloc(sizeof(*device_data), GFP_ATOMIC); + if (!device_data) { + ret = -ENOMEM; + goto out; + } + + device_data->dev = dev; + device_data->current_ctx = NULL; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__); + ret = -ENODEV; + goto out_kfree; + } + + res = request_mem_region(res->start, resource_size(res), pdev->name); + if (res == NULL) { + dev_dbg(dev, "%s: request_mem_region() failed!\n", __func__); + ret = -EBUSY; + goto out_kfree; + } + + device_data->phybase = res->start; + device_data->base = ioremap(res->start, resource_size(res)); + if (!device_data->base) { + dev_err(dev, "%s: ioremap() failed!\n", __func__); + ret = -ENOMEM; + goto out_free_mem; + } + spin_lock_init(&device_data->ctx_lock); + spin_lock_init(&device_data->power_state_lock); + + /* Enable power for HASH1 hardware block */ + device_data->regulator = regulator_get(dev, "v-ape"); + if (IS_ERR(device_data->regulator)) { + dev_err(dev, "%s: regulator_get() failed!\n", __func__); + ret = PTR_ERR(device_data->regulator); + device_data->regulator = NULL; + goto out_unmap; + } + + /* Enable the clock for HASH1 hardware block */ + device_data->clk = clk_get(dev, NULL); + if (IS_ERR(device_data->clk)) { + dev_err(dev, "%s: clk_get() failed!\n", __func__); + ret = PTR_ERR(device_data->clk); + goto out_regulator; + } + + ret = clk_prepare(device_data->clk); + if (ret) { + dev_err(dev, "%s: clk_prepare() failed!\n", __func__); + goto out_clk; + } + + /* Enable device power (and clock) */ + ret = hash_enable_power(device_data, false); + if (ret) { + dev_err(dev, "%s: hash_enable_power() failed!\n", __func__); + goto out_clk_unprepare; + } + + ret = hash_check_hw(device_data); + if (ret) { + dev_err(dev, "%s: hash_check_hw() failed!\n", __func__); + goto out_power; + } + + if (hash_mode == HASH_MODE_DMA) + hash_dma_setup_channel(device_data, dev); + + platform_set_drvdata(pdev, device_data); + + /* Put the new device into the device list... */ + klist_add_tail(&device_data->list_node, &driver_data.device_list); + /* ... and signal that a new device is available. 
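+ *
+ * The driver-wide semaphore starts out at zero (see ux500_hash_mod_init()
+ * below), so this up() is what makes the device visible to users:
+ * hash_get_device_data() does a down_interruptible() and then walks the
+ * klist for an entry whose current_ctx is still NULL. One up() per probed
+ * device keeps the semaphore count equal to the number of free devices.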
*/ + up(&driver_data.device_allocation); + + ret = ahash_algs_register_all(device_data); + if (ret) { + dev_err(dev, "%s: ahash_algs_register_all() failed!\n", + __func__); + goto out_power; + } + + dev_info(dev, "successfully registered\n"); + return 0; + +out_power: + hash_disable_power(device_data, false); + +out_clk_unprepare: + clk_unprepare(device_data->clk); + +out_clk: + clk_put(device_data->clk); + +out_regulator: + regulator_put(device_data->regulator); + +out_unmap: + iounmap(device_data->base); + +out_free_mem: + release_mem_region(res->start, resource_size(res)); + +out_kfree: + kfree(device_data); +out: + return ret; +} + +/** + * ux500_hash_remove - Function that removes the hash device from the platform. + * @pdev: The platform device. + */ +static int ux500_hash_remove(struct platform_device *pdev) +{ + struct resource *res; + struct hash_device_data *device_data; + struct device *dev = &pdev->dev; + + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__); + return -ENOMEM; + } + + /* Try to decrease the number of available devices. */ + if (down_trylock(&driver_data.device_allocation)) + return -EBUSY; + + /* Check that the device is free */ + spin_lock(&device_data->ctx_lock); + /* current_ctx allocates a device, NULL = unallocated */ + if (device_data->current_ctx) { + /* The device is busy */ + spin_unlock(&device_data->ctx_lock); + /* Return the device to the pool. */ + up(&driver_data.device_allocation); + return -EBUSY; + } + + spin_unlock(&device_data->ctx_lock); + + /* Remove the device from the list */ + if (klist_node_attached(&device_data->list_node)) + klist_remove(&device_data->list_node); + + /* If this was the last device, remove the services */ + if (list_empty(&driver_data.device_list.k_list)) + ahash_algs_unregister_all(device_data); + + if (hash_disable_power(device_data, false)) + dev_err(dev, "%s: hash_disable_power() failed\n", + __func__); + + clk_unprepare(device_data->clk); + clk_put(device_data->clk); + regulator_put(device_data->regulator); + + iounmap(device_data->base); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res) + release_mem_region(res->start, resource_size(res)); + + kfree(device_data); + + return 0; +} + +/** + * ux500_hash_shutdown - Function that shutdown the hash device. + * @pdev: The platform device + */ +static void ux500_hash_shutdown(struct platform_device *pdev) +{ + struct resource *res = NULL; + struct hash_device_data *device_data; + + device_data = platform_get_drvdata(pdev); + if (!device_data) { + dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n", + __func__); + return; + } + + /* Check that the device is free */ + spin_lock(&device_data->ctx_lock); + /* current_ctx allocates a device, NULL = unallocated */ + if (!device_data->current_ctx) { + if (down_trylock(&driver_data.device_allocation)) + dev_dbg(&pdev->dev, "%s: Cryp still in use! Shutting down anyway...\n", + __func__); + /** + * (Allocate the device) + * Need to set this to non-null (dummy) value, + * to avoid usage if context switching. 
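+ *
+ * Bumping the NULL pointer to a non-NULL dummy value below marks the
+ * device as allocated, so hash_get_device_data() will skip it while the
+ * shutdown path tears the device down (in that check current_ctx is only
+ * compared against NULL, never dereferenced).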
+ */ + device_data->current_ctx++; + } + spin_unlock(&device_data->ctx_lock); + + /* Remove the device from the list */ + if (klist_node_attached(&device_data->list_node)) + klist_remove(&device_data->list_node); + + /* If this was the last device, remove the services */ + if (list_empty(&driver_data.device_list.k_list)) + ahash_algs_unregister_all(device_data); + + iounmap(device_data->base); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res) + release_mem_region(res->start, resource_size(res)); + + if (hash_disable_power(device_data, false)) + dev_err(&pdev->dev, "%s: hash_disable_power() failed\n", + __func__); +} + +#ifdef CONFIG_PM_SLEEP +/** + * ux500_hash_suspend - Function that suspends the hash device. + * @dev: Device to suspend. + */ +static int ux500_hash_suspend(struct device *dev) +{ + int ret; + struct hash_device_data *device_data; + struct hash_ctx *temp_ctx = NULL; + + device_data = dev_get_drvdata(dev); + if (!device_data) { + dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__); + return -ENOMEM; + } + + spin_lock(&device_data->ctx_lock); + if (!device_data->current_ctx) + device_data->current_ctx++; + spin_unlock(&device_data->ctx_lock); + + if (device_data->current_ctx == ++temp_ctx) { + if (down_interruptible(&driver_data.device_allocation)) + dev_dbg(dev, "%s: down_interruptible() failed\n", + __func__); + ret = hash_disable_power(device_data, false); + + } else { + ret = hash_disable_power(device_data, true); + } + + if (ret) + dev_err(dev, "%s: hash_disable_power()\n", __func__); + + return ret; +} + +/** + * ux500_hash_resume - Function that resume the hash device. + * @dev: Device to resume. + */ +static int ux500_hash_resume(struct device *dev) +{ + int ret = 0; + struct hash_device_data *device_data; + struct hash_ctx *temp_ctx = NULL; + + device_data = dev_get_drvdata(dev); + if (!device_data) { + dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__); + return -ENOMEM; + } + + spin_lock(&device_data->ctx_lock); + if (device_data->current_ctx == ++temp_ctx) + device_data->current_ctx = NULL; + spin_unlock(&device_data->ctx_lock); + + if (!device_data->current_ctx) + up(&driver_data.device_allocation); + else + ret = hash_enable_power(device_data, true); + + if (ret) + dev_err(dev, "%s: hash_enable_power() failed!\n", __func__); + + return ret; +} +#endif + +static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume); + +static const struct of_device_id ux500_hash_match[] = { + { .compatible = "stericsson,ux500-hash" }, + { }, +}; + +static struct platform_driver hash_driver = { + .probe = ux500_hash_probe, + .remove = ux500_hash_remove, + .shutdown = ux500_hash_shutdown, + .driver = { + .name = "hash1", + .of_match_table = ux500_hash_match, + .pm = &ux500_hash_pm, + } +}; + +/** + * ux500_hash_mod_init - The kernel module init function. + */ +static int __init ux500_hash_mod_init(void) +{ + klist_init(&driver_data.device_list, NULL, NULL); + /* Initialize the semaphore to 0 devices (locked state) */ + sema_init(&driver_data.device_allocation, 0); + + return platform_driver_register(&hash_driver); +} + +/** + * ux500_hash_mod_fini - The kernel module exit function. 
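+ *
+ * Unregistering the platform driver detaches any bound devices;
+ * ux500_hash_remove() then unregisters the ahash algorithms once the last
+ * device is gone (see the list_empty() check there).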
+ */ +static void __exit ux500_hash_mod_fini(void) +{ + platform_driver_unregister(&hash_driver); +} + +module_init(ux500_hash_mod_init); +module_exit(ux500_hash_mod_fini); + +MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine."); +MODULE_LICENSE("GPL"); + +MODULE_ALIAS_CRYPTO("sha1-all"); +MODULE_ALIAS_CRYPTO("sha256-all"); +MODULE_ALIAS_CRYPTO("hmac-sha1-all"); +MODULE_ALIAS_CRYPTO("hmac-sha256-all"); diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig new file mode 100644 index 000000000..771babf16 --- /dev/null +++ b/drivers/crypto/vmx/Kconfig @@ -0,0 +1,8 @@ +config CRYPTO_DEV_VMX_ENCRYPT + tristate "Encryption acceleration support on P8 CPU" + depends on PPC64 && CRYPTO_DEV_VMX + default y + help + Support for VMX cryptographic acceleration instructions on Power8 CPU. + This module supports acceleration for AES and GHASH in hardware. If you + choose 'M' here, this module will be called vmx-crypto. diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile new file mode 100644 index 000000000..c699c6e6c --- /dev/null +++ b/drivers/crypto/vmx/Makefile @@ -0,0 +1,19 @@ +obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o +vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o ghash.o + +ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) +TARGET := linux-ppc64le +else +TARGET := linux-pcc64 +endif + +quiet_cmd_perl = PERL $@ + cmd_perl = $(PERL) $(<) $(TARGET) > $(@) + +$(src)/aesp8-ppc.S: $(src)/aesp8-ppc.pl + $(call cmd,perl) + +$(src)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl + $(call cmd,perl) + +.PRECIOUS: $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c new file mode 100644 index 000000000..ab300ea19 --- /dev/null +++ b/drivers/crypto/vmx/aes.c @@ -0,0 +1,139 @@ +/** + * AES routines supporting VMX instructions on the Power 8 + * + * Copyright (C) 2015 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com> + */ + +#include <linux/types.h> +#include <linux/err.h> +#include <linux/crypto.h> +#include <linux/delay.h> +#include <linux/hardirq.h> +#include <asm/switch_to.h> +#include <crypto/aes.h> + +#include "aesp8-ppc.h" + +struct p8_aes_ctx { + struct crypto_cipher *fallback; + struct aes_key enc_key; + struct aes_key dec_key; +}; + +static int p8_aes_init(struct crypto_tfm *tfm) +{ + const char *alg; + struct crypto_cipher *fallback; + struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!(alg = crypto_tfm_alg_name(tfm))) { + printk(KERN_ERR "Failed to get algorithm name.\n"); + return -ENOENT; + } + + fallback = crypto_alloc_cipher(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", + alg, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + printk(KERN_INFO "Using '%s' as fallback implementation.\n", + crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); + + crypto_cipher_set_flags(fallback, + crypto_cipher_get_flags((struct crypto_cipher *) tfm)); + ctx->fallback = fallback; + + return 0; +} + +static void p8_aes_exit(struct crypto_tfm *tfm) +{ + struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->fallback) { + crypto_free_cipher(ctx->fallback); + ctx->fallback = NULL; + } +} + +static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + int ret; + struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + pagefault_disable(); + enable_kernel_altivec(); + ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); + ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); + pagefault_enable(); + + ret += crypto_cipher_setkey(ctx->fallback, key, keylen); + return ret; +} + +static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + if (in_interrupt()) { + crypto_cipher_encrypt_one(ctx->fallback, dst, src); + } else { + pagefault_disable(); + enable_kernel_altivec(); + aes_p8_encrypt(src, dst, &ctx->enc_key); + pagefault_enable(); + } +} + +static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + if (in_interrupt()) { + crypto_cipher_decrypt_one(ctx->fallback, dst, src); + } else { + pagefault_disable(); + enable_kernel_altivec(); + aes_p8_decrypt(src, dst, &ctx->dec_key); + pagefault_enable(); + } +} + +struct crypto_alg p8_aes_alg = { + .cra_name = "aes", + .cra_driver_name = "p8_aes", + .cra_module = THIS_MODULE, + .cra_priority = 1000, + .cra_type = NULL, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_NEED_FALLBACK, + .cra_alignmask = 0, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct p8_aes_ctx), + .cra_init = p8_aes_init, + .cra_exit = p8_aes_exit, + .cra_cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = p8_aes_setkey, + .cia_encrypt = p8_aes_encrypt, + .cia_decrypt = p8_aes_decrypt, + }, +}; + diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c new file mode 100644 index 000000000..1a559b7dd --- /dev/null +++ b/drivers/crypto/vmx/aes_cbc.c @@ -0,0 +1,184 @@ +/** + * AES CBC routines supporting VMX instructions on the Power 8 + * + * Copyright (C) 2015 International Business Machines Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com> + */ + +#include <linux/types.h> +#include <linux/err.h> +#include <linux/crypto.h> +#include <linux/delay.h> +#include <linux/hardirq.h> +#include <asm/switch_to.h> +#include <crypto/aes.h> +#include <crypto/scatterwalk.h> + +#include "aesp8-ppc.h" + +struct p8_aes_cbc_ctx { + struct crypto_blkcipher *fallback; + struct aes_key enc_key; + struct aes_key dec_key; +}; + +static int p8_aes_cbc_init(struct crypto_tfm *tfm) +{ + const char *alg; + struct crypto_blkcipher *fallback; + struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!(alg = crypto_tfm_alg_name(tfm))) { + printk(KERN_ERR "Failed to get algorithm name.\n"); + return -ENOENT; + } + + fallback = crypto_alloc_blkcipher(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", + alg, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + printk(KERN_INFO "Using '%s' as fallback implementation.\n", + crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); + + crypto_blkcipher_set_flags(fallback, + crypto_blkcipher_get_flags((struct crypto_blkcipher *) tfm)); + ctx->fallback = fallback; + + return 0; +} + +static void p8_aes_cbc_exit(struct crypto_tfm *tfm) +{ + struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->fallback) { + crypto_free_blkcipher(ctx->fallback); + ctx->fallback = NULL; + } +} + +static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + int ret; + struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); + + pagefault_disable(); + enable_kernel_altivec(); + ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); + ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); + pagefault_enable(); + + ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); + return ret; +} + +static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + int ret; + struct blkcipher_walk walk; + struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx( + crypto_blkcipher_tfm(desc->tfm)); + struct blkcipher_desc fallback_desc = { + .tfm = ctx->fallback, + .info = desc->info, + .flags = desc->flags + }; + + if (in_interrupt()) { + ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes); + } else { + pagefault_disable(); + enable_kernel_altivec(); + + blkcipher_walk_init(&walk, dst, src, nbytes); + ret = blkcipher_walk_virt(desc, &walk); + while ((nbytes = walk.nbytes)) { + aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, + nbytes & AES_BLOCK_MASK, &ctx->enc_key, walk.iv, 1); + nbytes &= AES_BLOCK_SIZE - 1; + ret = blkcipher_walk_done(desc, &walk, nbytes); + } + + pagefault_enable(); + } + + return ret; +} + +static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + 
unsigned int nbytes) +{ + int ret; + struct blkcipher_walk walk; + struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx( + crypto_blkcipher_tfm(desc->tfm)); + struct blkcipher_desc fallback_desc = { + .tfm = ctx->fallback, + .info = desc->info, + .flags = desc->flags + }; + + if (in_interrupt()) { + ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes); + } else { + pagefault_disable(); + enable_kernel_altivec(); + + blkcipher_walk_init(&walk, dst, src, nbytes); + ret = blkcipher_walk_virt(desc, &walk); + while ((nbytes = walk.nbytes)) { + aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, + nbytes & AES_BLOCK_MASK, &ctx->dec_key, walk.iv, 0); + nbytes &= AES_BLOCK_SIZE - 1; + ret = blkcipher_walk_done(desc, &walk, nbytes); + } + + pagefault_enable(); + } + + return ret; +} + + +struct crypto_alg p8_aes_cbc_alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "p8_aes_cbc", + .cra_module = THIS_MODULE, + .cra_priority = 1000, + .cra_type = &crypto_blkcipher_type, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, + .cra_alignmask = 0, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct p8_aes_cbc_ctx), + .cra_init = p8_aes_cbc_init, + .cra_exit = p8_aes_cbc_exit, + .cra_blkcipher = { + .ivsize = 0, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = p8_aes_cbc_setkey, + .encrypt = p8_aes_cbc_encrypt, + .decrypt = p8_aes_cbc_decrypt, + }, +}; + diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c new file mode 100644 index 000000000..96dbee4bf --- /dev/null +++ b/drivers/crypto/vmx/aes_ctr.c @@ -0,0 +1,167 @@ +/** + * AES CTR routines supporting VMX instructions on the Power 8 + * + * Copyright (C) 2015 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com> + */ + +#include <linux/types.h> +#include <linux/err.h> +#include <linux/crypto.h> +#include <linux/delay.h> +#include <linux/hardirq.h> +#include <asm/switch_to.h> +#include <crypto/aes.h> +#include <crypto/scatterwalk.h> +#include "aesp8-ppc.h" + +struct p8_aes_ctr_ctx { + struct crypto_blkcipher *fallback; + struct aes_key enc_key; +}; + +static int p8_aes_ctr_init(struct crypto_tfm *tfm) +{ + const char *alg; + struct crypto_blkcipher *fallback; + struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!(alg = crypto_tfm_alg_name(tfm))) { + printk(KERN_ERR "Failed to get algorithm name.\n"); + return -ENOENT; + } + + fallback = crypto_alloc_blkcipher(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", + alg, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + printk(KERN_INFO "Using '%s' as fallback implementation.\n", + crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); + + crypto_blkcipher_set_flags(fallback, + crypto_blkcipher_get_flags((struct crypto_blkcipher *) tfm)); + ctx->fallback = fallback; + + return 0; +} + +static void p8_aes_ctr_exit(struct crypto_tfm *tfm) +{ + struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->fallback) { + crypto_free_blkcipher(ctx->fallback); + ctx->fallback = NULL; + } +} + +static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + int ret; + struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); + + pagefault_disable(); + enable_kernel_altivec(); + ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); + pagefault_enable(); + + ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); + return ret; +} + +static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, + struct blkcipher_walk *walk) +{ + u8 *ctrblk = walk->iv; + u8 keystream[AES_BLOCK_SIZE]; + u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + unsigned int nbytes = walk->nbytes; + + pagefault_disable(); + enable_kernel_altivec(); + aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key); + pagefault_enable(); + + crypto_xor(keystream, src, nbytes); + memcpy(dst, keystream, nbytes); + crypto_inc(ctrblk, AES_BLOCK_SIZE); +} + +static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + int ret; + struct blkcipher_walk walk; + struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx( + crypto_blkcipher_tfm(desc->tfm)); + struct blkcipher_desc fallback_desc = { + .tfm = ctx->fallback, + .info = desc->info, + .flags = desc->flags + }; + + if (in_interrupt()) { + ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes); + } else { + blkcipher_walk_init(&walk, dst, src, nbytes); + ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); + while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { + pagefault_disable(); + enable_kernel_altivec(); + aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr, + (nbytes & AES_BLOCK_MASK)/AES_BLOCK_SIZE, &ctx->enc_key, walk.iv); + pagefault_enable(); + + crypto_inc(walk.iv, AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + ret = blkcipher_walk_done(desc, &walk, nbytes); + } + if (walk.nbytes) { + p8_aes_ctr_final(ctx, &walk); + ret = blkcipher_walk_done(desc, &walk, 0); + } + } + + return ret; +} + +struct crypto_alg p8_aes_ctr_alg = { + .cra_name = "ctr(aes)", + .cra_driver_name = "p8_aes_ctr", + .cra_module = THIS_MODULE, + .cra_priority = 1000, + .cra_type = 
&crypto_blkcipher_type, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, + .cra_alignmask = 0, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx), + .cra_init = p8_aes_ctr_init, + .cra_exit = p8_aes_ctr_exit, + .cra_blkcipher = { + .ivsize = 0, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = p8_aes_ctr_setkey, + .encrypt = p8_aes_ctr_crypt, + .decrypt = p8_aes_ctr_crypt, + }, +}; diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/drivers/crypto/vmx/aesp8-ppc.h new file mode 100644 index 000000000..e963945a8 --- /dev/null +++ b/drivers/crypto/vmx/aesp8-ppc.h @@ -0,0 +1,20 @@ +#include <linux/types.h> +#include <crypto/aes.h> + +#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1)) + +struct aes_key { + u8 key[AES_MAX_KEYLENGTH]; + int rounds; +}; + +int aes_p8_set_encrypt_key(const u8 *userKey, const int bits, + struct aes_key *key); +int aes_p8_set_decrypt_key(const u8 *userKey, const int bits, + struct aes_key *key); +void aes_p8_encrypt(const u8 *in, u8 *out, const struct aes_key *key); +void aes_p8_decrypt(const u8 *in, u8 *out,const struct aes_key *key); +void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len, + const struct aes_key *key, u8 *iv, const int enc); +void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out, + size_t len, const struct aes_key *key, const u8 *iv); diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl new file mode 100644 index 000000000..6c5c20c61 --- /dev/null +++ b/drivers/crypto/vmx/aesp8-ppc.pl @@ -0,0 +1,1930 @@ +#!/usr/bin/env perl +# +# ==================================================================== +# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# This module implements support for AES instructions as per PowerISA +# specification version 2.07, first implemented by POWER8 processor. +# The module is endian-agnostic in sense that it supports both big- +# and little-endian cases. Data alignment in parallelizable modes is +# handled with VSX loads and stores, which implies MSR.VSX flag being +# set. It should also be noted that ISA specification doesn't prohibit +# alignment exceptions for these instructions on page boundaries. +# Initially alignment was handled in pure AltiVec/VMX way [when data +# is aligned programmatically, which in turn guarantees exception- +# free execution], but it turned to hamper performance when vcipher +# instructions are interleaved. It's reckoned that eventual +# misalignment penalties at page boundaries are in average lower +# than additional overhead in pure AltiVec approach. + +$flavour = shift; + +if ($flavour =~ /64/) { + $SIZE_T =8; + $LRSAVE =2*$SIZE_T; + $STU ="stdu"; + $POP ="ld"; + $PUSH ="std"; + $UCMP ="cmpld"; + $SHL ="sldi"; +} elsif ($flavour =~ /32/) { + $SIZE_T =4; + $LRSAVE =$SIZE_T; + $STU ="stwu"; + $POP ="lwz"; + $PUSH ="stw"; + $UCMP ="cmplw"; + $SHL ="slwi"; +} else { die "nonsense $flavour"; } + +$LITTLE_ENDIAN = ($flavour=~/le$/) ? 
$SIZE_T : 0; + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or +( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or +die "can't locate ppc-xlate.pl"; + +open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!"; + +$FRAME=8*$SIZE_T; +$prefix="aes_p8"; + +$sp="r1"; +$vrsave="r12"; + +######################################################################### +{{{ # Key setup procedures # +my ($inp,$bits,$out,$ptr,$cnt,$rounds)=map("r$_",(3..8)); +my ($zero,$in0,$in1,$key,$rcon,$mask,$tmp)=map("v$_",(0..6)); +my ($stage,$outperm,$outmask,$outhead,$outtail)=map("v$_",(7..11)); + +$code.=<<___; +.machine "any" + +.text + +.align 7 +rcon: +.long 0x01000000, 0x01000000, 0x01000000, 0x01000000 ?rev +.long 0x1b000000, 0x1b000000, 0x1b000000, 0x1b000000 ?rev +.long 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c ?rev +.long 0,0,0,0 ?asis +Lconsts: + mflr r0 + bcl 20,31,\$+4 + mflr $ptr #vvvvv "distance between . and rcon + addi $ptr,$ptr,-0x48 + mtlr r0 + blr + .long 0 + .byte 0,12,0x14,0,0,0,0,0 +.asciz "AES for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>" + +.globl .${prefix}_set_encrypt_key +Lset_encrypt_key: + mflr r11 + $PUSH r11,$LRSAVE($sp) + + li $ptr,-1 + ${UCMP}i $inp,0 + beq- Lenc_key_abort # if ($inp==0) return -1; + ${UCMP}i $out,0 + beq- Lenc_key_abort # if ($out==0) return -1; + li $ptr,-2 + cmpwi $bits,128 + blt- Lenc_key_abort + cmpwi $bits,256 + bgt- Lenc_key_abort + andi. r0,$bits,0x3f + bne- Lenc_key_abort + + lis r0,0xfff0 + mfspr $vrsave,256 + mtspr 256,r0 + + bl Lconsts + mtlr r11 + + neg r9,$inp + lvx $in0,0,$inp + addi $inp,$inp,15 # 15 is not typo + lvsr $key,0,r9 # borrow $key + li r8,0x20 + cmpwi $bits,192 + lvx $in1,0,$inp + le?vspltisb $mask,0x0f # borrow $mask + lvx $rcon,0,$ptr + le?vxor $key,$key,$mask # adjust for byte swap + lvx $mask,r8,$ptr + addi $ptr,$ptr,0x10 + vperm $in0,$in0,$in1,$key # align [and byte swap in LE] + li $cnt,8 + vxor $zero,$zero,$zero + mtctr $cnt + + ?lvsr $outperm,0,$out + vspltisb $outmask,-1 + lvx $outhead,0,$out + ?vperm $outmask,$zero,$outmask,$outperm + + blt Loop128 + addi $inp,$inp,8 + beq L192 + addi $inp,$inp,8 + b L256 + +.align 4 +Loop128: + vperm $key,$in0,$in0,$mask # rotate-n-splat + vsldoi $tmp,$zero,$in0,12 # >>32 + vperm $outtail,$in0,$in0,$outperm # rotate + vsel $stage,$outhead,$outtail,$outmask + vmr $outhead,$outtail + vcipherlast $key,$key,$rcon + stvx $stage,0,$out + addi $out,$out,16 + + vxor $in0,$in0,$tmp + vsldoi $tmp,$zero,$tmp,12 # >>32 + vxor $in0,$in0,$tmp + vsldoi $tmp,$zero,$tmp,12 # >>32 + vxor $in0,$in0,$tmp + vadduwm $rcon,$rcon,$rcon + vxor $in0,$in0,$key + bdnz Loop128 + + lvx $rcon,0,$ptr # last two round keys + + vperm $key,$in0,$in0,$mask # rotate-n-splat + vsldoi $tmp,$zero,$in0,12 # >>32 + vperm $outtail,$in0,$in0,$outperm # rotate + vsel $stage,$outhead,$outtail,$outmask + vmr $outhead,$outtail + vcipherlast $key,$key,$rcon + stvx $stage,0,$out + addi $out,$out,16 + + vxor $in0,$in0,$tmp + vsldoi $tmp,$zero,$tmp,12 # >>32 + vxor $in0,$in0,$tmp + vsldoi $tmp,$zero,$tmp,12 # >>32 + vxor $in0,$in0,$tmp + vadduwm $rcon,$rcon,$rcon + vxor $in0,$in0,$key + + vperm $key,$in0,$in0,$mask # rotate-n-splat + vsldoi $tmp,$zero,$in0,12 # >>32 + vperm $outtail,$in0,$in0,$outperm # rotate + vsel $stage,$outhead,$outtail,$outmask + vmr $outhead,$outtail + vcipherlast $key,$key,$rcon + stvx $stage,0,$out + addi $out,$out,16 + + vxor $in0,$in0,$tmp + vsldoi $tmp,$zero,$tmp,12 # >>32 + vxor $in0,$in0,$tmp + vsldoi $tmp,$zero,$tmp,12 # >>32 + 
vxor $in0,$in0,$tmp + vxor $in0,$in0,$key + vperm $outtail,$in0,$in0,$outperm # rotate + vsel $stage,$outhead,$outtail,$outmask + vmr $outhead,$outtail + stvx $stage,0,$out + + addi $inp,$out,15 # 15 is not typo + addi $out,$out,0x50 + + li $rounds,10 + b Ldone + +.align 4 +L192: + lvx $tmp,0,$inp + li $cnt,4 + vperm $outtail,$in0,$in0,$outperm # rotate + vsel $stage,$outhead,$outtail,$outmask + vmr $outhead,$outtail + stvx $stage,0,$out + addi $out,$out,16 + vperm $in1,$in1,$tmp,$key # align [and byte swap in LE] + vspltisb $key,8 # borrow $key + mtctr $cnt + vsububm $mask,$mask,$key # adjust the mask + +Loop192: + vperm $key,$in1,$in1,$mask # roate-n-splat + vsldoi $tmp,$zero,$in0,12 # >>32 + vcipherlast $key,$key,$rcon + + vxor $in0,$in0,$tmp + vsldoi $tmp,$zero,$tmp,12 # >>32 + vxor $in0,$in0,$tmp + vsldoi $tmp,$zero,$tmp,12 # >>32 + vxor $in0,$in0,$tmp + + vsldoi $stage,$zero,$in1,8 + vspltw $tmp,$in0,3 + vxor $tmp,$tmp,$in1 + vsldoi $in1,$zero,$in1,12 # >>32 + vadduwm $rcon,$rcon,$rcon + vxor $in1,$in1,$tmp + vxor $in0,$in0,$key + vxor $in1,$in1,$key + vsldoi $stage,$stage,$in0,8 + + vperm $key,$in1,$in1,$mask # rotate-n-splat + vsldoi $tmp,$zero,$in0,12 # >>32 + vperm $outtail,$stage,$stage,$outperm # rotate + vsel $stage,$outhead,$outtail,$outmask + vmr $outhead,$outtail + vcipherlast $key,$key,$rcon + stvx $stage,0,$out + addi $out,$out,16 + + vsldoi $stage,$in0,$in1,8 + vxor $in0,$in0,$tmp + vsldoi $tmp,$zero,$tmp,12 # >>32 + vperm $outtail,$stage,$stage,$outperm # rotate + vsel $stage,$outhead,$outtail,$outmask + vmr $outhead,$outtail + vxor $in0,$in0,$tmp + vsldoi $tmp,$zero,$tmp,12 # >>32 + vxor $in0,$in0,$tmp + stvx $stage,0,$out + addi $out,$out,16 + + vspltw $tmp,$in0,3 + vxor $tmp,$tmp,$in1 + vsldoi $in1,$zero,$in1,12 # >>32 + vadduwm $rcon,$rcon,$rcon + vxor $in1,$in1,$tmp + vxor $in0,$in0,$key + vxor $in1,$in1,$key + vperm $outtail,$in0,$in0,$outperm # rotate + vsel $stage,$outhead,$outtail,$outmask + vmr $outhead,$outtail + stvx $stage,0,$out + addi $inp,$out,15 # 15 is not typo + addi $out,$out,16 + bdnz Loop192 + + li $rounds,12 + addi $out,$out,0x20 + b Ldone + +.align 4 +L256: + lvx $tmp,0,$inp + li $cnt,7 + li $rounds,14 + vperm $outtail,$in0,$in0,$outperm # rotate + vsel $stage,$outhead,$outtail,$outmask + vmr $outhead,$outtail + stvx $stage,0,$out + addi $out,$out,16 + vperm $in1,$in1,$tmp,$key # align [and byte swap in LE] + mtctr $cnt + +Loop256: + vperm $key,$in1,$in1,$mask # rotate-n-splat + vsldoi $tmp,$zero,$in0,12 # >>32 + vperm $outtail,$in1,$in1,$outperm # rotate + vsel $stage,$outhead,$outtail,$outmask + vmr $outhead,$outtail + vcipherlast $key,$key,$rcon + stvx $stage,0,$out + addi $out,$out,16 + + vxor $in0,$in0,$tmp + vsldoi $tmp,$zero,$tmp,12 # >>32 + vxor $in0,$in0,$tmp + vsldoi $tmp,$zero,$tmp,12 # >>32 + vxor $in0,$in0,$tmp + vadduwm $rcon,$rcon,$rcon + vxor $in0,$in0,$key + vperm $outtail,$in0,$in0,$outperm # rotate + vsel $stage,$outhead,$outtail,$outmask + vmr $outhead,$outtail + stvx $stage,0,$out + addi $inp,$out,15 # 15 is not typo + addi $out,$out,16 + bdz Ldone + + vspltw $key,$in0,3 # just splat + vsldoi $tmp,$zero,$in1,12 # >>32 + vsbox $key,$key + + vxor $in1,$in1,$tmp + vsldoi $tmp,$zero,$tmp,12 # >>32 + vxor $in1,$in1,$tmp + vsldoi $tmp,$zero,$tmp,12 # >>32 + vxor $in1,$in1,$tmp + + vxor $in1,$in1,$key + b Loop256 + +.align 4 +Ldone: + lvx $in1,0,$inp # redundant in aligned case + vsel $in1,$outhead,$in1,$outmask + stvx $in1,0,$inp + li $ptr,0 + mtspr 256,$vrsave + stw $rounds,0($out) + +Lenc_key_abort: + mr r3,$ptr + blr + .long 0 + 
.byte 0,12,0x14,1,0,0,3,0 + .long 0 +.size .${prefix}_set_encrypt_key,.-.${prefix}_set_encrypt_key + +.globl .${prefix}_set_decrypt_key + $STU $sp,-$FRAME($sp) + mflr r10 + $PUSH r10,$FRAME+$LRSAVE($sp) + bl Lset_encrypt_key + mtlr r10 + + cmpwi r3,0 + bne- Ldec_key_abort + + slwi $cnt,$rounds,4 + subi $inp,$out,240 # first round key + srwi $rounds,$rounds,1 + add $out,$inp,$cnt # last round key + mtctr $rounds + +Ldeckey: + lwz r0, 0($inp) + lwz r6, 4($inp) + lwz r7, 8($inp) + lwz r8, 12($inp) + addi $inp,$inp,16 + lwz r9, 0($out) + lwz r10,4($out) + lwz r11,8($out) + lwz r12,12($out) + stw r0, 0($out) + stw r6, 4($out) + stw r7, 8($out) + stw r8, 12($out) + subi $out,$out,16 + stw r9, -16($inp) + stw r10,-12($inp) + stw r11,-8($inp) + stw r12,-4($inp) + bdnz Ldeckey + + xor r3,r3,r3 # return value +Ldec_key_abort: + addi $sp,$sp,$FRAME + blr + .long 0 + .byte 0,12,4,1,0x80,0,3,0 + .long 0 +.size .${prefix}_set_decrypt_key,.-.${prefix}_set_decrypt_key +___ +}}} +######################################################################### +{{{ # Single block en- and decrypt procedures # +sub gen_block () { +my $dir = shift; +my $n = $dir eq "de" ? "n" : ""; +my ($inp,$out,$key,$rounds,$idx)=map("r$_",(3..7)); + +$code.=<<___; +.globl .${prefix}_${dir}crypt + lwz $rounds,240($key) + lis r0,0xfc00 + mfspr $vrsave,256 + li $idx,15 # 15 is not typo + mtspr 256,r0 + + lvx v0,0,$inp + neg r11,$out + lvx v1,$idx,$inp + lvsl v2,0,$inp # inpperm + le?vspltisb v4,0x0f + ?lvsl v3,0,r11 # outperm + le?vxor v2,v2,v4 + li $idx,16 + vperm v0,v0,v1,v2 # align [and byte swap in LE] + lvx v1,0,$key + ?lvsl v5,0,$key # keyperm + srwi $rounds,$rounds,1 + lvx v2,$idx,$key + addi $idx,$idx,16 + subi $rounds,$rounds,1 + ?vperm v1,v1,v2,v5 # align round key + + vxor v0,v0,v1 + lvx v1,$idx,$key + addi $idx,$idx,16 + mtctr $rounds + +Loop_${dir}c: + ?vperm v2,v2,v1,v5 + v${n}cipher v0,v0,v2 + lvx v2,$idx,$key + addi $idx,$idx,16 + ?vperm v1,v1,v2,v5 + v${n}cipher v0,v0,v1 + lvx v1,$idx,$key + addi $idx,$idx,16 + bdnz Loop_${dir}c + + ?vperm v2,v2,v1,v5 + v${n}cipher v0,v0,v2 + lvx v2,$idx,$key + ?vperm v1,v1,v2,v5 + v${n}cipherlast v0,v0,v1 + + vspltisb v2,-1 + vxor v1,v1,v1 + li $idx,15 # 15 is not typo + ?vperm v2,v1,v2,v3 # outmask + le?vxor v3,v3,v4 + lvx v1,0,$out # outhead + vperm v0,v0,v0,v3 # rotate [and byte swap in LE] + vsel v1,v1,v0,v2 + lvx v4,$idx,$out + stvx v1,0,$out + vsel v0,v0,v4,v2 + stvx v0,$idx,$out + + mtspr 256,$vrsave + blr + .long 0 + .byte 0,12,0x14,0,0,0,3,0 + .long 0 +.size .${prefix}_${dir}crypt,.-.${prefix}_${dir}crypt +___ +} +&gen_block("en"); +&gen_block("de"); +}}} +######################################################################### +{{{ # CBC en- and decrypt procedures # +my ($inp,$out,$len,$key,$ivp,$enc,$rounds,$idx)=map("r$_",(3..10)); +my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3)); +my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm)= + map("v$_",(4..10)); +$code.=<<___; +.globl .${prefix}_cbc_encrypt + ${UCMP}i $len,16 + bltlr- + + cmpwi $enc,0 # test direction + lis r0,0xffe0 + mfspr $vrsave,256 + mtspr 256,r0 + + li $idx,15 + vxor $rndkey0,$rndkey0,$rndkey0 + le?vspltisb $tmp,0x0f + + lvx $ivec,0,$ivp # load [unaligned] iv + lvsl $inpperm,0,$ivp + lvx $inptail,$idx,$ivp + le?vxor $inpperm,$inpperm,$tmp + vperm $ivec,$ivec,$inptail,$inpperm + + neg r11,$inp + ?lvsl $keyperm,0,$key # prepare for unaligned key + lwz $rounds,240($key) + + lvsr $inpperm,0,r11 # prepare for unaligned load + lvx $inptail,0,$inp + addi $inp,$inp,15 # 15 is not typo + 
le?vxor $inpperm,$inpperm,$tmp + + ?lvsr $outperm,0,$out # prepare for unaligned store + vspltisb $outmask,-1 + lvx $outhead,0,$out + ?vperm $outmask,$rndkey0,$outmask,$outperm + le?vxor $outperm,$outperm,$tmp + + srwi $rounds,$rounds,1 + li $idx,16 + subi $rounds,$rounds,1 + beq Lcbc_dec + +Lcbc_enc: + vmr $inout,$inptail + lvx $inptail,0,$inp + addi $inp,$inp,16 + mtctr $rounds + subi $len,$len,16 # len-=16 + + lvx $rndkey0,0,$key + vperm $inout,$inout,$inptail,$inpperm + lvx $rndkey1,$idx,$key + addi $idx,$idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vxor $inout,$inout,$rndkey0 + lvx $rndkey0,$idx,$key + addi $idx,$idx,16 + vxor $inout,$inout,$ivec + +Loop_cbc_enc: + ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm + vcipher $inout,$inout,$rndkey1 + lvx $rndkey1,$idx,$key + addi $idx,$idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vcipher $inout,$inout,$rndkey0 + lvx $rndkey0,$idx,$key + addi $idx,$idx,16 + bdnz Loop_cbc_enc + + ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm + vcipher $inout,$inout,$rndkey1 + lvx $rndkey1,$idx,$key + li $idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vcipherlast $ivec,$inout,$rndkey0 + ${UCMP}i $len,16 + + vperm $tmp,$ivec,$ivec,$outperm + vsel $inout,$outhead,$tmp,$outmask + vmr $outhead,$tmp + stvx $inout,0,$out + addi $out,$out,16 + bge Lcbc_enc + + b Lcbc_done + +.align 4 +Lcbc_dec: + ${UCMP}i $len,128 + bge _aesp8_cbc_decrypt8x + vmr $tmp,$inptail + lvx $inptail,0,$inp + addi $inp,$inp,16 + mtctr $rounds + subi $len,$len,16 # len-=16 + + lvx $rndkey0,0,$key + vperm $tmp,$tmp,$inptail,$inpperm + lvx $rndkey1,$idx,$key + addi $idx,$idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vxor $inout,$tmp,$rndkey0 + lvx $rndkey0,$idx,$key + addi $idx,$idx,16 + +Loop_cbc_dec: + ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm + vncipher $inout,$inout,$rndkey1 + lvx $rndkey1,$idx,$key + addi $idx,$idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vncipher $inout,$inout,$rndkey0 + lvx $rndkey0,$idx,$key + addi $idx,$idx,16 + bdnz Loop_cbc_dec + + ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm + vncipher $inout,$inout,$rndkey1 + lvx $rndkey1,$idx,$key + li $idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vncipherlast $inout,$inout,$rndkey0 + ${UCMP}i $len,16 + + vxor $inout,$inout,$ivec + vmr $ivec,$tmp + vperm $tmp,$inout,$inout,$outperm + vsel $inout,$outhead,$tmp,$outmask + vmr $outhead,$tmp + stvx $inout,0,$out + addi $out,$out,16 + bge Lcbc_dec + +Lcbc_done: + addi $out,$out,-1 + lvx $inout,0,$out # redundant in aligned case + vsel $inout,$outhead,$inout,$outmask + stvx $inout,0,$out + + neg $enc,$ivp # write [unaligned] iv + li $idx,15 # 15 is not typo + vxor $rndkey0,$rndkey0,$rndkey0 + vspltisb $outmask,-1 + le?vspltisb $tmp,0x0f + ?lvsl $outperm,0,$enc + ?vperm $outmask,$rndkey0,$outmask,$outperm + le?vxor $outperm,$outperm,$tmp + lvx $outhead,0,$ivp + vperm $ivec,$ivec,$ivec,$outperm + vsel $inout,$outhead,$ivec,$outmask + lvx $inptail,$idx,$ivp + stvx $inout,0,$ivp + vsel $inout,$ivec,$inptail,$outmask + stvx $inout,$idx,$ivp + + mtspr 256,$vrsave + blr + .long 0 + .byte 0,12,0x14,0,0,0,6,0 + .long 0 +___ +######################################################################### +{{ # Optimized CBC decrypt procedure # +my $key_="r11"; +my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31)); +my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10..13)); +my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(14..21)); +my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys + # v26-v31 
last 6 round keys +my ($tmp,$keyperm)=($in3,$in4); # aliases with "caller", redundant assignment + +$code.=<<___; +.align 5 +_aesp8_cbc_decrypt8x: + $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp) + li r10,`$FRAME+8*16+15` + li r11,`$FRAME+8*16+31` + stvx v20,r10,$sp # ABI says so + addi r10,r10,32 + stvx v21,r11,$sp + addi r11,r11,32 + stvx v22,r10,$sp + addi r10,r10,32 + stvx v23,r11,$sp + addi r11,r11,32 + stvx v24,r10,$sp + addi r10,r10,32 + stvx v25,r11,$sp + addi r11,r11,32 + stvx v26,r10,$sp + addi r10,r10,32 + stvx v27,r11,$sp + addi r11,r11,32 + stvx v28,r10,$sp + addi r10,r10,32 + stvx v29,r11,$sp + addi r11,r11,32 + stvx v30,r10,$sp + stvx v31,r11,$sp + li r0,-1 + stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave + li $x10,0x10 + $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp) + li $x20,0x20 + $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp) + li $x30,0x30 + $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp) + li $x40,0x40 + $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp) + li $x50,0x50 + $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp) + li $x60,0x60 + $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp) + li $x70,0x70 + mtspr 256,r0 + + subi $rounds,$rounds,3 # -4 in total + subi $len,$len,128 # bias + + lvx $rndkey0,$x00,$key # load key schedule + lvx v30,$x10,$key + addi $key,$key,0x20 + lvx v31,$x00,$key + ?vperm $rndkey0,$rndkey0,v30,$keyperm + addi $key_,$sp,$FRAME+15 + mtctr $rounds + +Load_cbc_dec_key: + ?vperm v24,v30,v31,$keyperm + lvx v30,$x10,$key + addi $key,$key,0x20 + stvx v24,$x00,$key_ # off-load round[1] + ?vperm v25,v31,v30,$keyperm + lvx v31,$x00,$key + stvx v25,$x10,$key_ # off-load round[2] + addi $key_,$key_,0x20 + bdnz Load_cbc_dec_key + + lvx v26,$x10,$key + ?vperm v24,v30,v31,$keyperm + lvx v27,$x20,$key + stvx v24,$x00,$key_ # off-load round[3] + ?vperm v25,v31,v26,$keyperm + lvx v28,$x30,$key + stvx v25,$x10,$key_ # off-load round[4] + addi $key_,$sp,$FRAME+15 # rewind $key_ + ?vperm v26,v26,v27,$keyperm + lvx v29,$x40,$key + ?vperm v27,v27,v28,$keyperm + lvx v30,$x50,$key + ?vperm v28,v28,v29,$keyperm + lvx v31,$x60,$key + ?vperm v29,v29,v30,$keyperm + lvx $out0,$x70,$key # borrow $out0 + ?vperm v30,v30,v31,$keyperm + lvx v24,$x00,$key_ # pre-load round[1] + ?vperm v31,v31,$out0,$keyperm + lvx v25,$x10,$key_ # pre-load round[2] + + #lvx $inptail,0,$inp # "caller" already did this + #addi $inp,$inp,15 # 15 is not typo + subi $inp,$inp,15 # undo "caller" + + le?li $idx,8 + lvx_u $in0,$x00,$inp # load first 8 "words" + le?lvsl $inpperm,0,$idx + le?vspltisb $tmp,0x0f + lvx_u $in1,$x10,$inp + le?vxor $inpperm,$inpperm,$tmp # transform for lvx_u/stvx_u + lvx_u $in2,$x20,$inp + le?vperm $in0,$in0,$in0,$inpperm + lvx_u $in3,$x30,$inp + le?vperm $in1,$in1,$in1,$inpperm + lvx_u $in4,$x40,$inp + le?vperm $in2,$in2,$in2,$inpperm + vxor $out0,$in0,$rndkey0 + lvx_u $in5,$x50,$inp + le?vperm $in3,$in3,$in3,$inpperm + vxor $out1,$in1,$rndkey0 + lvx_u $in6,$x60,$inp + le?vperm $in4,$in4,$in4,$inpperm + vxor $out2,$in2,$rndkey0 + lvx_u $in7,$x70,$inp + addi $inp,$inp,0x80 + le?vperm $in5,$in5,$in5,$inpperm + vxor $out3,$in3,$rndkey0 + le?vperm $in6,$in6,$in6,$inpperm + vxor $out4,$in4,$rndkey0 + le?vperm $in7,$in7,$in7,$inpperm + vxor $out5,$in5,$rndkey0 + vxor $out6,$in6,$rndkey0 + vxor $out7,$in7,$rndkey0 + + mtctr $rounds + b Loop_cbc_dec8x +.align 5 +Loop_cbc_dec8x: + vncipher $out0,$out0,v24 + vncipher $out1,$out1,v24 + vncipher $out2,$out2,v24 + vncipher $out3,$out3,v24 + vncipher $out4,$out4,v24 + vncipher $out5,$out5,v24 + vncipher $out6,$out6,v24 + vncipher $out7,$out7,v24 + lvx v24,$x20,$key_ # round[3] + addi 
$key_,$key_,0x20 + + vncipher $out0,$out0,v25 + vncipher $out1,$out1,v25 + vncipher $out2,$out2,v25 + vncipher $out3,$out3,v25 + vncipher $out4,$out4,v25 + vncipher $out5,$out5,v25 + vncipher $out6,$out6,v25 + vncipher $out7,$out7,v25 + lvx v25,$x10,$key_ # round[4] + bdnz Loop_cbc_dec8x + + subic $len,$len,128 # $len-=128 + vncipher $out0,$out0,v24 + vncipher $out1,$out1,v24 + vncipher $out2,$out2,v24 + vncipher $out3,$out3,v24 + vncipher $out4,$out4,v24 + vncipher $out5,$out5,v24 + vncipher $out6,$out6,v24 + vncipher $out7,$out7,v24 + + subfe. r0,r0,r0 # borrow?-1:0 + vncipher $out0,$out0,v25 + vncipher $out1,$out1,v25 + vncipher $out2,$out2,v25 + vncipher $out3,$out3,v25 + vncipher $out4,$out4,v25 + vncipher $out5,$out5,v25 + vncipher $out6,$out6,v25 + vncipher $out7,$out7,v25 + + and r0,r0,$len + vncipher $out0,$out0,v26 + vncipher $out1,$out1,v26 + vncipher $out2,$out2,v26 + vncipher $out3,$out3,v26 + vncipher $out4,$out4,v26 + vncipher $out5,$out5,v26 + vncipher $out6,$out6,v26 + vncipher $out7,$out7,v26 + + add $inp,$inp,r0 # $inp is adjusted in such + # way that at exit from the + # loop inX-in7 are loaded + # with last "words" + vncipher $out0,$out0,v27 + vncipher $out1,$out1,v27 + vncipher $out2,$out2,v27 + vncipher $out3,$out3,v27 + vncipher $out4,$out4,v27 + vncipher $out5,$out5,v27 + vncipher $out6,$out6,v27 + vncipher $out7,$out7,v27 + + addi $key_,$sp,$FRAME+15 # rewind $key_ + vncipher $out0,$out0,v28 + vncipher $out1,$out1,v28 + vncipher $out2,$out2,v28 + vncipher $out3,$out3,v28 + vncipher $out4,$out4,v28 + vncipher $out5,$out5,v28 + vncipher $out6,$out6,v28 + vncipher $out7,$out7,v28 + lvx v24,$x00,$key_ # re-pre-load round[1] + + vncipher $out0,$out0,v29 + vncipher $out1,$out1,v29 + vncipher $out2,$out2,v29 + vncipher $out3,$out3,v29 + vncipher $out4,$out4,v29 + vncipher $out5,$out5,v29 + vncipher $out6,$out6,v29 + vncipher $out7,$out7,v29 + lvx v25,$x10,$key_ # re-pre-load round[2] + + vncipher $out0,$out0,v30 + vxor $ivec,$ivec,v31 # xor with last round key + vncipher $out1,$out1,v30 + vxor $in0,$in0,v31 + vncipher $out2,$out2,v30 + vxor $in1,$in1,v31 + vncipher $out3,$out3,v30 + vxor $in2,$in2,v31 + vncipher $out4,$out4,v30 + vxor $in3,$in3,v31 + vncipher $out5,$out5,v30 + vxor $in4,$in4,v31 + vncipher $out6,$out6,v30 + vxor $in5,$in5,v31 + vncipher $out7,$out7,v30 + vxor $in6,$in6,v31 + + vncipherlast $out0,$out0,$ivec + vncipherlast $out1,$out1,$in0 + lvx_u $in0,$x00,$inp # load next input block + vncipherlast $out2,$out2,$in1 + lvx_u $in1,$x10,$inp + vncipherlast $out3,$out3,$in2 + le?vperm $in0,$in0,$in0,$inpperm + lvx_u $in2,$x20,$inp + vncipherlast $out4,$out4,$in3 + le?vperm $in1,$in1,$in1,$inpperm + lvx_u $in3,$x30,$inp + vncipherlast $out5,$out5,$in4 + le?vperm $in2,$in2,$in2,$inpperm + lvx_u $in4,$x40,$inp + vncipherlast $out6,$out6,$in5 + le?vperm $in3,$in3,$in3,$inpperm + lvx_u $in5,$x50,$inp + vncipherlast $out7,$out7,$in6 + le?vperm $in4,$in4,$in4,$inpperm + lvx_u $in6,$x60,$inp + vmr $ivec,$in7 + le?vperm $in5,$in5,$in5,$inpperm + lvx_u $in7,$x70,$inp + addi $inp,$inp,0x80 + + le?vperm $out0,$out0,$out0,$inpperm + le?vperm $out1,$out1,$out1,$inpperm + stvx_u $out0,$x00,$out + le?vperm $in6,$in6,$in6,$inpperm + vxor $out0,$in0,$rndkey0 + le?vperm $out2,$out2,$out2,$inpperm + stvx_u $out1,$x10,$out + le?vperm $in7,$in7,$in7,$inpperm + vxor $out1,$in1,$rndkey0 + le?vperm $out3,$out3,$out3,$inpperm + stvx_u $out2,$x20,$out + vxor $out2,$in2,$rndkey0 + le?vperm $out4,$out4,$out4,$inpperm + stvx_u $out3,$x30,$out + vxor $out3,$in3,$rndkey0 + le?vperm 
$out5,$out5,$out5,$inpperm + stvx_u $out4,$x40,$out + vxor $out4,$in4,$rndkey0 + le?vperm $out6,$out6,$out6,$inpperm + stvx_u $out5,$x50,$out + vxor $out5,$in5,$rndkey0 + le?vperm $out7,$out7,$out7,$inpperm + stvx_u $out6,$x60,$out + vxor $out6,$in6,$rndkey0 + stvx_u $out7,$x70,$out + addi $out,$out,0x80 + vxor $out7,$in7,$rndkey0 + + mtctr $rounds + beq Loop_cbc_dec8x # did $len-=128 borrow? + + addic. $len,$len,128 + beq Lcbc_dec8x_done + nop + nop + +Loop_cbc_dec8x_tail: # up to 7 "words" tail... + vncipher $out1,$out1,v24 + vncipher $out2,$out2,v24 + vncipher $out3,$out3,v24 + vncipher $out4,$out4,v24 + vncipher $out5,$out5,v24 + vncipher $out6,$out6,v24 + vncipher $out7,$out7,v24 + lvx v24,$x20,$key_ # round[3] + addi $key_,$key_,0x20 + + vncipher $out1,$out1,v25 + vncipher $out2,$out2,v25 + vncipher $out3,$out3,v25 + vncipher $out4,$out4,v25 + vncipher $out5,$out5,v25 + vncipher $out6,$out6,v25 + vncipher $out7,$out7,v25 + lvx v25,$x10,$key_ # round[4] + bdnz Loop_cbc_dec8x_tail + + vncipher $out1,$out1,v24 + vncipher $out2,$out2,v24 + vncipher $out3,$out3,v24 + vncipher $out4,$out4,v24 + vncipher $out5,$out5,v24 + vncipher $out6,$out6,v24 + vncipher $out7,$out7,v24 + + vncipher $out1,$out1,v25 + vncipher $out2,$out2,v25 + vncipher $out3,$out3,v25 + vncipher $out4,$out4,v25 + vncipher $out5,$out5,v25 + vncipher $out6,$out6,v25 + vncipher $out7,$out7,v25 + + vncipher $out1,$out1,v26 + vncipher $out2,$out2,v26 + vncipher $out3,$out3,v26 + vncipher $out4,$out4,v26 + vncipher $out5,$out5,v26 + vncipher $out6,$out6,v26 + vncipher $out7,$out7,v26 + + vncipher $out1,$out1,v27 + vncipher $out2,$out2,v27 + vncipher $out3,$out3,v27 + vncipher $out4,$out4,v27 + vncipher $out5,$out5,v27 + vncipher $out6,$out6,v27 + vncipher $out7,$out7,v27 + + vncipher $out1,$out1,v28 + vncipher $out2,$out2,v28 + vncipher $out3,$out3,v28 + vncipher $out4,$out4,v28 + vncipher $out5,$out5,v28 + vncipher $out6,$out6,v28 + vncipher $out7,$out7,v28 + + vncipher $out1,$out1,v29 + vncipher $out2,$out2,v29 + vncipher $out3,$out3,v29 + vncipher $out4,$out4,v29 + vncipher $out5,$out5,v29 + vncipher $out6,$out6,v29 + vncipher $out7,$out7,v29 + + vncipher $out1,$out1,v30 + vxor $ivec,$ivec,v31 # last round key + vncipher $out2,$out2,v30 + vxor $in1,$in1,v31 + vncipher $out3,$out3,v30 + vxor $in2,$in2,v31 + vncipher $out4,$out4,v30 + vxor $in3,$in3,v31 + vncipher $out5,$out5,v30 + vxor $in4,$in4,v31 + vncipher $out6,$out6,v30 + vxor $in5,$in5,v31 + vncipher $out7,$out7,v30 + vxor $in6,$in6,v31 + + cmplwi $len,32 # switch($len) + blt Lcbc_dec8x_one + nop + beq Lcbc_dec8x_two + cmplwi $len,64 + blt Lcbc_dec8x_three + nop + beq Lcbc_dec8x_four + cmplwi $len,96 + blt Lcbc_dec8x_five + nop + beq Lcbc_dec8x_six + +Lcbc_dec8x_seven: + vncipherlast $out1,$out1,$ivec + vncipherlast $out2,$out2,$in1 + vncipherlast $out3,$out3,$in2 + vncipherlast $out4,$out4,$in3 + vncipherlast $out5,$out5,$in4 + vncipherlast $out6,$out6,$in5 + vncipherlast $out7,$out7,$in6 + vmr $ivec,$in7 + + le?vperm $out1,$out1,$out1,$inpperm + le?vperm $out2,$out2,$out2,$inpperm + stvx_u $out1,$x00,$out + le?vperm $out3,$out3,$out3,$inpperm + stvx_u $out2,$x10,$out + le?vperm $out4,$out4,$out4,$inpperm + stvx_u $out3,$x20,$out + le?vperm $out5,$out5,$out5,$inpperm + stvx_u $out4,$x30,$out + le?vperm $out6,$out6,$out6,$inpperm + stvx_u $out5,$x40,$out + le?vperm $out7,$out7,$out7,$inpperm + stvx_u $out6,$x50,$out + stvx_u $out7,$x60,$out + addi $out,$out,0x70 + b Lcbc_dec8x_done + +.align 5 +Lcbc_dec8x_six: + vncipherlast $out2,$out2,$ivec + vncipherlast 
$out3,$out3,$in2 + vncipherlast $out4,$out4,$in3 + vncipherlast $out5,$out5,$in4 + vncipherlast $out6,$out6,$in5 + vncipherlast $out7,$out7,$in6 + vmr $ivec,$in7 + + le?vperm $out2,$out2,$out2,$inpperm + le?vperm $out3,$out3,$out3,$inpperm + stvx_u $out2,$x00,$out + le?vperm $out4,$out4,$out4,$inpperm + stvx_u $out3,$x10,$out + le?vperm $out5,$out5,$out5,$inpperm + stvx_u $out4,$x20,$out + le?vperm $out6,$out6,$out6,$inpperm + stvx_u $out5,$x30,$out + le?vperm $out7,$out7,$out7,$inpperm + stvx_u $out6,$x40,$out + stvx_u $out7,$x50,$out + addi $out,$out,0x60 + b Lcbc_dec8x_done + +.align 5 +Lcbc_dec8x_five: + vncipherlast $out3,$out3,$ivec + vncipherlast $out4,$out4,$in3 + vncipherlast $out5,$out5,$in4 + vncipherlast $out6,$out6,$in5 + vncipherlast $out7,$out7,$in6 + vmr $ivec,$in7 + + le?vperm $out3,$out3,$out3,$inpperm + le?vperm $out4,$out4,$out4,$inpperm + stvx_u $out3,$x00,$out + le?vperm $out5,$out5,$out5,$inpperm + stvx_u $out4,$x10,$out + le?vperm $out6,$out6,$out6,$inpperm + stvx_u $out5,$x20,$out + le?vperm $out7,$out7,$out7,$inpperm + stvx_u $out6,$x30,$out + stvx_u $out7,$x40,$out + addi $out,$out,0x50 + b Lcbc_dec8x_done + +.align 5 +Lcbc_dec8x_four: + vncipherlast $out4,$out4,$ivec + vncipherlast $out5,$out5,$in4 + vncipherlast $out6,$out6,$in5 + vncipherlast $out7,$out7,$in6 + vmr $ivec,$in7 + + le?vperm $out4,$out4,$out4,$inpperm + le?vperm $out5,$out5,$out5,$inpperm + stvx_u $out4,$x00,$out + le?vperm $out6,$out6,$out6,$inpperm + stvx_u $out5,$x10,$out + le?vperm $out7,$out7,$out7,$inpperm + stvx_u $out6,$x20,$out + stvx_u $out7,$x30,$out + addi $out,$out,0x40 + b Lcbc_dec8x_done + +.align 5 +Lcbc_dec8x_three: + vncipherlast $out5,$out5,$ivec + vncipherlast $out6,$out6,$in5 + vncipherlast $out7,$out7,$in6 + vmr $ivec,$in7 + + le?vperm $out5,$out5,$out5,$inpperm + le?vperm $out6,$out6,$out6,$inpperm + stvx_u $out5,$x00,$out + le?vperm $out7,$out7,$out7,$inpperm + stvx_u $out6,$x10,$out + stvx_u $out7,$x20,$out + addi $out,$out,0x30 + b Lcbc_dec8x_done + +.align 5 +Lcbc_dec8x_two: + vncipherlast $out6,$out6,$ivec + vncipherlast $out7,$out7,$in6 + vmr $ivec,$in7 + + le?vperm $out6,$out6,$out6,$inpperm + le?vperm $out7,$out7,$out7,$inpperm + stvx_u $out6,$x00,$out + stvx_u $out7,$x10,$out + addi $out,$out,0x20 + b Lcbc_dec8x_done + +.align 5 +Lcbc_dec8x_one: + vncipherlast $out7,$out7,$ivec + vmr $ivec,$in7 + + le?vperm $out7,$out7,$out7,$inpperm + stvx_u $out7,0,$out + addi $out,$out,0x10 + +Lcbc_dec8x_done: + le?vperm $ivec,$ivec,$ivec,$inpperm + stvx_u $ivec,0,$ivp # write [unaligned] iv + + li r10,`$FRAME+15` + li r11,`$FRAME+31` + stvx $inpperm,r10,$sp # wipe copies of round keys + addi r10,r10,32 + stvx $inpperm,r11,$sp + addi r11,r11,32 + stvx $inpperm,r10,$sp + addi r10,r10,32 + stvx $inpperm,r11,$sp + addi r11,r11,32 + stvx $inpperm,r10,$sp + addi r10,r10,32 + stvx $inpperm,r11,$sp + addi r11,r11,32 + stvx $inpperm,r10,$sp + addi r10,r10,32 + stvx $inpperm,r11,$sp + addi r11,r11,32 + + mtspr 256,$vrsave + lvx v20,r10,$sp # ABI says so + addi r10,r10,32 + lvx v21,r11,$sp + addi r11,r11,32 + lvx v22,r10,$sp + addi r10,r10,32 + lvx v23,r11,$sp + addi r11,r11,32 + lvx v24,r10,$sp + addi r10,r10,32 + lvx v25,r11,$sp + addi r11,r11,32 + lvx v26,r10,$sp + addi r10,r10,32 + lvx v27,r11,$sp + addi r11,r11,32 + lvx v28,r10,$sp + addi r10,r10,32 + lvx v29,r11,$sp + addi r11,r11,32 + lvx v30,r10,$sp + lvx v31,r11,$sp + $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp) + $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp) + $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp) + $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp) + 
$POP r30,`$FRAME+21*16+4*$SIZE_T`($sp) + $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp) + addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T` + blr + .long 0 + .byte 0,12,0x14,0,0x80,6,6,0 + .long 0 +.size .${prefix}_cbc_encrypt,.-.${prefix}_cbc_encrypt +___ +}} }}} + +######################################################################### +{{{ # CTR procedure[s] # +my ($inp,$out,$len,$key,$ivp,$x10,$rounds,$idx)=map("r$_",(3..10)); +my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3)); +my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm,$one)= + map("v$_",(4..11)); +my $dat=$tmp; + +$code.=<<___; +.globl .${prefix}_ctr32_encrypt_blocks + ${UCMP}i $len,1 + bltlr- + + lis r0,0xfff0 + mfspr $vrsave,256 + mtspr 256,r0 + + li $idx,15 + vxor $rndkey0,$rndkey0,$rndkey0 + le?vspltisb $tmp,0x0f + + lvx $ivec,0,$ivp # load [unaligned] iv + lvsl $inpperm,0,$ivp + lvx $inptail,$idx,$ivp + vspltisb $one,1 + le?vxor $inpperm,$inpperm,$tmp + vperm $ivec,$ivec,$inptail,$inpperm + vsldoi $one,$rndkey0,$one,1 + + neg r11,$inp + ?lvsl $keyperm,0,$key # prepare for unaligned key + lwz $rounds,240($key) + + lvsr $inpperm,0,r11 # prepare for unaligned load + lvx $inptail,0,$inp + addi $inp,$inp,15 # 15 is not typo + le?vxor $inpperm,$inpperm,$tmp + + srwi $rounds,$rounds,1 + li $idx,16 + subi $rounds,$rounds,1 + + ${UCMP}i $len,8 + bge _aesp8_ctr32_encrypt8x + + ?lvsr $outperm,0,$out # prepare for unaligned store + vspltisb $outmask,-1 + lvx $outhead,0,$out + ?vperm $outmask,$rndkey0,$outmask,$outperm + le?vxor $outperm,$outperm,$tmp + + lvx $rndkey0,0,$key + mtctr $rounds + lvx $rndkey1,$idx,$key + addi $idx,$idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vxor $inout,$ivec,$rndkey0 + lvx $rndkey0,$idx,$key + addi $idx,$idx,16 + b Loop_ctr32_enc + +.align 5 +Loop_ctr32_enc: + ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm + vcipher $inout,$inout,$rndkey1 + lvx $rndkey1,$idx,$key + addi $idx,$idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vcipher $inout,$inout,$rndkey0 + lvx $rndkey0,$idx,$key + addi $idx,$idx,16 + bdnz Loop_ctr32_enc + + vadduwm $ivec,$ivec,$one + vmr $dat,$inptail + lvx $inptail,0,$inp + addi $inp,$inp,16 + subic. 
$len,$len,1 # blocks-- + + ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm + vcipher $inout,$inout,$rndkey1 + lvx $rndkey1,$idx,$key + vperm $dat,$dat,$inptail,$inpperm + li $idx,16 + ?vperm $rndkey1,$rndkey0,$rndkey1,$keyperm + lvx $rndkey0,0,$key + vxor $dat,$dat,$rndkey1 # last round key + vcipherlast $inout,$inout,$dat + + lvx $rndkey1,$idx,$key + addi $idx,$idx,16 + vperm $inout,$inout,$inout,$outperm + vsel $dat,$outhead,$inout,$outmask + mtctr $rounds + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vmr $outhead,$inout + vxor $inout,$ivec,$rndkey0 + lvx $rndkey0,$idx,$key + addi $idx,$idx,16 + stvx $dat,0,$out + addi $out,$out,16 + bne Loop_ctr32_enc + + addi $out,$out,-1 + lvx $inout,0,$out # redundant in aligned case + vsel $inout,$outhead,$inout,$outmask + stvx $inout,0,$out + + mtspr 256,$vrsave + blr + .long 0 + .byte 0,12,0x14,0,0,0,6,0 + .long 0 +___ +######################################################################### +{{ # Optimized CTR procedure # +my $key_="r11"; +my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31)); +my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10,12..14)); +my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(15..22)); +my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys + # v26-v31 last 6 round keys +my ($tmp,$keyperm)=($in3,$in4); # aliases with "caller", redundant assignment +my ($two,$three,$four)=($outhead,$outperm,$outmask); + +$code.=<<___; +.align 5 +_aesp8_ctr32_encrypt8x: + $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp) + li r10,`$FRAME+8*16+15` + li r11,`$FRAME+8*16+31` + stvx v20,r10,$sp # ABI says so + addi r10,r10,32 + stvx v21,r11,$sp + addi r11,r11,32 + stvx v22,r10,$sp + addi r10,r10,32 + stvx v23,r11,$sp + addi r11,r11,32 + stvx v24,r10,$sp + addi r10,r10,32 + stvx v25,r11,$sp + addi r11,r11,32 + stvx v26,r10,$sp + addi r10,r10,32 + stvx v27,r11,$sp + addi r11,r11,32 + stvx v28,r10,$sp + addi r10,r10,32 + stvx v29,r11,$sp + addi r11,r11,32 + stvx v30,r10,$sp + stvx v31,r11,$sp + li r0,-1 + stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave + li $x10,0x10 + $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp) + li $x20,0x20 + $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp) + li $x30,0x30 + $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp) + li $x40,0x40 + $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp) + li $x50,0x50 + $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp) + li $x60,0x60 + $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp) + li $x70,0x70 + mtspr 256,r0 + + subi $rounds,$rounds,3 # -4 in total + + lvx $rndkey0,$x00,$key # load key schedule + lvx v30,$x10,$key + addi $key,$key,0x20 + lvx v31,$x00,$key + ?vperm $rndkey0,$rndkey0,v30,$keyperm + addi $key_,$sp,$FRAME+15 + mtctr $rounds + +Load_ctr32_enc_key: + ?vperm v24,v30,v31,$keyperm + lvx v30,$x10,$key + addi $key,$key,0x20 + stvx v24,$x00,$key_ # off-load round[1] + ?vperm v25,v31,v30,$keyperm + lvx v31,$x00,$key + stvx v25,$x10,$key_ # off-load round[2] + addi $key_,$key_,0x20 + bdnz Load_ctr32_enc_key + + lvx v26,$x10,$key + ?vperm v24,v30,v31,$keyperm + lvx v27,$x20,$key + stvx v24,$x00,$key_ # off-load round[3] + ?vperm v25,v31,v26,$keyperm + lvx v28,$x30,$key + stvx v25,$x10,$key_ # off-load round[4] + addi $key_,$sp,$FRAME+15 # rewind $key_ + ?vperm v26,v26,v27,$keyperm + lvx v29,$x40,$key + ?vperm v27,v27,v28,$keyperm + lvx v30,$x50,$key + ?vperm v28,v28,v29,$keyperm + lvx v31,$x60,$key + ?vperm v29,v29,v30,$keyperm + lvx $out0,$x70,$key # borrow $out0 + ?vperm v30,v30,v31,$keyperm + lvx v24,$x00,$key_ # pre-load round[1] + ?vperm v31,v31,$out0,$keyperm + lvx 
v25,$x10,$key_ # pre-load round[2] + + vadduwm $two,$one,$one + subi $inp,$inp,15 # undo "caller" + $SHL $len,$len,4 + + vadduwm $out1,$ivec,$one # counter values ... + vadduwm $out2,$ivec,$two + vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0] + le?li $idx,8 + vadduwm $out3,$out1,$two + vxor $out1,$out1,$rndkey0 + le?lvsl $inpperm,0,$idx + vadduwm $out4,$out2,$two + vxor $out2,$out2,$rndkey0 + le?vspltisb $tmp,0x0f + vadduwm $out5,$out3,$two + vxor $out3,$out3,$rndkey0 + le?vxor $inpperm,$inpperm,$tmp # transform for lvx_u/stvx_u + vadduwm $out6,$out4,$two + vxor $out4,$out4,$rndkey0 + vadduwm $out7,$out5,$two + vxor $out5,$out5,$rndkey0 + vadduwm $ivec,$out6,$two # next counter value + vxor $out6,$out6,$rndkey0 + vxor $out7,$out7,$rndkey0 + + mtctr $rounds + b Loop_ctr32_enc8x +.align 5 +Loop_ctr32_enc8x: + vcipher $out0,$out0,v24 + vcipher $out1,$out1,v24 + vcipher $out2,$out2,v24 + vcipher $out3,$out3,v24 + vcipher $out4,$out4,v24 + vcipher $out5,$out5,v24 + vcipher $out6,$out6,v24 + vcipher $out7,$out7,v24 +Loop_ctr32_enc8x_middle: + lvx v24,$x20,$key_ # round[3] + addi $key_,$key_,0x20 + + vcipher $out0,$out0,v25 + vcipher $out1,$out1,v25 + vcipher $out2,$out2,v25 + vcipher $out3,$out3,v25 + vcipher $out4,$out4,v25 + vcipher $out5,$out5,v25 + vcipher $out6,$out6,v25 + vcipher $out7,$out7,v25 + lvx v25,$x10,$key_ # round[4] + bdnz Loop_ctr32_enc8x + + subic r11,$len,256 # $len-256, borrow $key_ + vcipher $out0,$out0,v24 + vcipher $out1,$out1,v24 + vcipher $out2,$out2,v24 + vcipher $out3,$out3,v24 + vcipher $out4,$out4,v24 + vcipher $out5,$out5,v24 + vcipher $out6,$out6,v24 + vcipher $out7,$out7,v24 + + subfe r0,r0,r0 # borrow?-1:0 + vcipher $out0,$out0,v25 + vcipher $out1,$out1,v25 + vcipher $out2,$out2,v25 + vcipher $out3,$out3,v25 + vcipher $out4,$out4,v25 + vcipher $out5,$out5,v25 + vcipher $out6,$out6,v25 + vcipher $out7,$out7,v25 + + and r0,r0,r11 + addi $key_,$sp,$FRAME+15 # rewind $key_ + vcipher $out0,$out0,v26 + vcipher $out1,$out1,v26 + vcipher $out2,$out2,v26 + vcipher $out3,$out3,v26 + vcipher $out4,$out4,v26 + vcipher $out5,$out5,v26 + vcipher $out6,$out6,v26 + vcipher $out7,$out7,v26 + lvx v24,$x00,$key_ # re-pre-load round[1] + + subic $len,$len,129 # $len-=129 + vcipher $out0,$out0,v27 + addi $len,$len,1 # $len-=128 really + vcipher $out1,$out1,v27 + vcipher $out2,$out2,v27 + vcipher $out3,$out3,v27 + vcipher $out4,$out4,v27 + vcipher $out5,$out5,v27 + vcipher $out6,$out6,v27 + vcipher $out7,$out7,v27 + lvx v25,$x10,$key_ # re-pre-load round[2] + + vcipher $out0,$out0,v28 + lvx_u $in0,$x00,$inp # load input + vcipher $out1,$out1,v28 + lvx_u $in1,$x10,$inp + vcipher $out2,$out2,v28 + lvx_u $in2,$x20,$inp + vcipher $out3,$out3,v28 + lvx_u $in3,$x30,$inp + vcipher $out4,$out4,v28 + lvx_u $in4,$x40,$inp + vcipher $out5,$out5,v28 + lvx_u $in5,$x50,$inp + vcipher $out6,$out6,v28 + lvx_u $in6,$x60,$inp + vcipher $out7,$out7,v28 + lvx_u $in7,$x70,$inp + addi $inp,$inp,0x80 + + vcipher $out0,$out0,v29 + le?vperm $in0,$in0,$in0,$inpperm + vcipher $out1,$out1,v29 + le?vperm $in1,$in1,$in1,$inpperm + vcipher $out2,$out2,v29 + le?vperm $in2,$in2,$in2,$inpperm + vcipher $out3,$out3,v29 + le?vperm $in3,$in3,$in3,$inpperm + vcipher $out4,$out4,v29 + le?vperm $in4,$in4,$in4,$inpperm + vcipher $out5,$out5,v29 + le?vperm $in5,$in5,$in5,$inpperm + vcipher $out6,$out6,v29 + le?vperm $in6,$in6,$in6,$inpperm + vcipher $out7,$out7,v29 + le?vperm $in7,$in7,$in7,$inpperm + + add $inp,$inp,r0 # $inp is adjusted in such + # way that at exit from the + # loop inX-in7 are loaded + # with last 
"words" + subfe. r0,r0,r0 # borrow?-1:0 + vcipher $out0,$out0,v30 + vxor $in0,$in0,v31 # xor with last round key + vcipher $out1,$out1,v30 + vxor $in1,$in1,v31 + vcipher $out2,$out2,v30 + vxor $in2,$in2,v31 + vcipher $out3,$out3,v30 + vxor $in3,$in3,v31 + vcipher $out4,$out4,v30 + vxor $in4,$in4,v31 + vcipher $out5,$out5,v30 + vxor $in5,$in5,v31 + vcipher $out6,$out6,v30 + vxor $in6,$in6,v31 + vcipher $out7,$out7,v30 + vxor $in7,$in7,v31 + + bne Lctr32_enc8x_break # did $len-129 borrow? + + vcipherlast $in0,$out0,$in0 + vcipherlast $in1,$out1,$in1 + vadduwm $out1,$ivec,$one # counter values ... + vcipherlast $in2,$out2,$in2 + vadduwm $out2,$ivec,$two + vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0] + vcipherlast $in3,$out3,$in3 + vadduwm $out3,$out1,$two + vxor $out1,$out1,$rndkey0 + vcipherlast $in4,$out4,$in4 + vadduwm $out4,$out2,$two + vxor $out2,$out2,$rndkey0 + vcipherlast $in5,$out5,$in5 + vadduwm $out5,$out3,$two + vxor $out3,$out3,$rndkey0 + vcipherlast $in6,$out6,$in6 + vadduwm $out6,$out4,$two + vxor $out4,$out4,$rndkey0 + vcipherlast $in7,$out7,$in7 + vadduwm $out7,$out5,$two + vxor $out5,$out5,$rndkey0 + le?vperm $in0,$in0,$in0,$inpperm + vadduwm $ivec,$out6,$two # next counter value + vxor $out6,$out6,$rndkey0 + le?vperm $in1,$in1,$in1,$inpperm + vxor $out7,$out7,$rndkey0 + mtctr $rounds + + vcipher $out0,$out0,v24 + stvx_u $in0,$x00,$out + le?vperm $in2,$in2,$in2,$inpperm + vcipher $out1,$out1,v24 + stvx_u $in1,$x10,$out + le?vperm $in3,$in3,$in3,$inpperm + vcipher $out2,$out2,v24 + stvx_u $in2,$x20,$out + le?vperm $in4,$in4,$in4,$inpperm + vcipher $out3,$out3,v24 + stvx_u $in3,$x30,$out + le?vperm $in5,$in5,$in5,$inpperm + vcipher $out4,$out4,v24 + stvx_u $in4,$x40,$out + le?vperm $in6,$in6,$in6,$inpperm + vcipher $out5,$out5,v24 + stvx_u $in5,$x50,$out + le?vperm $in7,$in7,$in7,$inpperm + vcipher $out6,$out6,v24 + stvx_u $in6,$x60,$out + vcipher $out7,$out7,v24 + stvx_u $in7,$x70,$out + addi $out,$out,0x80 + + b Loop_ctr32_enc8x_middle + +.align 5 +Lctr32_enc8x_break: + cmpwi $len,-0x60 + blt Lctr32_enc8x_one + nop + beq Lctr32_enc8x_two + cmpwi $len,-0x40 + blt Lctr32_enc8x_three + nop + beq Lctr32_enc8x_four + cmpwi $len,-0x20 + blt Lctr32_enc8x_five + nop + beq Lctr32_enc8x_six + cmpwi $len,0x00 + blt Lctr32_enc8x_seven + +Lctr32_enc8x_eight: + vcipherlast $out0,$out0,$in0 + vcipherlast $out1,$out1,$in1 + vcipherlast $out2,$out2,$in2 + vcipherlast $out3,$out3,$in3 + vcipherlast $out4,$out4,$in4 + vcipherlast $out5,$out5,$in5 + vcipherlast $out6,$out6,$in6 + vcipherlast $out7,$out7,$in7 + + le?vperm $out0,$out0,$out0,$inpperm + le?vperm $out1,$out1,$out1,$inpperm + stvx_u $out0,$x00,$out + le?vperm $out2,$out2,$out2,$inpperm + stvx_u $out1,$x10,$out + le?vperm $out3,$out3,$out3,$inpperm + stvx_u $out2,$x20,$out + le?vperm $out4,$out4,$out4,$inpperm + stvx_u $out3,$x30,$out + le?vperm $out5,$out5,$out5,$inpperm + stvx_u $out4,$x40,$out + le?vperm $out6,$out6,$out6,$inpperm + stvx_u $out5,$x50,$out + le?vperm $out7,$out7,$out7,$inpperm + stvx_u $out6,$x60,$out + stvx_u $out7,$x70,$out + addi $out,$out,0x80 + b Lctr32_enc8x_done + +.align 5 +Lctr32_enc8x_seven: + vcipherlast $out0,$out0,$in1 + vcipherlast $out1,$out1,$in2 + vcipherlast $out2,$out2,$in3 + vcipherlast $out3,$out3,$in4 + vcipherlast $out4,$out4,$in5 + vcipherlast $out5,$out5,$in6 + vcipherlast $out6,$out6,$in7 + + le?vperm $out0,$out0,$out0,$inpperm + le?vperm $out1,$out1,$out1,$inpperm + stvx_u $out0,$x00,$out + le?vperm $out2,$out2,$out2,$inpperm + stvx_u $out1,$x10,$out + le?vperm 
$out3,$out3,$out3,$inpperm + stvx_u $out2,$x20,$out + le?vperm $out4,$out4,$out4,$inpperm + stvx_u $out3,$x30,$out + le?vperm $out5,$out5,$out5,$inpperm + stvx_u $out4,$x40,$out + le?vperm $out6,$out6,$out6,$inpperm + stvx_u $out5,$x50,$out + stvx_u $out6,$x60,$out + addi $out,$out,0x70 + b Lctr32_enc8x_done + +.align 5 +Lctr32_enc8x_six: + vcipherlast $out0,$out0,$in2 + vcipherlast $out1,$out1,$in3 + vcipherlast $out2,$out2,$in4 + vcipherlast $out3,$out3,$in5 + vcipherlast $out4,$out4,$in6 + vcipherlast $out5,$out5,$in7 + + le?vperm $out0,$out0,$out0,$inpperm + le?vperm $out1,$out1,$out1,$inpperm + stvx_u $out0,$x00,$out + le?vperm $out2,$out2,$out2,$inpperm + stvx_u $out1,$x10,$out + le?vperm $out3,$out3,$out3,$inpperm + stvx_u $out2,$x20,$out + le?vperm $out4,$out4,$out4,$inpperm + stvx_u $out3,$x30,$out + le?vperm $out5,$out5,$out5,$inpperm + stvx_u $out4,$x40,$out + stvx_u $out5,$x50,$out + addi $out,$out,0x60 + b Lctr32_enc8x_done + +.align 5 +Lctr32_enc8x_five: + vcipherlast $out0,$out0,$in3 + vcipherlast $out1,$out1,$in4 + vcipherlast $out2,$out2,$in5 + vcipherlast $out3,$out3,$in6 + vcipherlast $out4,$out4,$in7 + + le?vperm $out0,$out0,$out0,$inpperm + le?vperm $out1,$out1,$out1,$inpperm + stvx_u $out0,$x00,$out + le?vperm $out2,$out2,$out2,$inpperm + stvx_u $out1,$x10,$out + le?vperm $out3,$out3,$out3,$inpperm + stvx_u $out2,$x20,$out + le?vperm $out4,$out4,$out4,$inpperm + stvx_u $out3,$x30,$out + stvx_u $out4,$x40,$out + addi $out,$out,0x50 + b Lctr32_enc8x_done + +.align 5 +Lctr32_enc8x_four: + vcipherlast $out0,$out0,$in4 + vcipherlast $out1,$out1,$in5 + vcipherlast $out2,$out2,$in6 + vcipherlast $out3,$out3,$in7 + + le?vperm $out0,$out0,$out0,$inpperm + le?vperm $out1,$out1,$out1,$inpperm + stvx_u $out0,$x00,$out + le?vperm $out2,$out2,$out2,$inpperm + stvx_u $out1,$x10,$out + le?vperm $out3,$out3,$out3,$inpperm + stvx_u $out2,$x20,$out + stvx_u $out3,$x30,$out + addi $out,$out,0x40 + b Lctr32_enc8x_done + +.align 5 +Lctr32_enc8x_three: + vcipherlast $out0,$out0,$in5 + vcipherlast $out1,$out1,$in6 + vcipherlast $out2,$out2,$in7 + + le?vperm $out0,$out0,$out0,$inpperm + le?vperm $out1,$out1,$out1,$inpperm + stvx_u $out0,$x00,$out + le?vperm $out2,$out2,$out2,$inpperm + stvx_u $out1,$x10,$out + stvx_u $out2,$x20,$out + addi $out,$out,0x30 + b Lcbc_dec8x_done + +.align 5 +Lctr32_enc8x_two: + vcipherlast $out0,$out0,$in6 + vcipherlast $out1,$out1,$in7 + + le?vperm $out0,$out0,$out0,$inpperm + le?vperm $out1,$out1,$out1,$inpperm + stvx_u $out0,$x00,$out + stvx_u $out1,$x10,$out + addi $out,$out,0x20 + b Lcbc_dec8x_done + +.align 5 +Lctr32_enc8x_one: + vcipherlast $out0,$out0,$in7 + + le?vperm $out0,$out0,$out0,$inpperm + stvx_u $out0,0,$out + addi $out,$out,0x10 + +Lctr32_enc8x_done: + li r10,`$FRAME+15` + li r11,`$FRAME+31` + stvx $inpperm,r10,$sp # wipe copies of round keys + addi r10,r10,32 + stvx $inpperm,r11,$sp + addi r11,r11,32 + stvx $inpperm,r10,$sp + addi r10,r10,32 + stvx $inpperm,r11,$sp + addi r11,r11,32 + stvx $inpperm,r10,$sp + addi r10,r10,32 + stvx $inpperm,r11,$sp + addi r11,r11,32 + stvx $inpperm,r10,$sp + addi r10,r10,32 + stvx $inpperm,r11,$sp + addi r11,r11,32 + + mtspr 256,$vrsave + lvx v20,r10,$sp # ABI says so + addi r10,r10,32 + lvx v21,r11,$sp + addi r11,r11,32 + lvx v22,r10,$sp + addi r10,r10,32 + lvx v23,r11,$sp + addi r11,r11,32 + lvx v24,r10,$sp + addi r10,r10,32 + lvx v25,r11,$sp + addi r11,r11,32 + lvx v26,r10,$sp + addi r10,r10,32 + lvx v27,r11,$sp + addi r11,r11,32 + lvx v28,r10,$sp + addi r10,r10,32 + lvx v29,r11,$sp + addi r11,r11,32 + lvx 
v30,r10,$sp + lvx v31,r11,$sp + $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp) + $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp) + $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp) + $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp) + $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp) + $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp) + addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T` + blr + .long 0 + .byte 0,12,0x14,0,0x80,6,6,0 + .long 0 +.size .${prefix}_ctr32_encrypt_blocks,.-.${prefix}_ctr32_encrypt_blocks +___ +}} }}} + +my $consts=1; +foreach(split("\n",$code)) { + s/\`([^\`]*)\`/eval($1)/geo; + + # constants table endian-specific conversion + if ($consts && m/\.(long|byte)\s+(.+)\s+(\?[a-z]*)$/o) { + my $conv=$3; + my @bytes=(); + + # convert to endian-agnostic format + if ($1 eq "long") { + foreach (split(/,\s*/,$2)) { + my $l = /^0/?oct:int; + push @bytes,($l>>24)&0xff,($l>>16)&0xff,($l>>8)&0xff,$l&0xff; + } + } else { + @bytes = map(/^0/?oct:int,split(/,\s*/,$2)); + } + + # little-endian conversion + if ($flavour =~ /le$/o) { + SWITCH: for($conv) { + /\?inv/ && do { @bytes=map($_^0xf,@bytes); last; }; + /\?rev/ && do { @bytes=reverse(@bytes); last; }; + } + } + + #emit + print ".byte\t",join(',',map (sprintf("0x%02x",$_),@bytes)),"\n"; + next; + } + $consts=0 if (m/Lconsts:/o); # end of table + + # instructions prefixed with '?' are endian-specific and need + # to be adjusted accordingly... + if ($flavour =~ /le$/o) { # little-endian + s/le\?//o or + s/be\?/#be#/o or + s/\?lvsr/lvsl/o or + s/\?lvsl/lvsr/o or + s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/o or + s/\?(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/o or + s/\?(vspltw\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9])/$1$2 3-$3/o; + } else { # big-endian + s/le\?/#le#/o or + s/be\?//o or + s/\?([a-z]+)/$1/o; + } + + print $_,"\n"; +} + +close STDOUT; diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c new file mode 100644 index 000000000..d0ffe277a --- /dev/null +++ b/drivers/crypto/vmx/ghash.c @@ -0,0 +1,214 @@ +/** + * GHASH routines supporting VMX instructions on the Power 8 + * + * Copyright (C) 2015 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com> + */ + +#include <linux/types.h> +#include <linux/err.h> +#include <linux/crypto.h> +#include <linux/delay.h> +#include <linux/hardirq.h> +#include <asm/switch_to.h> +#include <crypto/aes.h> +#include <crypto/scatterwalk.h> +#include <crypto/internal/hash.h> +#include <crypto/b128ops.h> + +#define IN_INTERRUPT in_interrupt() + +#define GHASH_BLOCK_SIZE (16) +#define GHASH_DIGEST_SIZE (16) +#define GHASH_KEY_LEN (16) + +void gcm_init_p8(u128 htable[16], const u64 Xi[2]); +void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); +void gcm_ghash_p8(u64 Xi[2], const u128 htable[16], + const u8 *in,size_t len); + +struct p8_ghash_ctx { + u128 htable[16]; + struct crypto_shash *fallback; +}; + +struct p8_ghash_desc_ctx { + u64 shash[2]; + u8 buffer[GHASH_DIGEST_SIZE]; + int bytes; + struct shash_desc fallback_desc; +}; + +static int p8_ghash_init_tfm(struct crypto_tfm *tfm) +{ + const char *alg; + struct crypto_shash *fallback; + struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!(alg = crypto_tfm_alg_name(tfm))) { + printk(KERN_ERR "Failed to get algorithm name.\n"); + return -ENOENT; + } + + fallback = crypto_alloc_shash(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", + alg, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + printk(KERN_INFO "Using '%s' as fallback implementation.\n", + crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback))); + + crypto_shash_set_flags(fallback, + crypto_shash_get_flags((struct crypto_shash *) tfm)); + ctx->fallback = fallback; + + shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx) + + crypto_shash_descsize(fallback); + + return 0; +} + +static void p8_ghash_exit_tfm(struct crypto_tfm *tfm) +{ + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->fallback) { + crypto_free_shash(ctx->fallback); + ctx->fallback = NULL; + } +} + +static int p8_ghash_init(struct shash_desc *desc) +{ + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); + struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); + + dctx->bytes = 0; + memset(dctx->shash, 0, GHASH_DIGEST_SIZE); + dctx->fallback_desc.tfm = ctx->fallback; + dctx->fallback_desc.flags = desc->flags; + return crypto_shash_init(&dctx->fallback_desc); +} + +static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen) +{ + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm)); + + if (keylen != GHASH_KEY_LEN) + return -EINVAL; + + pagefault_disable(); + enable_kernel_altivec(); + enable_kernel_fp(); + gcm_init_p8(ctx->htable, (const u64 *) key); + pagefault_enable(); + return crypto_shash_setkey(ctx->fallback, key, keylen); +} + +static int p8_ghash_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen) +{ + unsigned int len; + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); + struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); + + if (IN_INTERRUPT) { + return crypto_shash_update(&dctx->fallback_desc, src, srclen); + } else { + if (dctx->bytes) { + if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { + memcpy(dctx->buffer + dctx->bytes, src, srclen); + dctx->bytes += srclen; + return 0; + } + memcpy(dctx->buffer + dctx->bytes, src, + GHASH_DIGEST_SIZE - dctx->bytes); + pagefault_disable(); + enable_kernel_altivec(); + enable_kernel_fp(); + gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, + 
GHASH_DIGEST_SIZE); + pagefault_enable(); + src += GHASH_DIGEST_SIZE - dctx->bytes; + srclen -= GHASH_DIGEST_SIZE - dctx->bytes; + dctx->bytes = 0; + } + len = srclen & ~(GHASH_DIGEST_SIZE - 1); + if (len) { + pagefault_disable(); + enable_kernel_altivec(); + enable_kernel_fp(); + gcm_ghash_p8(dctx->shash, ctx->htable, src, len); + pagefault_enable(); + src += len; + srclen -= len; + } + if (srclen) { + memcpy(dctx->buffer, src, srclen); + dctx->bytes = srclen; + } + return 0; + } +} + +static int p8_ghash_final(struct shash_desc *desc, u8 *out) +{ + int i; + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); + struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); + + if (IN_INTERRUPT) { + return crypto_shash_final(&dctx->fallback_desc, out); + } else { + if (dctx->bytes) { + for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) + dctx->buffer[i] = 0; + pagefault_disable(); + enable_kernel_altivec(); + enable_kernel_fp(); + gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, + GHASH_DIGEST_SIZE); + pagefault_enable(); + dctx->bytes = 0; + } + memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); + return 0; + } +} + +struct shash_alg p8_ghash_alg = { + .digestsize = GHASH_DIGEST_SIZE, + .init = p8_ghash_init, + .update = p8_ghash_update, + .final = p8_ghash_final, + .setkey = p8_ghash_setkey, + .descsize = sizeof(struct p8_ghash_desc_ctx), + .base = { + .cra_name = "ghash", + .cra_driver_name = "p8_ghash", + .cra_priority = 1000, + .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = GHASH_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct p8_ghash_ctx), + .cra_module = THIS_MODULE, + .cra_init = p8_ghash_init_tfm, + .cra_exit = p8_ghash_exit_tfm, + }, +}; diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl new file mode 100644 index 000000000..0a6f89983 --- /dev/null +++ b/drivers/crypto/vmx/ghashp8-ppc.pl @@ -0,0 +1,228 @@ +#!/usr/bin/env perl +# +# ==================================================================== +# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# GHASH for for PowerISA v2.07. +# +# July 2014 +# +# Accurate performance measurements are problematic, because it's +# always virtualized setup with possibly throttled processor. +# Relative comparison is therefore more informative. This initial +# version is ~2.1x slower than hardware-assisted AES-128-CTR, ~12x +# faster than "4-bit" integer-only compiler-generated 64-bit code. +# "Initial version" means that there is room for futher improvement. 
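+#
+# For reference, the computation implemented here is the GHASH update
+# Xi <- (Xi xor block) * H over GF(2^128), reduced modulo the polynomial
+# x^128 + x^7 + x^2 + x + 1. The 0xc2...01 constant assembled in
+# gcm_init_p8 below is essentially that polynomial in the bit-reflected
+# form expected by the vpmsumd-based reduction steps.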
+ +$flavour=shift; +$output =shift; + +if ($flavour =~ /64/) { + $SIZE_T=8; + $LRSAVE=2*$SIZE_T; + $STU="stdu"; + $POP="ld"; + $PUSH="std"; +} elsif ($flavour =~ /32/) { + $SIZE_T=4; + $LRSAVE=$SIZE_T; + $STU="stwu"; + $POP="lwz"; + $PUSH="stw"; +} else { die "nonsense $flavour"; } + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or +( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or +die "can't locate ppc-xlate.pl"; + +open STDOUT,"| $^X $xlate $flavour $output" || die "can't call $xlate: $!"; + +my ($Xip,$Htbl,$inp,$len)=map("r$_",(3..6)); # argument block + +my ($Xl,$Xm,$Xh,$IN)=map("v$_",(0..3)); +my ($zero,$t0,$t1,$t2,$xC2,$H,$Hh,$Hl,$lemask)=map("v$_",(4..12)); +my $vrsave="r12"; + +$code=<<___; +.machine "any" + +.text + +.globl .gcm_init_p8 + lis r0,0xfff0 + li r8,0x10 + mfspr $vrsave,256 + li r9,0x20 + mtspr 256,r0 + li r10,0x30 + lvx_u $H,0,r4 # load H + + vspltisb $xC2,-16 # 0xf0 + vspltisb $t0,1 # one + vaddubm $xC2,$xC2,$xC2 # 0xe0 + vxor $zero,$zero,$zero + vor $xC2,$xC2,$t0 # 0xe1 + vsldoi $xC2,$xC2,$zero,15 # 0xe1... + vsldoi $t1,$zero,$t0,1 # ...1 + vaddubm $xC2,$xC2,$xC2 # 0xc2... + vspltisb $t2,7 + vor $xC2,$xC2,$t1 # 0xc2....01 + vspltb $t1,$H,0 # most significant byte + vsl $H,$H,$t0 # H<<=1 + vsrab $t1,$t1,$t2 # broadcast carry bit + vand $t1,$t1,$xC2 + vxor $H,$H,$t1 # twisted H + + vsldoi $H,$H,$H,8 # twist even more ... + vsldoi $xC2,$zero,$xC2,8 # 0xc2.0 + vsldoi $Hl,$zero,$H,8 # ... and split + vsldoi $Hh,$H,$zero,8 + + stvx_u $xC2,0,r3 # save pre-computed table + stvx_u $Hl,r8,r3 + stvx_u $H, r9,r3 + stvx_u $Hh,r10,r3 + + mtspr 256,$vrsave + blr + .long 0 + .byte 0,12,0x14,0,0,0,2,0 + .long 0 +.size .gcm_init_p8,.-.gcm_init_p8 + +.globl .gcm_gmult_p8 + lis r0,0xfff8 + li r8,0x10 + mfspr $vrsave,256 + li r9,0x20 + mtspr 256,r0 + li r10,0x30 + lvx_u $IN,0,$Xip # load Xi + + lvx_u $Hl,r8,$Htbl # load pre-computed table + le?lvsl $lemask,r0,r0 + lvx_u $H, r9,$Htbl + le?vspltisb $t0,0x07 + lvx_u $Hh,r10,$Htbl + le?vxor $lemask,$lemask,$t0 + lvx_u $xC2,0,$Htbl + le?vperm $IN,$IN,$IN,$lemask + vxor $zero,$zero,$zero + + vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo + vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi + vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi + + vpmsumd $t2,$Xl,$xC2 # 1st phase + + vsldoi $t0,$Xm,$zero,8 + vsldoi $t1,$zero,$Xm,8 + vxor $Xl,$Xl,$t0 + vxor $Xh,$Xh,$t1 + + vsldoi $Xl,$Xl,$Xl,8 + vxor $Xl,$Xl,$t2 + + vsldoi $t1,$Xl,$Xl,8 # 2nd phase + vpmsumd $Xl,$Xl,$xC2 + vxor $t1,$t1,$Xh + vxor $Xl,$Xl,$t1 + + le?vperm $Xl,$Xl,$Xl,$lemask + stvx_u $Xl,0,$Xip # write out Xi + + mtspr 256,$vrsave + blr + .long 0 + .byte 0,12,0x14,0,0,0,2,0 + .long 0 +.size .gcm_gmult_p8,.-.gcm_gmult_p8 + +.globl .gcm_ghash_p8 + lis r0,0xfff8 + li r8,0x10 + mfspr $vrsave,256 + li r9,0x20 + mtspr 256,r0 + li r10,0x30 + lvx_u $Xl,0,$Xip # load Xi + + lvx_u $Hl,r8,$Htbl # load pre-computed table + le?lvsl $lemask,r0,r0 + lvx_u $H, r9,$Htbl + le?vspltisb $t0,0x07 + lvx_u $Hh,r10,$Htbl + le?vxor $lemask,$lemask,$t0 + lvx_u $xC2,0,$Htbl + le?vperm $Xl,$Xl,$Xl,$lemask + vxor $zero,$zero,$zero + + lvx_u $IN,0,$inp + addi $inp,$inp,16 + subi $len,$len,16 + le?vperm $IN,$IN,$IN,$lemask + vxor $IN,$IN,$Xl + b Loop + +.align 5 +Loop: + subic $len,$len,16 + vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo + subfe. 
r0,r0,r0 # borrow?-1:0 + vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi + and r0,r0,$len + vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi + add $inp,$inp,r0 + + vpmsumd $t2,$Xl,$xC2 # 1st phase + + vsldoi $t0,$Xm,$zero,8 + vsldoi $t1,$zero,$Xm,8 + vxor $Xl,$Xl,$t0 + vxor $Xh,$Xh,$t1 + + vsldoi $Xl,$Xl,$Xl,8 + vxor $Xl,$Xl,$t2 + lvx_u $IN,0,$inp + addi $inp,$inp,16 + + vsldoi $t1,$Xl,$Xl,8 # 2nd phase + vpmsumd $Xl,$Xl,$xC2 + le?vperm $IN,$IN,$IN,$lemask + vxor $t1,$t1,$Xh + vxor $IN,$IN,$t1 + vxor $IN,$IN,$Xl + beq Loop # did $len-=16 borrow? + + vxor $Xl,$Xl,$t1 + le?vperm $Xl,$Xl,$Xl,$lemask + stvx_u $Xl,0,$Xip # write out Xi + + mtspr 256,$vrsave + blr + .long 0 + .byte 0,12,0x14,0,0,0,4,0 + .long 0 +.size .gcm_ghash_p8,.-.gcm_ghash_p8 + +.asciz "GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>" +.align 2 +___ + +foreach (split("\n",$code)) { + if ($flavour =~ /le$/o) { # little-endian + s/le\?//o or + s/be\?/#be#/o; + } else { + s/le\?/#le#/o or + s/be\?//o; + } + print $_,"\n"; +} + +close STDOUT; # enforce flush diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl new file mode 100644 index 000000000..a59188494 --- /dev/null +++ b/drivers/crypto/vmx/ppc-xlate.pl @@ -0,0 +1,207 @@ +#!/usr/bin/env perl + +# PowerPC assembler distiller by <appro>. + +my $flavour = shift; +my $output = shift; +open STDOUT,">$output" || die "can't open $output: $!"; + +my %GLOBALS; +my $dotinlocallabels=($flavour=~/linux/)?1:0; + +################################################################ +# directives which need special treatment on different platforms +################################################################ +my $globl = sub { + my $junk = shift; + my $name = shift; + my $global = \$GLOBALS{$name}; + my $ret; + + $name =~ s|^[\.\_]||; + + SWITCH: for ($flavour) { + /aix/ && do { $name = ".$name"; + last; + }; + /osx/ && do { $name = "_$name"; + last; + }; + /linux/ + && do { $ret = "_GLOBAL($name)"; + last; + }; + } + + $ret = ".globl $name\nalign 5\n$name:" if (!$ret); + $$global = $name; + $ret; +}; +my $text = sub { + my $ret = ($flavour =~ /aix/) ? ".csect\t.text[PR],7" : ".text"; + $ret = ".abiversion 2\n".$ret if ($flavour =~ /linux.*64le/); + $ret; +}; +my $machine = sub { + my $junk = shift; + my $arch = shift; + if ($flavour =~ /osx/) + { $arch =~ s/\"//g; + $arch = ($flavour=~/64/) ? "ppc970-64" : "ppc970" if ($arch eq "any"); + } + ".machine $arch"; +}; +my $size = sub { + if ($flavour =~ /linux/) + { shift; + my $name = shift; $name =~ s|^[\.\_]||; + my $ret = ".size $name,.-".($flavour=~/64$/?".":"").$name; + $ret .= "\n.size .$name,.-.$name" if ($flavour=~/64$/); + $ret; + } + else + { ""; } +}; +my $asciz = sub { + shift; + my $line = join(",",@_); + if ($line =~ /^"(.*)"$/) + { ".byte " . join(",",unpack("C*",$1),0) . 
"\n.align 2"; } + else + { ""; } +}; +my $quad = sub { + shift; + my @ret; + my ($hi,$lo); + for (@_) { + if (/^0x([0-9a-f]*?)([0-9a-f]{1,8})$/io) + { $hi=$1?"0x$1":"0"; $lo="0x$2"; } + elsif (/^([0-9]+)$/o) + { $hi=$1>>32; $lo=$1&0xffffffff; } # error-prone with 32-bit perl + else + { $hi=undef; $lo=$_; } + + if (defined($hi)) + { push(@ret,$flavour=~/le$/o?".long\t$lo,$hi":".long\t$hi,$lo"); } + else + { push(@ret,".quad $lo"); } + } + join("\n",@ret); +}; + +################################################################ +# simplified mnemonics not handled by at least one assembler +################################################################ +my $cmplw = sub { + my $f = shift; + my $cr = 0; $cr = shift if ($#_>1); + # Some out-of-date 32-bit GNU assembler just can't handle cmplw... + ($flavour =~ /linux.*32/) ? + " .long ".sprintf "0x%x",31<<26|$cr<<23|$_[0]<<16|$_[1]<<11|64 : + " cmplw ".join(',',$cr,@_); +}; +my $bdnz = sub { + my $f = shift; + my $bo = $f=~/[\+\-]/ ? 16+9 : 16; # optional "to be taken" hint + " bc $bo,0,".shift; +} if ($flavour!~/linux/); +my $bltlr = sub { + my $f = shift; + my $bo = $f=~/\-/ ? 12+2 : 12; # optional "not to be taken" hint + ($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints + " .long ".sprintf "0x%x",19<<26|$bo<<21|16<<1 : + " bclr $bo,0"; +}; +my $bnelr = sub { + my $f = shift; + my $bo = $f=~/\-/ ? 4+2 : 4; # optional "not to be taken" hint + ($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints + " .long ".sprintf "0x%x",19<<26|$bo<<21|2<<16|16<<1 : + " bclr $bo,2"; +}; +my $beqlr = sub { + my $f = shift; + my $bo = $f=~/-/ ? 12+2 : 12; # optional "not to be taken" hint + ($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints + " .long ".sprintf "0x%X",19<<26|$bo<<21|2<<16|16<<1 : + " bclr $bo,2"; +}; +# GNU assembler can't handle extrdi rA,rS,16,48, or when sum of last two +# arguments is 64, with "operand out of range" error. 
+my $extrdi = sub { + my ($f,$ra,$rs,$n,$b) = @_; + $b = ($b+$n)&63; $n = 64-$n; + " rldicl $ra,$rs,$b,$n"; +}; +my $vmr = sub { + my ($f,$vx,$vy) = @_; + " vor $vx,$vy,$vy"; +}; + +# PowerISA 2.06 stuff +sub vsxmem_op { + my ($f, $vrt, $ra, $rb, $op) = @_; + " .long ".sprintf "0x%X",(31<<26)|($vrt<<21)|($ra<<16)|($rb<<11)|($op*2+1); +} +# made-up unaligned memory reference AltiVec/VMX instructions +my $lvx_u = sub { vsxmem_op(@_, 844); }; # lxvd2x +my $stvx_u = sub { vsxmem_op(@_, 972); }; # stxvd2x +my $lvdx_u = sub { vsxmem_op(@_, 588); }; # lxsdx +my $stvdx_u = sub { vsxmem_op(@_, 716); }; # stxsdx +my $lvx_4w = sub { vsxmem_op(@_, 780); }; # lxvw4x +my $stvx_4w = sub { vsxmem_op(@_, 908); }; # stxvw4x + +# PowerISA 2.07 stuff +sub vcrypto_op { + my ($f, $vrt, $vra, $vrb, $op) = @_; + " .long ".sprintf "0x%X",(4<<26)|($vrt<<21)|($vra<<16)|($vrb<<11)|$op; +} +my $vcipher = sub { vcrypto_op(@_, 1288); }; +my $vcipherlast = sub { vcrypto_op(@_, 1289); }; +my $vncipher = sub { vcrypto_op(@_, 1352); }; +my $vncipherlast= sub { vcrypto_op(@_, 1353); }; +my $vsbox = sub { vcrypto_op(@_, 0, 1480); }; +my $vshasigmad = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1730); }; +my $vshasigmaw = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1666); }; +my $vpmsumb = sub { vcrypto_op(@_, 1032); }; +my $vpmsumd = sub { vcrypto_op(@_, 1224); }; +my $vpmsubh = sub { vcrypto_op(@_, 1096); }; +my $vpmsumw = sub { vcrypto_op(@_, 1160); }; +my $vaddudm = sub { vcrypto_op(@_, 192); }; + +my $mtsle = sub { + my ($f, $arg) = @_; + " .long ".sprintf "0x%X",(31<<26)|($arg<<21)|(147*2); +}; + +print "#include <asm/ppc_asm.h>\n" if $flavour =~ /linux/; + +while($line=<>) { + + $line =~ s|[#!;].*$||; # get rid of asm-style comments... + $line =~ s|/\*.*\*/||; # ... and C-style comments... + $line =~ s|^\s+||; # ... and skip white spaces in beginning... + $line =~ s|\s+$||; # ... and at the end + + { + $line =~ s|\b\.L(\w+)|L$1|g; # common denominator for Locallabel + $line =~ s|\bL(\w+)|\.L$1|g if ($dotinlocallabels); + } + + { + $line =~ s|^\s*(\.?)(\w+)([\.\+\-]?)\s*||; + my $c = $1; $c = "\t" if ($c eq ""); + my $mnemonic = $2; + my $f = $3; + my $opcode = eval("\$$mnemonic"); + $line =~ s/\b(c?[rf]|v|vs)([0-9]+)\b/$2/g if ($c ne "." and $flavour !~ /osx/); + if (ref($opcode) eq 'CODE') { $line = &$opcode($f,split(',',$line)); } + elsif ($mnemonic) { $line = $c.$mnemonic.$f."\t".$line; } + } + + print $line if ($line); + print "\n"; +} + +close STDOUT; diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c new file mode 100644 index 000000000..44d8d5cfe --- /dev/null +++ b/drivers/crypto/vmx/vmx.c @@ -0,0 +1,88 @@ +/** + * Routines supporting VMX instructions on the Power 8 + * + * Copyright (C) 2015 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com> + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/types.h> +#include <linux/err.h> +#include <linux/crypto.h> +#include <asm/cputable.h> +#include <crypto/internal/hash.h> + +extern struct shash_alg p8_ghash_alg; +extern struct crypto_alg p8_aes_alg; +extern struct crypto_alg p8_aes_cbc_alg; +extern struct crypto_alg p8_aes_ctr_alg; +static struct crypto_alg *algs[] = { + &p8_aes_alg, + &p8_aes_cbc_alg, + &p8_aes_ctr_alg, + NULL, +}; + +int __init p8_init(void) +{ + int ret = 0; + struct crypto_alg **alg_it; + + if (!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO)) + return -ENODEV; + + for (alg_it = algs; *alg_it; alg_it++) { + ret = crypto_register_alg(*alg_it); + printk(KERN_INFO "crypto_register_alg '%s' = %d\n", + (*alg_it)->cra_name, ret); + if (ret) { + for (alg_it--; alg_it >= algs; alg_it--) + crypto_unregister_alg(*alg_it); + break; + } + } + if (ret) + return ret; + + ret = crypto_register_shash(&p8_ghash_alg); + if (ret) { + for (alg_it = algs; *alg_it; alg_it++) + crypto_unregister_alg(*alg_it); + } + return ret; +} + +void __exit p8_exit(void) +{ + struct crypto_alg **alg_it; + + for (alg_it = algs; *alg_it; alg_it++) { + printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name); + crypto_unregister_alg(*alg_it); + } + crypto_unregister_shash(&p8_ghash_alg); +} + +module_init(p8_init); +module_exit(p8_exit); + +MODULE_AUTHOR("Marcelo Cerri <mhcerri@br.ibm.com>"); +MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions support on Power 8"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0.0");
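Usage sketch (illustrative only, not part of this import): once p8_init() has registered the algorithms, other kernel code reaches the accelerated GHASH through the generic shash interface; a request for "ghash" resolves to "p8_ghash" thanks to its cra_priority of 1000, while on CPUs lacking PPC_FEATURE2_VEC_CRYPTO the module never registers and the generic "ghash" is used instead. All names below (ghash_demo, key, data, len, digest) are hypothetical.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Hypothetical caller: GHASH 'len' bytes of 'data' under the 16-byte subkey H. */
static int ghash_demo(const u8 *key, const u8 *data, unsigned int len,
		      u8 digest[16])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret;

	tfm = crypto_alloc_shash("ghash", 0, 0);  /* highest-priority "ghash" wins */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, key, 16);  /* key is the GCM hash subkey H */
	if (ret)
		goto out_free_tfm;

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;
	desc->flags = 0;

	/* init + update + final in one call; a trailing partial block is zero-padded */
	ret = crypto_shash_digest(desc, data, len, digest);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return ret;
}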