author		André Fabian Silva Delgado <emulatorman@parabola.nu>	2015-08-05 17:04:01 -0300
committer	André Fabian Silva Delgado <emulatorman@parabola.nu>	2015-08-05 17:04:01 -0300
commit		57f0f512b273f60d52568b8c6b77e17f5636edc0 (patch)
tree		5e910f0e82173f4ef4f51111366a3f1299037a7b /arch/mips/cavium-octeon/crypto
Initial import
Diffstat (limited to 'arch/mips/cavium-octeon/crypto')
 arch/mips/cavium-octeon/crypto/Makefile        |  10
 arch/mips/cavium-octeon/crypto/octeon-crypto.c |  68
 arch/mips/cavium-octeon/crypto/octeon-crypto.h | 224
 arch/mips/cavium-octeon/crypto/octeon-md5.c    | 208
 arch/mips/cavium-octeon/crypto/octeon-sha1.c   | 241
 arch/mips/cavium-octeon/crypto/octeon-sha256.c | 280
 arch/mips/cavium-octeon/crypto/octeon-sha512.c | 277
 7 files changed, 1308 insertions(+), 0 deletions(-)
diff --git a/arch/mips/cavium-octeon/crypto/Makefile b/arch/mips/cavium-octeon/crypto/Makefile
new file mode 100644
index 000000000..f7aa9d5d3
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/Makefile
@@ -0,0 +1,10 @@
+#
+# OCTEON-specific crypto modules.
+#
+
+obj-y += octeon-crypto.o
+
+obj-$(CONFIG_CRYPTO_MD5_OCTEON) += octeon-md5.o
+obj-$(CONFIG_CRYPTO_SHA1_OCTEON) += octeon-sha1.o
+obj-$(CONFIG_CRYPTO_SHA256_OCTEON) += octeon-sha256.o
+obj-$(CONFIG_CRYPTO_SHA512_OCTEON) += octeon-sha512.o
diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.c b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
new file mode 100644
index 000000000..f66bd1adc
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
@@ -0,0 +1,68 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2012 Cavium Networks
+ */
+
+#include <asm/cop2.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+
+#include "octeon-crypto.h"
+
+/**
+ * Enable access to Octeon's COP2 crypto hardware for kernel use. Wrap any
+ * crypto operations in calls to octeon_crypto_enable/disable in order to make
+ * sure the state of COP2 isn't corrupted if userspace is also performing
+ * hardware crypto operations. Allocate the state parameter on the stack.
+ * Returns with preemption disabled.
+ *
+ * @state: Pointer to state structure to store current COP2 state in.
+ *
+ * Returns: Flags to be passed to octeon_crypto_disable()
+ */
+unsigned long octeon_crypto_enable(struct octeon_cop2_state *state)
+{
+	int status;
+	unsigned long flags;
+
+	preempt_disable();
+	local_irq_save(flags);
+	status = read_c0_status();
+	write_c0_status(status | ST0_CU2);
+	if (KSTK_STATUS(current) & ST0_CU2) {
+		octeon_cop2_save(&(current->thread.cp2));
+		KSTK_STATUS(current) &= ~ST0_CU2;
+		status &= ~ST0_CU2;
+	} else if (status & ST0_CU2) {
+		octeon_cop2_save(state);
+	}
+	local_irq_restore(flags);
+	return status & ST0_CU2;
+}
+EXPORT_SYMBOL_GPL(octeon_crypto_enable);
+
+/**
+ * Disable access to Octeon's COP2 crypto hardware in the kernel. This must be
+ * called after an octeon_crypto_enable() before any context switch or return to
+ * userspace.
+ *
+ * @state: Pointer to COP2 state to restore
+ * @flags: Return value from octeon_crypto_enable()
+ */
+void octeon_crypto_disable(struct octeon_cop2_state *state,
+			   unsigned long crypto_flags)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (crypto_flags & ST0_CU2)
+		octeon_cop2_restore(state);
+	else
+		write_c0_status(read_c0_status() & ~ST0_CU2);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+EXPORT_SYMBOL_GPL(octeon_crypto_disable);
diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.h b/arch/mips/cavium-octeon/crypto/octeon-crypto.h
new file mode 100644
index 000000000..7315cc307
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.h
@@ -0,0 +1,224 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012-2013 Cavium Inc., All Rights Reserved.
+ *
+ * MD5/SHA1/SHA256/SHA512 instruction definitions added by
+ * Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ */
+#ifndef __LINUX_OCTEON_CRYPTO_H
+#define __LINUX_OCTEON_CRYPTO_H
+
+#include <linux/sched.h>
+#include <asm/mipsregs.h>
+
+#define OCTEON_CR_OPCODE_PRIORITY 300
+
+extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *state);
+extern void octeon_crypto_disable(struct octeon_cop2_state *state,
+				  unsigned long flags);
+
+/*
+ * Macros needed to implement MD5/SHA1/SHA256:
+ */
+
+/*
+ * The index can be 0-1 (MD5) or 0-2 (SHA1), 0-3 (SHA256).
+ */
+#define write_octeon_64bit_hash_dword(value, index)	\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x0048+" STR(index)		\
+	:						\
+	: [rt] "d" (cpu_to_be64(value)));		\
+} while (0)
+
+/*
+ * The index can be 0-1 (MD5) or 0-2 (SHA1), 0-3 (SHA256).
+ */
+#define read_octeon_64bit_hash_dword(index)		\
+({							\
+	u64 __value;					\
+							\
+	__asm__ __volatile__ (				\
+	"dmfc2 %[rt],0x0048+" STR(index)		\
+	: [rt] "=d" (__value)				\
+	: );						\
+							\
+	be64_to_cpu(__value);				\
+})
+
+/*
+ * The index can be 0-6.
+ */
+#define write_octeon_64bit_block_dword(value, index)	\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x0040+" STR(index)		\
+	:						\
+	: [rt] "d" (cpu_to_be64(value)));		\
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_md5_start(value)				\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x4047"				\
+	:						\
+	: [rt] "d" (cpu_to_be64(value)));		\
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_sha1_start(value)			\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x4057"				\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_sha256_start(value)			\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x404f"				\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+/*
+ * Macros needed to implement SHA512:
+ */
+
+/*
+ * The index can be 0-7.
+ */
+#define write_octeon_64bit_hash_sha512(value, index)	\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x0250+" STR(index)		\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+/*
+ * The index can be 0-7.
+ */
+#define read_octeon_64bit_hash_sha512(index)		\
+({							\
+	u64 __value;					\
+							\
+	__asm__ __volatile__ (				\
+	"dmfc2 %[rt],0x0250+" STR(index)		\
+	: [rt] "=d" (__value)				\
+	: );						\
+							\
+	__value;					\
+})
+
+/*
+ * The index can be 0-14.
+ */
+#define write_octeon_64bit_block_sha512(value, index)	\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x0240+" STR(index)		\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+/*
+ * The value is the final block word (64-bit).
+ */
+#define octeon_sha512_start(value)			\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x424f"				\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_sha1_start(value)			\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x4057"				\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_sha256_start(value)			\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x404f"				\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+/*
+ * Macros needed to implement SHA512:
+ */
+
+/*
+ * The index can be 0-7.
+ */
+#define write_octeon_64bit_hash_sha512(value, index)	\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x0250+" STR(index)		\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+/*
+ * The index can be 0-7.
+ */
+#define read_octeon_64bit_hash_sha512(index)		\
+({							\
+	u64 __value;					\
+							\
+	__asm__ __volatile__ (				\
+	"dmfc2 %[rt],0x0250+" STR(index)		\
+	: [rt] "=d" (__value)				\
+	: );						\
+							\
+	__value;					\
+})
+
+/*
+ * The index can be 0-14.
+ */
+#define write_octeon_64bit_block_sha512(value, index)	\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x0240+" STR(index)		\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+/*
+ * The value is the final block word (64-bit).
+ */
+#define octeon_sha512_start(value)			\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x424f"				\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+#endif /* __LINUX_OCTEON_CRYPTO_H */
diff --git a/arch/mips/cavium-octeon/crypto/octeon-md5.c b/arch/mips/cavium-octeon/crypto/octeon-md5.c
new file mode 100644
index 000000000..12dccdb38
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-md5.c
@@ -0,0 +1,208 @@
+/*
+ * Cryptographic API.
+ *
+ * MD5 Message Digest Algorithm (RFC1321).
+ *
+ * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ * Based on crypto/md5.c, which is:
+ *
+ * Derived from cryptoapi implementation, originally based on the
+ * public domain implementation written by Colin Plumb in 1993.
+ *
+ * Copyright (c) Cryptoapi developers.
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <crypto/md5.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+#include <linux/cryptohash.h>
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
+
+#include "octeon-crypto.h"
+
+/*
+ * We pass everything as 64-bit. OCTEON can handle misaligned data.
+ */ + +static void octeon_md5_store_hash(struct md5_state *ctx) +{ + u64 *hash = (u64 *)ctx->hash; + + write_octeon_64bit_hash_dword(hash[0], 0); + write_octeon_64bit_hash_dword(hash[1], 1); +} + +static void octeon_md5_read_hash(struct md5_state *ctx) +{ + u64 *hash = (u64 *)ctx->hash; + + hash[0] = read_octeon_64bit_hash_dword(0); + hash[1] = read_octeon_64bit_hash_dword(1); +} + +static void octeon_md5_transform(const void *_block) +{ + const u64 *block = _block; + + write_octeon_64bit_block_dword(block[0], 0); + write_octeon_64bit_block_dword(block[1], 1); + write_octeon_64bit_block_dword(block[2], 2); + write_octeon_64bit_block_dword(block[3], 3); + write_octeon_64bit_block_dword(block[4], 4); + write_octeon_64bit_block_dword(block[5], 5); + write_octeon_64bit_block_dword(block[6], 6); + octeon_md5_start(block[7]); +} + +static int octeon_md5_init(struct shash_desc *desc) +{ + struct md5_state *mctx = shash_desc_ctx(desc); + + mctx->hash[0] = cpu_to_le32(0x67452301); + mctx->hash[1] = cpu_to_le32(0xefcdab89); + mctx->hash[2] = cpu_to_le32(0x98badcfe); + mctx->hash[3] = cpu_to_le32(0x10325476); + mctx->byte_count = 0; + + return 0; +} + +static int octeon_md5_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + struct md5_state *mctx = shash_desc_ctx(desc); + const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); + struct octeon_cop2_state state; + unsigned long flags; + + mctx->byte_count += len; + + if (avail > len) { + memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), + data, len); + return 0; + } + + memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, + avail); + + flags = octeon_crypto_enable(&state); + octeon_md5_store_hash(mctx); + + octeon_md5_transform(mctx->block); + data += avail; + len -= avail; + + while (len >= sizeof(mctx->block)) { + octeon_md5_transform(data); + data += sizeof(mctx->block); + len -= sizeof(mctx->block); + } + + octeon_md5_read_hash(mctx); + octeon_crypto_disable(&state, flags); + + memcpy(mctx->block, data, len); + + return 0; +} + +static int octeon_md5_final(struct shash_desc *desc, u8 *out) +{ + struct md5_state *mctx = shash_desc_ctx(desc); + const unsigned int offset = mctx->byte_count & 0x3f; + char *p = (char *)mctx->block + offset; + int padding = 56 - (offset + 1); + struct octeon_cop2_state state; + unsigned long flags; + + *p++ = 0x80; + + flags = octeon_crypto_enable(&state); + octeon_md5_store_hash(mctx); + + if (padding < 0) { + memset(p, 0x00, padding + sizeof(u64)); + octeon_md5_transform(mctx->block); + p = (char *)mctx->block; + padding = 56; + } + + memset(p, 0, padding); + mctx->block[14] = cpu_to_le32(mctx->byte_count << 3); + mctx->block[15] = cpu_to_le32(mctx->byte_count >> 29); + octeon_md5_transform(mctx->block); + + octeon_md5_read_hash(mctx); + octeon_crypto_disable(&state, flags); + + memcpy(out, mctx->hash, sizeof(mctx->hash)); + memset(mctx, 0, sizeof(*mctx)); + + return 0; +} + +static int octeon_md5_export(struct shash_desc *desc, void *out) +{ + struct md5_state *ctx = shash_desc_ctx(desc); + + memcpy(out, ctx, sizeof(*ctx)); + return 0; +} + +static int octeon_md5_import(struct shash_desc *desc, const void *in) +{ + struct md5_state *ctx = shash_desc_ctx(desc); + + memcpy(ctx, in, sizeof(*ctx)); + return 0; +} + +static struct shash_alg alg = { + .digestsize = MD5_DIGEST_SIZE, + .init = octeon_md5_init, + .update = octeon_md5_update, + .final = octeon_md5_final, + .export = octeon_md5_export, + .import = octeon_md5_import, + .descsize = sizeof(struct md5_state), 
+ .statesize = sizeof(struct md5_state), + .base = { + .cra_name = "md5", + .cra_driver_name= "octeon-md5", + .cra_priority = OCTEON_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static int __init md5_mod_init(void) +{ + if (!octeon_has_crypto()) + return -ENOTSUPP; + return crypto_register_shash(&alg); +} + +static void __exit md5_mod_fini(void) +{ + crypto_unregister_shash(&alg); +} + +module_init(md5_mod_init); +module_exit(md5_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MD5 Message Digest Algorithm (OCTEON)"); +MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>"); diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha1.c b/arch/mips/cavium-octeon/crypto/octeon-sha1.c new file mode 100644 index 000000000..2b74b5b67 --- /dev/null +++ b/arch/mips/cavium-octeon/crypto/octeon-sha1.c @@ -0,0 +1,241 @@ +/* + * Cryptographic API. + * + * SHA1 Secure Hash Algorithm. + * + * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>. + * + * Based on crypto/sha1_generic.c, which is: + * + * Copyright (c) Alan Smithee. + * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> + * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include <linux/mm.h> +#include <crypto/sha.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/module.h> +#include <asm/byteorder.h> +#include <asm/octeon/octeon.h> +#include <crypto/internal/hash.h> + +#include "octeon-crypto.h" + +/* + * We pass everything as 64-bit. OCTEON can handle misaligned data. 
+ */ + +static void octeon_sha1_store_hash(struct sha1_state *sctx) +{ + u64 *hash = (u64 *)sctx->state; + union { + u32 word[2]; + u64 dword; + } hash_tail = { { sctx->state[4], } }; + + write_octeon_64bit_hash_dword(hash[0], 0); + write_octeon_64bit_hash_dword(hash[1], 1); + write_octeon_64bit_hash_dword(hash_tail.dword, 2); + memzero_explicit(&hash_tail.word[0], sizeof(hash_tail.word[0])); +} + +static void octeon_sha1_read_hash(struct sha1_state *sctx) +{ + u64 *hash = (u64 *)sctx->state; + union { + u32 word[2]; + u64 dword; + } hash_tail; + + hash[0] = read_octeon_64bit_hash_dword(0); + hash[1] = read_octeon_64bit_hash_dword(1); + hash_tail.dword = read_octeon_64bit_hash_dword(2); + sctx->state[4] = hash_tail.word[0]; + memzero_explicit(&hash_tail.dword, sizeof(hash_tail.dword)); +} + +static void octeon_sha1_transform(const void *_block) +{ + const u64 *block = _block; + + write_octeon_64bit_block_dword(block[0], 0); + write_octeon_64bit_block_dword(block[1], 1); + write_octeon_64bit_block_dword(block[2], 2); + write_octeon_64bit_block_dword(block[3], 3); + write_octeon_64bit_block_dword(block[4], 4); + write_octeon_64bit_block_dword(block[5], 5); + write_octeon_64bit_block_dword(block[6], 6); + octeon_sha1_start(block[7]); +} + +static int octeon_sha1_init(struct shash_desc *desc) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA1_H0; + sctx->state[1] = SHA1_H1; + sctx->state[2] = SHA1_H2; + sctx->state[3] = SHA1_H3; + sctx->state[4] = SHA1_H4; + sctx->count = 0; + + return 0; +} + +static void __octeon_sha1_update(struct sha1_state *sctx, const u8 *data, + unsigned int len) +{ + unsigned int partial; + unsigned int done; + const u8 *src; + + partial = sctx->count % SHA1_BLOCK_SIZE; + sctx->count += len; + done = 0; + src = data; + + if ((partial + len) >= SHA1_BLOCK_SIZE) { + if (partial) { + done = -partial; + memcpy(sctx->buffer + partial, data, + done + SHA1_BLOCK_SIZE); + src = sctx->buffer; + } + + do { + octeon_sha1_transform(src); + done += SHA1_BLOCK_SIZE; + src = data + done; + } while (done + SHA1_BLOCK_SIZE <= len); + + partial = 0; + } + memcpy(sctx->buffer + partial, src, len - done); +} + +static int octeon_sha1_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + struct octeon_cop2_state state; + unsigned long flags; + + /* + * Small updates never reach the crypto engine, so the generic sha1 is + * faster because of the heavyweight octeon_crypto_enable() / + * octeon_crypto_disable(). + */ + if ((sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE) + return crypto_sha1_update(desc, data, len); + + flags = octeon_crypto_enable(&state); + octeon_sha1_store_hash(sctx); + + __octeon_sha1_update(sctx, data, len); + + octeon_sha1_read_hash(sctx); + octeon_crypto_disable(&state, flags); + + return 0; +} + +static int octeon_sha1_final(struct shash_desc *desc, u8 *out) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + static const u8 padding[64] = { 0x80, }; + struct octeon_cop2_state state; + __be32 *dst = (__be32 *)out; + unsigned int pad_len; + unsigned long flags; + unsigned int index; + __be64 bits; + int i; + + /* Save number of bits. */ + bits = cpu_to_be64(sctx->count << 3); + + /* Pad out to 56 mod 64. */ + index = sctx->count & 0x3f; + pad_len = (index < 56) ? (56 - index) : ((64+56) - index); + + flags = octeon_crypto_enable(&state); + octeon_sha1_store_hash(sctx); + + __octeon_sha1_update(sctx, padding, pad_len); + + /* Append length (before padding). 
*/ + __octeon_sha1_update(sctx, (const u8 *)&bits, sizeof(bits)); + + octeon_sha1_read_hash(sctx); + octeon_crypto_disable(&state, flags); + + /* Store state in digest */ + for (i = 0; i < 5; i++) + dst[i] = cpu_to_be32(sctx->state[i]); + + /* Zeroize sensitive information. */ + memset(sctx, 0, sizeof(*sctx)); + + return 0; +} + +static int octeon_sha1_export(struct shash_desc *desc, void *out) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + + memcpy(out, sctx, sizeof(*sctx)); + return 0; +} + +static int octeon_sha1_import(struct shash_desc *desc, const void *in) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + + memcpy(sctx, in, sizeof(*sctx)); + return 0; +} + +static struct shash_alg octeon_sha1_alg = { + .digestsize = SHA1_DIGEST_SIZE, + .init = octeon_sha1_init, + .update = octeon_sha1_update, + .final = octeon_sha1_final, + .export = octeon_sha1_export, + .import = octeon_sha1_import, + .descsize = sizeof(struct sha1_state), + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "sha1", + .cra_driver_name= "octeon-sha1", + .cra_priority = OCTEON_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static int __init octeon_sha1_mod_init(void) +{ + if (!octeon_has_crypto()) + return -ENOTSUPP; + return crypto_register_shash(&octeon_sha1_alg); +} + +static void __exit octeon_sha1_mod_fini(void) +{ + crypto_unregister_shash(&octeon_sha1_alg); +} + +module_init(octeon_sha1_mod_init); +module_exit(octeon_sha1_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (OCTEON)"); +MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>"); diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha256.c b/arch/mips/cavium-octeon/crypto/octeon-sha256.c new file mode 100644 index 000000000..97e96fead --- /dev/null +++ b/arch/mips/cavium-octeon/crypto/octeon-sha256.c @@ -0,0 +1,280 @@ +/* + * Cryptographic API. + * + * SHA-224 and SHA-256 Secure Hash Algorithm. + * + * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>. + * + * Based on crypto/sha256_generic.c, which is: + * + * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> + * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> + * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> + * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include <linux/mm.h> +#include <crypto/sha.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/module.h> +#include <asm/byteorder.h> +#include <asm/octeon/octeon.h> +#include <crypto/internal/hash.h> + +#include "octeon-crypto.h" + +/* + * We pass everything as 64-bit. OCTEON can handle misaligned data. 
+ */ + +static void octeon_sha256_store_hash(struct sha256_state *sctx) +{ + u64 *hash = (u64 *)sctx->state; + + write_octeon_64bit_hash_dword(hash[0], 0); + write_octeon_64bit_hash_dword(hash[1], 1); + write_octeon_64bit_hash_dword(hash[2], 2); + write_octeon_64bit_hash_dword(hash[3], 3); +} + +static void octeon_sha256_read_hash(struct sha256_state *sctx) +{ + u64 *hash = (u64 *)sctx->state; + + hash[0] = read_octeon_64bit_hash_dword(0); + hash[1] = read_octeon_64bit_hash_dword(1); + hash[2] = read_octeon_64bit_hash_dword(2); + hash[3] = read_octeon_64bit_hash_dword(3); +} + +static void octeon_sha256_transform(const void *_block) +{ + const u64 *block = _block; + + write_octeon_64bit_block_dword(block[0], 0); + write_octeon_64bit_block_dword(block[1], 1); + write_octeon_64bit_block_dword(block[2], 2); + write_octeon_64bit_block_dword(block[3], 3); + write_octeon_64bit_block_dword(block[4], 4); + write_octeon_64bit_block_dword(block[5], 5); + write_octeon_64bit_block_dword(block[6], 6); + octeon_sha256_start(block[7]); +} + +static int octeon_sha224_init(struct shash_desc *desc) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA224_H0; + sctx->state[1] = SHA224_H1; + sctx->state[2] = SHA224_H2; + sctx->state[3] = SHA224_H3; + sctx->state[4] = SHA224_H4; + sctx->state[5] = SHA224_H5; + sctx->state[6] = SHA224_H6; + sctx->state[7] = SHA224_H7; + sctx->count = 0; + + return 0; +} + +static int octeon_sha256_init(struct shash_desc *desc) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA256_H0; + sctx->state[1] = SHA256_H1; + sctx->state[2] = SHA256_H2; + sctx->state[3] = SHA256_H3; + sctx->state[4] = SHA256_H4; + sctx->state[5] = SHA256_H5; + sctx->state[6] = SHA256_H6; + sctx->state[7] = SHA256_H7; + sctx->count = 0; + + return 0; +} + +static void __octeon_sha256_update(struct sha256_state *sctx, const u8 *data, + unsigned int len) +{ + unsigned int partial; + unsigned int done; + const u8 *src; + + partial = sctx->count % SHA256_BLOCK_SIZE; + sctx->count += len; + done = 0; + src = data; + + if ((partial + len) >= SHA256_BLOCK_SIZE) { + if (partial) { + done = -partial; + memcpy(sctx->buf + partial, data, + done + SHA256_BLOCK_SIZE); + src = sctx->buf; + } + + do { + octeon_sha256_transform(src); + done += SHA256_BLOCK_SIZE; + src = data + done; + } while (done + SHA256_BLOCK_SIZE <= len); + + partial = 0; + } + memcpy(sctx->buf + partial, src, len - done); +} + +static int octeon_sha256_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + struct octeon_cop2_state state; + unsigned long flags; + + /* + * Small updates never reach the crypto engine, so the generic sha256 is + * faster because of the heavyweight octeon_crypto_enable() / + * octeon_crypto_disable(). + */ + if ((sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE) + return crypto_sha256_update(desc, data, len); + + flags = octeon_crypto_enable(&state); + octeon_sha256_store_hash(sctx); + + __octeon_sha256_update(sctx, data, len); + + octeon_sha256_read_hash(sctx); + octeon_crypto_disable(&state, flags); + + return 0; +} + +static int octeon_sha256_final(struct shash_desc *desc, u8 *out) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + static const u8 padding[64] = { 0x80, }; + struct octeon_cop2_state state; + __be32 *dst = (__be32 *)out; + unsigned int pad_len; + unsigned long flags; + unsigned int index; + __be64 bits; + int i; + + /* Save number of bits. 
*/ + bits = cpu_to_be64(sctx->count << 3); + + /* Pad out to 56 mod 64. */ + index = sctx->count & 0x3f; + pad_len = (index < 56) ? (56 - index) : ((64+56) - index); + + flags = octeon_crypto_enable(&state); + octeon_sha256_store_hash(sctx); + + __octeon_sha256_update(sctx, padding, pad_len); + + /* Append length (before padding). */ + __octeon_sha256_update(sctx, (const u8 *)&bits, sizeof(bits)); + + octeon_sha256_read_hash(sctx); + octeon_crypto_disable(&state, flags); + + /* Store state in digest */ + for (i = 0; i < 8; i++) + dst[i] = cpu_to_be32(sctx->state[i]); + + /* Zeroize sensitive information. */ + memset(sctx, 0, sizeof(*sctx)); + + return 0; +} + +static int octeon_sha224_final(struct shash_desc *desc, u8 *hash) +{ + u8 D[SHA256_DIGEST_SIZE]; + + octeon_sha256_final(desc, D); + + memcpy(hash, D, SHA224_DIGEST_SIZE); + memzero_explicit(D, SHA256_DIGEST_SIZE); + + return 0; +} + +static int octeon_sha256_export(struct shash_desc *desc, void *out) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + memcpy(out, sctx, sizeof(*sctx)); + return 0; +} + +static int octeon_sha256_import(struct shash_desc *desc, const void *in) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + memcpy(sctx, in, sizeof(*sctx)); + return 0; +} + +static struct shash_alg octeon_sha256_algs[2] = { { + .digestsize = SHA256_DIGEST_SIZE, + .init = octeon_sha256_init, + .update = octeon_sha256_update, + .final = octeon_sha256_final, + .export = octeon_sha256_export, + .import = octeon_sha256_import, + .descsize = sizeof(struct sha256_state), + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha256", + .cra_driver_name= "octeon-sha256", + .cra_priority = OCTEON_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}, { + .digestsize = SHA224_DIGEST_SIZE, + .init = octeon_sha224_init, + .update = octeon_sha256_update, + .final = octeon_sha224_final, + .descsize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha224", + .cra_driver_name= "octeon-sha224", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +} }; + +static int __init octeon_sha256_mod_init(void) +{ + if (!octeon_has_crypto()) + return -ENOTSUPP; + return crypto_register_shashes(octeon_sha256_algs, + ARRAY_SIZE(octeon_sha256_algs)); +} + +static void __exit octeon_sha256_mod_fini(void) +{ + crypto_unregister_shashes(octeon_sha256_algs, + ARRAY_SIZE(octeon_sha256_algs)); +} + +module_init(octeon_sha256_mod_init); +module_exit(octeon_sha256_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm (OCTEON)"); +MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>"); diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha512.c b/arch/mips/cavium-octeon/crypto/octeon-sha512.c new file mode 100644 index 000000000..d5fb3c6f2 --- /dev/null +++ b/arch/mips/cavium-octeon/crypto/octeon-sha512.c @@ -0,0 +1,277 @@ +/* + * Cryptographic API. + * + * SHA-512 and SHA-384 Secure Hash Algorithm. + * + * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>. 
+ * + * Based on crypto/sha512_generic.c, which is: + * + * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> + * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> + * Copyright (c) 2003 Kyle McMartin <kyle@debian.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + */ + +#include <linux/mm.h> +#include <crypto/sha.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/module.h> +#include <asm/byteorder.h> +#include <asm/octeon/octeon.h> +#include <crypto/internal/hash.h> + +#include "octeon-crypto.h" + +/* + * We pass everything as 64-bit. OCTEON can handle misaligned data. + */ + +static void octeon_sha512_store_hash(struct sha512_state *sctx) +{ + write_octeon_64bit_hash_sha512(sctx->state[0], 0); + write_octeon_64bit_hash_sha512(sctx->state[1], 1); + write_octeon_64bit_hash_sha512(sctx->state[2], 2); + write_octeon_64bit_hash_sha512(sctx->state[3], 3); + write_octeon_64bit_hash_sha512(sctx->state[4], 4); + write_octeon_64bit_hash_sha512(sctx->state[5], 5); + write_octeon_64bit_hash_sha512(sctx->state[6], 6); + write_octeon_64bit_hash_sha512(sctx->state[7], 7); +} + +static void octeon_sha512_read_hash(struct sha512_state *sctx) +{ + sctx->state[0] = read_octeon_64bit_hash_sha512(0); + sctx->state[1] = read_octeon_64bit_hash_sha512(1); + sctx->state[2] = read_octeon_64bit_hash_sha512(2); + sctx->state[3] = read_octeon_64bit_hash_sha512(3); + sctx->state[4] = read_octeon_64bit_hash_sha512(4); + sctx->state[5] = read_octeon_64bit_hash_sha512(5); + sctx->state[6] = read_octeon_64bit_hash_sha512(6); + sctx->state[7] = read_octeon_64bit_hash_sha512(7); +} + +static void octeon_sha512_transform(const void *_block) +{ + const u64 *block = _block; + + write_octeon_64bit_block_sha512(block[0], 0); + write_octeon_64bit_block_sha512(block[1], 1); + write_octeon_64bit_block_sha512(block[2], 2); + write_octeon_64bit_block_sha512(block[3], 3); + write_octeon_64bit_block_sha512(block[4], 4); + write_octeon_64bit_block_sha512(block[5], 5); + write_octeon_64bit_block_sha512(block[6], 6); + write_octeon_64bit_block_sha512(block[7], 7); + write_octeon_64bit_block_sha512(block[8], 8); + write_octeon_64bit_block_sha512(block[9], 9); + write_octeon_64bit_block_sha512(block[10], 10); + write_octeon_64bit_block_sha512(block[11], 11); + write_octeon_64bit_block_sha512(block[12], 12); + write_octeon_64bit_block_sha512(block[13], 13); + write_octeon_64bit_block_sha512(block[14], 14); + octeon_sha512_start(block[15]); +} + +static int octeon_sha512_init(struct shash_desc *desc) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA512_H0; + sctx->state[1] = SHA512_H1; + sctx->state[2] = SHA512_H2; + sctx->state[3] = SHA512_H3; + sctx->state[4] = SHA512_H4; + sctx->state[5] = SHA512_H5; + sctx->state[6] = SHA512_H6; + sctx->state[7] = SHA512_H7; + sctx->count[0] = sctx->count[1] = 0; + + return 0; +} + +static int octeon_sha384_init(struct shash_desc *desc) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA384_H0; + sctx->state[1] = SHA384_H1; + sctx->state[2] = SHA384_H2; + sctx->state[3] = SHA384_H3; + sctx->state[4] = SHA384_H4; + sctx->state[5] = SHA384_H5; + sctx->state[6] = SHA384_H6; + sctx->state[7] = SHA384_H7; + sctx->count[0] = sctx->count[1] = 0; + + return 0; +} + +static void __octeon_sha512_update(struct sha512_state *sctx, 
const u8 *data, + unsigned int len) +{ + unsigned int part_len; + unsigned int index; + unsigned int i; + + /* Compute number of bytes mod 128. */ + index = sctx->count[0] % SHA512_BLOCK_SIZE; + + /* Update number of bytes. */ + if ((sctx->count[0] += len) < len) + sctx->count[1]++; + + part_len = SHA512_BLOCK_SIZE - index; + + /* Transform as many times as possible. */ + if (len >= part_len) { + memcpy(&sctx->buf[index], data, part_len); + octeon_sha512_transform(sctx->buf); + + for (i = part_len; i + SHA512_BLOCK_SIZE <= len; + i += SHA512_BLOCK_SIZE) + octeon_sha512_transform(&data[i]); + + index = 0; + } else { + i = 0; + } + + /* Buffer remaining input. */ + memcpy(&sctx->buf[index], &data[i], len - i); +} + +static int octeon_sha512_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + struct octeon_cop2_state state; + unsigned long flags; + + /* + * Small updates never reach the crypto engine, so the generic sha512 is + * faster because of the heavyweight octeon_crypto_enable() / + * octeon_crypto_disable(). + */ + if ((sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE) + return crypto_sha512_update(desc, data, len); + + flags = octeon_crypto_enable(&state); + octeon_sha512_store_hash(sctx); + + __octeon_sha512_update(sctx, data, len); + + octeon_sha512_read_hash(sctx); + octeon_crypto_disable(&state, flags); + + return 0; +} + +static int octeon_sha512_final(struct shash_desc *desc, u8 *hash) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + static u8 padding[128] = { 0x80, }; + struct octeon_cop2_state state; + __be64 *dst = (__be64 *)hash; + unsigned int pad_len; + unsigned long flags; + unsigned int index; + __be64 bits[2]; + int i; + + /* Save number of bits. */ + bits[1] = cpu_to_be64(sctx->count[0] << 3); + bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); + + /* Pad out to 112 mod 128. */ + index = sctx->count[0] & 0x7f; + pad_len = (index < 112) ? (112 - index) : ((128+112) - index); + + flags = octeon_crypto_enable(&state); + octeon_sha512_store_hash(sctx); + + __octeon_sha512_update(sctx, padding, pad_len); + + /* Append length (before padding). */ + __octeon_sha512_update(sctx, (const u8 *)bits, sizeof(bits)); + + octeon_sha512_read_hash(sctx); + octeon_crypto_disable(&state, flags); + + /* Store state in digest. */ + for (i = 0; i < 8; i++) + dst[i] = cpu_to_be64(sctx->state[i]); + + /* Zeroize sensitive information. 
*/ + memset(sctx, 0, sizeof(struct sha512_state)); + + return 0; +} + +static int octeon_sha384_final(struct shash_desc *desc, u8 *hash) +{ + u8 D[64]; + + octeon_sha512_final(desc, D); + + memcpy(hash, D, 48); + memzero_explicit(D, 64); + + return 0; +} + +static struct shash_alg octeon_sha512_algs[2] = { { + .digestsize = SHA512_DIGEST_SIZE, + .init = octeon_sha512_init, + .update = octeon_sha512_update, + .final = octeon_sha512_final, + .descsize = sizeof(struct sha512_state), + .base = { + .cra_name = "sha512", + .cra_driver_name= "octeon-sha512", + .cra_priority = OCTEON_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}, { + .digestsize = SHA384_DIGEST_SIZE, + .init = octeon_sha384_init, + .update = octeon_sha512_update, + .final = octeon_sha384_final, + .descsize = sizeof(struct sha512_state), + .base = { + .cra_name = "sha384", + .cra_driver_name= "octeon-sha384", + .cra_priority = OCTEON_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +} }; + +static int __init octeon_sha512_mod_init(void) +{ + if (!octeon_has_crypto()) + return -ENOTSUPP; + return crypto_register_shashes(octeon_sha512_algs, + ARRAY_SIZE(octeon_sha512_algs)); +} + +static void __exit octeon_sha512_mod_fini(void) +{ + crypto_unregister_shashes(octeon_sha512_algs, + ARRAY_SIZE(octeon_sha512_algs)); +} + +module_init(octeon_sha512_mod_init); +module_exit(octeon_sha512_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms (OCTEON)"); +MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>"); |