Diffstat (limited to 'arch/s390/crypto')
-rw-r--r--  arch/s390/crypto/Makefile          11
-rw-r--r--  arch/s390/crypto/aes_s390.c       985
-rw-r--r--  arch/s390/crypto/crypt_s390.h     493
-rw-r--r--  arch/s390/crypto/des_s390.c       626
-rw-r--r--  arch/s390/crypto/ghash_s390.c     167
-rw-r--r--  arch/s390/crypto/prng.c           919
-rw-r--r--  arch/s390/crypto/sha.h             37
-rw-r--r--  arch/s390/crypto/sha1_s390.c      108
-rw-r--r--  arch/s390/crypto/sha256_s390.c    149
-rw-r--r--  arch/s390/crypto/sha512_s390.c    155
-rw-r--r--  arch/s390/crypto/sha_common.c     106
11 files changed, 3756 insertions, 0 deletions
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
new file mode 100644
index 000000000..7f0b7cda6
--- /dev/null
+++ b/arch/s390/crypto/Makefile
@@ -0,0 +1,11 @@
+#
+# Cryptographic API
+#
+
+obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o sha_common.o
+obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o
+obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o
+obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
+obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
+obj-$(CONFIG_S390_PRNG) += prng.o
+obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
new file mode 100644
index 000000000..5566ce80a
--- /dev/null
+++ b/arch/s390/crypto/aes_s390.c
@@ -0,0 +1,985 @@
+/*
+ * Cryptographic API.
+ *
+ * s390 implementation of the AES Cipher Algorithm.
+ *
+ * s390 Version:
+ * Copyright IBM Corp. 2005, 2007
+ * Author(s): Jan Glauber (jang@de.ibm.com)
+ * Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
+ *
+ * Derived from "crypto/aes_generic.c"
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#define KMSG_COMPONENT "aes_s390"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include "crypt_s390.h"
+
+#define AES_KEYLEN_128 1
+#define AES_KEYLEN_192 2
+#define AES_KEYLEN_256 4
+
+static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
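+/* ctrblk: shared page buffer used to queue multiple counter blocks per KMCTR call */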
+static char keylen_flag;
+
+struct s390_aes_ctx {
+ u8 key[AES_MAX_KEY_SIZE];
+ long enc;
+ long dec;
+ int key_len;
+ union {
+ struct crypto_blkcipher *blk;
+ struct crypto_cipher *cip;
+ } fallback;
+};
+
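+/* parameter block for the PCC instruction, used to pre-compute the XTS tweak */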
+struct pcc_param {
+ u8 key[32];
+ u8 tweak[16];
+ u8 block[16];
+ u8 bit[16];
+ u8 xts[16];
+};
+
+struct s390_xts_ctx {
+ u8 key[32];
+ u8 pcc_key[32];
+ long enc;
+ long dec;
+ int key_len;
+ struct crypto_blkcipher *fallback;
+};
+
+/*
+ * Check if the key_len is supported by the HW.
+ * Returns 0 if it is supported, a positive number if it is not and a
+ * software fallback is required, or a negative number if the key size
+ * is not valid.
+ */
+static int need_fallback(unsigned int key_len)
+{
+ switch (key_len) {
+ case 16:
+ if (!(keylen_flag & AES_KEYLEN_128))
+ return 1;
+ break;
+ case 24:
+ if (!(keylen_flag & AES_KEYLEN_192))
+ return 1;
+ break;
+ case 32:
+ if (!(keylen_flag & AES_KEYLEN_256))
+ return 1;
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
+ if (ret) {
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
+ CRYPTO_TFM_RES_MASK);
+ }
+ return ret;
+}
+
+static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ u32 *flags = &tfm->crt_flags;
+ int ret;
+
+ ret = need_fallback(key_len);
+ if (ret < 0) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+
+ sctx->key_len = key_len;
+ if (!ret) {
+ memcpy(sctx->key, in_key, key_len);
+ return 0;
+ }
+
+ return setkey_fallback_cip(tfm, in_key, key_len);
+}
+
+static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+ if (unlikely(need_fallback(sctx->key_len))) {
+ crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
+ return;
+ }
+
+ switch (sctx->key_len) {
+ case 16:
+ crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
+ AES_BLOCK_SIZE);
+ break;
+ case 24:
+ crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
+ AES_BLOCK_SIZE);
+ break;
+ case 32:
+ crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
+ AES_BLOCK_SIZE);
+ break;
+ }
+}
+
+static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+ if (unlikely(need_fallback(sctx->key_len))) {
+ crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
+ return;
+ }
+
+ switch (sctx->key_len) {
+ case 16:
+ crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
+ AES_BLOCK_SIZE);
+ break;
+ case 24:
+ crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
+ AES_BLOCK_SIZE);
+ break;
+ case 32:
+ crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
+ AES_BLOCK_SIZE);
+ break;
+ }
+}
+
+static int fallback_init_cip(struct crypto_tfm *tfm)
+{
+ const char *name = tfm->__crt_alg->cra_name;
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+ sctx->fallback.cip = crypto_alloc_cipher(name, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(sctx->fallback.cip)) {
+ pr_err("Allocating AES fallback algorithm %s failed\n",
+ name);
+ return PTR_ERR(sctx->fallback.cip);
+ }
+
+ return 0;
+}
+
+static void fallback_exit_cip(struct crypto_tfm *tfm)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_cipher(sctx->fallback.cip);
+ sctx->fallback.cip = NULL;
+}
+
+static struct crypto_alg aes_alg = {
+ .cra_name = "aes",
+ .cra_driver_name = "aes-s390",
+ .cra_priority = CRYPT_S390_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s390_aes_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = fallback_init_cip,
+ .cra_exit = fallback_exit_cip,
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = aes_set_key,
+ .cia_encrypt = aes_encrypt,
+ .cia_decrypt = aes_decrypt,
+ }
+ }
+};
+
+static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ unsigned int ret;
+
+ sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
+ if (ret) {
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
+ CRYPTO_TFM_RES_MASK);
+ }
+ return ret;
+}
+
+static int fallback_blk_dec(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ unsigned int ret;
+ struct crypto_blkcipher *tfm;
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+
+ tfm = desc->tfm;
+ desc->tfm = sctx->fallback.blk;
+
+ ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+
+ desc->tfm = tfm;
+ return ret;
+}
+
+static int fallback_blk_enc(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ unsigned int ret;
+ struct crypto_blkcipher *tfm;
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+
+ tfm = desc->tfm;
+ desc->tfm = sctx->fallback.blk;
+
+ ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
+
+ desc->tfm = tfm;
+ return ret;
+}
+
+static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = need_fallback(key_len);
+ if (ret > 0) {
+ sctx->key_len = key_len;
+ return setkey_fallback_blk(tfm, in_key, key_len);
+ }
+
+ switch (key_len) {
+ case 16:
+ sctx->enc = KM_AES_128_ENCRYPT;
+ sctx->dec = KM_AES_128_DECRYPT;
+ break;
+ case 24:
+ sctx->enc = KM_AES_192_ENCRYPT;
+ sctx->dec = KM_AES_192_DECRYPT;
+ break;
+ case 32:
+ sctx->enc = KM_AES_256_ENCRYPT;
+ sctx->dec = KM_AES_256_DECRYPT;
+ break;
+ }
+
+ return aes_set_key(tfm, in_key, key_len);
+}
+
+static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
+ struct blkcipher_walk *walk)
+{
+ int ret = blkcipher_walk_virt(desc, walk);
+ unsigned int nbytes;
+
+ while ((nbytes = walk->nbytes)) {
+ /* only use complete blocks */
+ unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ u8 *out = walk->dst.virt.addr;
+ u8 *in = walk->src.virt.addr;
+
+ ret = crypt_s390_km(func, param, out, in, n);
+ if (ret < 0 || ret != n)
+ return -EIO;
+
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ }
+
+ return ret;
+}
+
+static int ecb_aes_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ if (unlikely(need_fallback(sctx->key_len)))
+ return fallback_blk_enc(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
+}
+
+static int ecb_aes_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ if (unlikely(need_fallback(sctx->key_len)))
+ return fallback_blk_dec(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
+}
+
+static int fallback_init_blk(struct crypto_tfm *tfm)
+{
+ const char *name = tfm->__crt_alg->cra_name;
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+ sctx->fallback.blk = crypto_alloc_blkcipher(name, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(sctx->fallback.blk)) {
+ pr_err("Allocating AES fallback algorithm %s failed\n",
+ name);
+ return PTR_ERR(sctx->fallback.blk);
+ }
+
+ return 0;
+}
+
+static void fallback_exit_blk(struct crypto_tfm *tfm)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_blkcipher(sctx->fallback.blk);
+ sctx->fallback.blk = NULL;
+}
+
+static struct crypto_alg ecb_aes_alg = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-s390",
+ .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s390_aes_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = fallback_init_blk,
+ .cra_exit = fallback_exit_blk,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = ecb_aes_set_key,
+ .encrypt = ecb_aes_encrypt,
+ .decrypt = ecb_aes_decrypt,
+ }
+ }
+};
+
+static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = need_fallback(key_len);
+ if (ret > 0) {
+ sctx->key_len = key_len;
+ return setkey_fallback_blk(tfm, in_key, key_len);
+ }
+
+ switch (key_len) {
+ case 16:
+ sctx->enc = KMC_AES_128_ENCRYPT;
+ sctx->dec = KMC_AES_128_DECRYPT;
+ break;
+ case 24:
+ sctx->enc = KMC_AES_192_ENCRYPT;
+ sctx->dec = KMC_AES_192_DECRYPT;
+ break;
+ case 32:
+ sctx->enc = KMC_AES_256_ENCRYPT;
+ sctx->dec = KMC_AES_256_DECRYPT;
+ break;
+ }
+
+ return aes_set_key(tfm, in_key, key_len);
+}
+
+static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
+ struct blkcipher_walk *walk)
+{
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ int ret = blkcipher_walk_virt(desc, walk);
+ unsigned int nbytes = walk->nbytes;
+ struct {
+ u8 iv[AES_BLOCK_SIZE];
+ u8 key[AES_MAX_KEY_SIZE];
+ } param;
+
+ if (!nbytes)
+ goto out;
+
+ memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
+ memcpy(param.key, sctx->key, sctx->key_len);
+ do {
+ /* only use complete blocks */
+ unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ u8 *out = walk->dst.virt.addr;
+ u8 *in = walk->src.virt.addr;
+
+ ret = crypt_s390_kmc(func, &param, out, in, n);
+ if (ret < 0 || ret != n)
+ return -EIO;
+
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ } while ((nbytes = walk->nbytes));
+ memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
+
+out:
+ return ret;
+}
+
+static int cbc_aes_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ if (unlikely(need_fallback(sctx->key_len)))
+ return fallback_blk_enc(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return cbc_aes_crypt(desc, sctx->enc, &walk);
+}
+
+static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ if (unlikely(need_fallback(sctx->key_len)))
+ return fallback_blk_dec(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return cbc_aes_crypt(desc, sctx->dec, &walk);
+}
+
+static struct crypto_alg cbc_aes_alg = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-s390",
+ .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s390_aes_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = fallback_init_blk,
+ .cra_exit = fallback_exit_blk,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = cbc_aes_set_key,
+ .encrypt = cbc_aes_encrypt,
+ .decrypt = cbc_aes_decrypt,
+ }
+ }
+};
+
+static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+ unsigned int ret;
+
+ xts_ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ xts_ctx->fallback->base.crt_flags |= (tfm->crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ ret = crypto_blkcipher_setkey(xts_ctx->fallback, key, len);
+ if (ret) {
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->crt_flags |= (xts_ctx->fallback->base.crt_flags &
+ CRYPTO_TFM_RES_MASK);
+ }
+ return ret;
+}
+
+static int xts_fallback_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_blkcipher *tfm;
+ unsigned int ret;
+
+ tfm = desc->tfm;
+ desc->tfm = xts_ctx->fallback;
+
+ ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+
+ desc->tfm = tfm;
+ return ret;
+}
+
+static int xts_fallback_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_blkcipher *tfm;
+ unsigned int ret;
+
+ tfm = desc->tfm;
+ desc->tfm = xts_ctx->fallback;
+
+ ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
+
+ desc->tfm = tfm;
+ return ret;
+}
+
+static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+ u32 *flags = &tfm->crt_flags;
+ int ret;
+
+ switch (key_len) {
+ case 32:
+ xts_ctx->enc = KM_XTS_128_ENCRYPT;
+ xts_ctx->dec = KM_XTS_128_DECRYPT;
+ memcpy(xts_ctx->key + 16, in_key, 16);
+ memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
+ break;
+ case 48:
+ xts_ctx->enc = 0;
+ xts_ctx->dec = 0;
+ /* no CPACF support for 384-bit XTS keys, use the software fallback */
+ ret = xts_fallback_setkey(tfm, in_key, key_len);
+ if (ret)
+ return ret;
+ break;
+ case 64:
+ xts_ctx->enc = KM_XTS_256_ENCRYPT;
+ xts_ctx->dec = KM_XTS_256_DECRYPT;
+ memcpy(xts_ctx->key, in_key, 32);
+ memcpy(xts_ctx->pcc_key, in_key + 32, 32);
+ break;
+ default:
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+ xts_ctx->key_len = key_len;
+ return 0;
+}
+
+static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
+ struct s390_xts_ctx *xts_ctx,
+ struct blkcipher_walk *walk)
+{
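+ /* offset into the 32-byte key fields: 16 for XTS-128, 0 for XTS-256 */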
+ unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
+ int ret = blkcipher_walk_virt(desc, walk);
+ unsigned int nbytes = walk->nbytes;
+ unsigned int n;
+ u8 *in, *out;
+ struct pcc_param pcc_param;
+ struct {
+ u8 key[32];
+ u8 init[16];
+ } xts_param;
+
+ if (!nbytes)
+ goto out;
+
+ memset(pcc_param.block, 0, sizeof(pcc_param.block));
+ memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
+ memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
+ memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+ memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
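+ /* PCC pre-computes the initial XTS tweak value into pcc_param.xts */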
+ ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
+ if (ret < 0)
+ return -EIO;
+
+ memcpy(xts_param.key, xts_ctx->key, 32);
+ memcpy(xts_param.init, pcc_param.xts, 16);
+ do {
+ /* only use complete blocks */
+ n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+
+ ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
+ if (ret < 0 || ret != n)
+ return -EIO;
+
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ } while ((nbytes = walk->nbytes));
+out:
+ return ret;
+}
+
+static int xts_aes_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ if (unlikely(xts_ctx->key_len == 48))
+ return xts_fallback_encrypt(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk);
+}
+
+static int xts_aes_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ if (unlikely(xts_ctx->key_len == 48))
+ return xts_fallback_decrypt(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk);
+}
+
+static int xts_fallback_init(struct crypto_tfm *tfm)
+{
+ const char *name = tfm->__crt_alg->cra_name;
+ struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+
+ xts_ctx->fallback = crypto_alloc_blkcipher(name, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(xts_ctx->fallback)) {
+ pr_err("Allocating XTS fallback algorithm %s failed\n",
+ name);
+ return PTR_ERR(xts_ctx->fallback);
+ }
+ return 0;
+}
+
+static void xts_fallback_exit(struct crypto_tfm *tfm)
+{
+ struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_blkcipher(xts_ctx->fallback);
+ xts_ctx->fallback = NULL;
+}
+
+static struct crypto_alg xts_aes_alg = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-s390",
+ .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s390_xts_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = xts_fallback_init,
+ .cra_exit = xts_fallback_exit,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = xts_aes_set_key,
+ .encrypt = xts_aes_encrypt,
+ .decrypt = xts_aes_decrypt,
+ }
+ }
+};
+
+static int xts_aes_alg_reg;
+
+static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+ switch (key_len) {
+ case 16:
+ sctx->enc = KMCTR_AES_128_ENCRYPT;
+ sctx->dec = KMCTR_AES_128_DECRYPT;
+ break;
+ case 24:
+ sctx->enc = KMCTR_AES_192_ENCRYPT;
+ sctx->dec = KMCTR_AES_192_DECRYPT;
+ break;
+ case 32:
+ sctx->enc = KMCTR_AES_256_ENCRYPT;
+ sctx->dec = KMCTR_AES_256_DECRYPT;
+ break;
+ }
+
+ return aes_set_key(tfm, in_key, key_len);
+}
+
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+ unsigned int i, n;
+
+ /* only use complete blocks, max. PAGE_SIZE */
+ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
+ for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+ memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE);
+ crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
+ }
+ return n;
+}
+
+static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
+ struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
+{
+ int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
+ unsigned int n, nbytes;
+ u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
+ u8 *out, *in, *ctrptr = ctrbuf;
+
+ if (!walk->nbytes)
+ return ret;
+
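+ /*
+ * Use the shared page-sized counter block if it is free so that several
+ * counter values can be passed to KMCTR per call; otherwise fall back to
+ * the single on-stack counter block.
+ */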
+ if (spin_trylock(&ctrblk_lock))
+ ctrptr = ctrblk;
+
+ memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
+ while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+ while (nbytes >= AES_BLOCK_SIZE) {
+ if (ctrptr == ctrblk)
+ n = __ctrblk_init(ctrptr, nbytes);
+ else
+ n = AES_BLOCK_SIZE;
+ ret = crypt_s390_kmctr(func, sctx->key, out, in,
+ n, ctrptr);
+ if (ret < 0 || ret != n) {
+ if (ctrptr == ctrblk)
+ spin_unlock(&ctrblk_lock);
+ return -EIO;
+ }
+ if (n > AES_BLOCK_SIZE)
+ memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE);
+ crypto_inc(ctrptr, AES_BLOCK_SIZE);
+ out += n;
+ in += n;
+ nbytes -= n;
+ }
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ }
+ if (ctrptr == ctrblk) {
+ if (nbytes)
+ memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
+ else
+ memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+ spin_unlock(&ctrblk_lock);
+ } else {
+ if (!nbytes)
+ memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+ }
+ /*
+ * final block may be < AES_BLOCK_SIZE, copy only nbytes
+ */
+ if (nbytes) {
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+ ret = crypt_s390_kmctr(func, sctx->key, buf, in,
+ AES_BLOCK_SIZE, ctrbuf);
+ if (ret < 0 || ret != AES_BLOCK_SIZE)
+ return -EIO;
+ memcpy(out, buf, nbytes);
+ crypto_inc(ctrbuf, AES_BLOCK_SIZE);
+ ret = blkcipher_walk_done(desc, walk, 0);
+ memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
+ }
+
+ return ret;
+}
+
+static int ctr_aes_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ctr_aes_crypt(desc, sctx->enc, sctx, &walk);
+}
+
+static int ctr_aes_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ctr_aes_crypt(desc, sctx->dec, sctx, &walk);
+}
+
+static struct crypto_alg ctr_aes_alg = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-s390",
+ .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct s390_aes_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ctr_aes_set_key,
+ .encrypt = ctr_aes_encrypt,
+ .decrypt = ctr_aes_decrypt,
+ }
+ }
+};
+
+static int ctr_aes_alg_reg;
+
+static int __init aes_s390_init(void)
+{
+ int ret;
+
+ if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
+ keylen_flag |= AES_KEYLEN_128;
+ if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
+ keylen_flag |= AES_KEYLEN_192;
+ if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
+ keylen_flag |= AES_KEYLEN_256;
+
+ if (!keylen_flag)
+ return -EOPNOTSUPP;
+
+ /* z9 109 and z9 BC/EC only support 128 bit key length */
+ if (keylen_flag == AES_KEYLEN_128)
+ pr_info("AES hardware acceleration is only available for"
+ " 128-bit keys\n");
+
+ ret = crypto_register_alg(&aes_alg);
+ if (ret)
+ goto aes_err;
+
+ ret = crypto_register_alg(&ecb_aes_alg);
+ if (ret)
+ goto ecb_aes_err;
+
+ ret = crypto_register_alg(&cbc_aes_alg);
+ if (ret)
+ goto cbc_aes_err;
+
+ if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
+ CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+ crypt_s390_func_available(KM_XTS_256_ENCRYPT,
+ CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+ ret = crypto_register_alg(&xts_aes_alg);
+ if (ret)
+ goto xts_aes_err;
+ xts_aes_alg_reg = 1;
+ }
+
+ if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
+ CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+ crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
+ CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+ crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
+ CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+ ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
+ if (!ctrblk) {
+ ret = -ENOMEM;
+ goto ctr_aes_err;
+ }
+ ret = crypto_register_alg(&ctr_aes_alg);
+ if (ret) {
+ free_page((unsigned long) ctrblk);
+ goto ctr_aes_err;
+ }
+ ctr_aes_alg_reg = 1;
+ }
+
+out:
+ return ret;
+
+ctr_aes_err:
+ crypto_unregister_alg(&xts_aes_alg);
+xts_aes_err:
+ crypto_unregister_alg(&cbc_aes_alg);
+cbc_aes_err:
+ crypto_unregister_alg(&ecb_aes_alg);
+ecb_aes_err:
+ crypto_unregister_alg(&aes_alg);
+aes_err:
+ goto out;
+}
+
+static void __exit aes_s390_fini(void)
+{
+ if (ctr_aes_alg_reg) {
+ crypto_unregister_alg(&ctr_aes_alg);
+ free_page((unsigned long) ctrblk);
+ }
+ if (xts_aes_alg_reg)
+ crypto_unregister_alg(&xts_aes_alg);
+ crypto_unregister_alg(&cbc_aes_alg);
+ crypto_unregister_alg(&ecb_aes_alg);
+ crypto_unregister_alg(&aes_alg);
+}
+
+module_init(aes_s390_init);
+module_exit(aes_s390_fini);
+
+MODULE_ALIAS_CRYPTO("aes-all");
+
+MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
+MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
new file mode 100644
index 000000000..d9c4c313f
--- /dev/null
+++ b/arch/s390/crypto/crypt_s390.h
@@ -0,0 +1,493 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for s390 cryptographic instructions.
+ *
+ * Copyright IBM Corp. 2003, 2015
+ * Author(s): Thomas Spatzier
+ * Jan Glauber (jan.glauber@de.ibm.com)
+ * Harald Freudenberger (freude@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ARCH_S390_CRYPT_S390_H
+#define _CRYPTO_ARCH_S390_CRYPT_S390_H
+
+#include <asm/errno.h>
+#include <asm/facility.h>
+
+#define CRYPT_S390_OP_MASK 0xFF00
+#define CRYPT_S390_FUNC_MASK 0x00FF
+
+#define CRYPT_S390_PRIORITY 300
+#define CRYPT_S390_COMPOSITE_PRIORITY 400
+
+#define CRYPT_S390_MSA 0x1
+#define CRYPT_S390_MSA3 0x2
+#define CRYPT_S390_MSA4 0x4
+#define CRYPT_S390_MSA5 0x8
+
+/* s390 cryptographic operations */
+enum crypt_s390_operations {
+ CRYPT_S390_KM = 0x0100,
+ CRYPT_S390_KMC = 0x0200,
+ CRYPT_S390_KIMD = 0x0300,
+ CRYPT_S390_KLMD = 0x0400,
+ CRYPT_S390_KMAC = 0x0500,
+ CRYPT_S390_KMCTR = 0x0600,
+ CRYPT_S390_PPNO = 0x0700
+};
+
+/*
+ * function codes for KM (CIPHER MESSAGE) instruction
+ * 0x80 is the decipher modifier bit
+ */
+enum crypt_s390_km_func {
+ KM_QUERY = CRYPT_S390_KM | 0x0,
+ KM_DEA_ENCRYPT = CRYPT_S390_KM | 0x1,
+ KM_DEA_DECRYPT = CRYPT_S390_KM | 0x1 | 0x80,
+ KM_TDEA_128_ENCRYPT = CRYPT_S390_KM | 0x2,
+ KM_TDEA_128_DECRYPT = CRYPT_S390_KM | 0x2 | 0x80,
+ KM_TDEA_192_ENCRYPT = CRYPT_S390_KM | 0x3,
+ KM_TDEA_192_DECRYPT = CRYPT_S390_KM | 0x3 | 0x80,
+ KM_AES_128_ENCRYPT = CRYPT_S390_KM | 0x12,
+ KM_AES_128_DECRYPT = CRYPT_S390_KM | 0x12 | 0x80,
+ KM_AES_192_ENCRYPT = CRYPT_S390_KM | 0x13,
+ KM_AES_192_DECRYPT = CRYPT_S390_KM | 0x13 | 0x80,
+ KM_AES_256_ENCRYPT = CRYPT_S390_KM | 0x14,
+ KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80,
+ KM_XTS_128_ENCRYPT = CRYPT_S390_KM | 0x32,
+ KM_XTS_128_DECRYPT = CRYPT_S390_KM | 0x32 | 0x80,
+ KM_XTS_256_ENCRYPT = CRYPT_S390_KM | 0x34,
+ KM_XTS_256_DECRYPT = CRYPT_S390_KM | 0x34 | 0x80,
+};
+
+/*
+ * function codes for KMC (CIPHER MESSAGE WITH CHAINING)
+ * instruction
+ */
+enum crypt_s390_kmc_func {
+ KMC_QUERY = CRYPT_S390_KMC | 0x0,
+ KMC_DEA_ENCRYPT = CRYPT_S390_KMC | 0x1,
+ KMC_DEA_DECRYPT = CRYPT_S390_KMC | 0x1 | 0x80,
+ KMC_TDEA_128_ENCRYPT = CRYPT_S390_KMC | 0x2,
+ KMC_TDEA_128_DECRYPT = CRYPT_S390_KMC | 0x2 | 0x80,
+ KMC_TDEA_192_ENCRYPT = CRYPT_S390_KMC | 0x3,
+ KMC_TDEA_192_DECRYPT = CRYPT_S390_KMC | 0x3 | 0x80,
+ KMC_AES_128_ENCRYPT = CRYPT_S390_KMC | 0x12,
+ KMC_AES_128_DECRYPT = CRYPT_S390_KMC | 0x12 | 0x80,
+ KMC_AES_192_ENCRYPT = CRYPT_S390_KMC | 0x13,
+ KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80,
+ KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14,
+ KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80,
+ KMC_PRNG = CRYPT_S390_KMC | 0x43,
+};
+
+/*
+ * function codes for KMCTR (CIPHER MESSAGE WITH COUNTER)
+ * instruction
+ */
+enum crypt_s390_kmctr_func {
+ KMCTR_QUERY = CRYPT_S390_KMCTR | 0x0,
+ KMCTR_DEA_ENCRYPT = CRYPT_S390_KMCTR | 0x1,
+ KMCTR_DEA_DECRYPT = CRYPT_S390_KMCTR | 0x1 | 0x80,
+ KMCTR_TDEA_128_ENCRYPT = CRYPT_S390_KMCTR | 0x2,
+ KMCTR_TDEA_128_DECRYPT = CRYPT_S390_KMCTR | 0x2 | 0x80,
+ KMCTR_TDEA_192_ENCRYPT = CRYPT_S390_KMCTR | 0x3,
+ KMCTR_TDEA_192_DECRYPT = CRYPT_S390_KMCTR | 0x3 | 0x80,
+ KMCTR_AES_128_ENCRYPT = CRYPT_S390_KMCTR | 0x12,
+ KMCTR_AES_128_DECRYPT = CRYPT_S390_KMCTR | 0x12 | 0x80,
+ KMCTR_AES_192_ENCRYPT = CRYPT_S390_KMCTR | 0x13,
+ KMCTR_AES_192_DECRYPT = CRYPT_S390_KMCTR | 0x13 | 0x80,
+ KMCTR_AES_256_ENCRYPT = CRYPT_S390_KMCTR | 0x14,
+ KMCTR_AES_256_DECRYPT = CRYPT_S390_KMCTR | 0x14 | 0x80,
+};
+
+/*
+ * function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
+ * instruction
+ */
+enum crypt_s390_kimd_func {
+ KIMD_QUERY = CRYPT_S390_KIMD | 0,
+ KIMD_SHA_1 = CRYPT_S390_KIMD | 1,
+ KIMD_SHA_256 = CRYPT_S390_KIMD | 2,
+ KIMD_SHA_512 = CRYPT_S390_KIMD | 3,
+ KIMD_GHASH = CRYPT_S390_KIMD | 65,
+};
+
+/*
+ * function codes for KLMD (COMPUTE LAST MESSAGE DIGEST)
+ * instruction
+ */
+enum crypt_s390_klmd_func {
+ KLMD_QUERY = CRYPT_S390_KLMD | 0,
+ KLMD_SHA_1 = CRYPT_S390_KLMD | 1,
+ KLMD_SHA_256 = CRYPT_S390_KLMD | 2,
+ KLMD_SHA_512 = CRYPT_S390_KLMD | 3,
+};
+
+/*
+ * function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
+ * instruction
+ */
+enum crypt_s390_kmac_func {
+ KMAC_QUERY = CRYPT_S390_KMAC | 0,
+ KMAC_DEA = CRYPT_S390_KMAC | 1,
+ KMAC_TDEA_128 = CRYPT_S390_KMAC | 2,
+ KMAC_TDEA_192 = CRYPT_S390_KMAC | 3
+};
+
+/*
+ * function codes for PPNO (PERFORM PSEUDORANDOM NUMBER
+ * OPERATION) instruction
+ */
+enum crypt_s390_ppno_func {
+ PPNO_QUERY = CRYPT_S390_PPNO | 0,
+ PPNO_SHA512_DRNG_GEN = CRYPT_S390_PPNO | 3,
+ PPNO_SHA512_DRNG_SEED = CRYPT_S390_PPNO | 0x83
+};
+
+/**
+ * crypt_s390_km:
+ * @func: the function code passed to KM; see crypt_s390_km_func
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ *
+ * Executes the KM (CIPHER MESSAGE) operation of the CPU.
+ *
+ * Returns -1 for failure, 0 for the query func, number of processed
+ * bytes for encryption/decryption funcs
+ */
+static inline int crypt_s390_km(long func, void *param,
+ u8 *dest, const u8 *src, long src_len)
+{
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
+ register void *__param asm("1") = param;
+ register const u8 *__src asm("2") = src;
+ register long __src_len asm("3") = src_len;
+ register u8 *__dest asm("4") = dest;
+ int ret;
+
+ asm volatile(
+ "0: .insn rre,0xb92e0000,%3,%1\n" /* KM opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
+ " la %0,0\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
+ : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
+ if (ret < 0)
+ return ret;
+ return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
+}
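+
+/*
+ * Usage sketch, as in the in-kernel callers (e.g. aes_s390.c):
+ *
+ * crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in, AES_BLOCK_SIZE);
+ *
+ * The parameter block holds the raw key; the callers pass only complete
+ * cipher blocks, so src_len is a multiple of the block size.
+ */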
+
+/**
+ * crypt_s390_kmc:
+ * @func: the function code passed to KMC; see crypt_s390_kmc_func
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ *
+ * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU.
+ *
+ * Returns -1 for failure, 0 for the query func, number of processed
+ * bytes for encryption/decryption funcs
+ */
+static inline int crypt_s390_kmc(long func, void *param,
+ u8 *dest, const u8 *src, long src_len)
+{
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
+ register void *__param asm("1") = param;
+ register const u8 *__src asm("2") = src;
+ register long __src_len asm("3") = src_len;
+ register u8 *__dest asm("4") = dest;
+ int ret;
+
+ asm volatile(
+ "0: .insn rre,0xb92f0000,%3,%1\n" /* KMC opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
+ " la %0,0\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
+ : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
+ if (ret < 0)
+ return ret;
+ return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
+}
+
+/**
+ * crypt_s390_kimd:
+ * @func: the function code passed to KIMD; see crypt_s390_kimd_func
+ * @param: address of parameter block; see POP for details on each func
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ *
+ * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation
+ * of the CPU.
+ *
+ * Returns -1 for failure, 0 for the query func, number of processed
+ * bytes for digest funcs
+ */
+static inline int crypt_s390_kimd(long func, void *param,
+ const u8 *src, long src_len)
+{
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
+ register void *__param asm("1") = param;
+ register const u8 *__src asm("2") = src;
+ register long __src_len asm("3") = src_len;
+ int ret;
+
+ asm volatile(
+ "0: .insn rre,0xb93e0000,%1,%1\n" /* KIMD opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
+ " la %0,0\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "=d" (ret), "+a" (__src), "+d" (__src_len)
+ : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
+ if (ret < 0)
+ return ret;
+ return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
+}
+
+/**
+ * crypt_s390_klmd:
+ * @func: the function code passed to KLMD; see crypt_s390_klmd_func
+ * @param: address of parameter block; see POP for details on each func
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ *
+ * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU.
+ *
+ * Returns -1 for failure, 0 for the query func, number of processed
+ * bytes for digest funcs
+ */
+static inline int crypt_s390_klmd(long func, void *param,
+ const u8 *src, long src_len)
+{
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
+ register void *__param asm("1") = param;
+ register const u8 *__src asm("2") = src;
+ register long __src_len asm("3") = src_len;
+ int ret;
+
+ asm volatile(
+ "0: .insn rre,0xb93f0000,%1,%1\n" /* KLMD opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
+ " la %0,0\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "=d" (ret), "+a" (__src), "+d" (__src_len)
+ : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
+ if (ret < 0)
+ return ret;
+ return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
+}
+
+/**
+ * crypt_s390_kmac:
+ * @func: the function code passed to KMAC; see crypt_s390_kmac_func
+ * @param: address of parameter block; see POP for details on each func
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ *
+ * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation
+ * of the CPU.
+ *
+ * Returns -1 for failure, 0 for the query func, number of processed
+ * bytes for the MAC funcs
+ */
+static inline int crypt_s390_kmac(long func, void *param,
+ const u8 *src, long src_len)
+{
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
+ register void *__param asm("1") = param;
+ register const u8 *__src asm("2") = src;
+ register long __src_len asm("3") = src_len;
+ int ret;
+
+ asm volatile(
+ "0: .insn rre,0xb91e0000,%1,%1\n" /* KLAC opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
+ " la %0,0\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "=d" (ret), "+a" (__src), "+d" (__src_len)
+ : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
+ if (ret < 0)
+ return ret;
+ return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
+}
+
+/**
+ * crypt_s390_kmctr:
+ * @func: the function code passed to KMCTR; see crypt_s390_kmctr_func
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ * @counter: address of counter value
+ *
+ * Executes the KMCTR (CIPHER MESSAGE WITH COUNTER) operation of the CPU.
+ *
+ * Returns -1 for failure, 0 for the query func, number of processed
+ * bytes for encryption/decryption funcs
+ */
+static inline int crypt_s390_kmctr(long func, void *param, u8 *dest,
+ const u8 *src, long src_len, u8 *counter)
+{
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
+ register void *__param asm("1") = param;
+ register const u8 *__src asm("2") = src;
+ register long __src_len asm("3") = src_len;
+ register u8 *__dest asm("4") = dest;
+ register u8 *__ctr asm("6") = counter;
+ int ret = -1;
+
+ asm volatile(
+ "0: .insn rrf,0xb92d0000,%3,%1,%4,0\n" /* KMCTR opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
+ " la %0,0\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "+d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest),
+ "+a" (__ctr)
+ : "d" (__func), "a" (__param) : "cc", "memory");
+ if (ret < 0)
+ return ret;
+ return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
+}
+
+/**
+ * crypt_s390_ppno:
+ * @func: the function code passed to PPNO; see crypt_s390_ppno_func
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @dest_len: size of destination memory area in bytes
+ * @seed: address of seed data
+ * @seed_len: size of seed data in bytes
+ *
+ * Executes the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
+ * operation of the CPU.
+ *
+ * Returns -1 for failure, 0 for the query func, number of random
+ * bytes stored in dest buffer for generate function
+ */
+static inline int crypt_s390_ppno(long func, void *param,
+ u8 *dest, long dest_len,
+ const u8 *seed, long seed_len)
+{
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
+ register void *__param asm("1") = param; /* param block (240 bytes) */
+ register u8 *__dest asm("2") = dest; /* buf for recv random bytes */
+ register long __dest_len asm("3") = dest_len; /* requested random bytes */
+ register const u8 *__seed asm("4") = seed; /* buf with seed data */
+ register long __seed_len asm("5") = seed_len; /* bytes in seed buf */
+ int ret = -1;
+
+ asm volatile (
+ "0: .insn rre,0xb93c0000,%1,%5\n" /* PPNO opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
+ " la %0,0\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "+d" (ret), "+a"(__dest), "+d"(__dest_len)
+ : "d"(__func), "a"(__param), "a"(__seed), "d"(__seed_len)
+ : "cc", "memory");
+ if (ret < 0)
+ return ret;
+ return (func & CRYPT_S390_FUNC_MASK) ? dest_len - __dest_len : 0;
+}
+
+/**
+ * crypt_s390_func_available:
+ * @func: the function code of the specific function; 0 to query whether
+ * the operation is available at all
+ * @facility_mask: CRYPT_S390_MSA* bits for the facilities the function requires
+ *
+ * Tests if a specific crypto function is implemented on the machine.
+ *
+ * Returns 1 if func available; 0 if func or op in general not available
+ */
+static inline int crypt_s390_func_available(int func,
+ unsigned int facility_mask)
+{
+ unsigned char status[16];
+ int ret;
+
+ if (facility_mask & CRYPT_S390_MSA && !test_facility(17))
+ return 0;
+ if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76))
+ return 0;
+ if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
+ return 0;
+ if (facility_mask & CRYPT_S390_MSA5 && !test_facility(57))
+ return 0;
+
+ switch (func & CRYPT_S390_OP_MASK) {
+ case CRYPT_S390_KM:
+ ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
+ break;
+ case CRYPT_S390_KMC:
+ ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
+ break;
+ case CRYPT_S390_KIMD:
+ ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
+ break;
+ case CRYPT_S390_KLMD:
+ ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
+ break;
+ case CRYPT_S390_KMAC:
+ ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
+ break;
+ case CRYPT_S390_KMCTR:
+ ret = crypt_s390_kmctr(KMCTR_QUERY, &status,
+ NULL, NULL, 0, NULL);
+ break;
+ case CRYPT_S390_PPNO:
+ ret = crypt_s390_ppno(PPNO_QUERY, &status,
+ NULL, 0, NULL, 0);
+ break;
+ default:
+ return 0;
+ }
+ if (ret < 0)
+ return 0;
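+ /* each function code maps to one bit in the 128-bit query status block */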
+ func &= CRYPT_S390_FUNC_MASK;
+ func &= 0x7f; /* mask modifier bit */
+ return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
+}
+
+/**
+ * crypt_s390_pcc:
+ * @func: the function code passed to PCC; see crypt_s390_km_func
+ * @param: address of parameter block; see POP for details on each func
+ *
+ * Executes the PCC (PERFORM CRYPTOGRAPHIC COMPUTATION) operation of the CPU.
+ *
+ * Returns -1 for failure, 0 for success.
+ */
+static inline int crypt_s390_pcc(long func, void *param)
+{
+ register long __func asm("0") = func & 0x7f; /* encrypt or decrypt */
+ register void *__param asm("1") = param;
+ int ret = -1;
+
+ asm volatile(
+ "0: .insn rre,0xb92c0000,0,0\n" /* PCC opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
+ " la %0,0\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "+d" (ret)
+ : "d" (__func), "a" (__param) : "cc", "memory");
+ return ret;
+}
+
+#endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
new file mode 100644
index 000000000..9e05cc453
--- /dev/null
+++ b/arch/s390/crypto/des_s390.c
@@ -0,0 +1,626 @@
+/*
+ * Cryptographic API.
+ *
+ * s390 implementation of the DES Cipher Algorithm.
+ *
+ * Copyright IBM Corp. 2003, 2011
+ * Author(s): Thomas Spatzier
+ * Jan Glauber (jan.glauber@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+
+#include "crypt_s390.h"
+
+#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
+
+static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
+
+struct s390_des_ctx {
+ u8 iv[DES_BLOCK_SIZE];
+ u8 key[DES3_KEY_SIZE];
+};
+
+static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *flags = &tfm->crt_flags;
+ u32 tmp[DES_EXPKEY_WORDS];
+
+ /* check for weak keys */
+ if (!des_ekey(tmp, key) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, key_len);
+ return 0;
+}
+
+static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypt_s390_km(KM_DEA_ENCRYPT, ctx->key, out, in, DES_BLOCK_SIZE);
+}
+
+static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypt_s390_km(KM_DEA_DECRYPT, ctx->key, out, in, DES_BLOCK_SIZE);
+}
+
+static struct crypto_alg des_alg = {
+ .cra_name = "des",
+ .cra_driver_name = "des-s390",
+ .cra_priority = CRYPT_S390_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s390_des_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = DES_KEY_SIZE,
+ .cia_max_keysize = DES_KEY_SIZE,
+ .cia_setkey = des_setkey,
+ .cia_encrypt = des_encrypt,
+ .cia_decrypt = des_decrypt,
+ }
+ }
+};
+
+static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
+ u8 *key, struct blkcipher_walk *walk)
+{
+ int ret = blkcipher_walk_virt(desc, walk);
+ unsigned int nbytes;
+
+ while ((nbytes = walk->nbytes)) {
+ /* only use complete blocks */
+ unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
+ u8 *out = walk->dst.virt.addr;
+ u8 *in = walk->src.virt.addr;
+
+ ret = crypt_s390_km(func, key, out, in, n);
+ if (ret < 0 || ret != n)
+ return -EIO;
+
+ nbytes &= DES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ }
+
+ return ret;
+}
+
+static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
+ struct blkcipher_walk *walk)
+{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int ret = blkcipher_walk_virt(desc, walk);
+ unsigned int nbytes = walk->nbytes;
+ struct {
+ u8 iv[DES_BLOCK_SIZE];
+ u8 key[DES3_KEY_SIZE];
+ } param;
+
+ if (!nbytes)
+ goto out;
+
+ memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
+ memcpy(param.key, ctx->key, DES3_KEY_SIZE);
+ do {
+ /* only use complete blocks */
+ unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
+ u8 *out = walk->dst.virt.addr;
+ u8 *in = walk->src.virt.addr;
+
+ ret = crypt_s390_kmc(func, &param, out, in, n);
+ if (ret < 0 || ret != n)
+ return -EIO;
+
+ nbytes &= DES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ } while ((nbytes = walk->nbytes));
+ memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
+
+out:
+ return ret;
+}
+
+static int ecb_des_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, ctx->key, &walk);
+}
+
+static int ecb_des_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ecb_desall_crypt(desc, KM_DEA_DECRYPT, ctx->key, &walk);
+}
+
+static struct crypto_alg ecb_des_alg = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb-des-s390",
+ .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s390_des_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_setkey,
+ .encrypt = ecb_des_encrypt,
+ .decrypt = ecb_des_decrypt,
+ }
+ }
+};
+
+static int cbc_des_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
+}
+
+static int cbc_des_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
+}
+
+static struct crypto_alg cbc_des_alg = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-s390",
+ .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s390_des_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des_setkey,
+ .encrypt = cbc_des_encrypt,
+ .decrypt = cbc_des_decrypt,
+ }
+ }
+};
+
+/*
+ * RFC2451:
+ *
+ * For DES-EDE3, there is no known need to reject weak or
+ * complementation keys. Any weakness is obviated by the use of
+ * multiple keys.
+ *
+ * However, if the first two or last two independent 64-bit keys are
+ * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
+ * same as DES. Implementers MUST reject keys that exhibit this
+ * property.
+ *
+ */
+static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *flags = &tfm->crt_flags;
+
+ if (!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
+ crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
+ DES_KEY_SIZE)) &&
+ (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+ memcpy(ctx->key, key, key_len);
+ return 0;
+}
+
+static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypt_s390_km(KM_TDEA_192_ENCRYPT, ctx->key, dst, src, DES_BLOCK_SIZE);
+}
+
+static void des3_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypt_s390_km(KM_TDEA_192_DECRYPT, ctx->key, dst, src, DES_BLOCK_SIZE);
+}
+
+static struct crypto_alg des3_alg = {
+ .cra_name = "des3_ede",
+ .cra_driver_name = "des3_ede-s390",
+ .cra_priority = CRYPT_S390_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s390_des_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = DES3_KEY_SIZE,
+ .cia_max_keysize = DES3_KEY_SIZE,
+ .cia_setkey = des3_setkey,
+ .cia_encrypt = des3_encrypt,
+ .cia_decrypt = des3_decrypt,
+ }
+ }
+};
+
+static int ecb_des3_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, ctx->key, &walk);
+}
+
+static int ecb_des3_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, ctx->key, &walk);
+}
+
+static struct crypto_alg ecb_des3_alg = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3_ede-s390",
+ .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s390_des_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = DES3_KEY_SIZE,
+ .max_keysize = DES3_KEY_SIZE,
+ .setkey = des3_setkey,
+ .encrypt = ecb_des3_encrypt,
+ .decrypt = ecb_des3_decrypt,
+ }
+ }
+};
+
+static int cbc_des3_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
+}
+
+static int cbc_des3_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
+}
+
+static struct crypto_alg cbc_des3_alg = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3_ede-s390",
+ .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s390_des_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = DES3_KEY_SIZE,
+ .max_keysize = DES3_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des3_setkey,
+ .encrypt = cbc_des3_encrypt,
+ .decrypt = cbc_des3_decrypt,
+ }
+ }
+};
+
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+ unsigned int i, n;
+
+ /* align to block size, max. PAGE_SIZE */
+ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
+ for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+ memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
+ crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
+ }
+ return n;
+}
+
+static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
+ struct s390_des_ctx *ctx,
+ struct blkcipher_walk *walk)
+{
+ int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
+ unsigned int n, nbytes;
+ u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
+ u8 *out, *in, *ctrptr = ctrbuf;
+
+ if (!walk->nbytes)
+ return ret;
+
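+ /*
+ * Prefer the shared page-sized counter block when it is available;
+ * otherwise process one on-stack counter block at a time.
+ */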
+ if (spin_trylock(&ctrblk_lock))
+ ctrptr = ctrblk;
+
+ memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
+ while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+ while (nbytes >= DES_BLOCK_SIZE) {
+ if (ctrptr == ctrblk)
+ n = __ctrblk_init(ctrptr, nbytes);
+ else
+ n = DES_BLOCK_SIZE;
+ ret = crypt_s390_kmctr(func, ctx->key, out, in,
+ n, ctrptr);
+ if (ret < 0 || ret != n) {
+ if (ctrptr == ctrblk)
+ spin_unlock(&ctrblk_lock);
+ return -EIO;
+ }
+ if (n > DES_BLOCK_SIZE)
+ memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
+ DES_BLOCK_SIZE);
+ crypto_inc(ctrptr, DES_BLOCK_SIZE);
+ out += n;
+ in += n;
+ nbytes -= n;
+ }
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ }
+ if (ctrptr == ctrblk) {
+ if (nbytes)
+ memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
+ else
+ memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+ spin_unlock(&ctrblk_lock);
+ } else {
+ if (!nbytes)
+ memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+ }
+ /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
+ if (nbytes) {
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+ ret = crypt_s390_kmctr(func, ctx->key, buf, in,
+ DES_BLOCK_SIZE, ctrbuf);
+ if (ret < 0 || ret != DES_BLOCK_SIZE)
+ return -EIO;
+ memcpy(out, buf, nbytes);
+ crypto_inc(ctrbuf, DES_BLOCK_SIZE);
+ ret = blkcipher_walk_done(desc, walk, 0);
+ memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
+ }
+ return ret;
+}
+
+static int ctr_des_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ctr_desall_crypt(desc, KMCTR_DEA_ENCRYPT, ctx, &walk);
+}
+
+static int ctr_des_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ctr_desall_crypt(desc, KMCTR_DEA_DECRYPT, ctx, &walk);
+}
+
+static struct crypto_alg ctr_des_alg = {
+ .cra_name = "ctr(des)",
+ .cra_driver_name = "ctr-des-s390",
+ .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct s390_des_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des_setkey,
+ .encrypt = ctr_des_encrypt,
+ .decrypt = ctr_des_decrypt,
+ }
+ }
+};
+
+static int ctr_des3_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ctr_desall_crypt(desc, KMCTR_TDEA_192_ENCRYPT, ctx, &walk);
+}
+
+static int ctr_des3_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ctr_desall_crypt(desc, KMCTR_TDEA_192_DECRYPT, ctx, &walk);
+}
+
+static struct crypto_alg ctr_des3_alg = {
+ .cra_name = "ctr(des3_ede)",
+ .cra_driver_name = "ctr-des3_ede-s390",
+ .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct s390_des_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = DES3_KEY_SIZE,
+ .max_keysize = DES3_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des3_setkey,
+ .encrypt = ctr_des3_encrypt,
+ .decrypt = ctr_des3_decrypt,
+ }
+ }
+};
+
+static int __init des_s390_init(void)
+{
+ int ret;
+
+ if (!crypt_s390_func_available(KM_DEA_ENCRYPT, CRYPT_S390_MSA) ||
+ !crypt_s390_func_available(KM_TDEA_192_ENCRYPT, CRYPT_S390_MSA))
+ return -EOPNOTSUPP;
+
+ ret = crypto_register_alg(&des_alg);
+ if (ret)
+ goto des_err;
+ ret = crypto_register_alg(&ecb_des_alg);
+ if (ret)
+ goto ecb_des_err;
+ ret = crypto_register_alg(&cbc_des_alg);
+ if (ret)
+ goto cbc_des_err;
+ ret = crypto_register_alg(&des3_alg);
+ if (ret)
+ goto des3_err;
+ ret = crypto_register_alg(&ecb_des3_alg);
+ if (ret)
+ goto ecb_des3_err;
+ ret = crypto_register_alg(&cbc_des3_alg);
+ if (ret)
+ goto cbc_des3_err;
+
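+	/* the CTR mode algorithms need the MSA4 KMCTR instruction */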
+ if (crypt_s390_func_available(KMCTR_DEA_ENCRYPT,
+ CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+ crypt_s390_func_available(KMCTR_TDEA_192_ENCRYPT,
+ CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+ ret = crypto_register_alg(&ctr_des_alg);
+ if (ret)
+ goto ctr_des_err;
+ ret = crypto_register_alg(&ctr_des3_alg);
+ if (ret)
+ goto ctr_des3_err;
+ ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
+ if (!ctrblk) {
+ ret = -ENOMEM;
+ goto ctr_mem_err;
+ }
+ }
+out:
+ return ret;
+
+ctr_mem_err:
+ crypto_unregister_alg(&ctr_des3_alg);
+ctr_des3_err:
+ crypto_unregister_alg(&ctr_des_alg);
+ctr_des_err:
+ crypto_unregister_alg(&cbc_des3_alg);
+cbc_des3_err:
+ crypto_unregister_alg(&ecb_des3_alg);
+ecb_des3_err:
+ crypto_unregister_alg(&des3_alg);
+des3_err:
+ crypto_unregister_alg(&cbc_des_alg);
+cbc_des_err:
+ crypto_unregister_alg(&ecb_des_alg);
+ecb_des_err:
+ crypto_unregister_alg(&des_alg);
+des_err:
+ goto out;
+}
+
+static void __exit des_s390_exit(void)
+{
+ if (ctrblk) {
+ crypto_unregister_alg(&ctr_des_alg);
+ crypto_unregister_alg(&ctr_des3_alg);
+ free_page((unsigned long) ctrblk);
+ }
+ crypto_unregister_alg(&cbc_des3_alg);
+ crypto_unregister_alg(&ecb_des3_alg);
+ crypto_unregister_alg(&des3_alg);
+ crypto_unregister_alg(&cbc_des_alg);
+ crypto_unregister_alg(&ecb_des_alg);
+ crypto_unregister_alg(&des_alg);
+}
+
+module_init(des_s390_init);
+module_exit(des_s390_exit);
+
+MODULE_ALIAS_CRYPTO("des");
+MODULE_ALIAS_CRYPTO("des3_ede");
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
new file mode 100644
index 000000000..b258110da
--- /dev/null
+++ b/arch/s390/crypto/ghash_s390.c
@@ -0,0 +1,167 @@
+/*
+ * Cryptographic API.
+ *
+ * s390 implementation of the GHASH algorithm for GCM (Galois/Counter Mode).
+ *
+ * Copyright IBM Corp. 2011
+ * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/module.h>
+
+#include "crypt_s390.h"
+
+#define GHASH_BLOCK_SIZE 16
+#define GHASH_DIGEST_SIZE 16
+
+struct ghash_ctx {
+ u8 key[GHASH_BLOCK_SIZE];
+};
+
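+/*
+ * The icv and key fields double as the KIMD-GHASH parameter block
+ * (chaining value followed by the hash subkey), so the desc ctx can be
+ * passed directly to crypt_s390_kimd() below.
+ */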
+struct ghash_desc_ctx {
+ u8 icv[GHASH_BLOCK_SIZE];
+ u8 key[GHASH_BLOCK_SIZE];
+ u8 buffer[GHASH_BLOCK_SIZE];
+ u32 bytes;
+};
+
+static int ghash_init(struct shash_desc *desc)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+
+ memset(dctx, 0, sizeof(*dctx));
+ memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
+
+ return 0;
+}
+
+static int ghash_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+
+ if (keylen != GHASH_BLOCK_SIZE) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
+
+ return 0;
+}
+
+static int ghash_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ unsigned int n;
+ u8 *buf = dctx->buffer;
+ int ret;
+
+ if (dctx->bytes) {
+ u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
+
+ n = min(srclen, dctx->bytes);
+ dctx->bytes -= n;
+ srclen -= n;
+
+ memcpy(pos, src, n);
+ src += n;
+
+ if (!dctx->bytes) {
+ ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
+ GHASH_BLOCK_SIZE);
+ if (ret != GHASH_BLOCK_SIZE)
+ return -EIO;
+ }
+ }
+
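+	/* hash all complete blocks in one go, buffer the remainder below */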
+ n = srclen & ~(GHASH_BLOCK_SIZE - 1);
+ if (n) {
+ ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
+ if (ret != n)
+ return -EIO;
+ src += n;
+ srclen -= n;
+ }
+
+ if (srclen) {
+ dctx->bytes = GHASH_BLOCK_SIZE - srclen;
+ memcpy(buf, src, srclen);
+ }
+
+ return 0;
+}
+
+static int ghash_flush(struct ghash_desc_ctx *dctx)
+{
+ u8 *buf = dctx->buffer;
+ int ret;
+
+ if (dctx->bytes) {
+ u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
+
+ memset(pos, 0, dctx->bytes);
+
+ ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
+ if (ret != GHASH_BLOCK_SIZE)
+ return -EIO;
+
+ dctx->bytes = 0;
+ }
+
+ return 0;
+}
+
+static int ghash_final(struct shash_desc *desc, u8 *dst)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ int ret;
+
+ ret = ghash_flush(dctx);
+ if (!ret)
+ memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
+ return ret;
+}
+
+static struct shash_alg ghash_alg = {
+ .digestsize = GHASH_DIGEST_SIZE,
+ .init = ghash_init,
+ .update = ghash_update,
+ .final = ghash_final,
+ .setkey = ghash_setkey,
+ .descsize = sizeof(struct ghash_desc_ctx),
+ .base = {
+ .cra_name = "ghash",
+ .cra_driver_name = "ghash-s390",
+ .cra_priority = CRYPT_S390_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = GHASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ghash_ctx),
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static int __init ghash_mod_init(void)
+{
+ if (!crypt_s390_func_available(KIMD_GHASH,
+ CRYPT_S390_MSA | CRYPT_S390_MSA4))
+ return -EOPNOTSUPP;
+
+ return crypto_register_shash(&ghash_alg);
+}
+
+static void __exit ghash_mod_exit(void)
+{
+ crypto_unregister_shash(&ghash_alg);
+}
+
+module_init(ghash_mod_init);
+module_exit(ghash_mod_exit);
+
+MODULE_ALIAS_CRYPTO("ghash");
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation");
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
new file mode 100644
index 000000000..9d5192c94
--- /dev/null
+++ b/arch/s390/crypto/prng.c
@@ -0,0 +1,919 @@
+/*
+ * Copyright IBM Corp. 2006, 2015
+ * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
+ * Harald Freudenberger <freude@de.ibm.com>
+ * Driver for the s390 pseudo random number generator
+ */
+
+#define KMSG_COMPONENT "prng"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/fs.h>
+#include <linux/fips.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <asm/debug.h>
+#include <asm/uaccess.h>
+#include <asm/timex.h>
+
+#include "crypt_s390.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 PRNG interface");
+
+
+#define PRNG_MODE_AUTO 0
+#define PRNG_MODE_TDES 1
+#define PRNG_MODE_SHA512 2
+
+static unsigned int prng_mode = PRNG_MODE_AUTO;
+module_param_named(mode, prng_mode, int, 0);
+MODULE_PARM_DESC(prng_mode, "PRNG mode: 0 - auto, 1 - TDES, 2 - SHA512");
+
+
+#define PRNG_CHUNKSIZE_TDES_MIN 8
+#define PRNG_CHUNKSIZE_TDES_MAX (64*1024)
+#define PRNG_CHUNKSIZE_SHA512_MIN 64
+#define PRNG_CHUNKSIZE_SHA512_MAX (64*1024)
+
+static unsigned int prng_chunk_size = 256;
+module_param_named(chunksize, prng_chunk_size, int, 0);
+MODULE_PARM_DESC(prng_chunk_size, "PRNG read chunk size in bytes");
+
+
+#define PRNG_RESEED_LIMIT_TDES 4096
+#define PRNG_RESEED_LIMIT_TDES_LOWER 4096
+#define PRNG_RESEED_LIMIT_SHA512 100000
+#define PRNG_RESEED_LIMIT_SHA512_LOWER 10000
+
+static unsigned int prng_reseed_limit;
+module_param_named(reseed_limit, prng_reseed_limit, int, 0);
+MODULE_PARM_DESC(prng_reseed_limit, "PRNG reseed limit");
+
+
+/*
+ * Any one who considers arithmetical methods of producing random digits is,
+ * of course, in a state of sin. -- John von Neumann
+ */
+
+static int prng_errorflag;
+
+#define PRNG_GEN_ENTROPY_FAILED 1
+#define PRNG_SELFTEST_FAILED 2
+#define PRNG_INSTANTIATE_FAILED 3
+#define PRNG_SEED_FAILED 4
+#define PRNG_RESEED_FAILED 5
+#define PRNG_GEN_FAILED 6
+
+struct prng_ws_s {
+ u8 parm_block[32];
+ u32 reseed_counter;
+ u64 byte_counter;
+};
+
+struct ppno_ws_s {
+ u32 res;
+ u32 reseed_counter;
+ u64 stream_bytes;
+ u8 V[112];
+ u8 C[112];
+};
+
+struct prng_data_s {
+ struct mutex mutex;
+ union {
+ struct prng_ws_s prngws;
+ struct ppno_ws_s ppnows;
+ };
+ u8 *buf;
+ u32 rest;
+ u8 *prev;
+};
+
+static struct prng_data_s *prng_data;
+
+/* initial parameter block for tdes mode, copied from libica */
+static const u8 initial_parm_block[32] __initconst = {
+ 0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
+ 0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
+ 0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
+ 0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0 };
+
+
+/*** helper functions ***/
+
+static int generate_entropy(u8 *ebuf, size_t nbytes)
+{
+ int n, ret = 0;
+ u8 *pg, *h, hash[32];
+
+ pg = (u8 *) __get_free_page(GFP_KERNEL);
+ if (!pg) {
+ prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
+ return -ENOMEM;
+ }
+
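+	/* each pass condenses one page of urandom/stckf data into at most 32 bytes */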
+ while (nbytes) {
+ /* fill page with urandom bytes */
+ get_random_bytes(pg, PAGE_SIZE);
+		/* XOR the page with stckf (store clock fast) values */
+ for (n = 0; n < PAGE_SIZE / sizeof(u64); n++) {
+ u64 *p = ((u64 *)pg) + n;
+ *p ^= get_tod_clock_fast();
+ }
+ n = (nbytes < sizeof(hash)) ? nbytes : sizeof(hash);
+ if (n < sizeof(hash))
+ h = hash;
+ else
+ h = ebuf;
+ /* generate sha256 from this page */
+ if (crypt_s390_kimd(KIMD_SHA_256, h,
+ pg, PAGE_SIZE) != PAGE_SIZE) {
+ prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
+ ret = -EIO;
+ goto out;
+ }
+ if (n < sizeof(hash))
+ memcpy(ebuf, hash, n);
+ ret += n;
+ ebuf += n;
+ nbytes -= n;
+ }
+
+out:
+ free_page((unsigned long)pg);
+ return ret;
+}
+
+
+/*** tdes functions ***/
+
+static void prng_tdes_add_entropy(void)
+{
+ __u64 entropy[4];
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < 16; i++) {
+ ret = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
+ (char *)entropy, (char *)entropy,
+ sizeof(entropy));
+ BUG_ON(ret < 0 || ret != sizeof(entropy));
+ memcpy(prng_data->prngws.parm_block, entropy, sizeof(entropy));
+ }
+}
+
+
+static void prng_tdes_seed(int nbytes)
+{
+ char buf[16];
+ int i = 0;
+
+ BUG_ON(nbytes > sizeof(buf));
+
+ get_random_bytes(buf, nbytes);
+
+ /* Add the entropy */
+ while (nbytes >= 8) {
+ *((__u64 *)prng_data->prngws.parm_block) ^= *((__u64 *)(buf+i));
+ prng_tdes_add_entropy();
+ i += 8;
+ nbytes -= 8;
+ }
+ prng_tdes_add_entropy();
+ prng_data->prngws.reseed_counter = 0;
+}
+
+
+static int __init prng_tdes_instantiate(void)
+{
+ int datalen;
+
+ pr_debug("prng runs in TDES mode with "
+ "chunksize=%d and reseed_limit=%u\n",
+ prng_chunk_size, prng_reseed_limit);
+
+ /* memory allocation, prng_data struct init, mutex init */
+ datalen = sizeof(struct prng_data_s) + prng_chunk_size;
+ prng_data = kzalloc(datalen, GFP_KERNEL);
+ if (!prng_data) {
+ prng_errorflag = PRNG_INSTANTIATE_FAILED;
+ return -ENOMEM;
+ }
+ mutex_init(&prng_data->mutex);
+ prng_data->buf = ((u8 *)prng_data) + sizeof(struct prng_data_s);
+ memcpy(prng_data->prngws.parm_block, initial_parm_block, 32);
+
+ /* initialize the PRNG, add 128 bits of entropy */
+ prng_tdes_seed(16);
+
+ return 0;
+}
+
+
+static void prng_tdes_deinstantiate(void)
+{
+ pr_debug("The prng module stopped "
+ "after running in triple DES mode\n");
+ kzfree(prng_data);
+}
+
+
+/*** sha512 functions ***/
+
+static int __init prng_sha512_selftest(void)
+{
+ /* NIST DRBG testvector for Hash Drbg, Sha-512, Count #0 */
+ static const u8 seed[] __initconst = {
+ 0x6b, 0x50, 0xa7, 0xd8, 0xf8, 0xa5, 0x5d, 0x7a,
+ 0x3d, 0xf8, 0xbb, 0x40, 0xbc, 0xc3, 0xb7, 0x22,
+ 0xd8, 0x70, 0x8d, 0xe6, 0x7f, 0xda, 0x01, 0x0b,
+ 0x03, 0xc4, 0xc8, 0x4d, 0x72, 0x09, 0x6f, 0x8c,
+ 0x3e, 0xc6, 0x49, 0xcc, 0x62, 0x56, 0xd9, 0xfa,
+ 0x31, 0xdb, 0x7a, 0x29, 0x04, 0xaa, 0xf0, 0x25 };
+ static const u8 V0[] __initconst = {
+ 0x00, 0xad, 0xe3, 0x6f, 0x9a, 0x01, 0xc7, 0x76,
+ 0x61, 0x34, 0x35, 0xf5, 0x4e, 0x24, 0x74, 0x22,
+ 0x21, 0x9a, 0x29, 0x89, 0xc7, 0x93, 0x2e, 0x60,
+ 0x1e, 0xe8, 0x14, 0x24, 0x8d, 0xd5, 0x03, 0xf1,
+ 0x65, 0x5d, 0x08, 0x22, 0x72, 0xd5, 0xad, 0x95,
+ 0xe1, 0x23, 0x1e, 0x8a, 0xa7, 0x13, 0xd9, 0x2b,
+ 0x5e, 0xbc, 0xbb, 0x80, 0xab, 0x8d, 0xe5, 0x79,
+ 0xab, 0x5b, 0x47, 0x4e, 0xdd, 0xee, 0x6b, 0x03,
+ 0x8f, 0x0f, 0x5c, 0x5e, 0xa9, 0x1a, 0x83, 0xdd,
+ 0xd3, 0x88, 0xb2, 0x75, 0x4b, 0xce, 0x83, 0x36,
+ 0x57, 0x4b, 0xf1, 0x5c, 0xca, 0x7e, 0x09, 0xc0,
+ 0xd3, 0x89, 0xc6, 0xe0, 0xda, 0xc4, 0x81, 0x7e,
+ 0x5b, 0xf9, 0xe1, 0x01, 0xc1, 0x92, 0x05, 0xea,
+ 0xf5, 0x2f, 0xc6, 0xc6, 0xc7, 0x8f, 0xbc, 0xf4 };
+ static const u8 C0[] __initconst = {
+ 0x00, 0xf4, 0xa3, 0xe5, 0xa0, 0x72, 0x63, 0x95,
+ 0xc6, 0x4f, 0x48, 0xd0, 0x8b, 0x5b, 0x5f, 0x8e,
+ 0x6b, 0x96, 0x1f, 0x16, 0xed, 0xbc, 0x66, 0x94,
+ 0x45, 0x31, 0xd7, 0x47, 0x73, 0x22, 0xa5, 0x86,
+ 0xce, 0xc0, 0x4c, 0xac, 0x63, 0xb8, 0x39, 0x50,
+ 0xbf, 0xe6, 0x59, 0x6c, 0x38, 0x58, 0x99, 0x1f,
+ 0x27, 0xa7, 0x9d, 0x71, 0x2a, 0xb3, 0x7b, 0xf9,
+ 0xfb, 0x17, 0x86, 0xaa, 0x99, 0x81, 0xaa, 0x43,
+ 0xe4, 0x37, 0xd3, 0x1e, 0x6e, 0xe5, 0xe6, 0xee,
+ 0xc2, 0xed, 0x95, 0x4f, 0x53, 0x0e, 0x46, 0x8a,
+ 0xcc, 0x45, 0xa5, 0xdb, 0x69, 0x0d, 0x81, 0xc9,
+ 0x32, 0x92, 0xbc, 0x8f, 0x33, 0xe6, 0xf6, 0x09,
+ 0x7c, 0x8e, 0x05, 0x19, 0x0d, 0xf1, 0xb6, 0xcc,
+ 0xf3, 0x02, 0x21, 0x90, 0x25, 0xec, 0xed, 0x0e };
+ static const u8 random[] __initconst = {
+ 0x95, 0xb7, 0xf1, 0x7e, 0x98, 0x02, 0xd3, 0x57,
+ 0x73, 0x92, 0xc6, 0xa9, 0xc0, 0x80, 0x83, 0xb6,
+ 0x7d, 0xd1, 0x29, 0x22, 0x65, 0xb5, 0xf4, 0x2d,
+ 0x23, 0x7f, 0x1c, 0x55, 0xbb, 0x9b, 0x10, 0xbf,
+ 0xcf, 0xd8, 0x2c, 0x77, 0xa3, 0x78, 0xb8, 0x26,
+ 0x6a, 0x00, 0x99, 0x14, 0x3b, 0x3c, 0x2d, 0x64,
+ 0x61, 0x1e, 0xee, 0xb6, 0x9a, 0xcd, 0xc0, 0x55,
+ 0x95, 0x7c, 0x13, 0x9e, 0x8b, 0x19, 0x0c, 0x7a,
+ 0x06, 0x95, 0x5f, 0x2c, 0x79, 0x7c, 0x27, 0x78,
+ 0xde, 0x94, 0x03, 0x96, 0xa5, 0x01, 0xf4, 0x0e,
+ 0x91, 0x39, 0x6a, 0xcf, 0x8d, 0x7e, 0x45, 0xeb,
+ 0xdb, 0xb5, 0x3b, 0xbf, 0x8c, 0x97, 0x52, 0x30,
+ 0xd2, 0xf0, 0xff, 0x91, 0x06, 0xc7, 0x61, 0x19,
+ 0xae, 0x49, 0x8e, 0x7f, 0xbc, 0x03, 0xd9, 0x0f,
+ 0x8e, 0x4c, 0x51, 0x62, 0x7a, 0xed, 0x5c, 0x8d,
+ 0x42, 0x63, 0xd5, 0xd2, 0xb9, 0x78, 0x87, 0x3a,
+ 0x0d, 0xe5, 0x96, 0xee, 0x6d, 0xc7, 0xf7, 0xc2,
+ 0x9e, 0x37, 0xee, 0xe8, 0xb3, 0x4c, 0x90, 0xdd,
+ 0x1c, 0xf6, 0xa9, 0xdd, 0xb2, 0x2b, 0x4c, 0xbd,
+ 0x08, 0x6b, 0x14, 0xb3, 0x5d, 0xe9, 0x3d, 0xa2,
+ 0xd5, 0xcb, 0x18, 0x06, 0x69, 0x8c, 0xbd, 0x7b,
+ 0xbb, 0x67, 0xbf, 0xe3, 0xd3, 0x1f, 0xd2, 0xd1,
+ 0xdb, 0xd2, 0xa1, 0xe0, 0x58, 0xa3, 0xeb, 0x99,
+ 0xd7, 0xe5, 0x1f, 0x1a, 0x93, 0x8e, 0xed, 0x5e,
+ 0x1c, 0x1d, 0xe2, 0x3a, 0x6b, 0x43, 0x45, 0xd3,
+ 0x19, 0x14, 0x09, 0xf9, 0x2f, 0x39, 0xb3, 0x67,
+ 0x0d, 0x8d, 0xbf, 0xb6, 0x35, 0xd8, 0xe6, 0xa3,
+ 0x69, 0x32, 0xd8, 0x10, 0x33, 0xd1, 0x44, 0x8d,
+ 0x63, 0xb4, 0x03, 0xdd, 0xf8, 0x8e, 0x12, 0x1b,
+ 0x6e, 0x81, 0x9a, 0xc3, 0x81, 0x22, 0x6c, 0x13,
+ 0x21, 0xe4, 0xb0, 0x86, 0x44, 0xf6, 0x72, 0x7c,
+ 0x36, 0x8c, 0x5a, 0x9f, 0x7a, 0x4b, 0x3e, 0xe2 };
+
+ int ret = 0;
+ u8 buf[sizeof(random)];
+ struct ppno_ws_s ws;
+
+ memset(&ws, 0, sizeof(ws));
+
+ /* initial seed */
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
+ &ws, NULL, 0,
+ seed, sizeof(seed));
+ if (ret < 0) {
+ pr_err("The prng self test seed operation for the "
+ "SHA-512 mode failed with rc=%d\n", ret);
+ prng_errorflag = PRNG_SELFTEST_FAILED;
+ return -EIO;
+ }
+
+ /* check working states V and C */
+ if (memcmp(ws.V, V0, sizeof(V0)) != 0
+ || memcmp(ws.C, C0, sizeof(C0)) != 0) {
+ pr_err("The prng self test state test "
+ "for the SHA-512 mode failed\n");
+ prng_errorflag = PRNG_SELFTEST_FAILED;
+ return -EIO;
+ }
+
+ /* generate random bytes */
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
+ &ws, buf, sizeof(buf),
+ NULL, 0);
+ if (ret < 0) {
+ pr_err("The prng self test generate operation for "
+ "the SHA-512 mode failed with rc=%d\n", ret);
+ prng_errorflag = PRNG_SELFTEST_FAILED;
+ return -EIO;
+ }
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
+ &ws, buf, sizeof(buf),
+ NULL, 0);
+ if (ret < 0) {
+ pr_err("The prng self test generate operation for "
+ "the SHA-512 mode failed with rc=%d\n", ret);
+ prng_errorflag = PRNG_SELFTEST_FAILED;
+ return -EIO;
+ }
+
+ /* check against expected data */
+ if (memcmp(buf, random, sizeof(random)) != 0) {
+ pr_err("The prng self test data test "
+ "for the SHA-512 mode failed\n");
+ prng_errorflag = PRNG_SELFTEST_FAILED;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+static int __init prng_sha512_instantiate(void)
+{
+ int ret, datalen;
+ u8 seed[64];
+
+ pr_debug("prng runs in SHA-512 mode "
+ "with chunksize=%d and reseed_limit=%u\n",
+ prng_chunk_size, prng_reseed_limit);
+
+ /* memory allocation, prng_data struct init, mutex init */
+ datalen = sizeof(struct prng_data_s) + prng_chunk_size;
+ if (fips_enabled)
+ datalen += prng_chunk_size;
+ prng_data = kzalloc(datalen, GFP_KERNEL);
+ if (!prng_data) {
+ prng_errorflag = PRNG_INSTANTIATE_FAILED;
+ return -ENOMEM;
+ }
+ mutex_init(&prng_data->mutex);
+ prng_data->buf = ((u8 *)prng_data) + sizeof(struct prng_data_s);
+
+ /* selftest */
+ ret = prng_sha512_selftest();
+ if (ret)
+ goto outfree;
+
+ /* generate initial seed bytestring, first 48 bytes of entropy */
+ ret = generate_entropy(seed, 48);
+ if (ret != 48)
+ goto outfree;
+ /* followed by 16 bytes of unique nonce */
+ get_tod_clock_ext(seed + 48);
+
+ /* initial seed of the ppno drng */
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
+ &prng_data->ppnows, NULL, 0,
+ seed, sizeof(seed));
+ if (ret < 0) {
+ prng_errorflag = PRNG_SEED_FAILED;
+ ret = -EIO;
+ goto outfree;
+ }
+
+ /* if fips mode is enabled, generate a first block of random
+ bytes for the FIPS 140-2 Conditional Self Test */
+ if (fips_enabled) {
+ prng_data->prev = prng_data->buf + prng_chunk_size;
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
+ &prng_data->ppnows,
+ prng_data->prev,
+ prng_chunk_size,
+ NULL, 0);
+ if (ret < 0 || ret != prng_chunk_size) {
+ prng_errorflag = PRNG_GEN_FAILED;
+ ret = -EIO;
+ goto outfree;
+ }
+ }
+
+ return 0;
+
+outfree:
+ kfree(prng_data);
+ return ret;
+}
+
+
+static void prng_sha512_deinstantiate(void)
+{
+ pr_debug("The prng module stopped after running in SHA-512 mode\n");
+ kzfree(prng_data);
+}
+
+
+static int prng_sha512_reseed(void)
+{
+ int ret;
+ u8 seed[32];
+
+ /* generate 32 bytes of fresh entropy */
+ ret = generate_entropy(seed, sizeof(seed));
+ if (ret != sizeof(seed))
+ return ret;
+
+ /* do a reseed of the ppno drng with this bytestring */
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
+ &prng_data->ppnows, NULL, 0,
+ seed, sizeof(seed));
+ if (ret) {
+ prng_errorflag = PRNG_RESEED_FAILED;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+static int prng_sha512_generate(u8 *buf, size_t nbytes)
+{
+ int ret;
+
+ /* reseed needed ? */
+ if (prng_data->ppnows.reseed_counter > prng_reseed_limit) {
+ ret = prng_sha512_reseed();
+ if (ret)
+ return ret;
+ }
+
+ /* PPNO generate */
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
+ &prng_data->ppnows, buf, nbytes,
+ NULL, 0);
+ if (ret < 0 || ret != nbytes) {
+ prng_errorflag = PRNG_GEN_FAILED;
+ return -EIO;
+ }
+
+ /* FIPS 140-2 Conditional Self Test */
+ if (fips_enabled) {
+ if (!memcmp(prng_data->prev, buf, nbytes)) {
+ prng_errorflag = PRNG_GEN_FAILED;
+ return -EILSEQ;
+ }
+ memcpy(prng_data->prev, buf, nbytes);
+ }
+
+ return ret;
+}
+
+
+/*** file io functions ***/
+
+static int prng_open(struct inode *inode, struct file *file)
+{
+ return nonseekable_open(inode, file);
+}
+
+
+static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
+ size_t nbytes, loff_t *ppos)
+{
+ int chunk, n, tmp, ret = 0;
+
+ /* lock prng_data struct */
+ if (mutex_lock_interruptible(&prng_data->mutex))
+ return -ERESTARTSYS;
+
+ while (nbytes) {
+ if (need_resched()) {
+ if (signal_pending(current)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ break;
+ }
+			/* release the mutex before calling schedule() */
+ mutex_unlock(&prng_data->mutex);
+ schedule();
+			/* reacquire the mutex */
+ if (mutex_lock_interruptible(&prng_data->mutex)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ return ret;
+ }
+ }
+
+ /*
+ * we lose some random bytes if an attacker issues
+ * reads < 8 bytes, but we don't care
+ */
+ chunk = min_t(int, nbytes, prng_chunk_size);
+
+ /* PRNG only likes multiples of 8 bytes */
+ n = (chunk + 7) & -8;
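+		/* e.g. a 13 byte chunk is rounded up to n = 16 */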
+
+ if (prng_data->prngws.reseed_counter > prng_reseed_limit)
+ prng_tdes_seed(8);
+
+		/* if the CPU supports PRNG, stckf is available as well */
+ *((unsigned long long *)prng_data->buf) = get_tod_clock_fast();
+
+ /*
+ * Beside the STCKF the input for the TDES-EDE is the output
+ * of the last operation. We differ here from X9.17 since we
+ * only store one timestamp into the buffer. Padding the whole
+ * buffer with timestamps does not improve security, since
+ * successive stckf have nearly constant offsets.
+ * If an attacker knows the first timestamp it would be
+ * trivial to guess the additional values. One timestamp
+ * is therefore enough and still guarantees unique input values.
+ *
+ * Note: you can still get strict X9.17 conformity by setting
+ * prng_chunk_size to 8 bytes.
+ */
+ tmp = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
+ prng_data->buf, prng_data->buf, n);
+ if (tmp < 0 || tmp != n) {
+ ret = -EIO;
+ break;
+ }
+
+ prng_data->prngws.byte_counter += n;
+ prng_data->prngws.reseed_counter += n;
+
+		if (copy_to_user(ubuf, prng_data->buf, chunk)) {
+			ret = -EFAULT;
+			break;
+		}
+
+ nbytes -= chunk;
+ ret += chunk;
+ ubuf += chunk;
+ }
+
+ /* unlock prng_data struct */
+ mutex_unlock(&prng_data->mutex);
+
+ return ret;
+}
+
+
+static ssize_t prng_sha512_read(struct file *file, char __user *ubuf,
+ size_t nbytes, loff_t *ppos)
+{
+ int n, ret = 0;
+ u8 *p;
+
+ /* if errorflag is set do nothing and return 'broken pipe' */
+ if (prng_errorflag)
+ return -EPIPE;
+
+ /* lock prng_data struct */
+ if (mutex_lock_interruptible(&prng_data->mutex))
+ return -ERESTARTSYS;
+
+ while (nbytes) {
+ if (need_resched()) {
+ if (signal_pending(current)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ break;
+ }
+			/* release the mutex before calling schedule() */
+ mutex_unlock(&prng_data->mutex);
+ schedule();
+			/* reacquire the mutex */
+ if (mutex_lock_interruptible(&prng_data->mutex)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ return ret;
+ }
+ }
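+		/* serve leftover bytes from the previous chunk before generating a new one */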
+ if (prng_data->rest) {
+ /* push left over random bytes from the previous read */
+ p = prng_data->buf + prng_chunk_size - prng_data->rest;
+ n = (nbytes < prng_data->rest) ?
+ nbytes : prng_data->rest;
+ prng_data->rest -= n;
+ } else {
+ /* generate one chunk of random bytes into read buf */
+ p = prng_data->buf;
+ n = prng_sha512_generate(p, prng_chunk_size);
+ if (n < 0) {
+ ret = n;
+ break;
+ }
+ if (nbytes < prng_chunk_size) {
+ n = nbytes;
+ prng_data->rest = prng_chunk_size - n;
+ } else {
+ n = prng_chunk_size;
+ prng_data->rest = 0;
+ }
+ }
+ if (copy_to_user(ubuf, p, n)) {
+ ret = -EFAULT;
+ break;
+ }
+ ubuf += n;
+ nbytes -= n;
+ ret += n;
+ }
+
+ /* unlock prng_data struct */
+ mutex_unlock(&prng_data->mutex);
+
+ return ret;
+}
+
+
+/*** sysfs stuff ***/
+
+static const struct file_operations prng_sha512_fops = {
+ .owner = THIS_MODULE,
+ .open = &prng_open,
+ .release = NULL,
+ .read = &prng_sha512_read,
+ .llseek = noop_llseek,
+};
+static const struct file_operations prng_tdes_fops = {
+ .owner = THIS_MODULE,
+ .open = &prng_open,
+ .release = NULL,
+ .read = &prng_tdes_read,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice prng_sha512_dev = {
+ .name = "prandom",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &prng_sha512_fops,
+};
+static struct miscdevice prng_tdes_dev = {
+ .name = "prandom",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &prng_tdes_fops,
+};
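+/*
+ * Only one of these two devices is registered, depending on the mode
+ * chosen in prng_init(); either way it appears as /dev/prandom.
+ */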
+
+
+/* chunksize attribute (ro) */
+static ssize_t prng_chunksize_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", prng_chunk_size);
+}
+static DEVICE_ATTR(chunksize, 0444, prng_chunksize_show, NULL);
+
+/* counter attribute (ro) */
+static ssize_t prng_counter_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 counter;
+
+ if (mutex_lock_interruptible(&prng_data->mutex))
+ return -ERESTARTSYS;
+ if (prng_mode == PRNG_MODE_SHA512)
+ counter = prng_data->ppnows.stream_bytes;
+ else
+ counter = prng_data->prngws.byte_counter;
+ mutex_unlock(&prng_data->mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", counter);
+}
+static DEVICE_ATTR(byte_counter, 0444, prng_counter_show, NULL);
+
+/* errorflag attribute (ro) */
+static ssize_t prng_errorflag_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", prng_errorflag);
+}
+static DEVICE_ATTR(errorflag, 0444, prng_errorflag_show, NULL);
+
+/* mode attribute (ro) */
+static ssize_t prng_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ if (prng_mode == PRNG_MODE_TDES)
+ return snprintf(buf, PAGE_SIZE, "TDES\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "SHA512\n");
+}
+static DEVICE_ATTR(mode, 0444, prng_mode_show, NULL);
+
+/* reseed attribute (w) */
+static ssize_t prng_reseed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (mutex_lock_interruptible(&prng_data->mutex))
+ return -ERESTARTSYS;
+ prng_sha512_reseed();
+ mutex_unlock(&prng_data->mutex);
+
+ return count;
+}
+static DEVICE_ATTR(reseed, 0200, NULL, prng_reseed_store);
+
+/* reseed limit attribute (rw) */
+static ssize_t prng_reseed_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", prng_reseed_limit);
+}
+static ssize_t prng_reseed_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned limit;
+
+ if (sscanf(buf, "%u\n", &limit) != 1)
+ return -EINVAL;
+
+ if (prng_mode == PRNG_MODE_SHA512) {
+ if (limit < PRNG_RESEED_LIMIT_SHA512_LOWER)
+ return -EINVAL;
+ } else {
+ if (limit < PRNG_RESEED_LIMIT_TDES_LOWER)
+ return -EINVAL;
+ }
+
+ prng_reseed_limit = limit;
+
+ return count;
+}
+static DEVICE_ATTR(reseed_limit, 0644,
+ prng_reseed_limit_show, prng_reseed_limit_store);
+
+/* strength attribute (ro) */
+static ssize_t prng_strength_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "256\n");
+}
+static DEVICE_ATTR(strength, 0444, prng_strength_show, NULL);
+
+static struct attribute *prng_sha512_dev_attrs[] = {
+ &dev_attr_errorflag.attr,
+ &dev_attr_chunksize.attr,
+ &dev_attr_byte_counter.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_reseed.attr,
+ &dev_attr_reseed_limit.attr,
+ &dev_attr_strength.attr,
+ NULL
+};
+static struct attribute *prng_tdes_dev_attrs[] = {
+ &dev_attr_chunksize.attr,
+ &dev_attr_byte_counter.attr,
+ &dev_attr_mode.attr,
+ NULL
+};
+
+static struct attribute_group prng_sha512_dev_attr_group = {
+ .attrs = prng_sha512_dev_attrs
+};
+static struct attribute_group prng_tdes_dev_attr_group = {
+ .attrs = prng_tdes_dev_attrs
+};
+
+
+/*** module init and exit ***/
+
+static int __init prng_init(void)
+{
+ int ret;
+
+ /* check if the CPU has a PRNG */
+ if (!crypt_s390_func_available(KMC_PRNG, CRYPT_S390_MSA))
+ return -EOPNOTSUPP;
+
+ /* choose prng mode */
+ if (prng_mode != PRNG_MODE_TDES) {
+ /* check for MSA5 support for PPNO operations */
+ if (!crypt_s390_func_available(PPNO_SHA512_DRNG_GEN,
+ CRYPT_S390_MSA5)) {
+ if (prng_mode == PRNG_MODE_SHA512) {
+ pr_err("The prng module cannot "
+ "start in SHA-512 mode\n");
+ return -EOPNOTSUPP;
+ }
+ prng_mode = PRNG_MODE_TDES;
+ } else
+ prng_mode = PRNG_MODE_SHA512;
+ }
+
+ if (prng_mode == PRNG_MODE_SHA512) {
+
+ /* SHA512 mode */
+
+ if (prng_chunk_size < PRNG_CHUNKSIZE_SHA512_MIN
+ || prng_chunk_size > PRNG_CHUNKSIZE_SHA512_MAX)
+ return -EINVAL;
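+		/* round the chunk size up to a multiple of 64 bytes */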
+ prng_chunk_size = (prng_chunk_size + 0x3f) & ~0x3f;
+
+ if (prng_reseed_limit == 0)
+ prng_reseed_limit = PRNG_RESEED_LIMIT_SHA512;
+ else if (prng_reseed_limit < PRNG_RESEED_LIMIT_SHA512_LOWER)
+ return -EINVAL;
+
+ ret = prng_sha512_instantiate();
+ if (ret)
+ goto out;
+
+ ret = misc_register(&prng_sha512_dev);
+ if (ret) {
+ prng_sha512_deinstantiate();
+ goto out;
+ }
+ ret = sysfs_create_group(&prng_sha512_dev.this_device->kobj,
+ &prng_sha512_dev_attr_group);
+ if (ret) {
+ misc_deregister(&prng_sha512_dev);
+ prng_sha512_deinstantiate();
+ goto out;
+ }
+
+ } else {
+
+ /* TDES mode */
+
+ if (prng_chunk_size < PRNG_CHUNKSIZE_TDES_MIN
+ || prng_chunk_size > PRNG_CHUNKSIZE_TDES_MAX)
+ return -EINVAL;
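+		/* round the chunk size up to a multiple of 8 bytes */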
+ prng_chunk_size = (prng_chunk_size + 0x07) & ~0x07;
+
+ if (prng_reseed_limit == 0)
+ prng_reseed_limit = PRNG_RESEED_LIMIT_TDES;
+ else if (prng_reseed_limit < PRNG_RESEED_LIMIT_TDES_LOWER)
+ return -EINVAL;
+
+ ret = prng_tdes_instantiate();
+ if (ret)
+ goto out;
+
+ ret = misc_register(&prng_tdes_dev);
+ if (ret) {
+ prng_tdes_deinstantiate();
+ goto out;
+ }
+ ret = sysfs_create_group(&prng_tdes_dev.this_device->kobj,
+ &prng_tdes_dev_attr_group);
+ if (ret) {
+ misc_deregister(&prng_tdes_dev);
+ prng_tdes_deinstantiate();
+ goto out;
+ }
+
+ }
+
+out:
+ return ret;
+}
+
+
+static void __exit prng_exit(void)
+{
+ if (prng_mode == PRNG_MODE_SHA512) {
+ sysfs_remove_group(&prng_sha512_dev.this_device->kobj,
+ &prng_sha512_dev_attr_group);
+ misc_deregister(&prng_sha512_dev);
+ prng_sha512_deinstantiate();
+ } else {
+ sysfs_remove_group(&prng_tdes_dev.this_device->kobj,
+ &prng_tdes_dev_attr_group);
+ misc_deregister(&prng_tdes_dev);
+ prng_tdes_deinstantiate();
+ }
+}
+
+
+module_init(prng_init);
+module_exit(prng_exit);
diff --git a/arch/s390/crypto/sha.h b/arch/s390/crypto/sha.h
new file mode 100644
index 000000000..f4e9dc716
--- /dev/null
+++ b/arch/s390/crypto/sha.h
@@ -0,0 +1,37 @@
+/*
+ * Cryptographic API.
+ *
+ * s390 generic implementation of the SHA Secure Hash Algorithms.
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Jan Glauber (jang@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ARCH_S390_SHA_H
+#define _CRYPTO_ARCH_S390_SHA_H
+
+#include <linux/crypto.h>
+#include <crypto/sha.h>
+
+/* must be big enough for the largest SHA variant */
+#define SHA_MAX_STATE_SIZE 16
+#define SHA_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
+
+struct s390_sha_ctx {
+ u64 count; /* message length in bytes */
+ u32 state[SHA_MAX_STATE_SIZE];
+ u8 buf[2 * SHA_MAX_BLOCK_SIZE];
+ int func; /* KIMD function to use */
+};
+
+struct shash_desc;
+
+int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len);
+int s390_sha_final(struct shash_desc *desc, u8 *out);
+
+#endif
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
new file mode 100644
index 000000000..5b2bee323
--- /dev/null
+++ b/arch/s390/crypto/sha1_s390.c
@@ -0,0 +1,108 @@
+/*
+ * Cryptographic API.
+ *
+ * s390 implementation of the SHA1 Secure Hash Algorithm.
+ *
+ * Derived from cryptoapi implementation, adapted for in-place
+ * scatterlist interface. Originally based on the public domain
+ * implementation written by Steve Reid.
+ *
+ * s390 Version:
+ * Copyright IBM Corp. 2003, 2007
+ * Author(s): Thomas Spatzier
+ * Jan Glauber (jan.glauber@de.ibm.com)
+ *
+ * Derived from "crypto/sha1_generic.c"
+ * Copyright (c) Alan Smithee.
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <crypto/sha.h>
+
+#include "crypt_s390.h"
+#include "sha.h"
+
+static int sha1_init(struct shash_desc *desc)
+{
+ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA1_H0;
+ sctx->state[1] = SHA1_H1;
+ sctx->state[2] = SHA1_H2;
+ sctx->state[3] = SHA1_H3;
+ sctx->state[4] = SHA1_H4;
+ sctx->count = 0;
+ sctx->func = KIMD_SHA_1;
+
+ return 0;
+}
+
+static int sha1_export(struct shash_desc *desc, void *out)
+{
+ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+ struct sha1_state *octx = out;
+
+ octx->count = sctx->count;
+ memcpy(octx->state, sctx->state, sizeof(octx->state));
+ memcpy(octx->buffer, sctx->buf, sizeof(octx->buffer));
+ return 0;
+}
+
+static int sha1_import(struct shash_desc *desc, const void *in)
+{
+ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+ const struct sha1_state *ictx = in;
+
+ sctx->count = ictx->count;
+ memcpy(sctx->state, ictx->state, sizeof(ictx->state));
+ memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer));
+ sctx->func = KIMD_SHA_1;
+ return 0;
+}
+
+static struct shash_alg alg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .init = sha1_init,
+ .update = s390_sha_update,
+ .final = s390_sha_final,
+ .export = sha1_export,
+ .import = sha1_import,
+ .descsize = sizeof(struct s390_sha_ctx),
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name= "sha1-s390",
+ .cra_priority = CRYPT_S390_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init sha1_s390_init(void)
+{
+ if (!crypt_s390_func_available(KIMD_SHA_1, CRYPT_S390_MSA))
+ return -EOPNOTSUPP;
+ return crypto_register_shash(&alg);
+}
+
+static void __exit sha1_s390_fini(void)
+{
+ crypto_unregister_shash(&alg);
+}
+
+module_init(sha1_s390_init);
+module_exit(sha1_s390_fini);
+
+MODULE_ALIAS_CRYPTO("sha1");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
new file mode 100644
index 000000000..b74ff1581
--- /dev/null
+++ b/arch/s390/crypto/sha256_s390.c
@@ -0,0 +1,149 @@
+/*
+ * Cryptographic API.
+ *
+ * s390 implementation of the SHA256 and SHA224 Secure Hash Algorithm.
+ *
+ * s390 Version:
+ * Copyright IBM Corp. 2005, 2011
+ * Author(s): Jan Glauber (jang@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <crypto/sha.h>
+
+#include "crypt_s390.h"
+#include "sha.h"
+
+static int sha256_init(struct shash_desc *desc)
+{
+ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA256_H0;
+ sctx->state[1] = SHA256_H1;
+ sctx->state[2] = SHA256_H2;
+ sctx->state[3] = SHA256_H3;
+ sctx->state[4] = SHA256_H4;
+ sctx->state[5] = SHA256_H5;
+ sctx->state[6] = SHA256_H6;
+ sctx->state[7] = SHA256_H7;
+ sctx->count = 0;
+ sctx->func = KIMD_SHA_256;
+
+ return 0;
+}
+
+static int sha256_export(struct shash_desc *desc, void *out)
+{
+ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+ struct sha256_state *octx = out;
+
+ octx->count = sctx->count;
+ memcpy(octx->state, sctx->state, sizeof(octx->state));
+ memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
+ return 0;
+}
+
+static int sha256_import(struct shash_desc *desc, const void *in)
+{
+ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+ const struct sha256_state *ictx = in;
+
+ sctx->count = ictx->count;
+ memcpy(sctx->state, ictx->state, sizeof(ictx->state));
+ memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+ sctx->func = KIMD_SHA_256;
+ return 0;
+}
+
+static struct shash_alg sha256_alg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = sha256_init,
+ .update = s390_sha_update,
+ .final = s390_sha_final,
+ .export = sha256_export,
+ .import = sha256_import,
+ .descsize = sizeof(struct s390_sha_ctx),
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name= "sha256-s390",
+ .cra_priority = CRYPT_S390_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int sha224_init(struct shash_desc *desc)
+{
+ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA224_H0;
+ sctx->state[1] = SHA224_H1;
+ sctx->state[2] = SHA224_H2;
+ sctx->state[3] = SHA224_H3;
+ sctx->state[4] = SHA224_H4;
+ sctx->state[5] = SHA224_H5;
+ sctx->state[6] = SHA224_H6;
+ sctx->state[7] = SHA224_H7;
+ sctx->count = 0;
+ sctx->func = KIMD_SHA_256;
+
+ return 0;
+}
+
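+/*
+ * SHA-224 shares the SHA-256 compression function (KIMD_SHA_256) and state
+ * layout, so it reuses the sha256 export/import helpers; only the digest
+ * size differs.
+ */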
+static struct shash_alg sha224_alg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = sha224_init,
+ .update = s390_sha_update,
+ .final = s390_sha_final,
+ .export = sha256_export,
+ .import = sha256_import,
+ .descsize = sizeof(struct s390_sha_ctx),
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name= "sha224-s390",
+ .cra_priority = CRYPT_S390_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init sha256_s390_init(void)
+{
+ int ret;
+
+ if (!crypt_s390_func_available(KIMD_SHA_256, CRYPT_S390_MSA))
+ return -EOPNOTSUPP;
+ ret = crypto_register_shash(&sha256_alg);
+ if (ret < 0)
+ goto out;
+ ret = crypto_register_shash(&sha224_alg);
+ if (ret < 0)
+ crypto_unregister_shash(&sha256_alg);
+out:
+ return ret;
+}
+
+static void __exit sha256_s390_fini(void)
+{
+ crypto_unregister_shash(&sha224_alg);
+ crypto_unregister_shash(&sha256_alg);
+}
+
+module_init(sha256_s390_init);
+module_exit(sha256_s390_fini);
+
+MODULE_ALIAS_CRYPTO("sha256");
+MODULE_ALIAS_CRYPTO("sha224");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm");
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
new file mode 100644
index 000000000..0c36989ba
--- /dev/null
+++ b/arch/s390/crypto/sha512_s390.c
@@ -0,0 +1,155 @@
+/*
+ * Cryptographic API.
+ *
+ * s390 implementation of the SHA512 and SHA384 Secure Hash Algorithms.
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Jan Glauber (jang@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "sha.h"
+#include "crypt_s390.h"
+
+static int sha512_init(struct shash_desc *desc)
+{
+ struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
+
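+	/* the u32 state array is used as eight 64 bit SHA-512 chaining values */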
+ *(__u64 *)&ctx->state[0] = 0x6a09e667f3bcc908ULL;
+ *(__u64 *)&ctx->state[2] = 0xbb67ae8584caa73bULL;
+ *(__u64 *)&ctx->state[4] = 0x3c6ef372fe94f82bULL;
+ *(__u64 *)&ctx->state[6] = 0xa54ff53a5f1d36f1ULL;
+ *(__u64 *)&ctx->state[8] = 0x510e527fade682d1ULL;
+ *(__u64 *)&ctx->state[10] = 0x9b05688c2b3e6c1fULL;
+ *(__u64 *)&ctx->state[12] = 0x1f83d9abfb41bd6bULL;
+ *(__u64 *)&ctx->state[14] = 0x5be0cd19137e2179ULL;
+ ctx->count = 0;
+ ctx->func = KIMD_SHA_512;
+
+ return 0;
+}
+
+static int sha512_export(struct shash_desc *desc, void *out)
+{
+ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+ struct sha512_state *octx = out;
+
+ octx->count[0] = sctx->count;
+ octx->count[1] = 0;
+ memcpy(octx->state, sctx->state, sizeof(octx->state));
+ memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
+ return 0;
+}
+
+static int sha512_import(struct shash_desc *desc, const void *in)
+{
+ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+ const struct sha512_state *ictx = in;
+
+ if (unlikely(ictx->count[1]))
+ return -ERANGE;
+ sctx->count = ictx->count[0];
+
+ memcpy(sctx->state, ictx->state, sizeof(ictx->state));
+ memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+ sctx->func = KIMD_SHA_512;
+ return 0;
+}
+
+static struct shash_alg sha512_alg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .init = sha512_init,
+ .update = s390_sha_update,
+ .final = s390_sha_final,
+ .export = sha512_export,
+ .import = sha512_import,
+ .descsize = sizeof(struct s390_sha_ctx),
+ .statesize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name= "sha512-s390",
+ .cra_priority = CRYPT_S390_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+MODULE_ALIAS_CRYPTO("sha512");
+
+static int sha384_init(struct shash_desc *desc)
+{
+ struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
+
+ *(__u64 *)&ctx->state[0] = 0xcbbb9d5dc1059ed8ULL;
+ *(__u64 *)&ctx->state[2] = 0x629a292a367cd507ULL;
+ *(__u64 *)&ctx->state[4] = 0x9159015a3070dd17ULL;
+ *(__u64 *)&ctx->state[6] = 0x152fecd8f70e5939ULL;
+ *(__u64 *)&ctx->state[8] = 0x67332667ffc00b31ULL;
+ *(__u64 *)&ctx->state[10] = 0x8eb44a8768581511ULL;
+ *(__u64 *)&ctx->state[12] = 0xdb0c2e0d64f98fa7ULL;
+ *(__u64 *)&ctx->state[14] = 0x47b5481dbefa4fa4ULL;
+ ctx->count = 0;
+ ctx->func = KIMD_SHA_512;
+
+ return 0;
+}
+
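+/*
+ * SHA-384 shares the SHA-512 compression function and state layout and
+ * therefore reuses the sha512 export/import helpers.
+ */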
+static struct shash_alg sha384_alg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .init = sha384_init,
+ .update = s390_sha_update,
+ .final = s390_sha_final,
+ .export = sha512_export,
+ .import = sha512_import,
+ .descsize = sizeof(struct s390_sha_ctx),
+ .statesize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name= "sha384-s390",
+ .cra_priority = CRYPT_S390_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s390_sha_ctx),
+ .cra_module = THIS_MODULE,
+ }
+};
+
+MODULE_ALIAS_CRYPTO("sha384");
+
+static int __init init(void)
+{
+ int ret;
+
+ if (!crypt_s390_func_available(KIMD_SHA_512, CRYPT_S390_MSA))
+ return -EOPNOTSUPP;
+	ret = crypto_register_shash(&sha512_alg);
+	if (ret < 0)
+		goto out;
+	ret = crypto_register_shash(&sha384_alg);
+	if (ret < 0)
+		crypto_unregister_shash(&sha512_alg);
+out:
+ return ret;
+}
+
+static void __exit fini(void)
+{
+ crypto_unregister_shash(&sha512_alg);
+ crypto_unregister_shash(&sha384_alg);
+}
+
+module_init(init);
+module_exit(fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA512 and SHA384 Secure Hash Algorithms");
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
new file mode 100644
index 000000000..8620b0ec9
--- /dev/null
+++ b/arch/s390/crypto/sha_common.c
@@ -0,0 +1,106 @@
+/*
+ * Cryptographic API.
+ *
+ * s390 generic implementation of the SHA Secure Hash Algorithms.
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Jan Glauber (jang@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/module.h>
+#include "sha.h"
+#include "crypt_s390.h"
+
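+/*
+ * Buffer partial blocks in the context and hand only whole blocks to the
+ * CPACF KIMD instruction; any remainder is kept for the next update call
+ * or for the final padding in s390_sha_final().
+ */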
+int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
+{
+ struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int bsize = crypto_shash_blocksize(desc->tfm);
+ unsigned int index;
+ int ret;
+
+ /* how much is already in the buffer? */
+ index = ctx->count & (bsize - 1);
+ ctx->count += len;
+
+ if ((index + len) < bsize)
+ goto store;
+
+ /* process one stored block */
+ if (index) {
+ memcpy(ctx->buf + index, data, bsize - index);
+ ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
+ if (ret != bsize)
+ return -EIO;
+ data += bsize - index;
+ len -= bsize - index;
+ index = 0;
+ }
+
+ /* process as many blocks as possible */
+ if (len >= bsize) {
+ ret = crypt_s390_kimd(ctx->func, ctx->state, data,
+ len & ~(bsize - 1));
+ if (ret != (len & ~(bsize - 1)))
+ return -EIO;
+ data += ret;
+ len -= ret;
+ }
+store:
+ if (len)
+		memcpy(ctx->buf + index, data, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(s390_sha_update);
+
+int s390_sha_final(struct shash_desc *desc, u8 *out)
+{
+ struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int bsize = crypto_shash_blocksize(desc->tfm);
+ u64 bits;
+ unsigned int index, end, plen;
+ int ret;
+
+ /* SHA-512 uses 128 bit padding length */
+ plen = (bsize > SHA256_BLOCK_SIZE) ? 16 : 8;
+
+ /* must perform manual padding */
+ index = ctx->count & (bsize - 1);
+ end = (index < bsize - plen) ? bsize : (2 * bsize);
+
+ /* start pad with 1 */
+ ctx->buf[index] = 0x80;
+ index++;
+
+ /* pad with zeros */
+ memset(ctx->buf + index, 0x00, end - index - 8);
+
+ /*
+ * Append message length. Well, SHA-512 wants a 128 bit length value,
+ * nevertheless we use u64, should be enough for now...
+ */
+ bits = ctx->count * 8;
+ memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
+
+ ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
+ if (ret != end)
+ return -EIO;
+
+ /* copy digest to out */
+ memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
+ /* wipe context */
+ memset(ctx, 0, sizeof *ctx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(s390_sha_final);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("s390 SHA cipher common functions");