author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-06-10 05:30:17 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-06-10 05:30:17 -0300
commit     d635711daa98be86d4c7fd01499c34f566b54ccb (patch)
tree       aa5cc3760a27c3d57146498cb82fa549547de06c /crypto
parent     c91265cd0efb83778f015b4d4b1129bd2cfd075e (diff)
Linux-libre 4.6.2-gnu
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig                                         |  24
-rw-r--r--  crypto/Makefile                                        |   5
-rw-r--r--  crypto/ahash.c                                         |  27
-rw-r--r--  crypto/algapi.c                                        |  15
-rw-r--r--  crypto/asymmetric_keys/Kconfig                         |   7
-rw-r--r--  crypto/asymmetric_keys/Makefile                        |   8
-rw-r--r--  crypto/asymmetric_keys/mscode_parser.c                 |  14
-rw-r--r--  crypto/asymmetric_keys/pkcs7_parser.c                  |  33
-rw-r--r--  crypto/asymmetric_keys/pkcs7_trust.c                   |   2
-rw-r--r--  crypto/asymmetric_keys/pkcs7_verify.c                  |  10
-rw-r--r--  crypto/asymmetric_keys/public_key.c                    | 154
-rw-r--r--  crypto/asymmetric_keys/public_key.h                    |  36
-rw-r--r--  crypto/asymmetric_keys/rsa.c                           | 278
-rw-r--r--  crypto/asymmetric_keys/verify_pefile.c                 |   4
-rw-r--r--  crypto/asymmetric_keys/verify_pefile.h                 |   2
-rw-r--r--  crypto/asymmetric_keys/x509_cert_parser.c              |  67
-rw-r--r--  crypto/asymmetric_keys/x509_public_key.c               |  34
-rw-r--r--  crypto/asymmetric_keys/x509_rsakey.asn1                |   4
-rw-r--r--  crypto/async_tx/async_pq.c                             |   2
-rw-r--r--  crypto/crc32_generic.c (renamed from crypto/crc32.c)   |   3
-rw-r--r--  crypto/crypto_engine.c                                 | 355
-rw-r--r--  crypto/drbg.c                                          |  64
-rw-r--r--  crypto/internal.h                                      |   3
-rw-r--r--  crypto/mcryptd.c                                       |   1
-rw-r--r--  crypto/pcompress.c                                     | 115
-rw-r--r--  crypto/rsa-pkcs1pad.c                                  | 182
-rw-r--r--  crypto/shash.c                                         | 147
-rw-r--r--  crypto/skcipher.c                                      |   4
-rw-r--r--  crypto/tcrypt.c                                        | 239
-rw-r--r--  crypto/testmgr.c                                       | 428
-rw-r--r--  crypto/testmgr.h                                       | 144
-rw-r--r--  crypto/xts.c                                           |  11
-rw-r--r--  crypto/zlib.c                                          | 381
33 files changed, 933 insertions, 1870 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 3be07ad1d..1d33beb6a 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -84,15 +84,6 @@ config CRYPTO_RNG_DEFAULT
tristate
select CRYPTO_DRBG_MENU
-config CRYPTO_PCOMP
- tristate
- select CRYPTO_PCOMP2
- select CRYPTO_ALGAPI
-
-config CRYPTO_PCOMP2
- tristate
- select CRYPTO_ALGAPI2
-
config CRYPTO_AKCIPHER2
tristate
select CRYPTO_ALGAPI2
@@ -105,6 +96,7 @@ config CRYPTO_AKCIPHER
config CRYPTO_RSA
tristate "RSA algorithm"
select CRYPTO_AKCIPHER
+ select CRYPTO_MANAGER
select MPILIB
select ASN1
help
@@ -122,7 +114,6 @@ config CRYPTO_MANAGER2
select CRYPTO_AEAD2
select CRYPTO_HASH2
select CRYPTO_BLKCIPHER2
- select CRYPTO_PCOMP2
select CRYPTO_AKCIPHER2
config CRYPTO_USER
@@ -227,6 +218,9 @@ config CRYPTO_GLUE_HELPER_X86
depends on X86
select CRYPTO_ALGAPI
+config CRYPTO_ENGINE
+ tristate
+
comment "Authenticated Encryption with Associated Data"
config CRYPTO_CCM
@@ -1506,15 +1500,6 @@ config CRYPTO_DEFLATE
You will most probably want this if using IPSec.
-config CRYPTO_ZLIB
- tristate "Zlib compression algorithm"
- select CRYPTO_PCOMP
- select ZLIB_INFLATE
- select ZLIB_DEFLATE
- select NLATTR
- help
- This is the zlib algorithm.
-
config CRYPTO_LZO
tristate "LZO compression algorithm"
select CRYPTO_ALGAPI
@@ -1595,6 +1580,7 @@ endif # if CRYPTO_DRBG_MENU
config CRYPTO_JITTERENTROPY
tristate "Jitterentropy Non-Deterministic Random Number Generator"
+ select CRYPTO_RNG
help
The Jitterentropy RNG is a noise that is intended
to provide seed to another RNG. The RNG does not
diff --git a/crypto/Makefile b/crypto/Makefile
index 2acdbbd30..4f4ef7eaa 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -7,6 +7,7 @@ crypto-y := api.o cipher.o compress.o memneq.o
obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
+obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o
obj-$(CONFIG_CRYPTO_FIPS) += fips.o
crypto_algapi-$(CONFIG_PROC_FS) += proc.o
@@ -28,7 +29,6 @@ crypto_hash-y += ahash.o
crypto_hash-y += shash.o
obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
-obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o
obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
$(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
@@ -99,10 +99,9 @@ obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
-obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
-obj-$(CONFIG_CRYPTO_CRC32) += crc32.o
+obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
diff --git a/crypto/ahash.c b/crypto/ahash.c
index d19b52324..3887a98ab 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -69,8 +69,9 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
struct scatterlist *sg;
sg = walk->sg;
- walk->pg = sg_page(sg);
walk->offset = sg->offset;
+ walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+ walk->offset = offset_in_page(walk->offset);
walk->entrylen = sg->length;
if (walk->entrylen > walk->total)
@@ -166,24 +167,6 @@ int crypto_ahash_walk_first(struct ahash_request *req,
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
-int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
- struct crypto_hash_walk *walk,
- struct scatterlist *sg, unsigned int len)
-{
- walk->total = len;
-
- if (!walk->total) {
- walk->entrylen = 0;
- return 0;
- }
-
- walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
- walk->sg = sg;
- walk->flags = hdesc->flags & CRYPTO_TFM_REQ_MASK;
-
- return hash_walk_new_entry(walk);
-}
-
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -542,6 +525,12 @@ struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
+int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
+{
+ return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_has_ahash);
+
static int ahash_prepare_alg(struct ahash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
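The new crypto_has_ahash() (backed by crypto_type_has_alg(), added in algapi.c below) lets a caller check whether an ahash implementation is registered before committing to allocate one. A minimal sketch, assuming kernel context and <crypto/hash.h>; "sha256" and the example_* function name are only illustrative, not part of this commit:

#include <linux/err.h>
#include <crypto/hash.h>

static int example_probe_sha256(void)
{
	struct crypto_ahash *tfm;

	if (!crypto_has_ahash("sha256", 0, 0))
		return -ENOENT;		/* no sha256 ahash registered */

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... build an ahash_request and hash data here ... */

	crypto_free_ahash(tfm);
	return 0;
}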
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 7be76aa31..731255a61 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -987,6 +987,21 @@ unsigned int crypto_alg_extsize(struct crypto_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_alg_extsize);
+int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
+ u32 type, u32 mask)
+{
+ int ret = 0;
+ struct crypto_alg *alg = crypto_find_alg(name, frontend, type, mask);
+
+ if (!IS_ERR(alg)) {
+ crypto_mod_put(alg);
+ ret = 1;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_type_has_alg);
+
static int __init crypto_algapi_init(void)
{
crypto_init_proc();
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
index 4870f2840..91a7e047a 100644
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -12,7 +12,6 @@ if ASYMMETRIC_KEY_TYPE
config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
tristate "Asymmetric public-key crypto algorithm subtype"
select MPILIB
- select PUBLIC_KEY_ALGO_RSA
select CRYPTO_HASH_INFO
help
This option provides support for asymmetric public key type handling.
@@ -20,12 +19,6 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
appropriate hash algorithms (such as SHA-1) must be available.
ENOPKG will be reported if the requisite algorithm is unavailable.
-config PUBLIC_KEY_ALGO_RSA
- tristate "RSA public-key algorithm"
- select MPILIB
- help
- This option enables support for the RSA algorithm (PKCS#1, RFC3447).
-
config X509_CERTIFICATE_PARSER
tristate "X.509 certificate parser"
depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile
index cd1406f9b..f90486256 100644
--- a/crypto/asymmetric_keys/Makefile
+++ b/crypto/asymmetric_keys/Makefile
@@ -7,7 +7,6 @@ obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys.o
asymmetric_keys-y := asymmetric_type.o signature.o
obj-$(CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE) += public_key.o
-obj-$(CONFIG_PUBLIC_KEY_ALGO_RSA) += rsa.o
#
# X.509 Certificate handling
@@ -16,21 +15,18 @@ obj-$(CONFIG_X509_CERTIFICATE_PARSER) += x509_key_parser.o
x509_key_parser-y := \
x509-asn1.o \
x509_akid-asn1.o \
- x509_rsakey-asn1.o \
x509_cert_parser.o \
x509_public_key.o
$(obj)/x509_cert_parser.o: \
$(obj)/x509-asn1.h \
- $(obj)/x509_akid-asn1.h \
- $(obj)/x509_rsakey-asn1.h
+ $(obj)/x509_akid-asn1.h
+
$(obj)/x509-asn1.o: $(obj)/x509-asn1.c $(obj)/x509-asn1.h
$(obj)/x509_akid-asn1.o: $(obj)/x509_akid-asn1.c $(obj)/x509_akid-asn1.h
-$(obj)/x509_rsakey-asn1.o: $(obj)/x509_rsakey-asn1.c $(obj)/x509_rsakey-asn1.h
clean-files += x509-asn1.c x509-asn1.h
clean-files += x509_akid-asn1.c x509_akid-asn1.h
-clean-files += x509_rsakey-asn1.c x509_rsakey-asn1.h
#
# PKCS#7 message handling
diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c
index adcef59ee..3242cbfae 100644
--- a/crypto/asymmetric_keys/mscode_parser.c
+++ b/crypto/asymmetric_keys/mscode_parser.c
@@ -86,25 +86,25 @@ int mscode_note_digest_algo(void *context, size_t hdrlen,
oid = look_up_OID(value, vlen);
switch (oid) {
case OID_md4:
- ctx->digest_algo = HASH_ALGO_MD4;
+ ctx->digest_algo = "md4";
break;
case OID_md5:
- ctx->digest_algo = HASH_ALGO_MD5;
+ ctx->digest_algo = "md5";
break;
case OID_sha1:
- ctx->digest_algo = HASH_ALGO_SHA1;
+ ctx->digest_algo = "sha1";
break;
case OID_sha256:
- ctx->digest_algo = HASH_ALGO_SHA256;
+ ctx->digest_algo = "sha256";
break;
case OID_sha384:
- ctx->digest_algo = HASH_ALGO_SHA384;
+ ctx->digest_algo = "sha384";
break;
case OID_sha512:
- ctx->digest_algo = HASH_ALGO_SHA512;
+ ctx->digest_algo = "sha512";
break;
case OID_sha224:
- ctx->digest_algo = HASH_ALGO_SHA224;
+ ctx->digest_algo = "sha224";
break;
case OID__NR:
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index 8f3056cd0..bdd0d753c 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -15,7 +15,7 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/oid_registry.h>
-#include "public_key.h"
+#include <crypto/public_key.h>
#include "pkcs7_parser.h"
#include "pkcs7-asn1.h"
@@ -44,7 +44,7 @@ struct pkcs7_parse_context {
static void pkcs7_free_signed_info(struct pkcs7_signed_info *sinfo)
{
if (sinfo) {
- mpi_free(sinfo->sig.mpi[0]);
+ kfree(sinfo->sig.s);
kfree(sinfo->sig.digest);
kfree(sinfo->signing_cert_id);
kfree(sinfo);
@@ -87,7 +87,7 @@ EXPORT_SYMBOL_GPL(pkcs7_free_message);
static int pkcs7_check_authattrs(struct pkcs7_message *msg)
{
struct pkcs7_signed_info *sinfo;
- bool want;
+ bool want = false;
sinfo = msg->signed_infos;
if (sinfo->authattrs) {
@@ -218,25 +218,26 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen,
switch (ctx->last_oid) {
case OID_md4:
- ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_MD4;
+ ctx->sinfo->sig.hash_algo = "md4";
break;
case OID_md5:
- ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_MD5;
+ ctx->sinfo->sig.hash_algo = "md5";
break;
case OID_sha1:
- ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_SHA1;
+ ctx->sinfo->sig.hash_algo = "sha1";
break;
case OID_sha256:
- ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_SHA256;
+ ctx->sinfo->sig.hash_algo = "sha256";
break;
case OID_sha384:
- ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_SHA384;
+ ctx->sinfo->sig.hash_algo = "sha384";
break;
case OID_sha512:
- ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_SHA512;
+ ctx->sinfo->sig.hash_algo = "sha512";
break;
case OID_sha224:
- ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_SHA224;
+ ctx->sinfo->sig.hash_algo = "sha224";
+ break;
default:
printk("Unsupported digest algo: %u\n", ctx->last_oid);
return -ENOPKG;
@@ -255,7 +256,7 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen,
switch (ctx->last_oid) {
case OID_rsaEncryption:
- ctx->sinfo->sig.pkey_algo = PKEY_ALGO_RSA;
+ ctx->sinfo->sig.pkey_algo = "rsa";
break;
default:
printk("Unsupported pkey algo: %u\n", ctx->last_oid);
@@ -614,16 +615,12 @@ int pkcs7_sig_note_signature(void *context, size_t hdrlen,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
- MPI mpi;
-
- BUG_ON(ctx->sinfo->sig.pkey_algo != PKEY_ALGO_RSA);
- mpi = mpi_read_raw_data(value, vlen);
- if (!mpi)
+ ctx->sinfo->sig.s = kmemdup(value, vlen, GFP_KERNEL);
+ if (!ctx->sinfo->sig.s)
return -ENOMEM;
- ctx->sinfo->sig.mpi[0] = mpi;
- ctx->sinfo->sig.nr_mpi = 1;
+ ctx->sinfo->sig.s_size = vlen;
return 0;
}
diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
index ecdb5a2ce..7d7a39b47 100644
--- a/crypto/asymmetric_keys/pkcs7_trust.c
+++ b/crypto/asymmetric_keys/pkcs7_trust.c
@@ -17,7 +17,7 @@
#include <linux/asn1.h>
#include <linux/key.h>
#include <keys/asymmetric-type.h>
-#include "public_key.h"
+#include <crypto/public_key.h>
#include "pkcs7_parser.h"
/**
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index 325575caf..50be2a15e 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -16,7 +16,7 @@
#include <linux/err.h>
#include <linux/asn1.h>
#include <crypto/hash.h>
-#include "public_key.h"
+#include <crypto/public_key.h>
#include "pkcs7_parser.h"
/*
@@ -31,17 +31,15 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7,
void *digest;
int ret;
- kenter(",%u,%u", sinfo->index, sinfo->sig.pkey_hash_algo);
+ kenter(",%u,%s", sinfo->index, sinfo->sig.hash_algo);
- if (sinfo->sig.pkey_hash_algo >= PKEY_HASH__LAST ||
- !hash_algo_name[sinfo->sig.pkey_hash_algo])
+ if (!sinfo->sig.hash_algo)
return -ENOPKG;
/* Allocate the hashing algorithm we're going to need and find out how
* big the hash operational data will be.
*/
- tfm = crypto_alloc_shash(hash_algo_name[sinfo->sig.pkey_hash_algo],
- 0, 0);
+ tfm = crypto_alloc_shash(sinfo->sig.hash_algo, 0, 0);
if (IS_ERR(tfm))
return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 6db4c01c6..0f8b264b3 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -17,32 +17,13 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
+#include <linux/scatterlist.h>
#include <keys/asymmetric-subtype.h>
-#include "public_key.h"
+#include <crypto/public_key.h>
+#include <crypto/akcipher.h>
MODULE_LICENSE("GPL");
-const char *const pkey_algo_name[PKEY_ALGO__LAST] = {
- [PKEY_ALGO_DSA] = "DSA",
- [PKEY_ALGO_RSA] = "RSA",
-};
-EXPORT_SYMBOL_GPL(pkey_algo_name);
-
-const struct public_key_algorithm *pkey_algo[PKEY_ALGO__LAST] = {
-#if defined(CONFIG_PUBLIC_KEY_ALGO_RSA) || \
- defined(CONFIG_PUBLIC_KEY_ALGO_RSA_MODULE)
- [PKEY_ALGO_RSA] = &RSA_public_key_algorithm,
-#endif
-};
-EXPORT_SYMBOL_GPL(pkey_algo);
-
-const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST] = {
- [PKEY_ID_PGP] = "PGP",
- [PKEY_ID_X509] = "X509",
- [PKEY_ID_PKCS7] = "PKCS#7",
-};
-EXPORT_SYMBOL_GPL(pkey_id_type_name);
-
/*
* Provide a part of a description of the key for /proc/keys.
*/
@@ -52,8 +33,7 @@ static void public_key_describe(const struct key *asymmetric_key,
struct public_key *key = asymmetric_key->payload.data[asym_crypto];
if (key)
- seq_printf(m, "%s.%s",
- pkey_id_type_name[key->id_type], key->algo->name);
+ seq_printf(m, "%s.%s", key->id_type, key->pkey_algo);
}
/*
@@ -62,50 +42,116 @@ static void public_key_describe(const struct key *asymmetric_key,
void public_key_destroy(void *payload)
{
struct public_key *key = payload;
- int i;
- if (key) {
- for (i = 0; i < ARRAY_SIZE(key->mpi); i++)
- mpi_free(key->mpi[i]);
- kfree(key);
- }
+ if (key)
+ kfree(key->key);
+ kfree(key);
}
EXPORT_SYMBOL_GPL(public_key_destroy);
+struct public_key_completion {
+ struct completion completion;
+ int err;
+};
+
+static void public_key_verify_done(struct crypto_async_request *req, int err)
+{
+ struct public_key_completion *compl = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ compl->err = err;
+ complete(&compl->completion);
+}
+
/*
* Verify a signature using a public key.
*/
-int public_key_verify_signature(const struct public_key *pk,
+int public_key_verify_signature(const struct public_key *pkey,
const struct public_key_signature *sig)
{
- const struct public_key_algorithm *algo;
-
- BUG_ON(!pk);
- BUG_ON(!pk->mpi[0]);
- BUG_ON(!pk->mpi[1]);
+ struct public_key_completion compl;
+ struct crypto_akcipher *tfm;
+ struct akcipher_request *req;
+ struct scatterlist sig_sg, digest_sg;
+ const char *alg_name;
+ char alg_name_buf[CRYPTO_MAX_ALG_NAME];
+ void *output;
+ unsigned int outlen;
+ int ret = -ENOMEM;
+
+ pr_devel("==>%s()\n", __func__);
+
+ BUG_ON(!pkey);
BUG_ON(!sig);
BUG_ON(!sig->digest);
- BUG_ON(!sig->mpi[0]);
-
- algo = pk->algo;
- if (!algo) {
- if (pk->pkey_algo >= PKEY_ALGO__LAST)
- return -ENOPKG;
- algo = pkey_algo[pk->pkey_algo];
- if (!algo)
- return -ENOPKG;
+ BUG_ON(!sig->s);
+
+ alg_name = sig->pkey_algo;
+ if (strcmp(sig->pkey_algo, "rsa") == 0) {
+ /* The data wangled by the RSA algorithm is typically padded
+ * and encoded in some manner, such as EMSA-PKCS1-1_5 [RFC3447
+ * sec 8.2].
+ */
+ if (snprintf(alg_name_buf, CRYPTO_MAX_ALG_NAME,
+ "pkcs1pad(rsa,%s)", sig->hash_algo
+ ) >= CRYPTO_MAX_ALG_NAME)
+ return -EINVAL;
+ alg_name = alg_name_buf;
}
- if (!algo->verify_signature)
- return -ENOTSUPP;
-
- if (sig->nr_mpi != algo->n_sig_mpi) {
- pr_debug("Signature has %u MPI not %u\n",
- sig->nr_mpi, algo->n_sig_mpi);
- return -EINVAL;
+ tfm = crypto_alloc_akcipher(alg_name, 0, 0);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ req = akcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req)
+ goto error_free_tfm;
+
+ ret = crypto_akcipher_set_pub_key(tfm, pkey->key, pkey->keylen);
+ if (ret)
+ goto error_free_req;
+
+ outlen = crypto_akcipher_maxsize(tfm);
+ output = kmalloc(outlen, GFP_KERNEL);
+ if (!output)
+ goto error_free_req;
+
+ sg_init_one(&sig_sg, sig->s, sig->s_size);
+ sg_init_one(&digest_sg, output, outlen);
+ akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
+ outlen);
+ init_completion(&compl.completion);
+ akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ public_key_verify_done, &compl);
+
+ /* Perform the verification calculation. This doesn't actually do the
+ * verification, but rather calculates the hash expected by the
+ * signature and returns that to us.
+ */
+ ret = crypto_akcipher_verify(req);
+ if (ret == -EINPROGRESS) {
+ wait_for_completion(&compl.completion);
+ ret = compl.err;
}
-
- return algo->verify_signature(pk, sig);
+ if (ret < 0)
+ goto out_free_output;
+
+ /* Do the actual verification step. */
+ if (req->dst_len != sig->digest_size ||
+ memcmp(sig->digest, output, sig->digest_size) != 0)
+ ret = -EKEYREJECTED;
+
+out_free_output:
+ kfree(output);
+error_free_req:
+ akcipher_request_free(req);
+error_free_tfm:
+ crypto_free_akcipher(tfm);
+ pr_devel("<==%s() = %d\n", __func__, ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(public_key_verify_signature);
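The rewritten public_key_verify_signature() above drives the generic akcipher API: for RSA it wraps the bare cipher in the "pkcs1pad(rsa,<hash>)" template, lets crypto_akcipher_verify() recover the encoded digest, and compares that against sig->digest. A minimal caller-side sketch under those assumptions — the buffers and the example_* function are placeholders, not taken from this commit; the struct field names follow the uses shown above:

#include <crypto/public_key.h>

/* Sketch only: the buffers come from whatever parsed the certificate. */
static int example_rsa_verify(const void *der_key, u32 der_key_len,
			      const u8 *raw_sig, u32 raw_sig_len,
			      const u8 *digest, u8 digest_len)
{
	struct public_key pub = {
		.key		= (void *)der_key,	/* BIT STRING payload of SubjectPublicKeyInfo */
		.keylen		= der_key_len,
		.pkey_algo	= "rsa",
		.id_type	= "X509",
	};
	struct public_key_signature sig = {
		.s		= (u8 *)raw_sig,	/* raw signature octets */
		.s_size		= raw_sig_len,
		.digest		= (u8 *)digest,		/* precomputed message hash */
		.digest_size	= digest_len,
		.pkey_algo	= "rsa",
		.hash_algo	= "sha256",	/* selects "pkcs1pad(rsa,sha256)" */
	};

	/* 0 on match, -EKEYREJECTED on mismatch, -ENOMEM/-EINVAL on setup errors */
	return public_key_verify_signature(&pub, &sig);
}

Because the digest comparison happens in this helper rather than inside the akcipher, a mismatched signature surfaces as -EKEYREJECTED here.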
diff --git a/crypto/asymmetric_keys/public_key.h b/crypto/asymmetric_keys/public_key.h
deleted file mode 100644
index 5c37a22a0..000000000
--- a/crypto/asymmetric_keys/public_key.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Public key algorithm internals
- *
- * See Documentation/crypto/asymmetric-keys.txt
- *
- * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#include <crypto/public_key.h>
-
-extern struct asymmetric_key_subtype public_key_subtype;
-
-/*
- * Public key algorithm definition.
- */
-struct public_key_algorithm {
- const char *name;
- u8 n_pub_mpi; /* Number of MPIs in public key */
- u8 n_sec_mpi; /* Number of MPIs in secret key */
- u8 n_sig_mpi; /* Number of MPIs in a signature */
- int (*verify_signature)(const struct public_key *key,
- const struct public_key_signature *sig);
-};
-
-extern const struct public_key_algorithm RSA_public_key_algorithm;
-
-/*
- * public_key.c
- */
-extern int public_key_verify_signature(const struct public_key *pk,
- const struct public_key_signature *sig);
diff --git a/crypto/asymmetric_keys/rsa.c b/crypto/asymmetric_keys/rsa.c
deleted file mode 100644
index 508b57b77..000000000
--- a/crypto/asymmetric_keys/rsa.c
+++ /dev/null
@@ -1,278 +0,0 @@
-/* RSA asymmetric public-key algorithm [RFC3447]
- *
- * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#define pr_fmt(fmt) "RSA: "fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <crypto/algapi.h>
-#include "public_key.h"
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("RSA Public Key Algorithm");
-
-#define kenter(FMT, ...) \
- pr_devel("==> %s("FMT")\n", __func__, ##__VA_ARGS__)
-#define kleave(FMT, ...) \
- pr_devel("<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
-
-/*
- * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2].
- */
-static const u8 RSA_digest_info_MD5[] = {
- 0x30, 0x20, 0x30, 0x0C, 0x06, 0x08,
- 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x02, 0x05, /* OID */
- 0x05, 0x00, 0x04, 0x10
-};
-
-static const u8 RSA_digest_info_SHA1[] = {
- 0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
- 0x2B, 0x0E, 0x03, 0x02, 0x1A,
- 0x05, 0x00, 0x04, 0x14
-};
-
-static const u8 RSA_digest_info_RIPE_MD_160[] = {
- 0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
- 0x2B, 0x24, 0x03, 0x02, 0x01,
- 0x05, 0x00, 0x04, 0x14
-};
-
-static const u8 RSA_digest_info_SHA224[] = {
- 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
- 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
- 0x05, 0x00, 0x04, 0x1C
-};
-
-static const u8 RSA_digest_info_SHA256[] = {
- 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
- 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
- 0x05, 0x00, 0x04, 0x20
-};
-
-static const u8 RSA_digest_info_SHA384[] = {
- 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
- 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
- 0x05, 0x00, 0x04, 0x30
-};
-
-static const u8 RSA_digest_info_SHA512[] = {
- 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
- 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
- 0x05, 0x00, 0x04, 0x40
-};
-
-static const struct {
- const u8 *data;
- size_t size;
-} RSA_ASN1_templates[PKEY_HASH__LAST] = {
-#define _(X) { RSA_digest_info_##X, sizeof(RSA_digest_info_##X) }
- [HASH_ALGO_MD5] = _(MD5),
- [HASH_ALGO_SHA1] = _(SHA1),
- [HASH_ALGO_RIPE_MD_160] = _(RIPE_MD_160),
- [HASH_ALGO_SHA256] = _(SHA256),
- [HASH_ALGO_SHA384] = _(SHA384),
- [HASH_ALGO_SHA512] = _(SHA512),
- [HASH_ALGO_SHA224] = _(SHA224),
-#undef _
-};
-
-/*
- * RSAVP1() function [RFC3447 sec 5.2.2]
- */
-static int RSAVP1(const struct public_key *key, MPI s, MPI *_m)
-{
- MPI m;
- int ret;
-
- /* (1) Validate 0 <= s < n */
- if (mpi_cmp_ui(s, 0) < 0) {
- kleave(" = -EBADMSG [s < 0]");
- return -EBADMSG;
- }
- if (mpi_cmp(s, key->rsa.n) >= 0) {
- kleave(" = -EBADMSG [s >= n]");
- return -EBADMSG;
- }
-
- m = mpi_alloc(0);
- if (!m)
- return -ENOMEM;
-
- /* (2) m = s^e mod n */
- ret = mpi_powm(m, s, key->rsa.e, key->rsa.n);
- if (ret < 0) {
- mpi_free(m);
- return ret;
- }
-
- *_m = m;
- return 0;
-}
-
-/*
- * Integer to Octet String conversion [RFC3447 sec 4.1]
- */
-static int RSA_I2OSP(MPI x, size_t xLen, u8 **pX)
-{
- unsigned X_size, x_size;
- int X_sign;
- u8 *X;
-
- /* Make sure the string is the right length. The number should begin
- * with { 0x00, 0x01, ... } so we have to account for 15 leading zero
- * bits not being reported by MPI.
- */
- x_size = mpi_get_nbits(x);
- pr_devel("size(x)=%u xLen*8=%zu\n", x_size, xLen * 8);
- if (x_size != xLen * 8 - 15)
- return -ERANGE;
-
- X = mpi_get_buffer(x, &X_size, &X_sign);
- if (!X)
- return -ENOMEM;
- if (X_sign < 0) {
- kfree(X);
- return -EBADMSG;
- }
- if (X_size != xLen - 1) {
- kfree(X);
- return -EBADMSG;
- }
-
- *pX = X;
- return 0;
-}
-
-/*
- * Perform the RSA signature verification.
- * @H: Value of hash of data and metadata
- * @EM: The computed signature value
- * @k: The size of EM (EM[0] is an invalid location but should hold 0x00)
- * @hash_size: The size of H
- * @asn1_template: The DigestInfo ASN.1 template
- * @asn1_size: Size of asm1_template[]
- */
-static int RSA_verify(const u8 *H, const u8 *EM, size_t k, size_t hash_size,
- const u8 *asn1_template, size_t asn1_size)
-{
- unsigned PS_end, T_offset, i;
-
- kenter(",,%zu,%zu,%zu", k, hash_size, asn1_size);
-
- if (k < 2 + 1 + asn1_size + hash_size)
- return -EBADMSG;
-
- /* Decode the EMSA-PKCS1-v1_5 */
- if (EM[1] != 0x01) {
- kleave(" = -EBADMSG [EM[1] == %02u]", EM[1]);
- return -EBADMSG;
- }
-
- T_offset = k - (asn1_size + hash_size);
- PS_end = T_offset - 1;
- if (EM[PS_end] != 0x00) {
- kleave(" = -EBADMSG [EM[T-1] == %02u]", EM[PS_end]);
- return -EBADMSG;
- }
-
- for (i = 2; i < PS_end; i++) {
- if (EM[i] != 0xff) {
- kleave(" = -EBADMSG [EM[PS%x] == %02u]", i - 2, EM[i]);
- return -EBADMSG;
- }
- }
-
- if (crypto_memneq(asn1_template, EM + T_offset, asn1_size) != 0) {
- kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]");
- return -EBADMSG;
- }
-
- if (crypto_memneq(H, EM + T_offset + asn1_size, hash_size) != 0) {
- kleave(" = -EKEYREJECTED [EM[T] hash mismatch]");
- return -EKEYREJECTED;
- }
-
- kleave(" = 0");
- return 0;
-}
-
-/*
- * Perform the verification step [RFC3447 sec 8.2.2].
- */
-static int RSA_verify_signature(const struct public_key *key,
- const struct public_key_signature *sig)
-{
- size_t tsize;
- int ret;
-
- /* Variables as per RFC3447 sec 8.2.2 */
- const u8 *H = sig->digest;
- u8 *EM = NULL;
- MPI m = NULL;
- size_t k;
-
- kenter("");
-
- if (!RSA_ASN1_templates[sig->pkey_hash_algo].data)
- return -ENOTSUPP;
-
- /* (1) Check the signature size against the public key modulus size */
- k = mpi_get_nbits(key->rsa.n);
- tsize = mpi_get_nbits(sig->rsa.s);
-
- /* According to RFC 4880 sec 3.2, length of MPI is computed starting
- * from most significant bit. So the RFC 3447 sec 8.2.2 size check
- * must be relaxed to conform with shorter signatures - so we fail here
- * only if signature length is longer than modulus size.
- */
- pr_devel("step 1: k=%zu size(S)=%zu\n", k, tsize);
- if (k < tsize) {
- ret = -EBADMSG;
- goto error;
- }
-
- /* Round up and convert to octets */
- k = (k + 7) / 8;
-
- /* (2b) Apply the RSAVP1 verification primitive to the public key */
- ret = RSAVP1(key, sig->rsa.s, &m);
- if (ret < 0)
- goto error;
-
- /* (2c) Convert the message representative (m) to an encoded message
- * (EM) of length k octets.
- *
- * NOTE! The leading zero byte is suppressed by MPI, so we pass a
- * pointer to the _preceding_ byte to RSA_verify()!
- */
- ret = RSA_I2OSP(m, k, &EM);
- if (ret < 0)
- goto error;
-
- ret = RSA_verify(H, EM - 1, k, sig->digest_size,
- RSA_ASN1_templates[sig->pkey_hash_algo].data,
- RSA_ASN1_templates[sig->pkey_hash_algo].size);
-
-error:
- kfree(EM);
- mpi_free(m);
- kleave(" = %d", ret);
- return ret;
-}
-
-const struct public_key_algorithm RSA_public_key_algorithm = {
- .name = "RSA",
- .n_pub_mpi = 2,
- .n_sec_mpi = 3,
- .n_sig_mpi = 1,
- .verify_signature = RSA_verify_signature,
-};
-EXPORT_SYMBOL_GPL(RSA_public_key_algorithm);
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
index 897b734da..7e8c2338a 100644
--- a/crypto/asymmetric_keys/verify_pefile.c
+++ b/crypto/asymmetric_keys/verify_pefile.c
@@ -328,12 +328,12 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
void *digest;
int ret;
- kenter(",%u", ctx->digest_algo);
+ kenter(",%s", ctx->digest_algo);
/* Allocate the hashing algorithm we're going to need and find out how
* big the hash operational data will be.
*/
- tfm = crypto_alloc_shash(hash_algo_name[ctx->digest_algo], 0, 0);
+ tfm = crypto_alloc_shash(ctx->digest_algo, 0, 0);
if (IS_ERR(tfm))
return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);
diff --git a/crypto/asymmetric_keys/verify_pefile.h b/crypto/asymmetric_keys/verify_pefile.h
index 55d5f7ebc..a133eb81a 100644
--- a/crypto/asymmetric_keys/verify_pefile.h
+++ b/crypto/asymmetric_keys/verify_pefile.h
@@ -28,7 +28,7 @@ struct pefile_context {
/* PKCS#7 MS Individual Code Signing content */
const void *digest; /* Digest */
unsigned digest_len; /* Digest length */
- enum hash_algo digest_algo; /* Digest algorithm */
+ const char *digest_algo; /* Digest algorithm */
};
#define kenter(FMT, ...) \
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 13c4e5a5f..4a29bac70 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -15,11 +15,10 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/oid_registry.h>
-#include "public_key.h"
+#include <crypto/public_key.h>
#include "x509_parser.h"
#include "x509-asn1.h"
#include "x509_akid-asn1.h"
-#include "x509_rsakey-asn1.h"
struct x509_parse_context {
struct x509_certificate *cert; /* Certificate being constructed */
@@ -56,7 +55,7 @@ void x509_free_certificate(struct x509_certificate *cert)
kfree(cert->akid_id);
kfree(cert->akid_skid);
kfree(cert->sig.digest);
- mpi_free(cert->sig.rsa.s);
+ kfree(cert->sig.s);
kfree(cert);
}
}
@@ -103,12 +102,12 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
}
}
- /* Decode the public key */
- ret = asn1_ber_decoder(&x509_rsakey_decoder, ctx,
- ctx->key, ctx->key_size);
- if (ret < 0)
+ cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL);
+ if (!cert->pub->key)
goto error_decode;
+ cert->pub->keylen = ctx->key_size;
+
/* Generate cert issuer + serial number key ID */
kid = asymmetric_key_generate_id(cert->raw_serial,
cert->raw_serial_size,
@@ -124,6 +123,7 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
return cert;
error_decode:
+ kfree(cert->pub->key);
kfree(ctx);
error_no_ctx:
x509_free_certificate(cert);
@@ -188,33 +188,33 @@ int x509_note_pkey_algo(void *context, size_t hdrlen,
return -ENOPKG; /* Unsupported combination */
case OID_md4WithRSAEncryption:
- ctx->cert->sig.pkey_hash_algo = HASH_ALGO_MD5;
- ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.hash_algo = "md4";
+ ctx->cert->sig.pkey_algo = "rsa";
break;
case OID_sha1WithRSAEncryption:
- ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA1;
- ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.hash_algo = "sha1";
+ ctx->cert->sig.pkey_algo = "rsa";
break;
case OID_sha256WithRSAEncryption:
- ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA256;
- ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.hash_algo = "sha256";
+ ctx->cert->sig.pkey_algo = "rsa";
break;
case OID_sha384WithRSAEncryption:
- ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA384;
- ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.hash_algo = "sha384";
+ ctx->cert->sig.pkey_algo = "rsa";
break;
case OID_sha512WithRSAEncryption:
- ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA512;
- ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.hash_algo = "sha512";
+ ctx->cert->sig.pkey_algo = "rsa";
break;
case OID_sha224WithRSAEncryption:
- ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA224;
- ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.hash_algo = "sha224";
+ ctx->cert->sig.pkey_algo = "rsa";
break;
}
@@ -396,7 +396,7 @@ int x509_extract_key_data(void *context, size_t hdrlen,
if (ctx->last_oid != OID_rsaEncryption)
return -ENOPKG;
- ctx->cert->pub->pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->pub->pkey_algo = "rsa";
/* Discard the BIT STRING metadata */
ctx->key = value + 1;
@@ -404,29 +404,6 @@ int x509_extract_key_data(void *context, size_t hdrlen,
return 0;
}
-/*
- * Extract a RSA public key value
- */
-int rsa_extract_mpi(void *context, size_t hdrlen,
- unsigned char tag,
- const void *value, size_t vlen)
-{
- struct x509_parse_context *ctx = context;
- MPI mpi;
-
- if (ctx->nr_mpi >= ARRAY_SIZE(ctx->cert->pub->mpi)) {
- pr_err("Too many public key MPIs in certificate\n");
- return -EBADMSG;
- }
-
- mpi = mpi_read_raw_data(value, vlen);
- if (!mpi)
- return -ENOMEM;
-
- ctx->cert->pub->mpi[ctx->nr_mpi++] = mpi;
- return 0;
-}
-
/* The keyIdentifier in AuthorityKeyIdentifier SEQUENCE is tag(CONT,PRIM,0) */
#define SEQ_TAG_KEYID (ASN1_CONT << 6)
@@ -548,9 +525,9 @@ int x509_decode_time(time64_t *_t, size_t hdrlen,
}
if (day < 1 || day > mon_len ||
- hour > 23 ||
+ hour > 24 || /* ISO 8601 permits 24:00:00 as midnight tomorrow */
min > 59 ||
- sec > 59)
+ sec > 60) /* ISO 8601 permits leap seconds [X.680 46.3] */
goto invalid_time;
*_t = mktime64(year, mon, day, hour, min, sec);
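The relaxed bounds above admit the two edge encodings ISO 8601 allows. A worked example (illustrative, not from this commit): a UTCTime of 24:00:00 on 2016-12-31 yields

	mktime64(2016, 12, 31, 24, 0, 0)
		= mktime64(2016, 12, 31, 0, 0, 0) + 24 * 3600
		= 1483142400 + 86400
		= 1483228800			/* 2017-01-01 00:00:00 UTC */

because mktime64() simply carries the extra hour into the next day; a leap second (sec == 60) is likewise carried into the first second of the following minute.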
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 9e9e5a6a9..733c046aa 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -13,15 +13,11 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/mpi.h>
-#include <linux/asn1_decoder.h>
#include <keys/asymmetric-subtype.h>
#include <keys/asymmetric-parser.h>
#include <keys/system_keyring.h>
#include <crypto/hash.h>
#include "asymmetric_keys.h"
-#include "public_key.h"
#include "x509_parser.h"
static bool use_builtin_keys;
@@ -167,18 +163,20 @@ int x509_get_sig_params(struct x509_certificate *cert)
if (cert->unsupported_crypto)
return -ENOPKG;
- if (cert->sig.rsa.s)
+ if (cert->sig.s)
return 0;
- cert->sig.rsa.s = mpi_read_raw_data(cert->raw_sig, cert->raw_sig_size);
- if (!cert->sig.rsa.s)
+ cert->sig.s = kmemdup(cert->raw_sig, cert->raw_sig_size,
+ GFP_KERNEL);
+ if (!cert->sig.s)
return -ENOMEM;
- cert->sig.nr_mpi = 1;
+
+ cert->sig.s_size = cert->raw_sig_size;
/* Allocate the hashing algorithm we're going to need and find out how
* big the hash operational data will be.
*/
- tfm = crypto_alloc_shash(hash_algo_name[cert->sig.pkey_hash_algo], 0, 0);
+ tfm = crypto_alloc_shash(cert->sig.hash_algo, 0, 0);
if (IS_ERR(tfm)) {
if (PTR_ERR(tfm) == -ENOENT) {
cert->unsupported_crypto = true;
@@ -293,24 +291,20 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
pr_devel("Cert Issuer: %s\n", cert->issuer);
pr_devel("Cert Subject: %s\n", cert->subject);
- if (cert->pub->pkey_algo >= PKEY_ALGO__LAST ||
- cert->sig.pkey_algo >= PKEY_ALGO__LAST ||
- cert->sig.pkey_hash_algo >= PKEY_HASH__LAST ||
- !pkey_algo[cert->pub->pkey_algo] ||
- !pkey_algo[cert->sig.pkey_algo] ||
- !hash_algo_name[cert->sig.pkey_hash_algo]) {
+ if (!cert->pub->pkey_algo ||
+ !cert->sig.pkey_algo ||
+ !cert->sig.hash_algo) {
ret = -ENOPKG;
goto error_free_cert;
}
- pr_devel("Cert Key Algo: %s\n", pkey_algo_name[cert->pub->pkey_algo]);
+ pr_devel("Cert Key Algo: %s\n", cert->pub->pkey_algo);
pr_devel("Cert Valid period: %lld-%lld\n", cert->valid_from, cert->valid_to);
pr_devel("Cert Signature: %s + %s\n",
- pkey_algo_name[cert->sig.pkey_algo],
- hash_algo_name[cert->sig.pkey_hash_algo]);
+ cert->sig.pkey_algo,
+ cert->sig.hash_algo);
- cert->pub->algo = pkey_algo[cert->pub->pkey_algo];
- cert->pub->id_type = PKEY_ID_X509;
+ cert->pub->id_type = "X509";
/* Check the signature on the key if it appears to be self-signed */
if ((!cert->akid_skid && !cert->akid_id) ||
diff --git a/crypto/asymmetric_keys/x509_rsakey.asn1 b/crypto/asymmetric_keys/x509_rsakey.asn1
deleted file mode 100644
index 4ec7cc653..000000000
--- a/crypto/asymmetric_keys/x509_rsakey.asn1
+++ /dev/null
@@ -1,4 +0,0 @@
-RSAPublicKey ::= SEQUENCE {
- modulus INTEGER ({ rsa_extract_mpi }), -- n
- publicExponent INTEGER ({ rsa_extract_mpi }) -- e
- }
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index c0748bbd4..08b3ac689 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -444,7 +444,7 @@ static int __init async_pq_init(void)
static void __exit async_pq_exit(void)
{
- put_page(pq_scribble_page);
+ __free_page(pq_scribble_page);
}
module_init(async_pq_init);
diff --git a/crypto/crc32.c b/crypto/crc32_generic.c
index 187ded28c..aa2a25fc7 100644
--- a/crypto/crc32.c
+++ b/crypto/crc32_generic.c
@@ -131,7 +131,7 @@ static struct shash_alg alg = {
.digestsize = CHKSUM_DIGEST_SIZE,
.base = {
.cra_name = "crc32",
- .cra_driver_name = "crc32-table",
+ .cra_driver_name = "crc32-generic",
.cra_priority = 100,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
@@ -157,3 +157,4 @@ MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crc32");
+MODULE_ALIAS_CRYPTO("crc32-generic");
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
new file mode 100644
index 000000000..a55c82dd4
--- /dev/null
+++ b/crypto/crypto_engine.c
@@ -0,0 +1,355 @@
+/*
+ * Handle async block request by crypto hardware engine.
+ *
+ * Copyright (C) 2016 Linaro, Inc.
+ *
+ * Author: Baolin Wang <baolin.wang@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/delay.h>
+#include "internal.h"
+
+#define CRYPTO_ENGINE_MAX_QLEN 10
+
+void crypto_finalize_request(struct crypto_engine *engine,
+ struct ablkcipher_request *req, int err);
+
+/**
+ * crypto_pump_requests - dequeue one request from engine queue to process
+ * @engine: the hardware engine
+ * @in_kthread: true if we are in the context of the request pump thread
+ *
+ * This function checks if there is any request in the engine queue that
+ * needs processing and if so call out to the driver to initialize hardware
+ * and handle each request.
+ */
+static void crypto_pump_requests(struct crypto_engine *engine,
+ bool in_kthread)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct ablkcipher_request *req;
+ unsigned long flags;
+ bool was_busy = false;
+ int ret;
+
+ spin_lock_irqsave(&engine->queue_lock, flags);
+
+ /* Make sure we are not already running a request */
+ if (engine->cur_req)
+ goto out;
+
+ /* If another context is idling then defer */
+ if (engine->idling) {
+ queue_kthread_work(&engine->kworker, &engine->pump_requests);
+ goto out;
+ }
+
+ /* Check if the engine queue is idle */
+ if (!crypto_queue_len(&engine->queue) || !engine->running) {
+ if (!engine->busy)
+ goto out;
+
+ /* Only do teardown in the thread */
+ if (!in_kthread) {
+ queue_kthread_work(&engine->kworker,
+ &engine->pump_requests);
+ goto out;
+ }
+
+ engine->busy = false;
+ engine->idling = true;
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+ if (engine->unprepare_crypt_hardware &&
+ engine->unprepare_crypt_hardware(engine))
+ pr_err("failed to unprepare crypt hardware\n");
+
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ engine->idling = false;
+ goto out;
+ }
+
+ /* Get the fist request from the engine queue to handle */
+ backlog = crypto_get_backlog(&engine->queue);
+ async_req = crypto_dequeue_request(&engine->queue);
+ if (!async_req)
+ goto out;
+
+ req = ablkcipher_request_cast(async_req);
+
+ engine->cur_req = req;
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ if (engine->busy)
+ was_busy = true;
+ else
+ engine->busy = true;
+
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+ /* Until here we get the request need to be encrypted successfully */
+ if (!was_busy && engine->prepare_crypt_hardware) {
+ ret = engine->prepare_crypt_hardware(engine);
+ if (ret) {
+ pr_err("failed to prepare crypt hardware\n");
+ goto req_err;
+ }
+ }
+
+ if (engine->prepare_request) {
+ ret = engine->prepare_request(engine, engine->cur_req);
+ if (ret) {
+ pr_err("failed to prepare request: %d\n", ret);
+ goto req_err;
+ }
+ engine->cur_req_prepared = true;
+ }
+
+ ret = engine->crypt_one_request(engine, engine->cur_req);
+ if (ret) {
+ pr_err("failed to crypt one request from queue\n");
+ goto req_err;
+ }
+ return;
+
+req_err:
+ crypto_finalize_request(engine, engine->cur_req, ret);
+ return;
+
+out:
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+}
+
+static void crypto_pump_work(struct kthread_work *work)
+{
+ struct crypto_engine *engine =
+ container_of(work, struct crypto_engine, pump_requests);
+
+ crypto_pump_requests(engine, true);
+}
+
+/**
+ * crypto_transfer_request - transfer the new request into the engine queue
+ * @engine: the hardware engine
+ * @req: the request need to be listed into the engine queue
+ */
+int crypto_transfer_request(struct crypto_engine *engine,
+ struct ablkcipher_request *req, bool need_pump)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&engine->queue_lock, flags);
+
+ if (!engine->running) {
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ ret = ablkcipher_enqueue_request(&engine->queue, req);
+
+ if (!engine->busy && need_pump)
+ queue_kthread_work(&engine->kworker, &engine->pump_requests);
+
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_request);
+
+/**
+ * crypto_transfer_request_to_engine - transfer one request to list into the
+ * engine queue
+ * @engine: the hardware engine
+ * @req: the request need to be listed into the engine queue
+ */
+int crypto_transfer_request_to_engine(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ return crypto_transfer_request(engine, req, true);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);
+
+/**
+ * crypto_finalize_request - finalize one request if the request is done
+ * @engine: the hardware engine
+ * @req: the request need to be finalized
+ * @err: error number
+ */
+void crypto_finalize_request(struct crypto_engine *engine,
+ struct ablkcipher_request *req, int err)
+{
+ unsigned long flags;
+ bool finalize_cur_req = false;
+ int ret;
+
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ if (engine->cur_req == req)
+ finalize_cur_req = true;
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+ if (finalize_cur_req) {
+ if (engine->cur_req_prepared && engine->unprepare_request) {
+ ret = engine->unprepare_request(engine, req);
+ if (ret)
+ pr_err("failed to unprepare request\n");
+ }
+
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ engine->cur_req = NULL;
+ engine->cur_req_prepared = false;
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+ }
+
+ req->base.complete(&req->base, err);
+
+ queue_kthread_work(&engine->kworker, &engine->pump_requests);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_request);
+
+/**
+ * crypto_engine_start - start the hardware engine
+ * @engine: the hardware engine need to be started
+ *
+ * Return 0 on success, else on fail.
+ */
+int crypto_engine_start(struct crypto_engine *engine)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->queue_lock, flags);
+
+ if (engine->running || engine->busy) {
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+ return -EBUSY;
+ }
+
+ engine->running = true;
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+ queue_kthread_work(&engine->kworker, &engine->pump_requests);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_start);
+
+/**
+ * crypto_engine_stop - stop the hardware engine
+ * @engine: the hardware engine need to be stopped
+ *
+ * Return 0 on success, else on fail.
+ */
+int crypto_engine_stop(struct crypto_engine *engine)
+{
+ unsigned long flags;
+ unsigned limit = 500;
+ int ret = 0;
+
+ spin_lock_irqsave(&engine->queue_lock, flags);
+
+ /*
+ * If the engine queue is not empty or the engine is on busy state,
+ * we need to wait for a while to pump the requests of engine queue.
+ */
+ while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+ msleep(20);
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ }
+
+ if (crypto_queue_len(&engine->queue) || engine->busy)
+ ret = -EBUSY;
+ else
+ engine->running = false;
+
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+ if (ret)
+ pr_warn("could not stop engine\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_stop);
+
+/**
+ * crypto_engine_alloc_init - allocate crypto hardware engine structure and
+ * initialize it.
+ * @dev: the device attached with one hardware engine
+ * @rt: whether this queue is set to run as a realtime task
+ *
+ * This must be called from context that can sleep.
+ * Return: the crypto engine structure on success, else NULL.
+ */
+struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
+{
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+ struct crypto_engine *engine;
+
+ if (!dev)
+ return NULL;
+
+ engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
+ if (!engine)
+ return NULL;
+
+ engine->rt = rt;
+ engine->running = false;
+ engine->busy = false;
+ engine->idling = false;
+ engine->cur_req_prepared = false;
+ engine->priv_data = dev;
+ snprintf(engine->name, sizeof(engine->name),
+ "%s-engine", dev_name(dev));
+
+ crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
+ spin_lock_init(&engine->queue_lock);
+
+ init_kthread_worker(&engine->kworker);
+ engine->kworker_task = kthread_run(kthread_worker_fn,
+ &engine->kworker, "%s",
+ engine->name);
+ if (IS_ERR(engine->kworker_task)) {
+ dev_err(dev, "failed to create crypto request pump task\n");
+ return NULL;
+ }
+ init_kthread_work(&engine->pump_requests, crypto_pump_work);
+
+ if (engine->rt) {
+ dev_info(dev, "will run requests pump with realtime priority\n");
+ sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
+ }
+
+ return engine;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
+
+/**
+ * crypto_engine_exit - free the resources of hardware engine when exit
+ * @engine: the hardware engine need to be freed
+ *
+ * Return 0 for success.
+ */
+int crypto_engine_exit(struct crypto_engine *engine)
+{
+ int ret;
+
+ ret = crypto_engine_stop(engine);
+ if (ret)
+ return ret;
+
+ flush_kthread_worker(&engine->kworker);
+ kthread_stop(engine->kworker_task);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crypto hardware engine framework");
diff --git a/crypto/drbg.c b/crypto/drbg.c
index ab6ef1d08..1b86310db 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -220,48 +220,6 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
}
/*
- * FIPS 140-2 continuous self test
- * The test is performed on the result of one round of the output
- * function. Thus, the function implicitly knows the size of the
- * buffer.
- *
- * @drbg DRBG handle
- * @buf output buffer of random data to be checked
- *
- * return:
- * true on success
- * false on error
- */
-static bool drbg_fips_continuous_test(struct drbg_state *drbg,
- const unsigned char *buf)
-{
-#ifdef CONFIG_CRYPTO_FIPS
- int ret = 0;
- /* skip test if we test the overall system */
- if (list_empty(&drbg->test_data.list))
- return true;
- /* only perform test in FIPS mode */
- if (0 == fips_enabled)
- return true;
- if (!drbg->fips_primed) {
- /* Priming of FIPS test */
- memcpy(drbg->prev, buf, drbg_blocklen(drbg));
- drbg->fips_primed = true;
- /* return false due to priming, i.e. another round is needed */
- return false;
- }
- ret = memcmp(drbg->prev, buf, drbg_blocklen(drbg));
- if (!ret)
- panic("DRBG continuous self test failed\n");
- memcpy(drbg->prev, buf, drbg_blocklen(drbg));
- /* the test shall pass when the two compared values are not equal */
- return ret != 0;
-#else
- return true;
-#endif /* CONFIG_CRYPTO_FIPS */
-}
-
-/*
* Convert an integer into a byte representation of this integer.
* The byte representation is big-endian
*
@@ -603,11 +561,6 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
}
outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
drbg_blocklen(drbg) : (buflen - len);
- if (!drbg_fips_continuous_test(drbg, drbg->scratchpad)) {
- /* 10.2.1.5.2 step 6 */
- crypto_inc(drbg->V, drbg_blocklen(drbg));
- continue;
- }
/* 10.2.1.5.2 step 4.3 */
memcpy(buf + len, drbg->scratchpad, outlen);
len += outlen;
@@ -733,8 +686,6 @@ static int drbg_hmac_generate(struct drbg_state *drbg,
return ret;
outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
drbg_blocklen(drbg) : (buflen - len);
- if (!drbg_fips_continuous_test(drbg, drbg->V))
- continue;
/* 10.1.2.5 step 4.2 */
memcpy(buf + len, drbg->V, outlen);
@@ -963,10 +914,6 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
}
outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
drbg_blocklen(drbg) : (buflen - len);
- if (!drbg_fips_continuous_test(drbg, dst)) {
- crypto_inc(src, drbg_statelen(drbg));
- continue;
- }
/* 10.1.1.4 step hashgen 4.2 */
memcpy(buf + len, dst, outlen);
len += outlen;
@@ -1201,11 +1148,6 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
drbg->reseed_ctr = 0;
drbg->d_ops = NULL;
drbg->core = NULL;
-#ifdef CONFIG_CRYPTO_FIPS
- kzfree(drbg->prev);
- drbg->prev = NULL;
- drbg->fips_primed = false;
-#endif
}
/*
@@ -1244,12 +1186,6 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
drbg->C = kmalloc(drbg_statelen(drbg), GFP_KERNEL);
if (!drbg->C)
goto err;
-#ifdef CONFIG_CRYPTO_FIPS
- drbg->prev = kmalloc(drbg_blocklen(drbg), GFP_KERNEL);
- if (!drbg->prev)
- goto err;
- drbg->fips_primed = false;
-#endif
/* scratchpad is only generated for CTR and Hash */
if (drbg->core->flags & DRBG_HMAC)
sb_size = 0;
diff --git a/crypto/internal.h b/crypto/internal.h
index 00e42a3ed..7eefcdb00 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -104,6 +104,9 @@ int crypto_probing_notify(unsigned long val, void *v);
unsigned int crypto_alg_extsize(struct crypto_alg *alg);
+int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
+ u32 type, u32 mask);
+
static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
{
atomic_inc(&alg->cra_refcnt);
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index f78d4fc4e..c4eb9da49 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -522,6 +522,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.halg.base.cra_flags = type;
inst->alg.halg.digestsize = salg->digestsize;
+ inst->alg.halg.statesize = salg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
deleted file mode 100644
index 7a13b4088..000000000
--- a/crypto/pcompress.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Partial (de)compression operations.
- *
- * Copyright 2008 Sony Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.
- * If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/crypto.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/string.h>
-#include <linux/cryptouser.h>
-#include <net/netlink.h>
-
-#include <crypto/compress.h>
-#include <crypto/internal/compress.h>
-
-#include "internal.h"
-
-
-static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask)
-{
- return 0;
-}
-
-static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm)
-{
- return 0;
-}
-
-#ifdef CONFIG_NET
-static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_report_comp rpcomp;
-
- strncpy(rpcomp.type, "pcomp", sizeof(rpcomp.type));
- if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
- sizeof(struct crypto_report_comp), &rpcomp))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
-}
-#else
-static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
-
-static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
- __attribute__ ((unused));
-static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
-{
- seq_printf(m, "type : pcomp\n");
-}
-
-static const struct crypto_type crypto_pcomp_type = {
- .extsize = crypto_alg_extsize,
- .init = crypto_pcomp_init,
- .init_tfm = crypto_pcomp_init_tfm,
-#ifdef CONFIG_PROC_FS
- .show = crypto_pcomp_show,
-#endif
- .report = crypto_pcomp_report,
- .maskclear = ~CRYPTO_ALG_TYPE_MASK,
- .maskset = CRYPTO_ALG_TYPE_MASK,
- .type = CRYPTO_ALG_TYPE_PCOMPRESS,
- .tfmsize = offsetof(struct crypto_pcomp, base),
-};
-
-struct crypto_pcomp *crypto_alloc_pcomp(const char *alg_name, u32 type,
- u32 mask)
-{
- return crypto_alloc_tfm(alg_name, &crypto_pcomp_type, type, mask);
-}
-EXPORT_SYMBOL_GPL(crypto_alloc_pcomp);
-
-int crypto_register_pcomp(struct pcomp_alg *alg)
-{
- struct crypto_alg *base = &alg->base;
-
- base->cra_type = &crypto_pcomp_type;
- base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
- base->cra_flags |= CRYPTO_ALG_TYPE_PCOMPRESS;
-
- return crypto_register_alg(base);
-}
-EXPORT_SYMBOL_GPL(crypto_register_pcomp);
-
-int crypto_unregister_pcomp(struct pcomp_alg *alg)
-{
- return crypto_unregister_alg(&alg->base);
-}
-EXPORT_SYMBOL_GPL(crypto_unregister_pcomp);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Partial (de)compression type");
-MODULE_AUTHOR("Sony Corporation");
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 0cbc5a502..ead8dc0d0 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -18,12 +18,89 @@
#include <linux/module.h>
#include <linux/random.h>
+/*
+ * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2].
+ */
+static const u8 rsa_digest_info_md5[] = {
+ 0x30, 0x20, 0x30, 0x0c, 0x06, 0x08,
+ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */
+ 0x05, 0x00, 0x04, 0x10
+};
+
+static const u8 rsa_digest_info_sha1[] = {
+ 0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
+ 0x2b, 0x0e, 0x03, 0x02, 0x1a,
+ 0x05, 0x00, 0x04, 0x14
+};
+
+static const u8 rsa_digest_info_rmd160[] = {
+ 0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
+ 0x2b, 0x24, 0x03, 0x02, 0x01,
+ 0x05, 0x00, 0x04, 0x14
+};
+
+static const u8 rsa_digest_info_sha224[] = {
+ 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
+ 0x05, 0x00, 0x04, 0x1c
+};
+
+static const u8 rsa_digest_info_sha256[] = {
+ 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
+ 0x05, 0x00, 0x04, 0x20
+};
+
+static const u8 rsa_digest_info_sha384[] = {
+ 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
+ 0x05, 0x00, 0x04, 0x30
+};
+
+static const u8 rsa_digest_info_sha512[] = {
+ 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
+ 0x05, 0x00, 0x04, 0x40
+};
+
+static const struct rsa_asn1_template {
+ const char *name;
+ const u8 *data;
+ size_t size;
+} rsa_asn1_templates[] = {
+#define _(X) { #X, rsa_digest_info_##X, sizeof(rsa_digest_info_##X) }
+ _(md5),
+ _(sha1),
+ _(rmd160),
+ _(sha256),
+ _(sha384),
+ _(sha512),
+ _(sha224),
+ { NULL }
+#undef _
+};
+
+static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name)
+{
+ const struct rsa_asn1_template *p;
+
+ for (p = rsa_asn1_templates; p->name; p++)
+ if (strcmp(name, p->name) == 0)
+ return p;
+ return NULL;
+}
+
struct pkcs1pad_ctx {
struct crypto_akcipher *child;
-
+ const char *hash_name;
unsigned int key_size;
};
+struct pkcs1pad_inst_ctx {
+ struct crypto_akcipher_spawn spawn;
+ const char *hash_name;
+};
+
struct pkcs1pad_request {
struct akcipher_request child_req;
@@ -339,13 +416,22 @@ static int pkcs1pad_sign(struct akcipher_request *req)
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ const struct rsa_asn1_template *digest_info = NULL;
int err;
- unsigned int ps_end;
+ unsigned int ps_end, digest_size = 0;
if (!ctx->key_size)
return -EINVAL;
- if (req->src_len > ctx->key_size - 11)
+ if (ctx->hash_name) {
+ digest_info = rsa_lookup_asn1(ctx->hash_name);
+ if (!digest_info)
+ return -EINVAL;
+
+ digest_size = digest_info->size;
+ }
+
+ if (req->src_len + digest_size > ctx->key_size - 11)
return -EOVERFLOW;
if (req->dst_len < ctx->key_size) {
@@ -371,11 +457,16 @@ static int pkcs1pad_sign(struct akcipher_request *req)
if (!req_ctx->in_buf)
return -ENOMEM;
- ps_end = ctx->key_size - req->src_len - 2;
+ ps_end = ctx->key_size - digest_size - req->src_len - 2;
req_ctx->in_buf[0] = 0x01;
memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
req_ctx->in_buf[ps_end] = 0x00;
+ if (digest_info) {
+ memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
+ digest_info->size);
+ }
+
pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
ctx->key_size - 1 - req->src_len, req->src);
@@ -408,6 +499,7 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ const struct rsa_asn1_template *digest_info;
unsigned int pos;
if (err == -EOVERFLOW)
@@ -422,20 +514,33 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
goto done;
}
- if (req_ctx->out_buf[0] != 0x01) {
- err = -EINVAL;
+ err = -EBADMSG;
+ if (req_ctx->out_buf[0] != 0x01)
goto done;
- }
+
for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
if (req_ctx->out_buf[pos] != 0xff)
break;
+
if (pos < 9 || pos == req_ctx->child_req.dst_len ||
- req_ctx->out_buf[pos] != 0x00) {
- err = -EINVAL;
+ req_ctx->out_buf[pos] != 0x00)
goto done;
- }
pos++;
+ if (ctx->hash_name) {
+ digest_info = rsa_lookup_asn1(ctx->hash_name);
+ if (!digest_info)
+ goto done;
+
+ if (memcmp(req_ctx->out_buf + pos, digest_info->data,
+ digest_info->size))
+ goto done;
+
+ pos += digest_info->size;
+ }
+
+ err = 0;
+
if (req->dst_len < req_ctx->child_req.dst_len - pos)
err = -EOVERFLOW;
req->dst_len = req_ctx->child_req.dst_len - pos;
@@ -444,7 +549,6 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
sg_copy_from_buffer(req->dst,
sg_nents_for_len(req->dst, req->dst_len),
req_ctx->out_buf + pos, req->dst_len);
-
done:
kzfree(req_ctx->out_buf);
@@ -481,7 +585,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
int err;
- if (!ctx->key_size || req->src_len != ctx->key_size)
+ if (!ctx->key_size || req->src_len < ctx->key_size)
return -EINVAL;
if (ctx->key_size > PAGE_SIZE)
@@ -518,6 +622,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
{
struct akcipher_instance *inst = akcipher_alg_instance(tfm);
+ struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct crypto_akcipher *child_tfm;
@@ -526,7 +631,7 @@ static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
return PTR_ERR(child_tfm);
ctx->child = child_tfm;
-
+ ctx->hash_name = ictx->hash_name;
return 0;
}
@@ -539,10 +644,11 @@ static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm)
static void pkcs1pad_free(struct akcipher_instance *inst)
{
- struct crypto_akcipher_spawn *spawn = akcipher_instance_ctx(inst);
+ struct pkcs1pad_inst_ctx *ctx = akcipher_instance_ctx(inst);
+ struct crypto_akcipher_spawn *spawn = &ctx->spawn;
crypto_drop_akcipher(spawn);
-
+ kfree(ctx->hash_name);
kfree(inst);
}
@@ -550,9 +656,11 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_attr_type *algt;
struct akcipher_instance *inst;
+ struct pkcs1pad_inst_ctx *ctx;
struct crypto_akcipher_spawn *spawn;
struct akcipher_alg *rsa_alg;
const char *rsa_alg_name;
+ const char *hash_name;
int err;
algt = crypto_get_attr_type(tb);
@@ -566,11 +674,18 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
if (IS_ERR(rsa_alg_name))
return PTR_ERR(rsa_alg_name);
- inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ hash_name = crypto_attr_alg_name(tb[2]);
+ if (IS_ERR(hash_name))
+ hash_name = NULL;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
- spawn = akcipher_instance_ctx(inst);
+ ctx = akcipher_instance_ctx(inst);
+ spawn = &ctx->spawn;
+ ctx->hash_name = hash_name ? kstrdup(hash_name, GFP_KERNEL) : NULL;
+
crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
crypto_requires_sync(algt->type, algt->mask));
@@ -580,15 +695,28 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
rsa_alg = crypto_spawn_akcipher_alg(spawn);
err = -ENAMETOOLONG;
- if (snprintf(inst->alg.base.cra_name,
- CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
- rsa_alg->base.cra_name) >=
- CRYPTO_MAX_ALG_NAME ||
- snprintf(inst->alg.base.cra_driver_name,
- CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
- rsa_alg->base.cra_driver_name) >=
- CRYPTO_MAX_ALG_NAME)
+
+ if (!hash_name) {
+ if (snprintf(inst->alg.base.cra_name,
+ CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+ rsa_alg->base.cra_name) >=
+ CRYPTO_MAX_ALG_NAME ||
+ snprintf(inst->alg.base.cra_driver_name,
+ CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+ rsa_alg->base.cra_driver_name) >=
+ CRYPTO_MAX_ALG_NAME)
goto out_drop_alg;
+ } else {
+ if (snprintf(inst->alg.base.cra_name,
+ CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
+ rsa_alg->base.cra_name, hash_name) >=
+ CRYPTO_MAX_ALG_NAME ||
+ snprintf(inst->alg.base.cra_driver_name,
+ CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
+ rsa_alg->base.cra_driver_name, hash_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto out_free_hash;
+ }
inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
@@ -610,10 +738,12 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
err = akcipher_register_instance(tmpl, inst);
if (err)
- goto out_drop_alg;
+ goto out_free_hash;
return 0;
+out_free_hash:
+ kfree(ctx->hash_name);
out_drop_alg:
crypto_drop_akcipher(spawn);
out_free_inst:
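The rsa-pkcs1pad changes above teach the template an optional hash parameter, so an
instance can be created as "pkcs1pad(rsa,sha256)" and sign/verify will insert and
check the matching DigestInfo DER prefix. As a minimal standalone sketch (hypothetical
helper, not part of the patch), the EMSA-PKCS1-v1_5 block that pkcs1pad_sign() builds
(minus the implicit leading zero octet) is laid out like this:

	#include <stdint.h>
	#include <string.h>

	/* Illustrative only: EM = 0x00 || 0x01 || PS(0xff..0xff) || 0x00 ||
	 * DigestInfo || H, with the padding string PS at least 8 bytes long. */
	static void pkcs1_v15_encode(uint8_t *em, size_t key_size,
				     const uint8_t *digest_info, size_t info_len,
				     const uint8_t *hash, size_t hash_len)
	{
		size_t ps_len = key_size - info_len - hash_len - 3;

		em[0] = 0x00;				/* leading zero octet */
		em[1] = 0x01;				/* block type 1 (signatures) */
		memset(em + 2, 0xff, ps_len);		/* padding string PS */
		em[2 + ps_len] = 0x00;			/* separator */
		memcpy(em + 3 + ps_len, digest_info, info_len);
		memcpy(em + 3 + ps_len + info_len, hash, hash_len);
	}

A kernel user would request such an instance by name, e.g.
crypto_alloc_akcipher("pkcs1pad(rsa,sha1)", 0, 0).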
diff --git a/crypto/shash.c b/crypto/shash.c
index 359754591..a051541a4 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -368,151 +368,6 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
return 0;
}
-static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct shash_desc **descp = crypto_hash_ctx(tfm);
- struct shash_desc *desc = *descp;
-
- return crypto_shash_setkey(desc->tfm, key, keylen);
-}
-
-static int shash_compat_init(struct hash_desc *hdesc)
-{
- struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
- struct shash_desc *desc = *descp;
-
- desc->flags = hdesc->flags;
-
- return crypto_shash_init(desc);
-}
-
-static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg,
- unsigned int len)
-{
- struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
- struct shash_desc *desc = *descp;
- struct crypto_hash_walk walk;
- int nbytes;
-
- for (nbytes = crypto_hash_walk_first_compat(hdesc, &walk, sg, len);
- nbytes > 0; nbytes = crypto_hash_walk_done(&walk, nbytes))
- nbytes = crypto_shash_update(desc, walk.data, nbytes);
-
- return nbytes;
-}
-
-static int shash_compat_final(struct hash_desc *hdesc, u8 *out)
-{
- struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
-
- return crypto_shash_final(*descp, out);
-}
-
-static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
- unsigned int nbytes, u8 *out)
-{
- unsigned int offset = sg->offset;
- int err;
-
- if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
- struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
- struct shash_desc *desc = *descp;
- void *data;
-
- desc->flags = hdesc->flags;
-
- data = kmap_atomic(sg_page(sg));
- err = crypto_shash_digest(desc, data + offset, nbytes, out);
- kunmap_atomic(data);
- crypto_yield(desc->flags);
- goto out;
- }
-
- err = shash_compat_init(hdesc);
- if (err)
- goto out;
-
- err = shash_compat_update(hdesc, sg, nbytes);
- if (err)
- goto out;
-
- err = shash_compat_final(hdesc, out);
-
-out:
- return err;
-}
-
-static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm)
-{
- struct shash_desc **descp = crypto_tfm_ctx(tfm);
- struct shash_desc *desc = *descp;
-
- crypto_free_shash(desc->tfm);
- kzfree(desc);
-}
-
-static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
-{
- struct hash_tfm *crt = &tfm->crt_hash;
- struct crypto_alg *calg = tfm->__crt_alg;
- struct shash_alg *alg = __crypto_shash_alg(calg);
- struct shash_desc **descp = crypto_tfm_ctx(tfm);
- struct crypto_shash *shash;
- struct shash_desc *desc;
-
- if (!crypto_mod_get(calg))
- return -EAGAIN;
-
- shash = crypto_create_tfm(calg, &crypto_shash_type);
- if (IS_ERR(shash)) {
- crypto_mod_put(calg);
- return PTR_ERR(shash);
- }
-
- desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(shash),
- GFP_KERNEL);
- if (!desc) {
- crypto_free_shash(shash);
- return -ENOMEM;
- }
-
- *descp = desc;
- desc->tfm = shash;
- tfm->exit = crypto_exit_shash_ops_compat;
-
- crt->init = shash_compat_init;
- crt->update = shash_compat_update;
- crt->final = shash_compat_final;
- crt->digest = shash_compat_digest;
- crt->setkey = shash_compat_setkey;
-
- crt->digestsize = alg->digestsize;
-
- return 0;
-}
-
-static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
-{
- switch (mask & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_HASH_MASK:
- return crypto_init_shash_ops_compat(tfm);
- }
-
- return -EINVAL;
-}
-
-static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type,
- u32 mask)
-{
- switch (mask & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_HASH_MASK:
- return sizeof(struct shash_desc *);
- }
-
- return 0;
-}
-
static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
@@ -559,9 +414,7 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
}
static const struct crypto_type crypto_shash_type = {
- .ctxsize = crypto_shash_ctxsize,
.extsize = crypto_alg_extsize,
- .init = crypto_init_shash_ops,
.init_tfm = crypto_shash_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_shash_show,
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index d199c0b17..69230e9d4 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -118,7 +118,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
skcipher->decrypt = skcipher_decrypt_blkcipher;
skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
- skcipher->has_setkey = calg->cra_blkcipher.max_keysize;
+ skcipher->keysize = calg->cra_blkcipher.max_keysize;
return 0;
}
@@ -211,7 +211,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
sizeof(struct ablkcipher_request);
- skcipher->has_setkey = calg->cra_ablkcipher.max_keysize;
+ skcipher->keysize = calg->cra_ablkcipher.max_keysize;
return 0;
}
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 270bc4b82..579dce071 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -554,164 +554,6 @@ out:
crypto_free_blkcipher(tfm);
}
-static int test_hash_jiffies_digest(struct hash_desc *desc,
- struct scatterlist *sg, int blen,
- char *out, int secs)
-{
- unsigned long start, end;
- int bcount;
- int ret;
-
- for (start = jiffies, end = start + secs * HZ, bcount = 0;
- time_before(jiffies, end); bcount++) {
- ret = crypto_hash_digest(desc, sg, blen, out);
- if (ret)
- return ret;
- }
-
- printk("%6u opers/sec, %9lu bytes/sec\n",
- bcount / secs, ((long)bcount * blen) / secs);
-
- return 0;
-}
-
-static int test_hash_jiffies(struct hash_desc *desc, struct scatterlist *sg,
- int blen, int plen, char *out, int secs)
-{
- unsigned long start, end;
- int bcount, pcount;
- int ret;
-
- if (plen == blen)
- return test_hash_jiffies_digest(desc, sg, blen, out, secs);
-
- for (start = jiffies, end = start + secs * HZ, bcount = 0;
- time_before(jiffies, end); bcount++) {
- ret = crypto_hash_init(desc);
- if (ret)
- return ret;
- for (pcount = 0; pcount < blen; pcount += plen) {
- ret = crypto_hash_update(desc, sg, plen);
- if (ret)
- return ret;
- }
- /* we assume there is enough space in 'out' for the result */
- ret = crypto_hash_final(desc, out);
- if (ret)
- return ret;
- }
-
- printk("%6u opers/sec, %9lu bytes/sec\n",
- bcount / secs, ((long)bcount * blen) / secs);
-
- return 0;
-}
-
-static int test_hash_cycles_digest(struct hash_desc *desc,
- struct scatterlist *sg, int blen, char *out)
-{
- unsigned long cycles = 0;
- int i;
- int ret;
-
- local_irq_disable();
-
- /* Warm-up run. */
- for (i = 0; i < 4; i++) {
- ret = crypto_hash_digest(desc, sg, blen, out);
- if (ret)
- goto out;
- }
-
- /* The real thing. */
- for (i = 0; i < 8; i++) {
- cycles_t start, end;
-
- start = get_cycles();
-
- ret = crypto_hash_digest(desc, sg, blen, out);
- if (ret)
- goto out;
-
- end = get_cycles();
-
- cycles += end - start;
- }
-
-out:
- local_irq_enable();
-
- if (ret)
- return ret;
-
- printk("%6lu cycles/operation, %4lu cycles/byte\n",
- cycles / 8, cycles / (8 * blen));
-
- return 0;
-}
-
-static int test_hash_cycles(struct hash_desc *desc, struct scatterlist *sg,
- int blen, int plen, char *out)
-{
- unsigned long cycles = 0;
- int i, pcount;
- int ret;
-
- if (plen == blen)
- return test_hash_cycles_digest(desc, sg, blen, out);
-
- local_irq_disable();
-
- /* Warm-up run. */
- for (i = 0; i < 4; i++) {
- ret = crypto_hash_init(desc);
- if (ret)
- goto out;
- for (pcount = 0; pcount < blen; pcount += plen) {
- ret = crypto_hash_update(desc, sg, plen);
- if (ret)
- goto out;
- }
- ret = crypto_hash_final(desc, out);
- if (ret)
- goto out;
- }
-
- /* The real thing. */
- for (i = 0; i < 8; i++) {
- cycles_t start, end;
-
- start = get_cycles();
-
- ret = crypto_hash_init(desc);
- if (ret)
- goto out;
- for (pcount = 0; pcount < blen; pcount += plen) {
- ret = crypto_hash_update(desc, sg, plen);
- if (ret)
- goto out;
- }
- ret = crypto_hash_final(desc, out);
- if (ret)
- goto out;
-
- end = get_cycles();
-
- cycles += end - start;
- }
-
-out:
- local_irq_enable();
-
- if (ret)
- return ret;
-
- printk("%6lu cycles/operation, %4lu cycles/byte\n",
- cycles / 8, cycles / (8 * blen));
-
- return 0;
-}
-
static void test_hash_sg_init(struct scatterlist *sg)
{
int i;
@@ -723,69 +565,6 @@ static void test_hash_sg_init(struct scatterlist *sg)
}
}
-static void test_hash_speed(const char *algo, unsigned int secs,
- struct hash_speed *speed)
-{
- struct scatterlist sg[TVMEMSIZE];
- struct crypto_hash *tfm;
- struct hash_desc desc;
- static char output[1024];
- int i;
- int ret;
-
- tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
-
- if (IS_ERR(tfm)) {
- printk(KERN_ERR "failed to load transform for %s: %ld\n", algo,
- PTR_ERR(tfm));
- return;
- }
-
- printk(KERN_INFO "\ntesting speed of %s (%s)\n", algo,
- get_driver_name(crypto_hash, tfm));
-
- desc.tfm = tfm;
- desc.flags = 0;
-
- if (crypto_hash_digestsize(tfm) > sizeof(output)) {
- printk(KERN_ERR "digestsize(%u) > outputbuffer(%zu)\n",
- crypto_hash_digestsize(tfm), sizeof(output));
- goto out;
- }
-
- test_hash_sg_init(sg);
- for (i = 0; speed[i].blen != 0; i++) {
- if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
- printk(KERN_ERR
- "template (%u) too big for tvmem (%lu)\n",
- speed[i].blen, TVMEMSIZE * PAGE_SIZE);
- goto out;
- }
-
- if (speed[i].klen)
- crypto_hash_setkey(tfm, tvmem[0], speed[i].klen);
-
- printk(KERN_INFO "test%3u "
- "(%5u byte blocks,%5u bytes per update,%4u updates): ",
- i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
-
- if (secs)
- ret = test_hash_jiffies(&desc, sg, speed[i].blen,
- speed[i].plen, output, secs);
- else
- ret = test_hash_cycles(&desc, sg, speed[i].blen,
- speed[i].plen, output);
-
- if (ret) {
- printk(KERN_ERR "hashing failed ret=%d\n", ret);
- break;
- }
- }
-
-out:
- crypto_free_hash(tfm);
-}
-
static inline int do_one_ahash_op(struct ahash_request *req, int ret)
{
if (ret == -EINPROGRESS || ret == -EBUSY) {
@@ -945,8 +724,8 @@ out:
return 0;
}
-static void test_ahash_speed(const char *algo, unsigned int secs,
- struct hash_speed *speed)
+static void test_ahash_speed_common(const char *algo, unsigned int secs,
+ struct hash_speed *speed, unsigned mask)
{
struct scatterlist sg[TVMEMSIZE];
struct tcrypt_result tresult;
@@ -955,7 +734,7 @@ static void test_ahash_speed(const char *algo, unsigned int secs,
char *output;
int i, ret;
- tfm = crypto_alloc_ahash(algo, 0, 0);
+ tfm = crypto_alloc_ahash(algo, 0, mask);
if (IS_ERR(tfm)) {
pr_err("failed to load transform for %s: %ld\n",
algo, PTR_ERR(tfm));
@@ -1021,6 +800,18 @@ out:
crypto_free_ahash(tfm);
}
+static void test_ahash_speed(const char *algo, unsigned int secs,
+ struct hash_speed *speed)
+{
+ return test_ahash_speed_common(algo, secs, speed, 0);
+}
+
+static void test_hash_speed(const char *algo, unsigned int secs,
+ struct hash_speed *speed)
+{
+ return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC);
+}
+
static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
{
if (ret == -EINPROGRESS || ret == -EBUSY) {
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index ae8c57fd8..7d4acc449 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -96,13 +96,6 @@ struct comp_test_suite {
} comp, decomp;
};
-struct pcomp_test_suite {
- struct {
- struct pcomp_testvec *vecs;
- unsigned int count;
- } comp, decomp;
-};
-
struct hash_test_suite {
struct hash_testvec *vecs;
unsigned int count;
@@ -133,7 +126,6 @@ struct alg_test_desc {
struct aead_test_suite aead;
struct cipher_test_suite cipher;
struct comp_test_suite comp;
- struct pcomp_test_suite pcomp;
struct hash_test_suite hash;
struct cprng_test_suite cprng;
struct drbg_test_suite drbg;
@@ -198,6 +190,61 @@ static int wait_async_op(struct tcrypt_result *tr, int ret)
return ret;
}
+static int ahash_partial_update(struct ahash_request **preq,
+ struct crypto_ahash *tfm, struct hash_testvec *template,
+ void *hash_buff, int k, int temp, struct scatterlist *sg,
+ const char *algo, char *result, struct tcrypt_result *tresult)
+{
+ char *state;
+ struct ahash_request *req;
+ int statesize, ret = -EINVAL;
+
+ req = *preq;
+ statesize = crypto_ahash_statesize(
+ crypto_ahash_reqtfm(req));
+ state = kmalloc(statesize, GFP_KERNEL);
+ if (!state) {
+ pr_err("alt: hash: Failed to alloc state for %s\n", algo);
+ goto out_nostate;
+ }
+ ret = crypto_ahash_export(req, state);
+ if (ret) {
+ pr_err("alt: hash: Failed to export() for %s\n", algo);
+ goto out;
+ }
+ ahash_request_free(req);
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ pr_err("alg: hash: Failed to alloc request for %s\n", algo);
+ goto out_noreq;
+ }
+ ahash_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tcrypt_complete, tresult);
+
+ memcpy(hash_buff, template->plaintext + temp,
+ template->tap[k]);
+ sg_init_one(&sg[0], hash_buff, template->tap[k]);
+ ahash_request_set_crypt(req, sg, result, template->tap[k]);
+ ret = crypto_ahash_import(req, state);
+ if (ret) {
+ pr_err("alg: hash: Failed to import() for %s\n", algo);
+ goto out;
+ }
+ ret = wait_async_op(tresult, crypto_ahash_update(req));
+ if (ret)
+ goto out;
+ *preq = req;
+ ret = 0;
+ goto out_noreq;
+out:
+ ahash_request_free(req);
+out_noreq:
+ kfree(state);
+out_nostate:
+ return ret;
+}
+
static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
unsigned int tcount, bool use_digest,
const int align_offset)
@@ -385,6 +432,84 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
}
}
+ /* partial update exercise */
+ j = 0;
+ for (i = 0; i < tcount; i++) {
+ /* alignment tests are only done with continuous buffers */
+ if (align_offset != 0)
+ break;
+
+ if (template[i].np < 2)
+ continue;
+
+ j++;
+ memset(result, 0, MAX_DIGEST_SIZE);
+
+ ret = -EINVAL;
+ hash_buff = xbuf[0];
+ memcpy(hash_buff, template[i].plaintext,
+ template[i].tap[0]);
+ sg_init_one(&sg[0], hash_buff, template[i].tap[0]);
+
+ if (template[i].ksize) {
+ crypto_ahash_clear_flags(tfm, ~0);
+ if (template[i].ksize > MAX_KEYLEN) {
+ pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
+ j, algo, template[i].ksize, MAX_KEYLEN);
+ ret = -EINVAL;
+ goto out;
+ }
+ memcpy(key, template[i].key, template[i].ksize);
+ ret = crypto_ahash_setkey(tfm, key, template[i].ksize);
+ if (ret) {
+ pr_err("alg: hash: setkey failed on test %d for %s: ret=%d\n",
+ j, algo, -ret);
+ goto out;
+ }
+ }
+
+ ahash_request_set_crypt(req, sg, result, template[i].tap[0]);
+ ret = wait_async_op(&tresult, crypto_ahash_init(req));
+ if (ret) {
+ pr_err("alt: hash: init failed on test %d for %s: ret=%d\n",
+ j, algo, -ret);
+ goto out;
+ }
+ ret = wait_async_op(&tresult, crypto_ahash_update(req));
+ if (ret) {
+ pr_err("alt: hash: update failed on test %d for %s: ret=%d\n",
+ j, algo, -ret);
+ goto out;
+ }
+
+ temp = template[i].tap[0];
+ for (k = 1; k < template[i].np; k++) {
+ ret = ahash_partial_update(&req, tfm, &template[i],
+ hash_buff, k, temp, &sg[0], algo, result,
+ &tresult);
+ if (ret) {
+ pr_err("hash: partial update failed on test %d for %s: ret=%d\n",
+ j, algo, -ret);
+ goto out_noreq;
+ }
+ temp += template[i].tap[k];
+ }
+ ret = wait_async_op(&tresult, crypto_ahash_final(req));
+ if (ret) {
+ pr_err("alt: hash: final failed on test %d for %s: ret=%d\n",
+ j, algo, -ret);
+ goto out;
+ }
+ if (memcmp(result, template[i].digest,
+ crypto_ahash_digestsize(tfm))) {
+ pr_err("alg: hash: Partial Test %d failed for %s\n",
+ j, algo);
+ hexdump(result, crypto_ahash_digestsize(tfm));
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
ret = 0;
out:
@@ -488,6 +613,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
tcrypt_complete, &result);
+ iv_len = crypto_aead_ivsize(tfm);
+
for (i = 0, j = 0; i < tcount; i++) {
if (template[i].np)
continue;
@@ -508,7 +635,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
memcpy(input, template[i].input, template[i].ilen);
memcpy(assoc, template[i].assoc, template[i].alen);
- iv_len = crypto_aead_ivsize(tfm);
if (template[i].iv)
memcpy(iv, template[i].iv, iv_len);
else
@@ -617,7 +743,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
j++;
if (template[i].iv)
- memcpy(iv, template[i].iv, MAX_IVLEN);
+ memcpy(iv, template[i].iv, iv_len);
else
memset(iv, 0, MAX_IVLEN);
@@ -1293,183 +1419,6 @@ out:
return ret;
}
-static int test_pcomp(struct crypto_pcomp *tfm,
- struct pcomp_testvec *ctemplate,
- struct pcomp_testvec *dtemplate, int ctcount,
- int dtcount)
-{
- const char *algo = crypto_tfm_alg_driver_name(crypto_pcomp_tfm(tfm));
- unsigned int i;
- char result[COMP_BUF_SIZE];
- int res;
-
- for (i = 0; i < ctcount; i++) {
- struct comp_request req;
- unsigned int produced = 0;
-
- res = crypto_compress_setup(tfm, ctemplate[i].params,
- ctemplate[i].paramsize);
- if (res) {
- pr_err("alg: pcomp: compression setup failed on test "
- "%d for %s: error=%d\n", i + 1, algo, res);
- return res;
- }
-
- res = crypto_compress_init(tfm);
- if (res) {
- pr_err("alg: pcomp: compression init failed on test "
- "%d for %s: error=%d\n", i + 1, algo, res);
- return res;
- }
-
- memset(result, 0, sizeof(result));
-
- req.next_in = ctemplate[i].input;
- req.avail_in = ctemplate[i].inlen / 2;
- req.next_out = result;
- req.avail_out = ctemplate[i].outlen / 2;
-
- res = crypto_compress_update(tfm, &req);
- if (res < 0 && (res != -EAGAIN || req.avail_in)) {
- pr_err("alg: pcomp: compression update failed on test "
- "%d for %s: error=%d\n", i + 1, algo, res);
- return res;
- }
- if (res > 0)
- produced += res;
-
- /* Add remaining input data */
- req.avail_in += (ctemplate[i].inlen + 1) / 2;
-
- res = crypto_compress_update(tfm, &req);
- if (res < 0 && (res != -EAGAIN || req.avail_in)) {
- pr_err("alg: pcomp: compression update failed on test "
- "%d for %s: error=%d\n", i + 1, algo, res);
- return res;
- }
- if (res > 0)
- produced += res;
-
- /* Provide remaining output space */
- req.avail_out += COMP_BUF_SIZE - ctemplate[i].outlen / 2;
-
- res = crypto_compress_final(tfm, &req);
- if (res < 0) {
- pr_err("alg: pcomp: compression final failed on test "
- "%d for %s: error=%d\n", i + 1, algo, res);
- return res;
- }
- produced += res;
-
- if (COMP_BUF_SIZE - req.avail_out != ctemplate[i].outlen) {
- pr_err("alg: comp: Compression test %d failed for %s: "
- "output len = %d (expected %d)\n", i + 1, algo,
- COMP_BUF_SIZE - req.avail_out,
- ctemplate[i].outlen);
- return -EINVAL;
- }
-
- if (produced != ctemplate[i].outlen) {
- pr_err("alg: comp: Compression test %d failed for %s: "
- "returned len = %u (expected %d)\n", i + 1,
- algo, produced, ctemplate[i].outlen);
- return -EINVAL;
- }
-
- if (memcmp(result, ctemplate[i].output, ctemplate[i].outlen)) {
- pr_err("alg: pcomp: Compression test %d failed for "
- "%s\n", i + 1, algo);
- hexdump(result, ctemplate[i].outlen);
- return -EINVAL;
- }
- }
-
- for (i = 0; i < dtcount; i++) {
- struct comp_request req;
- unsigned int produced = 0;
-
- res = crypto_decompress_setup(tfm, dtemplate[i].params,
- dtemplate[i].paramsize);
- if (res) {
- pr_err("alg: pcomp: decompression setup failed on "
- "test %d for %s: error=%d\n", i + 1, algo, res);
- return res;
- }
-
- res = crypto_decompress_init(tfm);
- if (res) {
- pr_err("alg: pcomp: decompression init failed on test "
- "%d for %s: error=%d\n", i + 1, algo, res);
- return res;
- }
-
- memset(result, 0, sizeof(result));
-
- req.next_in = dtemplate[i].input;
- req.avail_in = dtemplate[i].inlen / 2;
- req.next_out = result;
- req.avail_out = dtemplate[i].outlen / 2;
-
- res = crypto_decompress_update(tfm, &req);
- if (res < 0 && (res != -EAGAIN || req.avail_in)) {
- pr_err("alg: pcomp: decompression update failed on "
- "test %d for %s: error=%d\n", i + 1, algo, res);
- return res;
- }
- if (res > 0)
- produced += res;
-
- /* Add remaining input data */
- req.avail_in += (dtemplate[i].inlen + 1) / 2;
-
- res = crypto_decompress_update(tfm, &req);
- if (res < 0 && (res != -EAGAIN || req.avail_in)) {
- pr_err("alg: pcomp: decompression update failed on "
- "test %d for %s: error=%d\n", i + 1, algo, res);
- return res;
- }
- if (res > 0)
- produced += res;
-
- /* Provide remaining output space */
- req.avail_out += COMP_BUF_SIZE - dtemplate[i].outlen / 2;
-
- res = crypto_decompress_final(tfm, &req);
- if (res < 0 && (res != -EAGAIN || req.avail_in)) {
- pr_err("alg: pcomp: decompression final failed on "
- "test %d for %s: error=%d\n", i + 1, algo, res);
- return res;
- }
- if (res > 0)
- produced += res;
-
- if (COMP_BUF_SIZE - req.avail_out != dtemplate[i].outlen) {
- pr_err("alg: comp: Decompression test %d failed for "
- "%s: output len = %d (expected %d)\n", i + 1,
- algo, COMP_BUF_SIZE - req.avail_out,
- dtemplate[i].outlen);
- return -EINVAL;
- }
-
- if (produced != dtemplate[i].outlen) {
- pr_err("alg: comp: Decompression test %d failed for "
- "%s: returned len = %u (expected %d)\n", i + 1,
- algo, produced, dtemplate[i].outlen);
- return -EINVAL;
- }
-
- if (memcmp(result, dtemplate[i].output, dtemplate[i].outlen)) {
- pr_err("alg: pcomp: Decompression test %d failed for "
- "%s\n", i + 1, algo);
- hexdump(result, dtemplate[i].outlen);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-
static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
unsigned int tcount)
{
@@ -1640,28 +1589,6 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
return err;
}
-static int alg_test_pcomp(const struct alg_test_desc *desc, const char *driver,
- u32 type, u32 mask)
-{
- struct crypto_pcomp *tfm;
- int err;
-
- tfm = crypto_alloc_pcomp(driver, type, mask);
- if (IS_ERR(tfm)) {
- pr_err("alg: pcomp: Failed to load transform for %s: %ld\n",
- driver, PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
-
- err = test_pcomp(tfm, desc->suite.pcomp.comp.vecs,
- desc->suite.pcomp.decomp.vecs,
- desc->suite.pcomp.comp.count,
- desc->suite.pcomp.decomp.count);
-
- crypto_free_pcomp(tfm);
- return err;
-}
-
static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
@@ -1849,6 +1776,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
static int do_test_rsa(struct crypto_akcipher *tfm,
struct akcipher_testvec *vecs)
{
+ char *xbuf[XBUFSIZE];
struct akcipher_request *req;
void *outbuf_enc = NULL;
void *outbuf_dec = NULL;
@@ -1857,9 +1785,12 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
int err = -ENOMEM;
struct scatterlist src, dst, src_tab[2];
+ if (testmgr_alloc_buf(xbuf))
+ return err;
+
req = akcipher_request_alloc(tfm, GFP_KERNEL);
if (!req)
- return err;
+ goto free_xbuf;
init_completion(&result.completion);
@@ -1877,9 +1808,14 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
if (!outbuf_enc)
goto free_req;
+ if (WARN_ON(vecs->m_size > PAGE_SIZE))
+ goto free_all;
+
+ memcpy(xbuf[0], vecs->m, vecs->m_size);
+
sg_init_table(src_tab, 2);
- sg_set_buf(&src_tab[0], vecs->m, 8);
- sg_set_buf(&src_tab[1], vecs->m + 8, vecs->m_size - 8);
+ sg_set_buf(&src_tab[0], xbuf[0], 8);
+ sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8);
sg_init_one(&dst, outbuf_enc, out_len_max);
akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
out_len_max);
@@ -1898,7 +1834,7 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
goto free_all;
}
/* verify that encrypted message is equal to expected */
- if (memcmp(vecs->c, sg_virt(req->dst), vecs->c_size)) {
+ if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
pr_err("alg: rsa: encrypt test failed. Invalid output\n");
err = -EINVAL;
goto free_all;
@@ -1913,7 +1849,13 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
err = -ENOMEM;
goto free_all;
}
- sg_init_one(&src, vecs->c, vecs->c_size);
+
+ if (WARN_ON(vecs->c_size > PAGE_SIZE))
+ goto free_all;
+
+ memcpy(xbuf[0], vecs->c, vecs->c_size);
+
+ sg_init_one(&src, xbuf[0], vecs->c_size);
sg_init_one(&dst, outbuf_dec, out_len_max);
init_completion(&result.completion);
akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
@@ -1940,6 +1882,8 @@ free_all:
kfree(outbuf_enc);
free_req:
akcipher_request_free(req);
+free_xbuf:
+ testmgr_free_buf(xbuf);
return err;
}
@@ -2081,7 +2025,6 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "ansi_cprng",
.test = alg_test_cprng,
- .fips_allowed = 1,
.suite = {
.cprng = {
.vecs = ansi_cprng_aes_tv_template,
@@ -2132,6 +2075,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "authenc(hmac(sha1),cbc(des3_ede))",
.test = alg_test_aead,
+ .fips_allowed = 1,
.suite = {
.aead = {
.enc = {
@@ -2143,6 +2087,10 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "authenc(hmac(sha1),ctr(aes))",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
.alg = "authenc(hmac(sha1),ecb(cipher_null))",
.test = alg_test_aead,
.suite = {
@@ -2162,6 +2110,10 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
.alg = "authenc(hmac(sha224),cbc(des))",
.test = alg_test_aead,
.suite = {
@@ -2177,6 +2129,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "authenc(hmac(sha224),cbc(des3_ede))",
.test = alg_test_aead,
+ .fips_allowed = 1,
.suite = {
.aead = {
.enc = {
@@ -2190,6 +2143,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "authenc(hmac(sha256),cbc(aes))",
.test = alg_test_aead,
+ .fips_allowed = 1,
.suite = {
.aead = {
.enc = {
@@ -2216,6 +2170,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "authenc(hmac(sha256),cbc(des3_ede))",
.test = alg_test_aead,
+ .fips_allowed = 1,
.suite = {
.aead = {
.enc = {
@@ -2227,6 +2182,14 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "authenc(hmac(sha256),ctr(aes))",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
+ .alg = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
.alg = "authenc(hmac(sha384),cbc(des))",
.test = alg_test_aead,
.suite = {
@@ -2242,6 +2205,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "authenc(hmac(sha384),cbc(des3_ede))",
.test = alg_test_aead,
+ .fips_allowed = 1,
.suite = {
.aead = {
.enc = {
@@ -2253,7 +2217,16 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "authenc(hmac(sha384),ctr(aes))",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
+ .alg = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
.alg = "authenc(hmac(sha512),cbc(aes))",
+ .fips_allowed = 1,
.test = alg_test_aead,
.suite = {
.aead = {
@@ -2281,6 +2254,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "authenc(hmac(sha512),cbc(des3_ede))",
.test = alg_test_aead,
+ .fips_allowed = 1,
.suite = {
.aead = {
.enc = {
@@ -2292,6 +2266,14 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "authenc(hmac(sha512),ctr(aes))",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
+ .alg = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
.alg = "cbc(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
@@ -3840,22 +3822,6 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
- }, {
- .alg = "zlib",
- .test = alg_test_pcomp,
- .fips_allowed = 1,
- .suite = {
- .pcomp = {
- .comp = {
- .vecs = zlib_comp_tv_template,
- .count = ZLIB_COMP_TEST_VECTORS
- },
- .decomp = {
- .vecs = zlib_decomp_tv_template,
- .count = ZLIB_DECOMP_TEST_VECTORS
- }
- }
- }
}
};
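The ahash_partial_update() helper added above exercises crypto_ahash_export() and
crypto_ahash_import(), which serialize the running hash state so an update sequence
can be resumed on a freshly allocated request. A minimal sketch of that round trip
(hypothetical helper, not from the patch):

	#include <crypto/hash.h>
	#include <linux/slab.h>

	/* Export the in-progress state of 'req' and import it again, the same
	 * round trip ahash_partial_update() performs across a request
	 * re-allocation. */
	static int ahash_export_import_roundtrip(struct ahash_request *req)
	{
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
		void *state;
		int err;

		state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
		if (!state)
			return -ENOMEM;

		err = crypto_ahash_export(req, state);
		if (!err)
			err = crypto_ahash_import(req, state);

		kfree(state);
		return err;
	}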
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index da0a8fd76..487ec880e 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -25,9 +25,6 @@
#define _CRYPTO_TESTMGR_H
#include <linux/netlink.h>
-#include <linux/zlib.h>
-
-#include <crypto/compress.h>
#define MAX_DIGEST_SIZE 64
#define MAX_TAP 8
@@ -32268,14 +32265,6 @@ struct comp_testvec {
char output[COMP_BUF_SIZE];
};
-struct pcomp_testvec {
- const void *params;
- unsigned int paramsize;
- int inlen, outlen;
- char input[COMP_BUF_SIZE];
- char output[COMP_BUF_SIZE];
-};
-
/*
* Deflate test vectors (null-terminated strings).
* Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
@@ -32356,139 +32345,6 @@ static struct comp_testvec deflate_decomp_tv_template[] = {
},
};
-#define ZLIB_COMP_TEST_VECTORS 2
-#define ZLIB_DECOMP_TEST_VECTORS 2
-
-static const struct {
- struct nlattr nla;
- int val;
-} deflate_comp_params[] = {
- {
- .nla = {
- .nla_len = NLA_HDRLEN + sizeof(int),
- .nla_type = ZLIB_COMP_LEVEL,
- },
- .val = Z_DEFAULT_COMPRESSION,
- }, {
- .nla = {
- .nla_len = NLA_HDRLEN + sizeof(int),
- .nla_type = ZLIB_COMP_METHOD,
- },
- .val = Z_DEFLATED,
- }, {
- .nla = {
- .nla_len = NLA_HDRLEN + sizeof(int),
- .nla_type = ZLIB_COMP_WINDOWBITS,
- },
- .val = -11,
- }, {
- .nla = {
- .nla_len = NLA_HDRLEN + sizeof(int),
- .nla_type = ZLIB_COMP_MEMLEVEL,
- },
- .val = MAX_MEM_LEVEL,
- }, {
- .nla = {
- .nla_len = NLA_HDRLEN + sizeof(int),
- .nla_type = ZLIB_COMP_STRATEGY,
- },
- .val = Z_DEFAULT_STRATEGY,
- }
-};
-
-static const struct {
- struct nlattr nla;
- int val;
-} deflate_decomp_params[] = {
- {
- .nla = {
- .nla_len = NLA_HDRLEN + sizeof(int),
- .nla_type = ZLIB_DECOMP_WINDOWBITS,
- },
- .val = -11,
- }
-};
-
-static struct pcomp_testvec zlib_comp_tv_template[] = {
- {
- .params = &deflate_comp_params,
- .paramsize = sizeof(deflate_comp_params),
- .inlen = 70,
- .outlen = 38,
- .input = "Join us now and share the software "
- "Join us now and share the software ",
- .output = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
- "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
- "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
- "\x48\x55\x28\xce\x4f\x2b\x29\x07"
- "\x71\xbc\x08\x2b\x01\x00",
- }, {
- .params = &deflate_comp_params,
- .paramsize = sizeof(deflate_comp_params),
- .inlen = 191,
- .outlen = 122,
- .input = "This document describes a compression method based on the DEFLATE"
- "compression algorithm. This document defines the application of "
- "the DEFLATE algorithm to the IP Payload Compression Protocol.",
- .output = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
- "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
- "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
- "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
- "\x68\x12\x51\xae\x76\x67\xd6\x27"
- "\x19\x88\x1a\xde\x85\xab\x21\xf2"
- "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
- "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
- "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
- "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
- "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
- "\x52\x37\xed\x0e\x52\x6b\x59\x02"
- "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
- "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
- "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
- "\xfa\x02",
- },
-};
-
-static struct pcomp_testvec zlib_decomp_tv_template[] = {
- {
- .params = &deflate_decomp_params,
- .paramsize = sizeof(deflate_decomp_params),
- .inlen = 122,
- .outlen = 191,
- .input = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
- "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
- "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
- "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
- "\x68\x12\x51\xae\x76\x67\xd6\x27"
- "\x19\x88\x1a\xde\x85\xab\x21\xf2"
- "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
- "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
- "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
- "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
- "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
- "\x52\x37\xed\x0e\x52\x6b\x59\x02"
- "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
- "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
- "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
- "\xfa\x02",
- .output = "This document describes a compression method based on the DEFLATE"
- "compression algorithm. This document defines the application of "
- "the DEFLATE algorithm to the IP Payload Compression Protocol.",
- }, {
- .params = &deflate_decomp_params,
- .paramsize = sizeof(deflate_decomp_params),
- .inlen = 38,
- .outlen = 70,
- .input = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
- "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
- "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
- "\x48\x55\x28\xce\x4f\x2b\x29\x07"
- "\x71\xbc\x08\x2b\x01\x00",
- .output = "Join us now and share the software "
- "Join us now and share the software ",
- },
-};
-
/*
* LZO test vectors (null-terminated strings).
*/
diff --git a/crypto/xts.c b/crypto/xts.c
index f6fd43f10..26ba5833b 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -35,16 +35,11 @@ static int setkey(struct crypto_tfm *parent, const u8 *key,
{
struct priv *ctx = crypto_tfm_ctx(parent);
struct crypto_cipher *child = ctx->tweak;
- u32 *flags = &parent->crt_flags;
int err;
- /* key consists of keys of equal size concatenated, therefore
- * the length must be even */
- if (keylen % 2) {
- /* tell the user why there was an error */
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- }
+ err = xts_check_key(parent, key, keylen);
+ if (err)
+ return err;
/* we need two cipher instances: one to compute the initial 'tweak'
* by encrypting the IV (usually the 'plain' iv) and the other
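The open-coded even-length check above is replaced by the new xts_check_key() helper.
A paraphrased sketch of the checks that helper performs (illustrative only; the real
helper lives in include/crypto/xts.h and uses crypto_memneq() rather than memcmp()):

	#include <linux/crypto.h>
	#include <linux/errno.h>
	#include <linux/fips.h>
	#include <linux/string.h>

	/* XTS keys are two equal-size keys concatenated; in FIPS mode the two
	 * halves must additionally differ. */
	static int xts_key_ok(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
	{
		if (keylen % 2) {
			crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		if (fips_enabled && !memcmp(key, key + keylen / 2, keylen / 2)) {
			crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
			return -EINVAL;
		}

		return 0;
	}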
diff --git a/crypto/zlib.c b/crypto/zlib.c
deleted file mode 100644
index d51a30a29..000000000
--- a/crypto/zlib.c
+++ /dev/null
@@ -1,381 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Zlib algorithm
- *
- * Copyright 2008 Sony Corporation
- *
- * Based on deflate.c, which is
- * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * FIXME: deflate transforms will require up to a total of about 436k of kernel
- * memory on i386 (390k for compression, the rest for decompression), as the
- * current zlib kernel code uses a worst case pre-allocation system by default.
- * This needs to be fixed so that the amount of memory required is properly
- * related to the winbits and memlevel parameters.
- */
-
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/zlib.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-
-#include <crypto/internal/compress.h>
-
-#include <net/netlink.h>
-
-
-struct zlib_ctx {
- struct z_stream_s comp_stream;
- struct z_stream_s decomp_stream;
- int decomp_windowBits;
-};
-
-
-static void zlib_comp_exit(struct zlib_ctx *ctx)
-{
- struct z_stream_s *stream = &ctx->comp_stream;
-
- if (stream->workspace) {
- zlib_deflateEnd(stream);
- vfree(stream->workspace);
- stream->workspace = NULL;
- }
-}
-
-static void zlib_decomp_exit(struct zlib_ctx *ctx)
-{
- struct z_stream_s *stream = &ctx->decomp_stream;
-
- if (stream->workspace) {
- zlib_inflateEnd(stream);
- vfree(stream->workspace);
- stream->workspace = NULL;
- }
-}
-
-static int zlib_init(struct crypto_tfm *tfm)
-{
- return 0;
-}
-
-static void zlib_exit(struct crypto_tfm *tfm)
-{
- struct zlib_ctx *ctx = crypto_tfm_ctx(tfm);
-
- zlib_comp_exit(ctx);
- zlib_decomp_exit(ctx);
-}
-
-
-static int zlib_compress_setup(struct crypto_pcomp *tfm, const void *params,
- unsigned int len)
-{
- struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
- struct z_stream_s *stream = &ctx->comp_stream;
- struct nlattr *tb[ZLIB_COMP_MAX + 1];
- int window_bits, mem_level;
- size_t workspacesize;
- int ret;
-
- ret = nla_parse(tb, ZLIB_COMP_MAX, params, len, NULL);
- if (ret)
- return ret;
-
- zlib_comp_exit(ctx);
-
- window_bits = tb[ZLIB_COMP_WINDOWBITS]
- ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
- : MAX_WBITS;
- mem_level = tb[ZLIB_COMP_MEMLEVEL]
- ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
- : DEF_MEM_LEVEL;
-
- workspacesize = zlib_deflate_workspacesize(window_bits, mem_level);
- stream->workspace = vzalloc(workspacesize);
- if (!stream->workspace)
- return -ENOMEM;
-
- ret = zlib_deflateInit2(stream,
- tb[ZLIB_COMP_LEVEL]
- ? nla_get_u32(tb[ZLIB_COMP_LEVEL])
- : Z_DEFAULT_COMPRESSION,
- tb[ZLIB_COMP_METHOD]
- ? nla_get_u32(tb[ZLIB_COMP_METHOD])
- : Z_DEFLATED,
- window_bits,
- mem_level,
- tb[ZLIB_COMP_STRATEGY]
- ? nla_get_u32(tb[ZLIB_COMP_STRATEGY])
- : Z_DEFAULT_STRATEGY);
- if (ret != Z_OK) {
- vfree(stream->workspace);
- stream->workspace = NULL;
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int zlib_compress_init(struct crypto_pcomp *tfm)
-{
- int ret;
- struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
- struct z_stream_s *stream = &dctx->comp_stream;
-
- ret = zlib_deflateReset(stream);
- if (ret != Z_OK)
- return -EINVAL;
-
- return 0;
-}
-
-static int zlib_compress_update(struct crypto_pcomp *tfm,
- struct comp_request *req)
-{
- int ret;
- struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
- struct z_stream_s *stream = &dctx->comp_stream;
-
- pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
- stream->next_in = req->next_in;
- stream->avail_in = req->avail_in;
- stream->next_out = req->next_out;
- stream->avail_out = req->avail_out;
-
- ret = zlib_deflate(stream, Z_NO_FLUSH);
- switch (ret) {
- case Z_OK:
- break;
-
- case Z_BUF_ERROR:
- pr_debug("zlib_deflate could not make progress\n");
- return -EAGAIN;
-
- default:
- pr_debug("zlib_deflate failed %d\n", ret);
- return -EINVAL;
- }
-
- ret = req->avail_out - stream->avail_out;
- pr_debug("avail_in %lu, avail_out %lu (consumed %lu, produced %u)\n",
- stream->avail_in, stream->avail_out,
- req->avail_in - stream->avail_in, ret);
- req->next_in = stream->next_in;
- req->avail_in = stream->avail_in;
- req->next_out = stream->next_out;
- req->avail_out = stream->avail_out;
- return ret;
-}
-
-static int zlib_compress_final(struct crypto_pcomp *tfm,
- struct comp_request *req)
-{
- int ret;
- struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
- struct z_stream_s *stream = &dctx->comp_stream;
-
- pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
- stream->next_in = req->next_in;
- stream->avail_in = req->avail_in;
- stream->next_out = req->next_out;
- stream->avail_out = req->avail_out;
-
- ret = zlib_deflate(stream, Z_FINISH);
- if (ret != Z_STREAM_END) {
- pr_debug("zlib_deflate failed %d\n", ret);
- return -EINVAL;
- }
-
- ret = req->avail_out - stream->avail_out;
- pr_debug("avail_in %lu, avail_out %lu (consumed %lu, produced %u)\n",
- stream->avail_in, stream->avail_out,
- req->avail_in - stream->avail_in, ret);
- req->next_in = stream->next_in;
- req->avail_in = stream->avail_in;
- req->next_out = stream->next_out;
- req->avail_out = stream->avail_out;
- return ret;
-}
-
-
-static int zlib_decompress_setup(struct crypto_pcomp *tfm, const void *params,
- unsigned int len)
-{
- struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
- struct z_stream_s *stream = &ctx->decomp_stream;
- struct nlattr *tb[ZLIB_DECOMP_MAX + 1];
- int ret = 0;
-
- ret = nla_parse(tb, ZLIB_DECOMP_MAX, params, len, NULL);
- if (ret)
- return ret;
-
- zlib_decomp_exit(ctx);
-
- ctx->decomp_windowBits = tb[ZLIB_DECOMP_WINDOWBITS]
- ? nla_get_u32(tb[ZLIB_DECOMP_WINDOWBITS])
- : DEF_WBITS;
-
- stream->workspace = vzalloc(zlib_inflate_workspacesize());
- if (!stream->workspace)
- return -ENOMEM;
-
- ret = zlib_inflateInit2(stream, ctx->decomp_windowBits);
- if (ret != Z_OK) {
- vfree(stream->workspace);
- stream->workspace = NULL;
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int zlib_decompress_init(struct crypto_pcomp *tfm)
-{
- int ret;
- struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
- struct z_stream_s *stream = &dctx->decomp_stream;
-
- ret = zlib_inflateReset(stream);
- if (ret != Z_OK)
- return -EINVAL;
-
- return 0;
-}
-
-static int zlib_decompress_update(struct crypto_pcomp *tfm,
- struct comp_request *req)
-{
- int ret;
- struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
- struct z_stream_s *stream = &dctx->decomp_stream;
-
- pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
- stream->next_in = req->next_in;
- stream->avail_in = req->avail_in;
- stream->next_out = req->next_out;
- stream->avail_out = req->avail_out;
-
- ret = zlib_inflate(stream, Z_SYNC_FLUSH);
- switch (ret) {
- case Z_OK:
- case Z_STREAM_END:
- break;
-
- case Z_BUF_ERROR:
- pr_debug("zlib_inflate could not make progress\n");
- return -EAGAIN;
-
- default:
- pr_debug("zlib_inflate failed %d\n", ret);
- return -EINVAL;
- }
-
- ret = req->avail_out - stream->avail_out;
- pr_debug("avail_in %lu, avail_out %lu (consumed %lu, produced %u)\n",
- stream->avail_in, stream->avail_out,
- req->avail_in - stream->avail_in, ret);
- req->next_in = stream->next_in;
- req->avail_in = stream->avail_in;
- req->next_out = stream->next_out;
- req->avail_out = stream->avail_out;
- return ret;
-}
-
-static int zlib_decompress_final(struct crypto_pcomp *tfm,
- struct comp_request *req)
-{
- int ret;
- struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
- struct z_stream_s *stream = &dctx->decomp_stream;
-
- pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
- stream->next_in = req->next_in;
- stream->avail_in = req->avail_in;
- stream->next_out = req->next_out;
- stream->avail_out = req->avail_out;
-
- if (dctx->decomp_windowBits < 0) {
- ret = zlib_inflate(stream, Z_SYNC_FLUSH);
- /*
- * Work around a bug in zlib, which sometimes wants to taste an
- * extra byte when being used in the (undocumented) raw deflate
- * mode. (From USAGI).
- */
- if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
- const void *saved_next_in = stream->next_in;
- u8 zerostuff = 0;
-
- stream->next_in = &zerostuff;
- stream->avail_in = 1;
- ret = zlib_inflate(stream, Z_FINISH);
- stream->next_in = saved_next_in;
- stream->avail_in = 0;
- }
- } else
- ret = zlib_inflate(stream, Z_FINISH);
- if (ret != Z_STREAM_END) {
- pr_debug("zlib_inflate failed %d\n", ret);
- return -EINVAL;
- }
-
- ret = req->avail_out - stream->avail_out;
- pr_debug("avail_in %lu, avail_out %lu (consumed %lu, produced %u)\n",
- stream->avail_in, stream->avail_out,
- req->avail_in - stream->avail_in, ret);
- req->next_in = stream->next_in;
- req->avail_in = stream->avail_in;
- req->next_out = stream->next_out;
- req->avail_out = stream->avail_out;
- return ret;
-}
-
-
-static struct pcomp_alg zlib_alg = {
- .compress_setup = zlib_compress_setup,
- .compress_init = zlib_compress_init,
- .compress_update = zlib_compress_update,
- .compress_final = zlib_compress_final,
- .decompress_setup = zlib_decompress_setup,
- .decompress_init = zlib_decompress_init,
- .decompress_update = zlib_decompress_update,
- .decompress_final = zlib_decompress_final,
-
- .base = {
- .cra_name = "zlib",
- .cra_flags = CRYPTO_ALG_TYPE_PCOMPRESS,
- .cra_ctxsize = sizeof(struct zlib_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = zlib_init,
- .cra_exit = zlib_exit,
- }
-};
-
-static int __init zlib_mod_init(void)
-{
- return crypto_register_pcomp(&zlib_alg);
-}
-
-static void __exit zlib_mod_fini(void)
-{
- crypto_unregister_pcomp(&zlib_alg);
-}
-
-module_init(zlib_mod_init);
-module_exit(zlib_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Zlib Compression Algorithm");
-MODULE_AUTHOR("Sony Corporation");
-MODULE_ALIAS_CRYPTO("zlib");