author    Scott Wood <scottwood@freescale.com>  2014-04-07 23:49:35 (GMT)
committer Scott Wood <scottwood@freescale.com>  2014-04-07 23:49:35 (GMT)
commit    62b8c978ee6b8d135d9e7953221de58000dba986
tree      683b04b2e627f6710c22c151b23c8cc9a165315e
parent    78fd82238d0e5716578c326404184a27ba67fd6e
Rewind v3.13-rc3+ (78fd82238d0e5716) to v3.12
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig                             |  42
-rw-r--r--  crypto/Makefile                            |   9
-rw-r--r--  crypto/ablk_helper.c                       | 150
-rw-r--r--  crypto/ablkcipher.c                        |  21
-rw-r--r--  crypto/af_alg.c                            |   2
-rw-r--r--  crypto/algif_hash.c                        |   5
-rw-r--r--  crypto/algif_skcipher.c                    |   4
-rw-r--r--  crypto/ansi_cprng.c                        |   4
-rw-r--r--  crypto/asymmetric_keys/Kconfig             |   4
-rw-r--r--  crypto/asymmetric_keys/asymmetric_type.c   |   1
-rw-r--r--  crypto/asymmetric_keys/public_key.c        |  66
-rw-r--r--  crypto/asymmetric_keys/public_key.h        |   6
-rw-r--r--  crypto/asymmetric_keys/rsa.c               |  19
-rw-r--r--  crypto/asymmetric_keys/x509_cert_parser.c  |  35
-rw-r--r--  crypto/asymmetric_keys/x509_parser.h       |  18
-rw-r--r--  crypto/asymmetric_keys/x509_public_key.c   | 157
-rw-r--r--  crypto/async_tx/async_memcpy.c             |  37
-rw-r--r--  crypto/async_tx/async_pq.c                 | 174
-rw-r--r--  crypto/async_tx/async_raid6_recov.c        |  61
-rw-r--r--  crypto/async_tx/async_tx.c                 |   4
-rw-r--r--  crypto/async_tx/async_xor.c                | 123
-rw-r--r--  crypto/async_tx/raid6test.c                |  10
-rw-r--r--  crypto/authenc.c                           |  61
-rw-r--r--  crypto/authencesn.c                        |  34
-rw-r--r--  crypto/ccm.c                               |   7
-rw-r--r--  crypto/gcm.c                               |   2
-rw-r--r--  crypto/hash_info.c                         |  56
-rw-r--r--  crypto/memneq.c                            | 138
-rw-r--r--  crypto/tcrypt.c                            |   8
-rw-r--r--  crypto/testmgr.c                           |  38
30 files changed, 428 insertions(+), 868 deletions(-)
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 7bcb70d..69ce573 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -174,8 +174,9 @@ config CRYPTO_TEST
help
Quick & dirty crypto test module.
-config CRYPTO_ABLK_HELPER
+config CRYPTO_ABLK_HELPER_X86
tristate
+ depends on X86
select CRYPTO_CRYPTD
config CRYPTO_GLUE_HELPER_X86
@@ -694,7 +695,7 @@ config CRYPTO_AES_NI_INTEL
select CRYPTO_AES_X86_64 if 64BIT
select CRYPTO_AES_586 if !64BIT
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_ABLK_HELPER_X86
select CRYPTO_ALGAPI
select CRYPTO_GLUE_HELPER_X86 if 64BIT
select CRYPTO_LRW
@@ -775,22 +776,6 @@ config CRYPTO_AES_ARM
See <http://csrc.nist.gov/encryption/aes/> for more information.
-config CRYPTO_AES_ARM_BS
- tristate "Bit sliced AES using NEON instructions"
- depends on ARM && KERNEL_MODE_NEON
- select CRYPTO_ALGAPI
- select CRYPTO_AES_ARM
- select CRYPTO_ABLK_HELPER
- help
- Use a faster and more secure NEON-based implementation of AES in CBC,
- CTR and XTS modes
-
- Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode
- and for XTS mode encryption, CBC and XTS mode decryption speedup is
- around 25%. (CBC encryption speed is not affected by this driver.)
- This implementation does not rely on any lookup tables so it is
- believed to be invulnerable to cache timing attacks.
-
config CRYPTO_ANUBIS
tristate "Anubis cipher algorithm"
select CRYPTO_ALGAPI
@@ -894,7 +879,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
depends on CRYPTO
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_ABLK_HELPER_X86
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_CAMELLIA_X86_64
select CRYPTO_LRW
@@ -916,7 +901,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
depends on CRYPTO
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_ABLK_HELPER_X86
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_CAMELLIA_X86_64
select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
@@ -968,7 +953,7 @@ config CRYPTO_CAST5_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_ABLK_HELPER_X86
select CRYPTO_CAST_COMMON
select CRYPTO_CAST5
help
@@ -991,7 +976,7 @@ config CRYPTO_CAST6_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_ABLK_HELPER_X86
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_CAST_COMMON
select CRYPTO_CAST6
@@ -1109,7 +1094,7 @@ config CRYPTO_SERPENT_SSE2_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_ABLK_HELPER_X86
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_LRW
@@ -1131,7 +1116,7 @@ config CRYPTO_SERPENT_SSE2_586
depends on X86 && !64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_ABLK_HELPER_X86
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_LRW
@@ -1153,7 +1138,7 @@ config CRYPTO_SERPENT_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_ABLK_HELPER_X86
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_LRW
@@ -1175,7 +1160,7 @@ config CRYPTO_SERPENT_AVX2_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_ABLK_HELPER_X86
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_SERPENT_AVX_X86_64
@@ -1291,7 +1276,7 @@ config CRYPTO_TWOFISH_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_ABLK_HELPER_X86
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64
@@ -1401,9 +1386,6 @@ config CRYPTO_USER_API_SKCIPHER
This option enables the user-space interface for symmetric
key cipher algorithms.
-config CRYPTO_HASH_INFO
- bool
-
source "drivers/crypto/Kconfig"
source crypto/asymmetric_keys/Kconfig
diff --git a/crypto/Makefile b/crypto/Makefile
index 989c510..80019ba 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -2,13 +2,8 @@
# Cryptographic API
#
-# memneq MUST be built with -Os or -O0 to prevent early-return optimizations
-# that will defeat memneq's actual purpose to prevent timing attacks.
-CFLAGS_REMOVE_memneq.o := -O1 -O2 -O3
-CFLAGS_memneq.o := -Os
-
obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-y := api.o cipher.o compress.o memneq.o
+crypto-y := api.o cipher.o compress.o
obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
@@ -109,5 +104,3 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
obj-$(CONFIG_XOR_BLOCKS) += xor.o
obj-$(CONFIG_ASYNC_CORE) += async_tx/
obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
-obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
-obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
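
For context on the Makefile comment removed above: crypto/memneq.c implements a constant-time comparison, and those CFLAGS exist to keep the compiler from re-adding data-dependent early exits. A minimal sketch of the idea in plain C follows; it is an illustration only, not the kernel's implementation (which also has word-at-a-time paths).

	/* Minimal constant-time inequality sketch in the spirit of
	 * crypto/memneq.c (illustration only, not the kernel code).
	 * Every byte pair is XORed and OR-accumulated, so the running
	 * time depends only on size, never on where the buffers differ.
	 * That is exactly the property an -O1/-O2/-O3 early-return
	 * optimization could destroy, hence the flags above. */
	#include <stddef.h>

	static unsigned long memneq_sketch(const void *a, const void *b, size_t size)
	{
		const unsigned char *pa = a, *pb = b;
		unsigned long neq = 0;

		while (size--)
			neq |= *pa++ ^ *pb++;	/* accumulate, never branch out */

		return neq;	/* zero if and only if the buffers are equal */
	}
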
diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c
deleted file mode 100644
index ffe7278..0000000
--- a/crypto/ablk_helper.c
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Shared async block cipher helpers
- *
- * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
- *
- * Based on aesni-intel_glue.c by:
- * Copyright (C) 2008, Intel Corp.
- * Author: Huang Ying <ying.huang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/hardirq.h>
-#include <crypto/algapi.h>
-#include <crypto/cryptd.h>
-#include <crypto/ablk_helper.h>
-#include <asm/simd.h>
-
-int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int key_len)
-{
- struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
- int err;
-
- crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
- & CRYPTO_TFM_REQ_MASK);
- err = crypto_ablkcipher_setkey(child, key, key_len);
- crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
- & CRYPTO_TFM_RES_MASK);
- return err;
-}
-EXPORT_SYMBOL_GPL(ablk_set_key);
-
-int __ablk_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct blkcipher_desc desc;
-
- desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
- desc.info = req->info;
- desc.flags = 0;
-
- return crypto_blkcipher_crt(desc.tfm)->encrypt(
- &desc, req->dst, req->src, req->nbytes);
-}
-EXPORT_SYMBOL_GPL(__ablk_encrypt);
-
-int ablk_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-
- if (!may_use_simd()) {
- struct ablkcipher_request *cryptd_req =
- ablkcipher_request_ctx(req);
-
- *cryptd_req = *req;
- ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
-
- return crypto_ablkcipher_encrypt(cryptd_req);
- } else {
- return __ablk_encrypt(req);
- }
-}
-EXPORT_SYMBOL_GPL(ablk_encrypt);
-
-int ablk_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-
- if (!may_use_simd()) {
- struct ablkcipher_request *cryptd_req =
- ablkcipher_request_ctx(req);
-
- *cryptd_req = *req;
- ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
-
- return crypto_ablkcipher_decrypt(cryptd_req);
- } else {
- struct blkcipher_desc desc;
-
- desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
- desc.info = req->info;
- desc.flags = 0;
-
- return crypto_blkcipher_crt(desc.tfm)->decrypt(
- &desc, req->dst, req->src, req->nbytes);
- }
-}
-EXPORT_SYMBOL_GPL(ablk_decrypt);
-
-void ablk_exit(struct crypto_tfm *tfm)
-{
- struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
-
- cryptd_free_ablkcipher(ctx->cryptd_tfm);
-}
-EXPORT_SYMBOL_GPL(ablk_exit);
-
-int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name)
-{
- struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
- struct cryptd_ablkcipher *cryptd_tfm;
-
- cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- ctx->cryptd_tfm = cryptd_tfm;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
- crypto_ablkcipher_reqsize(&cryptd_tfm->base);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ablk_init_common);
-
-int ablk_init(struct crypto_tfm *tfm)
-{
- char drv_name[CRYPTO_MAX_ALG_NAME];
-
- snprintf(drv_name, sizeof(drv_name), "__driver-%s",
- crypto_tfm_alg_driver_name(tfm));
-
- return ablk_init_common(tfm, drv_name);
-}
-EXPORT_SYMBOL_GPL(ablk_init);
-
-MODULE_LICENSE("GPL");
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 40886c4..7d4a8d2 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -16,7 +16,9 @@
#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
+#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -28,6 +30,8 @@
#include "internal.h"
+static const char *skcipher_default_geniv __read_mostly;
+
struct ablkcipher_buffer {
struct list_head entry;
struct scatter_walk dst;
@@ -523,7 +527,8 @@ const char *crypto_default_geniv(const struct crypto_alg *alg)
alg->cra_blocksize)
return "chainiv";
- return "eseqiv";
+ return alg->cra_flags & CRYPTO_ALG_ASYNC ?
+ "eseqiv" : skcipher_default_geniv;
}
static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
@@ -704,3 +709,17 @@ err:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
+
+static int __init skcipher_module_init(void)
+{
+ skcipher_default_geniv = num_possible_cpus() > 1 ?
+ "eseqiv" : "chainiv";
+ return 0;
+}
+
+static void skcipher_module_exit(void)
+{
+}
+
+module_init(skcipher_module_init);
+module_exit(skcipher_module_exit);
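
The hunks above restore the pre-3.13 default-IV-generator selection. Condensed into one hypothetical helper for clarity (the real logic is split between crypto_default_geniv(), which also returns "chainiv" for small-IV ciphers, and the module init above):

	/* Sketch of the restored geniv choice (illustrative helper, not a
	 * kernel function): async ciphers always get "eseqiv"; synchronous
	 * ciphers get "eseqiv" only on SMP, since "chainiv" serializes IV
	 * generation and is cheaper when only one CPU issues requests. */
	static const char *default_geniv_sketch(int is_async, int nr_cpus)
	{
		if (is_async)
			return "eseqiv";
		return nr_cpus > 1 ? "eseqiv" : "chainiv";
	}
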
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 966f893..ac33d5f 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -434,7 +434,7 @@ int af_alg_wait_for_completion(int err, struct af_alg_completion *completion)
case -EINPROGRESS:
case -EBUSY:
wait_for_completion(&completion->completion);
- reinit_completion(&completion->completion);
+ INIT_COMPLETION(completion->completion);
err = completion->err;
break;
};
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 8502462..0262210 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -114,9 +114,6 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
struct hash_ctx *ctx = ask->private;
int err;
- if (flags & MSG_SENDPAGE_NOTLAST)
- flags |= MSG_MORE;
-
lock_sock(sk);
sg_init_table(ctx->sgl.sg, 1);
sg_set_page(ctx->sgl.sg, page, size, offset);
@@ -164,6 +161,8 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
else if (len < ds)
msg->msg_flags |= MSG_TRUNC;
+ msg->msg_namelen = 0;
+
lock_sock(sk);
if (ctx->more) {
ctx->more = 0;
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index a19c027..a1c4f0a 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -378,9 +378,6 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
struct skcipher_sg_list *sgl;
int err = -EINVAL;
- if (flags & MSG_SENDPAGE_NOTLAST)
- flags |= MSG_MORE;
-
lock_sock(sk);
if (!ctx->more && ctx->used)
goto unlock;
@@ -435,6 +432,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
long copied = 0;
lock_sock(sk);
+ msg->msg_namelen = 0;
for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
iovlen--, iov++) {
unsigned long seglen = iov->iov_len;
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 666f196..c0bb377 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -230,11 +230,11 @@ remainder:
*/
if (byte_count < DEFAULT_BLK_SZ) {
empty_rbuf:
- while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
+ for (; ctx->rand_data_valid < DEFAULT_BLK_SZ;
+ ctx->rand_data_valid++) {
*ptr = ctx->rand_data[ctx->rand_data_valid];
ptr++;
byte_count--;
- ctx->rand_data_valid++;
if (byte_count == 0)
goto done;
}
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
index 03a6eb9..6d2c2ea 100644
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -12,8 +12,6 @@ if ASYMMETRIC_KEY_TYPE
config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
tristate "Asymmetric public-key crypto algorithm subtype"
select MPILIB
- select PUBLIC_KEY_ALGO_RSA
- select CRYPTO_HASH_INFO
help
This option provides support for asymmetric public key type handling.
If signature generation and/or verification are to be used,
@@ -22,8 +20,8 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
config PUBLIC_KEY_ALGO_RSA
tristate "RSA public-key algorithm"
+ depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
select MPILIB_EXTRA
- select MPILIB
help
This option enables support for the RSA algorithm (PKCS#1, RFC3447).
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index b77eb53..cf80765 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -209,7 +209,6 @@ struct key_type key_type_asymmetric = {
.match = asymmetric_key_match,
.destroy = asymmetric_key_destroy,
.describe = asymmetric_key_describe,
- .def_lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE,
};
EXPORT_SYMBOL_GPL(key_type_asymmetric);
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 97eb001..cb2e291 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -22,25 +22,29 @@
MODULE_LICENSE("GPL");
-const char *const pkey_algo_name[PKEY_ALGO__LAST] = {
+const char *const pkey_algo[PKEY_ALGO__LAST] = {
[PKEY_ALGO_DSA] = "DSA",
[PKEY_ALGO_RSA] = "RSA",
};
-EXPORT_SYMBOL_GPL(pkey_algo_name);
+EXPORT_SYMBOL_GPL(pkey_algo);
-const struct public_key_algorithm *pkey_algo[PKEY_ALGO__LAST] = {
-#if defined(CONFIG_PUBLIC_KEY_ALGO_RSA) || \
- defined(CONFIG_PUBLIC_KEY_ALGO_RSA_MODULE)
- [PKEY_ALGO_RSA] = &RSA_public_key_algorithm,
-#endif
+const char *const pkey_hash_algo[PKEY_HASH__LAST] = {
+ [PKEY_HASH_MD4] = "md4",
+ [PKEY_HASH_MD5] = "md5",
+ [PKEY_HASH_SHA1] = "sha1",
+ [PKEY_HASH_RIPE_MD_160] = "rmd160",
+ [PKEY_HASH_SHA256] = "sha256",
+ [PKEY_HASH_SHA384] = "sha384",
+ [PKEY_HASH_SHA512] = "sha512",
+ [PKEY_HASH_SHA224] = "sha224",
};
-EXPORT_SYMBOL_GPL(pkey_algo);
+EXPORT_SYMBOL_GPL(pkey_hash_algo);
-const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST] = {
+const char *const pkey_id_type[PKEY_ID_TYPE__LAST] = {
[PKEY_ID_PGP] = "PGP",
[PKEY_ID_X509] = "X509",
};
-EXPORT_SYMBOL_GPL(pkey_id_type_name);
+EXPORT_SYMBOL_GPL(pkey_id_type);
/*
* Provide a part of a description of the key for /proc/keys.
@@ -52,7 +56,7 @@ static void public_key_describe(const struct key *asymmetric_key,
if (key)
seq_printf(m, "%s.%s",
- pkey_id_type_name[key->id_type], key->algo->name);
+ pkey_id_type[key->id_type], key->algo->name);
}
/*
@@ -74,45 +78,21 @@ EXPORT_SYMBOL_GPL(public_key_destroy);
/*
* Verify a signature using a public key.
*/
-int public_key_verify_signature(const struct public_key *pk,
- const struct public_key_signature *sig)
+static int public_key_verify_signature(const struct key *key,
+ const struct public_key_signature *sig)
{
- const struct public_key_algorithm *algo;
-
- BUG_ON(!pk);
- BUG_ON(!pk->mpi[0]);
- BUG_ON(!pk->mpi[1]);
- BUG_ON(!sig);
- BUG_ON(!sig->digest);
- BUG_ON(!sig->mpi[0]);
-
- algo = pk->algo;
- if (!algo) {
- if (pk->pkey_algo >= PKEY_ALGO__LAST)
- return -ENOPKG;
- algo = pkey_algo[pk->pkey_algo];
- if (!algo)
- return -ENOPKG;
- }
+ const struct public_key *pk = key->payload.data;
- if (!algo->verify_signature)
+ if (!pk->algo->verify_signature)
return -ENOTSUPP;
- if (sig->nr_mpi != algo->n_sig_mpi) {
+ if (sig->nr_mpi != pk->algo->n_sig_mpi) {
pr_debug("Signature has %u MPI not %u\n",
- sig->nr_mpi, algo->n_sig_mpi);
+ sig->nr_mpi, pk->algo->n_sig_mpi);
return -EINVAL;
}
- return algo->verify_signature(pk, sig);
-}
-EXPORT_SYMBOL_GPL(public_key_verify_signature);
-
-static int public_key_verify_signature_2(const struct key *key,
- const struct public_key_signature *sig)
-{
- const struct public_key *pk = key->payload.data;
- return public_key_verify_signature(pk, sig);
+ return pk->algo->verify_signature(pk, sig);
}
/*
@@ -123,6 +103,6 @@ struct asymmetric_key_subtype public_key_subtype = {
.name = "public_key",
.describe = public_key_describe,
.destroy = public_key_destroy,
- .verify_signature = public_key_verify_signature_2,
+ .verify_signature = public_key_verify_signature,
};
EXPORT_SYMBOL_GPL(public_key_subtype);
diff --git a/crypto/asymmetric_keys/public_key.h b/crypto/asymmetric_keys/public_key.h
index 5c37a22..5e5e356 100644
--- a/crypto/asymmetric_keys/public_key.h
+++ b/crypto/asymmetric_keys/public_key.h
@@ -28,9 +28,3 @@ struct public_key_algorithm {
};
extern const struct public_key_algorithm RSA_public_key_algorithm;
-
-/*
- * public_key.c
- */
-extern int public_key_verify_signature(const struct public_key *pk,
- const struct public_key_signature *sig);
diff --git a/crypto/asymmetric_keys/rsa.c b/crypto/asymmetric_keys/rsa.c
index 459cf97..4a6a069 100644
--- a/crypto/asymmetric_keys/rsa.c
+++ b/crypto/asymmetric_keys/rsa.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <crypto/algapi.h>
#include "public_key.h"
MODULE_LICENSE("GPL");
@@ -74,13 +73,13 @@ static const struct {
size_t size;
} RSA_ASN1_templates[PKEY_HASH__LAST] = {
#define _(X) { RSA_digest_info_##X, sizeof(RSA_digest_info_##X) }
- [HASH_ALGO_MD5] = _(MD5),
- [HASH_ALGO_SHA1] = _(SHA1),
- [HASH_ALGO_RIPE_MD_160] = _(RIPE_MD_160),
- [HASH_ALGO_SHA256] = _(SHA256),
- [HASH_ALGO_SHA384] = _(SHA384),
- [HASH_ALGO_SHA512] = _(SHA512),
- [HASH_ALGO_SHA224] = _(SHA224),
+ [PKEY_HASH_MD5] = _(MD5),
+ [PKEY_HASH_SHA1] = _(SHA1),
+ [PKEY_HASH_RIPE_MD_160] = _(RIPE_MD_160),
+ [PKEY_HASH_SHA256] = _(SHA256),
+ [PKEY_HASH_SHA384] = _(SHA384),
+ [PKEY_HASH_SHA512] = _(SHA512),
+ [PKEY_HASH_SHA224] = _(SHA224),
#undef _
};
@@ -190,12 +189,12 @@ static int RSA_verify(const u8 *H, const u8 *EM, size_t k, size_t hash_size,
}
}
- if (crypto_memneq(asn1_template, EM + T_offset, asn1_size) != 0) {
+ if (memcmp(asn1_template, EM + T_offset, asn1_size) != 0) {
kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]");
return -EBADMSG;
}
- if (crypto_memneq(H, EM + T_offset + asn1_size, hash_size) != 0) {
+ if (memcmp(H, EM + T_offset + asn1_size, hash_size) != 0) {
kleave(" = -EKEYREJECTED [EM[T] hash mismatch]");
return -EKEYREJECTED;
}
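
For orientation, the RSA_verify() hunk above checks the tail of an EMSA-PKCS1-v1_5 encoded message EM = 0x00 || 0x01 || PS || 0x00 || T, where T is a per-hash ASN.1 DigestInfo template followed by the digest H (RFC 3447). A standalone sketch of that tail check follows; note the revert also swaps constant-time crypto_memneq() back to plain memcmp(), and memcmp() is shown here purely for illustration:

	#include <stddef.h>
	#include <string.h>

	/* Sketch of the EM[T] checks in RSA_verify() above: match the
	 * per-hash ASN.1 DigestInfo template at T_offset, then the raw
	 * digest H immediately after it. */
	static int check_em_tail(const unsigned char *EM, size_t T_offset,
				 const unsigned char *asn1, size_t asn1_size,
				 const unsigned char *H, size_t hash_size)
	{
		if (memcmp(asn1, EM + T_offset, asn1_size) != 0)
			return -1;	/* as above: -EBADMSG, ASN.1 mismatch */
		if (memcmp(H, EM + T_offset + asn1_size, hash_size) != 0)
			return -2;	/* as above: -EKEYREJECTED, hash mismatch */
		return 0;
	}
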
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 2989316..facbf26 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -47,8 +47,6 @@ void x509_free_certificate(struct x509_certificate *cert)
kfree(cert->subject);
kfree(cert->fingerprint);
kfree(cert->authority);
- kfree(cert->sig.digest);
- mpi_free(cert->sig.rsa.s);
kfree(cert);
}
}
@@ -154,33 +152,33 @@ int x509_note_pkey_algo(void *context, size_t hdrlen,
return -ENOPKG; /* Unsupported combination */
case OID_md4WithRSAEncryption:
- ctx->cert->sig.pkey_hash_algo = HASH_ALGO_MD5;
- ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig_hash_algo = PKEY_HASH_MD5;
+ ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
break;
case OID_sha1WithRSAEncryption:
- ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA1;
- ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig_hash_algo = PKEY_HASH_SHA1;
+ ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
break;
case OID_sha256WithRSAEncryption:
- ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA256;
- ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig_hash_algo = PKEY_HASH_SHA256;
+ ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
break;
case OID_sha384WithRSAEncryption:
- ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA384;
- ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig_hash_algo = PKEY_HASH_SHA384;
+ ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
break;
case OID_sha512WithRSAEncryption:
- ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA512;
- ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig_hash_algo = PKEY_HASH_SHA512;
+ ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
break;
case OID_sha224WithRSAEncryption:
- ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA224;
- ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig_hash_algo = PKEY_HASH_SHA224;
+ ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
break;
}
@@ -205,8 +203,8 @@ int x509_note_signature(void *context, size_t hdrlen,
return -EINVAL;
}
- ctx->cert->raw_sig = value;
- ctx->cert->raw_sig_size = vlen;
+ ctx->cert->sig = value;
+ ctx->cert->sig_size = vlen;
return 0;
}
@@ -345,9 +343,8 @@ int x509_extract_key_data(void *context, size_t hdrlen,
if (ctx->last_oid != OID_rsaEncryption)
return -ENOPKG;
- ctx->cert->pub->pkey_algo = PKEY_ALGO_RSA;
-
- /* Discard the BIT STRING metadata */
+ /* There seems to be an extraneous 0 byte on the front of the data */
+ ctx->cert->pkey_algo = PKEY_ALGO_RSA;
ctx->key = value + 1;
ctx->key_size = vlen - 1;
return 0;
diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
index 87d9cc2..f86dc5f 100644
--- a/crypto/asymmetric_keys/x509_parser.h
+++ b/crypto/asymmetric_keys/x509_parser.h
@@ -9,7 +9,6 @@
* 2 of the Licence, or (at your option) any later version.
*/
-#include <linux/time.h>
#include <crypto/public_key.h>
struct x509_certificate {
@@ -21,11 +20,13 @@ struct x509_certificate {
char *authority; /* Authority key fingerprint as hex */
struct tm valid_from;
struct tm valid_to;
+ enum pkey_algo pkey_algo : 8; /* Public key algorithm */
+ enum pkey_algo sig_pkey_algo : 8; /* Signature public key algorithm */
+ enum pkey_hash_algo sig_hash_algo : 8; /* Signature hash algorithm */
const void *tbs; /* Signed data */
- unsigned tbs_size; /* Size of signed data */
- unsigned raw_sig_size; /* Size of signature */
- const void *raw_sig; /* Signature data */
- struct public_key_signature sig; /* Signature parameters */
+ size_t tbs_size; /* Size of signed data */
+ const void *sig; /* Signature data */
+ size_t sig_size; /* Size of signature */
};
/*
@@ -33,10 +34,3 @@ struct x509_certificate {
*/
extern void x509_free_certificate(struct x509_certificate *cert);
extern struct x509_certificate *x509_cert_parse(const void *data, size_t datalen);
-
-/*
- * x509_public_key.c
- */
-extern int x509_get_sig_params(struct x509_certificate *cert);
-extern int x509_check_signature(const struct public_key *pub,
- struct x509_certificate *cert);
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 382ef0d..06007f0 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -23,84 +23,82 @@
#include "public_key.h"
#include "x509_parser.h"
+static const
+struct public_key_algorithm *x509_public_key_algorithms[PKEY_ALGO__LAST] = {
+ [PKEY_ALGO_DSA] = NULL,
+#if defined(CONFIG_PUBLIC_KEY_ALGO_RSA) || \
+ defined(CONFIG_PUBLIC_KEY_ALGO_RSA_MODULE)
+ [PKEY_ALGO_RSA] = &RSA_public_key_algorithm,
+#endif
+};
+
/*
- * Set up the signature parameters in an X.509 certificate. This involves
- * digesting the signed data and extracting the signature.
+ * Check the signature on a certificate using the provided public key
*/
-int x509_get_sig_params(struct x509_certificate *cert)
+static int x509_check_signature(const struct public_key *pub,
+ const struct x509_certificate *cert)
{
+ struct public_key_signature *sig;
struct crypto_shash *tfm;
struct shash_desc *desc;
size_t digest_size, desc_size;
- void *digest;
int ret;
pr_devel("==>%s()\n", __func__);
-
- if (cert->sig.rsa.s)
- return 0;
-
- cert->sig.rsa.s = mpi_read_raw_data(cert->raw_sig, cert->raw_sig_size);
- if (!cert->sig.rsa.s)
- return -ENOMEM;
- cert->sig.nr_mpi = 1;
-
+
/* Allocate the hashing algorithm we're going to need and find out how
* big the hash operational data will be.
*/
- tfm = crypto_alloc_shash(hash_algo_name[cert->sig.pkey_hash_algo], 0, 0);
+ tfm = crypto_alloc_shash(pkey_hash_algo[cert->sig_hash_algo], 0, 0);
if (IS_ERR(tfm))
return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);
desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
digest_size = crypto_shash_digestsize(tfm);
- /* We allocate the hash operational data storage on the end of the
- * digest storage space.
+ /* We allocate the hash operational data storage on the end of our
+ * context data.
*/
ret = -ENOMEM;
- digest = kzalloc(digest_size + desc_size, GFP_KERNEL);
- if (!digest)
- goto error;
+ sig = kzalloc(sizeof(*sig) + desc_size + digest_size, GFP_KERNEL);
+ if (!sig)
+ goto error_no_sig;
- cert->sig.digest = digest;
- cert->sig.digest_size = digest_size;
+ sig->pkey_hash_algo = cert->sig_hash_algo;
+ sig->digest = (u8 *)sig + sizeof(*sig) + desc_size;
+ sig->digest_size = digest_size;
- desc = digest + digest_size;
- desc->tfm = tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc = (void *)sig + sizeof(*sig);
+ desc->tfm = tfm;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
ret = crypto_shash_init(desc);
if (ret < 0)
goto error;
- might_sleep();
- ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, digest);
-error:
- crypto_free_shash(tfm);
- pr_devel("<==%s() = %d\n", __func__, ret);
- return ret;
-}
-EXPORT_SYMBOL_GPL(x509_get_sig_params);
-/*
- * Check the signature on a certificate using the provided public key
- */
-int x509_check_signature(const struct public_key *pub,
- struct x509_certificate *cert)
-{
- int ret;
-
- pr_devel("==>%s()\n", __func__);
+ ret = -ENOMEM;
+ sig->rsa.s = mpi_read_raw_data(cert->sig, cert->sig_size);
+ if (!sig->rsa.s)
+ goto error;
- ret = x509_get_sig_params(cert);
+ ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, sig->digest);
if (ret < 0)
- return ret;
+ goto error_mpi;
+
+ ret = pub->algo->verify_signature(pub, sig);
- ret = public_key_verify_signature(pub, &cert->sig);
pr_debug("Cert Verification: %d\n", ret);
+
+error_mpi:
+ mpi_free(sig->rsa.s);
+error:
+ kfree(sig);
+error_no_sig:
+ crypto_free_shash(tfm);
+
+ pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
-EXPORT_SYMBOL_GPL(x509_check_signature);
/*
* Attempt to parse a data blob for a key as an X509 certificate.
@@ -108,6 +106,7 @@ EXPORT_SYMBOL_GPL(x509_check_signature);
static int x509_key_preparse(struct key_preparsed_payload *prep)
{
struct x509_certificate *cert;
+ struct tm now;
size_t srlen, sulen;
char *desc = NULL;
int ret;
@@ -118,18 +117,7 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
pr_devel("Cert Issuer: %s\n", cert->issuer);
pr_devel("Cert Subject: %s\n", cert->subject);
-
- if (cert->pub->pkey_algo >= PKEY_ALGO__LAST ||
- cert->sig.pkey_algo >= PKEY_ALGO__LAST ||
- cert->sig.pkey_hash_algo >= PKEY_HASH__LAST ||
- !pkey_algo[cert->pub->pkey_algo] ||
- !pkey_algo[cert->sig.pkey_algo] ||
- !hash_algo_name[cert->sig.pkey_hash_algo]) {
- ret = -ENOPKG;
- goto error_free_cert;
- }
-
- pr_devel("Cert Key Algo: %s\n", pkey_algo_name[cert->pub->pkey_algo]);
+ pr_devel("Cert Key Algo: %s\n", pkey_algo[cert->pkey_algo]);
pr_devel("Cert Valid From: %04ld-%02d-%02d %02d:%02d:%02d\n",
cert->valid_from.tm_year + 1900, cert->valid_from.tm_mon + 1,
cert->valid_from.tm_mday, cert->valid_from.tm_hour,
@@ -139,22 +127,58 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
cert->valid_to.tm_mday, cert->valid_to.tm_hour,
cert->valid_to.tm_min, cert->valid_to.tm_sec);
pr_devel("Cert Signature: %s + %s\n",
- pkey_algo_name[cert->sig.pkey_algo],
- hash_algo_name[cert->sig.pkey_hash_algo]);
+ pkey_algo[cert->sig_pkey_algo],
+ pkey_hash_algo[cert->sig_hash_algo]);
- if (!cert->fingerprint) {
- pr_warn("Cert for '%s' must have a SubjKeyId extension\n",
+ if (!cert->fingerprint || !cert->authority) {
+ pr_warn("Cert for '%s' must have SubjKeyId and AuthKeyId extensions\n",
cert->subject);
ret = -EKEYREJECTED;
goto error_free_cert;
}
- cert->pub->algo = pkey_algo[cert->pub->pkey_algo];
+ time_to_tm(CURRENT_TIME.tv_sec, 0, &now);
+ pr_devel("Now: %04ld-%02d-%02d %02d:%02d:%02d\n",
+ now.tm_year + 1900, now.tm_mon + 1, now.tm_mday,
+ now.tm_hour, now.tm_min, now.tm_sec);
+ if (now.tm_year < cert->valid_from.tm_year ||
+ (now.tm_year == cert->valid_from.tm_year &&
+ (now.tm_mon < cert->valid_from.tm_mon ||
+ (now.tm_mon == cert->valid_from.tm_mon &&
+ (now.tm_mday < cert->valid_from.tm_mday ||
+ (now.tm_mday == cert->valid_from.tm_mday &&
+ (now.tm_hour < cert->valid_from.tm_hour ||
+ (now.tm_hour == cert->valid_from.tm_hour &&
+ (now.tm_min < cert->valid_from.tm_min ||
+ (now.tm_min == cert->valid_from.tm_min &&
+ (now.tm_sec < cert->valid_from.tm_sec
+ ))))))))))) {
+ pr_warn("Cert %s is not yet valid\n", cert->fingerprint);
+ ret = -EKEYREJECTED;
+ goto error_free_cert;
+ }
+ if (now.tm_year > cert->valid_to.tm_year ||
+ (now.tm_year == cert->valid_to.tm_year &&
+ (now.tm_mon > cert->valid_to.tm_mon ||
+ (now.tm_mon == cert->valid_to.tm_mon &&
+ (now.tm_mday > cert->valid_to.tm_mday ||
+ (now.tm_mday == cert->valid_to.tm_mday &&
+ (now.tm_hour > cert->valid_to.tm_hour ||
+ (now.tm_hour == cert->valid_to.tm_hour &&
+ (now.tm_min > cert->valid_to.tm_min ||
+ (now.tm_min == cert->valid_to.tm_min &&
+ (now.tm_sec > cert->valid_to.tm_sec
+ ))))))))))) {
+ pr_warn("Cert %s has expired\n", cert->fingerprint);
+ ret = -EKEYEXPIRED;
+ goto error_free_cert;
+ }
+
+ cert->pub->algo = x509_public_key_algorithms[cert->pkey_algo];
cert->pub->id_type = PKEY_ID_X509;
- /* Check the signature on the key if it appears to be self-signed */
- if (!cert->authority ||
- strcmp(cert->fingerprint, cert->authority) == 0) {
+ /* Check the signature on the key */
+ if (strcmp(cert->fingerprint, cert->authority) == 0) {
ret = x509_check_signature(cert->pub, cert);
if (ret < 0)
goto error_free_cert;
@@ -213,6 +237,3 @@ static void __exit x509_key_exit(void)
module_init(x509_key_init);
module_exit(x509_key_exit);
-
-MODULE_DESCRIPTION("X.509 certificate parser");
-MODULE_LICENSE("GPL");
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index f8c0b8d..9e62fef 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -50,36 +50,33 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
&dest, 1, &src, 1, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
- struct dmaengine_unmap_data *unmap = NULL;
- if (device)
- unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
-
- if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
+ if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
+ dma_addr_t dma_dest, dma_src;
unsigned long dma_prep_flags = 0;
if (submit->cb_fn)
dma_prep_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_prep_flags |= DMA_PREP_FENCE;
-
- unmap->to_cnt = 1;
- unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len,
- DMA_TO_DEVICE);
- unmap->from_cnt = 1;
- unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len,
- DMA_FROM_DEVICE);
- unmap->len = len;
-
- tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
- unmap->addr[0], len,
- dma_prep_flags);
+ dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
+ DMA_FROM_DEVICE);
+
+ dma_src = dma_map_page(device->dev, src, src_offset, len,
+ DMA_TO_DEVICE);
+
+ tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
+ len, dma_prep_flags);
+ if (!tx) {
+ dma_unmap_page(device->dev, dma_dest, len,
+ DMA_FROM_DEVICE);
+ dma_unmap_page(device->dev, dma_src, len,
+ DMA_TO_DEVICE);
+ }
}
if (tx) {
pr_debug("%s: (async) len: %zu\n", __func__, len);
-
- dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
} else {
void *dest_buf, *src_buf;
@@ -99,8 +96,6 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
async_tx_sync_epilog(submit);
}
- dmaengine_unmap_put(unmap);
-
return tx;
}
EXPORT_SYMBOL_GPL(async_memcpy);
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index d05327c..91d5d38 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -46,24 +46,49 @@ static struct page *pq_scribble_page;
* do_async_gen_syndrome - asynchronously calculate P and/or Q
*/
static __async_inline struct dma_async_tx_descriptor *
-do_async_gen_syndrome(struct dma_chan *chan,
- const unsigned char *scfs, int disks,
- struct dmaengine_unmap_data *unmap,
- enum dma_ctrl_flags dma_flags,
+do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
+ const unsigned char *scfs, unsigned int offset, int disks,
+ size_t len, dma_addr_t *dma_src,
struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *tx = NULL;
struct dma_device *dma = chan->device;
+ enum dma_ctrl_flags dma_flags = 0;
enum async_tx_flags flags_orig = submit->flags;
dma_async_tx_callback cb_fn_orig = submit->cb_fn;
dma_async_tx_callback cb_param_orig = submit->cb_param;
int src_cnt = disks - 2;
+ unsigned char coefs[src_cnt];
unsigned short pq_src_cnt;
dma_addr_t dma_dest[2];
int src_off = 0;
+ int idx;
+ int i;
- if (submit->flags & ASYNC_TX_FENCE)
- dma_flags |= DMA_PREP_FENCE;
+ /* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */
+ if (P(blocks, disks))
+ dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
+ len, DMA_BIDIRECTIONAL);
+ else
+ dma_flags |= DMA_PREP_PQ_DISABLE_P;
+ if (Q(blocks, disks))
+ dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
+ len, DMA_BIDIRECTIONAL);
+ else
+ dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+
+ /* convert source addresses being careful to collapse 'empty'
+ * sources and update the coefficients accordingly
+ */
+ for (i = 0, idx = 0; i < src_cnt; i++) {
+ if (blocks[i] == NULL)
+ continue;
+ dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
+ DMA_TO_DEVICE);
+ coefs[idx] = scfs[i];
+ idx++;
+ }
+ src_cnt = idx;
while (src_cnt > 0) {
submit->flags = flags_orig;
@@ -75,25 +100,28 @@ do_async_gen_syndrome(struct dma_chan *chan,
if (src_cnt > pq_src_cnt) {
submit->flags &= ~ASYNC_TX_ACK;
submit->flags |= ASYNC_TX_FENCE;
+ dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
submit->cb_fn = NULL;
submit->cb_param = NULL;
} else {
+ dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
submit->cb_fn = cb_fn_orig;
submit->cb_param = cb_param_orig;
if (cb_fn_orig)
dma_flags |= DMA_PREP_INTERRUPT;
}
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;
- /* Drivers force forward progress in case they can not provide
- * a descriptor
+ /* Since we have clobbered the src_list we are committed
+ * to doing this asynchronously. Drivers force forward
+ * progress in case they can not provide a descriptor
*/
for (;;) {
- dma_dest[0] = unmap->addr[disks - 2];
- dma_dest[1] = unmap->addr[disks - 1];
tx = dma->device_prep_dma_pq(chan, dma_dest,
- &unmap->addr[src_off],
+ &dma_src[src_off],
pq_src_cnt,
- &scfs[src_off], unmap->len,
+ &coefs[src_off], len,
dma_flags);
if (likely(tx))
break;
@@ -101,7 +129,6 @@ do_async_gen_syndrome(struct dma_chan *chan,
dma_async_issue_pending(chan);
}
- dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
submit->depend_tx = tx;
@@ -161,6 +188,10 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
* set to NULL those buffers will be replaced with the raid6_zero_page
* in the synchronous path and omitted in the hardware-asynchronous
* path.
+ *
+ * 'blocks' note: if submit->scribble is NULL then the contents of
+ * 'blocks' may be overwritten to perform address conversions
+ * (dma_map_page() or page_address()).
*/
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -171,69 +202,26 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
&P(blocks, disks), 2,
blocks, src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
- struct dmaengine_unmap_data *unmap = NULL;
+ dma_addr_t *dma_src = NULL;
BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
- if (device)
- unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+ if (submit->scribble)
+ dma_src = submit->scribble;
+ else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+ dma_src = (dma_addr_t *) blocks;
- if (unmap &&
+ if (dma_src && device &&
(src_cnt <= dma_maxpq(device, 0) ||
dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
- struct dma_async_tx_descriptor *tx;
- enum dma_ctrl_flags dma_flags = 0;
- unsigned char coefs[src_cnt];
- int i, j;
-
/* run the p+q asynchronously */
pr_debug("%s: (async) disks: %d len: %zu\n",
__func__, disks, len);
-
- /* convert source addresses being careful to collapse 'empty'
- * sources and update the coefficients accordingly
- */
- unmap->len = len;
- for (i = 0, j = 0; i < src_cnt; i++) {
- if (blocks[i] == NULL)
- continue;
- unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
- len, DMA_TO_DEVICE);
- coefs[j] = raid6_gfexp[i];
- unmap->to_cnt++;
- j++;
- }
-
- /*
- * DMAs use destinations as sources,
- * so use BIDIRECTIONAL mapping
- */
- unmap->bidi_cnt++;
- if (P(blocks, disks))
- unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
- offset, len, DMA_BIDIRECTIONAL);
- else {
- unmap->addr[j++] = 0;
- dma_flags |= DMA_PREP_PQ_DISABLE_P;
- }
-
- unmap->bidi_cnt++;
- if (Q(blocks, disks))
- unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
- offset, len, DMA_BIDIRECTIONAL);
- else {
- unmap->addr[j++] = 0;
- dma_flags |= DMA_PREP_PQ_DISABLE_Q;
- }
-
- tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
- dmaengine_unmap_put(unmap);
- return tx;
+ return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
+ disks, len, dma_src, submit);
}
- dmaengine_unmap_put(unmap);
-
/* run the pq synchronously */
pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);
@@ -289,60 +277,50 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
struct dma_async_tx_descriptor *tx;
unsigned char coefs[disks-2];
enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
- struct dmaengine_unmap_data *unmap = NULL;
+ dma_addr_t *dma_src = NULL;
+ int src_cnt = 0;
BUG_ON(disks < 4);
- if (device)
- unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+ if (submit->scribble)
+ dma_src = submit->scribble;
+ else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+ dma_src = (dma_addr_t *) blocks;
- if (unmap && disks <= dma_maxpq(device, 0) &&
+ if (dma_src && device && disks <= dma_maxpq(device, 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
struct device *dev = device->dev;
- dma_addr_t pq[2];
- int i, j = 0, src_cnt = 0;
+ dma_addr_t *pq = &dma_src[disks-2];
+ int i;
pr_debug("%s: (async) disks: %d len: %zu\n",
__func__, disks, len);
-
- unmap->len = len;
- for (i = 0; i < disks-2; i++)
- if (likely(blocks[i])) {
- unmap->addr[j] = dma_map_page(dev, blocks[i],
- offset, len,
- DMA_TO_DEVICE);
- coefs[j] = raid6_gfexp[i];
- unmap->to_cnt++;
- src_cnt++;
- j++;
- }
-
- if (!P(blocks, disks)) {
- pq[0] = 0;
+ if (!P(blocks, disks))
dma_flags |= DMA_PREP_PQ_DISABLE_P;
- } else {
+ else
pq[0] = dma_map_page(dev, P(blocks, disks),
offset, len,
DMA_TO_DEVICE);
- unmap->addr[j++] = pq[0];
- unmap->to_cnt++;
- }
- if (!Q(blocks, disks)) {
- pq[1] = 0;
+ if (!Q(blocks, disks))
dma_flags |= DMA_PREP_PQ_DISABLE_Q;
- } else {
+ else
pq[1] = dma_map_page(dev, Q(blocks, disks),
offset, len,
DMA_TO_DEVICE);
- unmap->addr[j++] = pq[1];
- unmap->to_cnt++;
- }
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
+ for (i = 0; i < disks-2; i++)
+ if (likely(blocks[i])) {
+ dma_src[src_cnt] = dma_map_page(dev, blocks[i],
+ offset, len,
+ DMA_TO_DEVICE);
+ coefs[src_cnt] = raid6_gfexp[i];
+ src_cnt++;
+ }
+
for (;;) {
- tx = device->device_prep_dma_pq_val(chan, pq,
- unmap->addr,
+ tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
src_cnt,
coefs,
len, pqres,
@@ -352,8 +330,6 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
async_tx_quiesce(&submit->depend_tx);
dma_async_issue_pending(chan);
}
-
- dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
return tx;
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 934a849..a9f08a6 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -26,7 +26,6 @@
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
-#include <linux/dmaengine.h>
static struct dma_async_tx_descriptor *
async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
@@ -35,45 +34,35 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
&dest, 1, srcs, 2, len);
struct dma_device *dma = chan ? chan->device : NULL;
- struct dmaengine_unmap_data *unmap = NULL;
const u8 *amul, *bmul;
u8 ax, bx;
u8 *a, *b, *c;
- if (dma)
- unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
-
- if (unmap) {
+ if (dma) {
+ dma_addr_t dma_dest[2];
+ dma_addr_t dma_src[2];
struct device *dev = dma->dev;
- dma_addr_t pq[2];
struct dma_async_tx_descriptor *tx;
enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
- unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
- unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
- unmap->to_cnt = 2;
-
- unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
- unmap->bidi_cnt = 1;
- /* engine only looks at Q, but expects it to follow P */
- pq[1] = unmap->addr[2];
-
- unmap->len = len;
- tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef,
+ dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
+ dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
+ dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
+ tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef,
len, dma_flags);
if (tx) {
- dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
- dmaengine_unmap_put(unmap);
return tx;
}
/* could not get a descriptor, unmap and fall through to
* the synchronous path
*/
- dmaengine_unmap_put(unmap);
+ dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
+ dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE);
}
/* run the operation synchronously */
@@ -100,38 +89,23 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
&dest, 1, &src, 1, len);
struct dma_device *dma = chan ? chan->device : NULL;
- struct dmaengine_unmap_data *unmap = NULL;
const u8 *qmul; /* Q multiplier table */
u8 *d, *s;
- if (dma)
- unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
-
- if (unmap) {
+ if (dma) {
dma_addr_t dma_dest[2];
+ dma_addr_t dma_src[1];
struct device *dev = dma->dev;
struct dma_async_tx_descriptor *tx;
enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
- unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
- unmap->to_cnt++;
- unmap->addr[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
- dma_dest[1] = unmap->addr[1];
- unmap->bidi_cnt++;
- unmap->len = len;
-
- /* this looks funny, but the engine looks for Q at
- * dma_dest[1] and ignores dma_dest[0] as a dest
- * due to DMA_PREP_PQ_DISABLE_P
- */
- tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr,
- 1, &coef, len, dma_flags);
-
+ dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
+ dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
+ tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef,
+ len, dma_flags);
if (tx) {
- dma_set_unmap(tx, unmap);
- dmaengine_unmap_put(unmap);
async_tx_submit(chan, tx, submit);
return tx;
}
@@ -139,7 +113,8 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
/* could not get a descriptor, unmap and fall through to
* the synchronous path
*/
- dmaengine_unmap_put(unmap);
+ dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
}
/* no channel available, or failed to allocate a descriptor, so
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 39ea479..7be3424 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -128,7 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
}
device->device_issue_pending(chan);
} else {
- if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
+ if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS)
panic("%s: DMA error waiting for depend_tx\n",
__func__);
tx->tx_submit(tx);
@@ -280,7 +280,7 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
* we are referring to the correct operation
*/
BUG_ON(async_tx_test_ack(*tx));
- if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
+ if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS)
panic("%s: DMA error waiting for transaction\n",
__func__);
async_tx_ack(*tx);
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 3c562f5..8ade0a0 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -33,31 +33,48 @@
/* do_async_xor - dma map the pages and perform the xor with an engine */
static __async_inline struct dma_async_tx_descriptor *
-do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
+do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
+ unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src,
struct async_submit_ctl *submit)
{
struct dma_device *dma = chan->device;
struct dma_async_tx_descriptor *tx = NULL;
+ int src_off = 0;
+ int i;
dma_async_tx_callback cb_fn_orig = submit->cb_fn;
void *cb_param_orig = submit->cb_param;
enum async_tx_flags flags_orig = submit->flags;
- enum dma_ctrl_flags dma_flags = 0;
- int src_cnt = unmap->to_cnt;
- int xor_src_cnt;
- dma_addr_t dma_dest = unmap->addr[unmap->to_cnt];
- dma_addr_t *src_list = unmap->addr;
+ enum dma_ctrl_flags dma_flags;
+ int xor_src_cnt = 0;
+ dma_addr_t dma_dest;
+
+ /* map the dest bidirectional in case it is re-used as a source */
+ dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
+ for (i = 0; i < src_cnt; i++) {
+ /* only map the dest once */
+ if (!src_list[i])
+ continue;
+ if (unlikely(src_list[i] == dest)) {
+ dma_src[xor_src_cnt++] = dma_dest;
+ continue;
+ }
+ dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
+ len, DMA_TO_DEVICE);
+ }
+ src_cnt = xor_src_cnt;
while (src_cnt) {
- dma_addr_t tmp;
-
submit->flags = flags_orig;
+ dma_flags = 0;
xor_src_cnt = min(src_cnt, (int)dma->max_xor);
- /* if we are submitting additional xors, leave the chain open
- * and clear the callback parameters
+ /* if we are submitting additional xors, leave the chain open,
+ * clear the callback parameters, and leave the destination
+ * buffer mapped
*/
if (src_cnt > xor_src_cnt) {
submit->flags &= ~ASYNC_TX_ACK;
submit->flags |= ASYNC_TX_FENCE;
+ dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
submit->cb_fn = NULL;
submit->cb_param = NULL;
} else {
@@ -68,18 +85,12 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
dma_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
-
- /* Drivers force forward progress in case they can not provide a
- * descriptor
+ /* Since we have clobbered the src_list we are committed
+ * to doing this asynchronously. Drivers force forward progress
+ * in case they can not provide a descriptor
*/
- tmp = src_list[0];
- if (src_list > unmap->addr)
- src_list[0] = dma_dest;
- tx = dma->device_prep_dma_xor(chan, dma_dest, src_list,
- xor_src_cnt, unmap->len,
- dma_flags);
- src_list[0] = tmp;
-
+ tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
+ xor_src_cnt, len, dma_flags);
if (unlikely(!tx))
async_tx_quiesce(&submit->depend_tx);
@@ -88,21 +99,22 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
while (unlikely(!tx)) {
dma_async_issue_pending(chan);
tx = dma->device_prep_dma_xor(chan, dma_dest,
- src_list,
- xor_src_cnt, unmap->len,
+ &dma_src[src_off],
+ xor_src_cnt, len,
dma_flags);
}
- dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
submit->depend_tx = tx;
if (src_cnt > xor_src_cnt) {
/* drop completed sources */
src_cnt -= xor_src_cnt;
+ src_off += xor_src_cnt;
+
/* use the intermediate result as a source */
+ dma_src[--src_off] = dma_dest;
src_cnt++;
- src_list += xor_src_cnt - 1;
} else
break;
}
@@ -177,40 +189,22 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
&dest, 1, src_list,
src_cnt, len);
- struct dma_device *device = chan ? chan->device : NULL;
- struct dmaengine_unmap_data *unmap = NULL;
+ dma_addr_t *dma_src = NULL;
BUG_ON(src_cnt <= 1);
- if (device)
- unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
-
- if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
- struct dma_async_tx_descriptor *tx;
- int i, j;
+ if (submit->scribble)
+ dma_src = submit->scribble;
+ else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+ dma_src = (dma_addr_t *) src_list;
+ if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
/* run the xor asynchronously */
pr_debug("%s (async): len: %zu\n", __func__, len);
- unmap->len = len;
- for (i = 0, j = 0; i < src_cnt; i++) {
- if (!src_list[i])
- continue;
- unmap->to_cnt++;
- unmap->addr[j++] = dma_map_page(device->dev, src_list[i],
- offset, len, DMA_TO_DEVICE);
- }
-
- /* map it bidirectional as it may be re-used as a source */
- unmap->addr[j] = dma_map_page(device->dev, dest, offset, len,
- DMA_BIDIRECTIONAL);
- unmap->bidi_cnt = 1;
-
- tx = do_async_xor(chan, unmap, submit);
- dmaengine_unmap_put(unmap);
- return tx;
+ return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
+ dma_src, submit);
} else {
- dmaengine_unmap_put(unmap);
/* run the xor synchronously */
pr_debug("%s (sync): len: %zu\n", __func__, len);
WARN_ONCE(chan, "%s: no space for dma address conversion\n",
@@ -274,14 +268,16 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
- struct dmaengine_unmap_data *unmap = NULL;
+ dma_addr_t *dma_src = NULL;
BUG_ON(src_cnt <= 1);
- if (device)
- unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
+ if (submit->scribble)
+ dma_src = submit->scribble;
+ else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+ dma_src = (dma_addr_t *) src_list;
- if (unmap && src_cnt <= device->max_xor &&
+ if (dma_src && device && src_cnt <= device->max_xor &&
is_dma_xor_aligned(device, offset, 0, len)) {
unsigned long dma_prep_flags = 0;
int i;
@@ -292,15 +288,11 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
dma_prep_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_prep_flags |= DMA_PREP_FENCE;
+ for (i = 0; i < src_cnt; i++)
+ dma_src[i] = dma_map_page(device->dev, src_list[i],
+ offset, len, DMA_TO_DEVICE);
- for (i = 0; i < src_cnt; i++) {
- unmap->addr[i] = dma_map_page(device->dev, src_list[i],
- offset, len, DMA_TO_DEVICE);
- unmap->to_cnt++;
- }
- unmap->len = len;
-
- tx = device->device_prep_dma_xor_val(chan, unmap->addr, src_cnt,
+ tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt,
len, result,
dma_prep_flags);
if (unlikely(!tx)) {
@@ -309,11 +301,11 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
while (!tx) {
dma_async_issue_pending(chan);
tx = device->device_prep_dma_xor_val(chan,
- unmap->addr, src_cnt, len, result,
+ dma_src, src_cnt, len, result,
dma_prep_flags);
}
}
- dma_set_unmap(tx, unmap);
+
async_tx_submit(chan, tx, submit);
} else {
enum async_tx_flags flags_orig = submit->flags;
@@ -335,7 +327,6 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
async_tx_sync_epilog(submit);
submit->flags = flags_orig;
}
- dmaengine_unmap_put(unmap);
return tx;
}
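
The async_xor/async_xor_val hunks above restore the pre-unmap-data "scribble" pattern: when the caller supplies no scratch buffer, the page-pointer array itself is reused to hold converted DMA addresses, which is legal only when a dma_addr_t fits in a pointer slot. A self-contained sketch of that selection (stand-in types, illustration only):

	#include <stddef.h>

	typedef unsigned long dma_addr_t;	/* stand-in for the kernel type */
	struct page;				/* opaque here, as in the kernel */

	/* Pick scratch space for DMA-address conversion, as restored above.
	 * Returns NULL when there is no room, in which case the caller must
	 * fall back to the synchronous path (see the WARN_ONCE above). Note
	 * that reusing src_list clobbers the caller's page pointers, which
	 * is why the code is then "committed to doing this asynchronously". */
	static dma_addr_t *pick_scribble(void *scribble, struct page **src_list)
	{
		if (scribble)
			return scribble;
		if (sizeof(dma_addr_t) <= sizeof(struct page *))
			return (dma_addr_t *)src_list;	/* convert in place */
		return NULL;
	}
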
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index dad95f4..4a92bac 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -28,7 +28,7 @@
#undef pr
#define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)
-#define NDISKS 64 /* Including P and Q */
+#define NDISKS 16 /* Including P and Q */
static struct page *dataptrs[NDISKS];
static addr_conv_t addr_conv[NDISKS];
@@ -219,14 +219,6 @@ static int raid6_test(void)
err += test(11, &tests);
err += test(12, &tests);
}
-
- /* the 24 disk case is special for ioatdma as it is the boundary point
- * at which it needs to switch from 8-source ops to 16-source
- * ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set)
- */
- if (NDISKS > 24)
- err += test(24, &tests);
-
err += test(NDISKS, &tests);
pr("\n");
diff --git a/crypto/authenc.c b/crypto/authenc.c
index e122355..ffce19d 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -52,52 +52,40 @@ static void authenc_request_complete(struct aead_request *req, int err)
aead_request_complete(req, err);
}
-int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
- unsigned int keylen)
+static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+ unsigned int keylen)
{
- struct rtattr *rta = (struct rtattr *)key;
+ unsigned int authkeylen;
+ unsigned int enckeylen;
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct crypto_ahash *auth = ctx->auth;
+ struct crypto_ablkcipher *enc = ctx->enc;
+ struct rtattr *rta = (void *)key;
struct crypto_authenc_key_param *param;
+ int err = -EINVAL;
if (!RTA_OK(rta, keylen))
- return -EINVAL;
+ goto badkey;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- return -EINVAL;
+ goto badkey;
if (RTA_PAYLOAD(rta) < sizeof(*param))
- return -EINVAL;
+ goto badkey;
param = RTA_DATA(rta);
- keys->enckeylen = be32_to_cpu(param->enckeylen);
+ enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
- if (keylen < keys->enckeylen)
- return -EINVAL;
-
- keys->authkeylen = keylen - keys->enckeylen;
- keys->authkey = key;
- keys->enckey = key + keys->authkeylen;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys);
-
-static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- struct crypto_ahash *auth = ctx->auth;
- struct crypto_ablkcipher *enc = ctx->enc;
- struct crypto_authenc_keys keys;
- int err = -EINVAL;
-
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ if (keylen < enckeylen)
goto badkey;
+ authkeylen = keylen - enckeylen;
+
crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
+ err = crypto_ahash_setkey(auth, key, authkeylen);
crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) &
CRYPTO_TFM_RES_MASK);
@@ -107,7 +95,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
+ err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) &
CRYPTO_TFM_RES_MASK);
@@ -200,7 +188,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
@@ -239,7 +227,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
@@ -380,10 +368,9 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
if (!err) {
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq);
- struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
- + ctx->reqoff);
- u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);
+ struct ablkcipher_request *abreq = aead_request_ctx(areq);
+ u8 *iv = (u8 *)(abreq + 1) +
+ crypto_ablkcipher_reqsize(ctx->enc);
err = crypto_authenc_genicv(areq, iv, 0);
}
@@ -475,7 +462,7 @@ static int crypto_authenc_verify(struct aead_request *req,
ihash = ohash + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
+ return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
}
static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
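
Both this hunk and the matching one in authencesn.c below re-inline
the rtattr parsing that v3.13 had factored out into
crypto_authenc_extractkeys() (whose struct crypto_authenc_keys result
is visible in the removed lines). Either way, the key blob handed to
setkey keeps the same layout: an rtattr of type
CRYPTO_AUTHENC_KEYA_PARAM carrying the big-endian cipher-key length,
followed by the HMAC key and then the cipher key. A hedged sketch of a
caller assembling such a blob; keybuf, authkey and enckey are
illustrative names, not taken from this file:

        struct rtattr *rta = (struct rtattr *)keybuf;
        struct crypto_authenc_key_param *param;
        u8 *p = keybuf + RTA_SPACE(sizeof(*param));

        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        param->enckeylen = cpu_to_be32(enckeylen);  /* read back above with be32_to_cpu() */

        memcpy(p, authkey, authkeylen);             /* HMAC key first */
        memcpy(p + authkeylen, enckey, enckeylen);  /* cipher key last */
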
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 4be0dd4..ab53762 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -59,19 +59,37 @@ static void authenc_esn_request_complete(struct aead_request *req, int err)
static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
unsigned int keylen)
{
+ unsigned int authkeylen;
+ unsigned int enckeylen;
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct crypto_ahash *auth = ctx->auth;
struct crypto_ablkcipher *enc = ctx->enc;
- struct crypto_authenc_keys keys;
+ struct rtattr *rta = (void *)key;
+ struct crypto_authenc_key_param *param;
int err = -EINVAL;
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ if (!RTA_OK(rta, keylen))
goto badkey;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ if (keylen < enckeylen)
+ goto badkey;
+
+ authkeylen = keylen - enckeylen;
crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
+ err = crypto_ahash_setkey(auth, key, authkeylen);
crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) &
CRYPTO_TFM_RES_MASK);
@@ -81,7 +99,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
+ err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) &
CRYPTO_TFM_RES_MASK);
@@ -229,7 +247,7 @@ static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *ar
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
@@ -278,7 +296,7 @@ static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *a
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
@@ -318,7 +336,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
@@ -550,7 +568,7 @@ static int crypto_authenc_esn_verify(struct aead_request *req)
ihash = ohash + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
+ return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
}
static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 1df8421..499c917 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -271,8 +271,7 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
}
/* compute plaintext into mac */
- if (cryptlen)
- get_data_to_compute(cipher, pctx, plain, cryptlen);
+ get_data_to_compute(cipher, pctx, plain, cryptlen);
out:
return err;
@@ -364,7 +363,7 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
if (!err) {
err = crypto_ccm_auth(req, req->dst, cryptlen);
- if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))
+ if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize))
err = -EBADMSG;
}
aead_request_complete(req, err);
@@ -423,7 +422,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
return err;
/* verify */
- if (crypto_memneq(authtag, odata, authsize))
+ if (memcmp(authtag, odata, authsize))
return -EBADMSG;
return err;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index b4f0179..43e1fb0 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -582,7 +582,7 @@ static int crypto_gcm_verify(struct aead_request *req,
crypto_xor(auth_tag, iauth_tag, 16);
scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
- return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
+ return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
}
static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
diff --git a/crypto/hash_info.c b/crypto/hash_info.c
deleted file mode 100644
index 3e7ff46..0000000
--- a/crypto/hash_info.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Hash Info: Hash algorithms information
- *
- * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <linux/export.h>
-#include <crypto/hash_info.h>
-
-const char *const hash_algo_name[HASH_ALGO__LAST] = {
- [HASH_ALGO_MD4] = "md4",
- [HASH_ALGO_MD5] = "md5",
- [HASH_ALGO_SHA1] = "sha1",
- [HASH_ALGO_RIPE_MD_160] = "rmd160",
- [HASH_ALGO_SHA256] = "sha256",
- [HASH_ALGO_SHA384] = "sha384",
- [HASH_ALGO_SHA512] = "sha512",
- [HASH_ALGO_SHA224] = "sha224",
- [HASH_ALGO_RIPE_MD_128] = "rmd128",
- [HASH_ALGO_RIPE_MD_256] = "rmd256",
- [HASH_ALGO_RIPE_MD_320] = "rmd320",
- [HASH_ALGO_WP_256] = "wp256",
- [HASH_ALGO_WP_384] = "wp384",
- [HASH_ALGO_WP_512] = "wp512",
- [HASH_ALGO_TGR_128] = "tgr128",
- [HASH_ALGO_TGR_160] = "tgr160",
- [HASH_ALGO_TGR_192] = "tgr192",
-};
-EXPORT_SYMBOL_GPL(hash_algo_name);
-
-const int hash_digest_size[HASH_ALGO__LAST] = {
- [HASH_ALGO_MD4] = MD5_DIGEST_SIZE,
- [HASH_ALGO_MD5] = MD5_DIGEST_SIZE,
- [HASH_ALGO_SHA1] = SHA1_DIGEST_SIZE,
- [HASH_ALGO_RIPE_MD_160] = RMD160_DIGEST_SIZE,
- [HASH_ALGO_SHA256] = SHA256_DIGEST_SIZE,
- [HASH_ALGO_SHA384] = SHA384_DIGEST_SIZE,
- [HASH_ALGO_SHA512] = SHA512_DIGEST_SIZE,
- [HASH_ALGO_SHA224] = SHA224_DIGEST_SIZE,
- [HASH_ALGO_RIPE_MD_128] = RMD128_DIGEST_SIZE,
- [HASH_ALGO_RIPE_MD_256] = RMD256_DIGEST_SIZE,
- [HASH_ALGO_RIPE_MD_320] = RMD320_DIGEST_SIZE,
- [HASH_ALGO_WP_256] = WP256_DIGEST_SIZE,
- [HASH_ALGO_WP_384] = WP384_DIGEST_SIZE,
- [HASH_ALGO_WP_512] = WP512_DIGEST_SIZE,
- [HASH_ALGO_TGR_128] = TGR128_DIGEST_SIZE,
- [HASH_ALGO_TGR_160] = TGR160_DIGEST_SIZE,
- [HASH_ALGO_TGR_192] = TGR192_DIGEST_SIZE,
-};
-EXPORT_SYMBOL_GPL(hash_digest_size);
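
crypto/hash_info.c, deleted above, exported just these two lookup
tables keyed by the HASH_ALGO_* enum from <crypto/hash_info.h> (new in
v3.13 for the integrity subsystem). A minimal usage sketch, assuming
that header is still available; describe_algo() is an illustrative
name:

        #include <linux/errno.h>
        #include <linux/kernel.h>
        #include <crypto/hash_info.h>

        static int describe_algo(enum hash_algo algo)
        {
                if (algo >= HASH_ALGO__LAST)
                        return -EINVAL;
                pr_info("%s: %d-byte digest\n",
                        hash_algo_name[algo], hash_digest_size[algo]);
                return 0;
        }
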
diff --git a/crypto/memneq.c b/crypto/memneq.c
deleted file mode 100644
index cd01622..0000000
--- a/crypto/memneq.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Constant-time equality testing of memory regions.
- *
- * Authors:
- *
- * James Yonan <james@openvpn.net>
- * Daniel Borkmann <dborkman@redhat.com>
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of OpenVPN Technologies nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <crypto/algapi.h>
-
-#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ
-
-/* Generic path for arbitrary size */
-static inline unsigned long
-__crypto_memneq_generic(const void *a, const void *b, size_t size)
-{
- unsigned long neq = 0;
-
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- while (size >= sizeof(unsigned long)) {
- neq |= *(unsigned long *)a ^ *(unsigned long *)b;
- a += sizeof(unsigned long);
- b += sizeof(unsigned long);
- size -= sizeof(unsigned long);
- }
-#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
- while (size > 0) {
- neq |= *(unsigned char *)a ^ *(unsigned char *)b;
- a += 1;
- b += 1;
- size -= 1;
- }
- return neq;
-}
-
-/* Loop-free fast-path for frequently used 16-byte size */
-static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
-{
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- if (sizeof(unsigned long) == 8)
- return ((*(unsigned long *)(a) ^ *(unsigned long *)(b))
- | (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8)));
- else if (sizeof(unsigned int) == 4)
- return ((*(unsigned int *)(a) ^ *(unsigned int *)(b))
- | (*(unsigned int *)(a+4) ^ *(unsigned int *)(b+4))
- | (*(unsigned int *)(a+8) ^ *(unsigned int *)(b+8))
- | (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12)));
- else
-#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
- return ((*(unsigned char *)(a) ^ *(unsigned char *)(b))
- | (*(unsigned char *)(a+1) ^ *(unsigned char *)(b+1))
- | (*(unsigned char *)(a+2) ^ *(unsigned char *)(b+2))
- | (*(unsigned char *)(a+3) ^ *(unsigned char *)(b+3))
- | (*(unsigned char *)(a+4) ^ *(unsigned char *)(b+4))
- | (*(unsigned char *)(a+5) ^ *(unsigned char *)(b+5))
- | (*(unsigned char *)(a+6) ^ *(unsigned char *)(b+6))
- | (*(unsigned char *)(a+7) ^ *(unsigned char *)(b+7))
- | (*(unsigned char *)(a+8) ^ *(unsigned char *)(b+8))
- | (*(unsigned char *)(a+9) ^ *(unsigned char *)(b+9))
- | (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10))
- | (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11))
- | (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12))
- | (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13))
- | (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14))
- | (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15)));
-}
-
-/* Compare two areas of memory without leaking timing information,
- * and with special optimizations for common sizes. Users should
- * not call this function directly, but should instead use
- * crypto_memneq defined in crypto/algapi.h.
- */
-noinline unsigned long __crypto_memneq(const void *a, const void *b,
- size_t size)
-{
- switch (size) {
- case 16:
- return __crypto_memneq_16(a, b);
- default:
- return __crypto_memneq_generic(a, b, size);
- }
-}
-EXPORT_SYMBOL(__crypto_memneq);
-
-#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */
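
memneq.c, deleted above, is the backend of crypto_memneq(), the
comparator that all the memcmp() substitutions in authenc.c,
authencesn.c, ccm.c and gcm.c earlier in this patch trade away. Unlike
memcmp(), it folds every byte into the result, so its runtime does not
depend on where the two authentication tags first differ. The two
verification styles side by side, as a hedged sketch (the check_tag*()
names are illustrative):

        #include <linux/errno.h>
        #include <linux/string.h>       /* memcmp() */
        #include <linux/types.h>
        #include <crypto/algapi.h>      /* crypto_memneq(), v3.13+ */

        /* v3.12 style, restored by this rewind: memcmp() may return at
         * the first mismatching byte, leaking its position via timing. */
        static int check_tag(const u8 *calc, const u8 *recv, unsigned int len)
        {
                return memcmp(calc, recv, len) ? -EBADMSG : 0;
        }

        /* v3.13 style, reverted here: constant-time in the buffer contents. */
        static int check_tag_ct(const u8 *calc, const u8 *recv, unsigned int len)
        {
                return crypto_memneq(calc, recv, len) ? -EBADMSG : 0;
        }
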
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 001f07c..25a5934 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -493,7 +493,7 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
ret = wait_for_completion_interruptible(&tr->completion);
if (!ret)
ret = tr->err;
- reinit_completion(&tr->completion);
+ INIT_COMPLETION(tr->completion);
}
return ret;
}
@@ -721,7 +721,7 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
ret = wait_for_completion_interruptible(&tr->completion);
if (!ret)
ret = tr->err;
- reinit_completion(&tr->completion);
+ INIT_COMPLETION(tr->completion);
}
return ret;
}
@@ -1242,10 +1242,6 @@ static int do_test(int m)
ret += tcrypt_test("cmac(des3_ede)");
break;
- case 155:
- ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))");
- break;
-
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
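
The INIT_COMPLETION() substitutions here and in testmgr.c below undo
the v3.13 rename to reinit_completion(); both forms merely rearm an
already-initialized struct completion for reuse. From
<linux/completion.h>, the equivalence in sketch form:

        /* v3.12, restored by this rewind: */
        #define INIT_COMPLETION(x)      ((x).done = 0)

        /* v3.13+, reverted here: */
        static inline void reinit_completion(struct completion *x)
        {
                x->done = 0;
        }
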
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 7795550..e091ef6 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -179,7 +179,7 @@ static int do_one_async_hash_op(struct ahash_request *req,
ret = wait_for_completion_interruptible(&tr->completion);
if (!ret)
ret = tr->err;
- reinit_completion(&tr->completion);
+ INIT_COMPLETION(tr->completion);
}
return ret;
}
@@ -336,7 +336,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
ret = wait_for_completion_interruptible(
&tresult.completion);
if (!ret && !(ret = tresult.err)) {
- reinit_completion(&tresult.completion);
+ INIT_COMPLETION(tresult.completion);
break;
}
/* fall through */
@@ -503,16 +503,16 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
goto out;
}
+ sg_init_one(&sg[0], input,
+ template[i].ilen + (enc ? authsize : 0));
+
if (diff_dst) {
output = xoutbuf[0];
output += align_offset;
- sg_init_one(&sg[0], input, template[i].ilen);
sg_init_one(&sgout[0], output,
- template[i].rlen);
- } else {
- sg_init_one(&sg[0], input,
template[i].ilen +
(enc ? authsize : 0));
+ } else {
output = input;
}
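
The sg_init_one() reshuffle above restores the v3.12 sizing, in which
the source scatterlist always covers ilen plus the authentication tag
on encryption, and the separate destination (when diff_dst is set) is
sized the same way rather than to rlen. The tag space in the source
matters for the in-place case, where output aliases input and the
cipher appends the authsize-byte tag to that same buffer. In sketch
form, using the names from the hunk:

        sg_init_one(&sg[0], input, template[i].ilen + (enc ? authsize : 0));
        if (diff_dst)
                sg_init_one(&sgout[0], output,
                            template[i].ilen + (enc ? authsize : 0));
        else
                output = input;         /* in place: the tag lands in input */
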
@@ -543,7 +543,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
ret = wait_for_completion_interruptible(
&result.completion);
if (!ret && !(ret = result.err)) {
- reinit_completion(&result.completion);
+ INIT_COMPLETION(result.completion);
break;
}
case -EBADMSG:
@@ -612,6 +612,12 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
memcpy(q, template[i].input + temp,
template[i].tap[k]);
+ n = template[i].tap[k];
+ if (k == template[i].np - 1 && enc)
+ n += authsize;
+ if (offset_in_page(q) + n < PAGE_SIZE)
+ q[n] = 0;
+
sg_set_buf(&sg[k], q, template[i].tap[k]);
if (diff_dst) {
@@ -619,17 +625,13 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
offset_in_page(IDX[k]);
memset(q, 0, template[i].tap[k]);
+ if (offset_in_page(q) + n < PAGE_SIZE)
+ q[n] = 0;
sg_set_buf(&sgout[k], q,
template[i].tap[k]);
}
- n = template[i].tap[k];
- if (k == template[i].np - 1 && enc)
- n += authsize;
- if (offset_in_page(q) + n < PAGE_SIZE)
- q[n] = 0;
-
temp += template[i].tap[k];
}
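
The q[n] = 0 stores shuffled around by the two hunks above are guard
bytes: for each chunk of a chunked (np > 1) test vector, the byte just
past the expected output is cleared in advance, provided it still lies
on the same page, so a later check can catch an implementation writing
beyond its output. The idiom, annotated:

        n = template[i].tap[k];
        if (k == template[i].np - 1 && enc)
                n += authsize;          /* the final chunk also carries the tag */
        if (offset_in_page(q) + n < PAGE_SIZE)
                q[n] = 0;               /* guard byte, verified after the op */
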
@@ -648,10 +650,10 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
goto out;
}
+ sg[k - 1].length += authsize;
+
if (diff_dst)
sgout[k - 1].length += authsize;
- else
- sg[k - 1].length += authsize;
}
sg_init_table(asg, template[i].anp);
@@ -695,7 +697,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
ret = wait_for_completion_interruptible(
&result.completion);
if (!ret && !(ret = result.err)) {
- reinit_completion(&result.completion);
+ INIT_COMPLETION(result.completion);
break;
}
case -EBADMSG:
@@ -981,7 +983,7 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
ret = wait_for_completion_interruptible(
&result.completion);
if (!ret && !((ret = result.err))) {
- reinit_completion(&result.completion);
+ INIT_COMPLETION(result.completion);
break;
}
/* fall through */
@@ -1084,7 +1086,7 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
ret = wait_for_completion_interruptible(
&result.completion);
if (!ret && !((ret = result.err))) {
- reinit_completion(&result.completion);
+ INIT_COMPLETION(result.completion);
break;
}
/* fall through */