Commit b0e6989c authored by Takashi Iwai's avatar Takashi Iwai

Merge tag 'asoc-v3.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus

ASoC: Fixes for v3.13

A smattering of fixes here: some core ones for rate combination issues with
things other than simple bitmasks, for readback of byte controls, and for
updating the power of value muxes, plus a bunch of driver fixes of varying
severity.

The warning fix in the i.MX FIQ driver addresses a warning introduced by an
earlier fix.
parents 20ce9029 29e24882
......@@ -281,7 +281,7 @@ static int __init sha256_ssse3_mod_init(void)
/* allow AVX to override SSSE3, it's a little faster */
if (avx_usable()) {
#ifdef CONFIG_AS_AVX2
if (boot_cpu_has(X86_FEATURE_AVX2))
if (boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI2))
sha256_transform_asm = sha256_transform_rorx;
else
#endif
......@@ -319,4 +319,4 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
MODULE_ALIAS("sha256");
MODULE_ALIAS("sha384");
MODULE_ALIAS("sha224");
......@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <crypto/cryptd.h>
......@@ -39,7 +40,6 @@
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/twofish.h>
#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
#include <crypto/scatterwalk.h>
#include <linux/workqueue.h>
......
#include <asm/i387.h>
/*
* may_use_simd - whether it is allowable at this time to issue SIMD
* instructions or access the SIMD register file
*/
static __must_check inline bool may_use_simd(void)
{
return irq_fpu_usable();
}
......@@ -174,9 +174,8 @@ config CRYPTO_TEST
help
Quick & dirty crypto test module.
config CRYPTO_ABLK_HELPER_X86
config CRYPTO_ABLK_HELPER
tristate
depends on X86
select CRYPTO_CRYPTD
config CRYPTO_GLUE_HELPER_X86
......@@ -695,7 +694,7 @@ config CRYPTO_AES_NI_INTEL
select CRYPTO_AES_X86_64 if 64BIT
select CRYPTO_AES_586 if !64BIT
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
select CRYPTO_ABLK_HELPER
select CRYPTO_ALGAPI
select CRYPTO_GLUE_HELPER_X86 if 64BIT
select CRYPTO_LRW
......@@ -895,7 +894,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
depends on CRYPTO
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_CAMELLIA_X86_64
select CRYPTO_LRW
......@@ -917,7 +916,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
depends on CRYPTO
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_CAMELLIA_X86_64
select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
......@@ -969,7 +968,7 @@ config CRYPTO_CAST5_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
select CRYPTO_ABLK_HELPER
select CRYPTO_CAST_COMMON
select CRYPTO_CAST5
help
......@@ -992,7 +991,7 @@ config CRYPTO_CAST6_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_CAST_COMMON
select CRYPTO_CAST6
......@@ -1110,7 +1109,7 @@ config CRYPTO_SERPENT_SSE2_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_LRW
......@@ -1132,7 +1131,7 @@ config CRYPTO_SERPENT_SSE2_586
depends on X86 && !64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_LRW
......@@ -1154,7 +1153,7 @@ config CRYPTO_SERPENT_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_LRW
......@@ -1176,7 +1175,7 @@ config CRYPTO_SERPENT_AVX2_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_SERPENT_AVX_X86_64
......@@ -1292,7 +1291,7 @@ config CRYPTO_TWOFISH_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64
......
......@@ -2,8 +2,13 @@
# Cryptographic API
#
# memneq MUST be built with -Os or -O0 to prevent early-return optimizations
# that will defeat memneq's actual purpose to prevent timing attacks.
CFLAGS_REMOVE_memneq.o := -O1 -O2 -O3
CFLAGS_memneq.o := -Os
obj-$(CONFIG_CRYPTO) += crypto.o
crypto-y := api.o cipher.o compress.o
crypto-y := api.o cipher.o compress.o memneq.o
obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
......@@ -105,3 +110,4 @@ obj-$(CONFIG_XOR_BLOCKS) += xor.o
obj-$(CONFIG_ASYNC_CORE) += async_tx/
obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
......@@ -28,10 +28,11 @@
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <crypto/algapi.h>
#include <crypto/cryptd.h>
#include <asm/i387.h>
#include <asm/crypto/ablk_helper.h>
#include <crypto/ablk_helper.h>
#include <asm/simd.h>
int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
......@@ -70,11 +71,11 @@ int ablk_encrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
if (!irq_fpu_usable()) {
if (!may_use_simd()) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
memcpy(cryptd_req, req, sizeof(*req));
*cryptd_req = *req;
ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
return crypto_ablkcipher_encrypt(cryptd_req);
......@@ -89,11 +90,11 @@ int ablk_decrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
if (!irq_fpu_usable()) {
if (!may_use_simd()) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
memcpy(cryptd_req, req, sizeof(*req));
*cryptd_req = *req;
ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
return crypto_ablkcipher_decrypt(cryptd_req);
......
......@@ -16,9 +16,7 @@
#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
......@@ -30,8 +28,6 @@
#include "internal.h"
static const char *skcipher_default_geniv __read_mostly;
struct ablkcipher_buffer {
struct list_head entry;
struct scatter_walk dst;
......@@ -527,8 +523,7 @@ const char *crypto_default_geniv(const struct crypto_alg *alg)
alg->cra_blocksize)
return "chainiv";
return alg->cra_flags & CRYPTO_ALG_ASYNC ?
"eseqiv" : skcipher_default_geniv;
return "eseqiv";
}
static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
......@@ -709,17 +704,3 @@ struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
static int __init skcipher_module_init(void)
{
skcipher_default_geniv = num_possible_cpus() > 1 ?
"eseqiv" : "chainiv";
return 0;
}
static void skcipher_module_exit(void)
{
}
module_init(skcipher_module_init);
module_exit(skcipher_module_exit);
......@@ -230,11 +230,11 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx,
*/
if (byte_count < DEFAULT_BLK_SZ) {
empty_rbuf:
for (; ctx->rand_data_valid < DEFAULT_BLK_SZ;
ctx->rand_data_valid++) {
while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
*ptr = ctx->rand_data[ctx->rand_data_valid];
ptr++;
byte_count--;
ctx->rand_data_valid++;
if (byte_count == 0)
goto done;
}
......
......@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <crypto/algapi.h>
#include "public_key.h"
MODULE_LICENSE("GPL");
......@@ -189,12 +190,12 @@ static int RSA_verify(const u8 *H, const u8 *EM, size_t k, size_t hash_size,
}
}
if (memcmp(asn1_template, EM + T_offset, asn1_size) != 0) {
if (crypto_memneq(asn1_template, EM + T_offset, asn1_size) != 0) {
kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]");
return -EBADMSG;
}
if (memcmp(H, EM + T_offset + asn1_size, hash_size) != 0) {
if (crypto_memneq(H, EM + T_offset + asn1_size, hash_size) != 0) {
kleave(" = -EKEYREJECTED [EM[T] hash mismatch]");
return -EKEYREJECTED;
}
......
......@@ -18,59 +18,11 @@
#include <linux/asn1_decoder.h>
#include <keys/asymmetric-subtype.h>
#include <keys/asymmetric-parser.h>
#include <keys/system_keyring.h>
#include <crypto/hash.h>
#include "asymmetric_keys.h"
#include "public_key.h"
#include "x509_parser.h"
/*
* Find a key in the given keyring by issuer and authority.
*/
static struct key *x509_request_asymmetric_key(
struct key *keyring,
const char *signer, size_t signer_len,
const char *authority, size_t auth_len)
{
key_ref_t key;
char *id;
/* Construct an identifier. */
id = kmalloc(signer_len + 2 + auth_len + 1, GFP_KERNEL);
if (!id)
return ERR_PTR(-ENOMEM);
memcpy(id, signer, signer_len);
id[signer_len + 0] = ':';
id[signer_len + 1] = ' ';
memcpy(id + signer_len + 2, authority, auth_len);
id[signer_len + 2 + auth_len] = 0;
pr_debug("Look up: \"%s\"\n", id);
key = keyring_search(make_key_ref(keyring, 1),
&key_type_asymmetric, id);
if (IS_ERR(key))
pr_debug("Request for module key '%s' err %ld\n",
id, PTR_ERR(key));
kfree(id);
if (IS_ERR(key)) {
switch (PTR_ERR(key)) {
/* Hide some search errors */
case -EACCES:
case -ENOTDIR:
case -EAGAIN:
return ERR_PTR(-ENOKEY);
default:
return ERR_CAST(key);
}
}
pr_devel("<==%s() = 0 [%x]\n", __func__, key_serial(key_ref_to_ptr(key)));
return key_ref_to_ptr(key);
}
/*
* Set up the signature parameters in an X.509 certificate. This involves
* digesting the signed data and extracting the signature.
......@@ -150,33 +102,6 @@ int x509_check_signature(const struct public_key *pub,
}
EXPORT_SYMBOL_GPL(x509_check_signature);
/*
* Check the new certificate against the ones in the trust keyring. If one of
* those is the signing key and validates the new certificate, then mark the
* new certificate as being trusted.
*
* Return 0 if the new certificate was successfully validated, 1 if we couldn't
* find a matching parent certificate in the trusted list and an error if there
* is a matching certificate but the signature check fails.
*/
static int x509_validate_trust(struct x509_certificate *cert,
struct key *trust_keyring)
{
const struct public_key *pk;
struct key *key;
int ret = 1;
key = x509_request_asymmetric_key(trust_keyring,
cert->issuer, strlen(cert->issuer),
cert->authority,
strlen(cert->authority));
if (!IS_ERR(key)) {
pk = key->payload.data;
ret = x509_check_signature(pk, cert);
}
return ret;
}
/*
* Attempt to parse a data blob for a key as an X509 certificate.
*/
......@@ -230,13 +155,9 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
/* Check the signature on the key if it appears to be self-signed */
if (!cert->authority ||
strcmp(cert->fingerprint, cert->authority) == 0) {
ret = x509_check_signature(cert->pub, cert); /* self-signed */
ret = x509_check_signature(cert->pub, cert);
if (ret < 0)
goto error_free_cert;
} else {
ret = x509_validate_trust(cert, system_trusted_keyring);
if (!ret)
prep->trusted = 1;
}
/* Propose a description */
......
......@@ -52,40 +52,52 @@ static void authenc_request_complete(struct aead_request *req, int err)
aead_request_complete(req, err);
}
static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
unsigned int keylen)
int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
unsigned int keylen)
{
unsigned int authkeylen;
unsigned int enckeylen;
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct crypto_ahash *auth = ctx->auth;
struct crypto_ablkcipher *enc = ctx->enc;
struct rtattr *rta = (void *)key;
struct rtattr *rta = (struct rtattr *)key;
struct crypto_authenc_key_param *param;
int err = -EINVAL;
if (!RTA_OK(rta, keylen))
goto badkey;
return -EINVAL;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
goto badkey;
return -EINVAL;
if (RTA_PAYLOAD(rta) < sizeof(*param))
goto badkey;
return -EINVAL;
param = RTA_DATA(rta);
enckeylen = be32_to_cpu(param->enckeylen);
keys->enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (keylen < enckeylen)
goto badkey;
if (keylen < keys->enckeylen)
return -EINVAL;
authkeylen = keylen - enckeylen;
keys->authkeylen = keylen - keys->enckeylen;
keys->authkey = key;
keys->enckey = key + keys->authkeylen;
return 0;
}
EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys);
static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
unsigned int keylen)
{
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct crypto_ahash *auth = ctx->auth;
struct crypto_ablkcipher *enc = ctx->enc;
struct crypto_authenc_keys keys;
int err = -EINVAL;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
err = crypto_ahash_setkey(auth, key, authkeylen);
err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) &
CRYPTO_TFM_RES_MASK);
......@@ -95,7 +107,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) &
CRYPTO_TFM_RES_MASK);
......@@ -188,7 +200,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
......@@ -227,7 +239,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
......@@ -462,7 +474,7 @@ static int crypto_authenc_verify(struct aead_request *req,
ihash = ohash + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
}
static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
......
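The new crypto_authenc_extractkeys() helper above formalizes the combined-key
layout that authenc-style templates consume: an rtattr of type
CRYPTO_AUTHENC_KEYA_PARAM carrying the big-endian cipher key length, followed
by the authentication key and then the cipher key. A minimal sketch of how a
caller might assemble such a blob is below; build_authenc_key() and its buffer
handling are illustrative only, while the rtattr macros and
struct crypto_authenc_key_param are the existing definitions from
<linux/rtnetlink.h> and <crypto/authenc.h>.

#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <linux/types.h>
#include <crypto/authenc.h>

/* Illustrative only: lay out authkey || enckey behind an rtattr header in
 * the format that crypto_authenc_extractkeys() parses. Returns the number
 * of bytes written; the caller must size 'buf' accordingly. */
static unsigned int build_authenc_key(u8 *buf,
				      const u8 *authkey, unsigned int authkeylen,
				      const u8 *enckey, unsigned int enckeylen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct crypto_authenc_key_param *param;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);	/* read back with be32_to_cpu() */

	memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(buf + RTA_SPACE(sizeof(*param)) + authkeylen, enckey, enckeylen);

	return RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
}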
......@@ -59,37 +59,19 @@ static void authenc_esn_request_complete(struct aead_request *req, int err)
static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
unsigned int keylen)
{
unsigned int authkeylen;
unsigned int enckeylen;
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct crypto_ahash *auth = ctx->auth;
struct crypto_ablkcipher *enc = ctx->enc;
struct rtattr *rta = (void *)key;
struct crypto_authenc_key_param *param;
struct crypto_authenc_keys keys;
int err = -EINVAL;
if (!RTA_OK(rta, keylen))
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
goto badkey;
if (RTA_PAYLOAD(rta) < sizeof(*param))
goto badkey;
param = RTA_DATA(rta);
enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (keylen < enckeylen)
goto badkey;
authkeylen = keylen - enckeylen;
crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) &
CRYPTO_TFM_REQ_MASK);
err = crypto_ahash_setkey(auth, key, authkeylen);
err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) &
CRYPTO_TFM_RES_MASK);
......@@ -99,7 +81,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
CRYPTO_TFM_REQ_MASK);
err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) &
CRYPTO_TFM_RES_MASK);
......@@ -247,7 +229,7 @@ static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *ar
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
......@@ -296,7 +278,7 @@ static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *a
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
......@@ -336,7 +318,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
......@@ -568,7 +550,7 @@ static int crypto_authenc_esn_verify(struct aead_request *req)
ihash = ohash + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
}
static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
......
......@@ -363,7 +363,7 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
if (!err) {
err = crypto_ccm_auth(req, req->dst, cryptlen);
if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize))
if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))
err = -EBADMSG;
}
aead_request_complete(req, err);
......@@ -422,7 +422,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
return err;
/* verify */
if (memcmp(authtag, odata, authsize))
if (crypto_memneq(authtag, odata, authsize))
return -EBADMSG;
return err;
......
......@@ -582,7 +582,7 @@ static int crypto_gcm_verify(struct aead_request *req,
crypto_xor(auth_tag, iauth_tag, 16);
scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
}
static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
......
/*
* Constant-time equality testing of memory regions.
*
* Authors:
*
* James Yonan <james@openvpn.net>
* Daniel Borkmann <dborkman@redhat.com>
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* BSD LICENSE
*
* Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of OpenVPN Technologies nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <crypto/algapi.h>
#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ
/* Generic path for arbitrary size */
static inline unsigned long
__crypto_memneq_generic(const void *a, const void *b, size_t size)
{
unsigned long neq = 0;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
while (size >= sizeof(unsigned long)) {
neq |= *(unsigned long *)a ^ *(unsigned long *)b;
a += sizeof(unsigned long);
b += sizeof(unsigned long);
size -= sizeof(unsigned long);
}
#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
while (size > 0) {
neq |= *(unsigned char *)a ^ *(unsigned char *)b;
a += 1;
b += 1;
size -= 1;
}
return neq;
}
/* Loop-free fast-path for frequently used 16-byte size */
static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (sizeof(unsigned long) == 8)
return ((*(unsigned long *)(a) ^ *(unsigned long *)(b))
| (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8)));
else if (sizeof(unsigned int) == 4)
return ((*(unsigned int *)(a) ^ *(unsigned int *)(b))
| (*(unsigned int *)(a+4) ^ *(unsigned int *)(b+4))
| (*(unsigned int *)(a+8) ^ *(unsigned int *)(b+8))
| (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12)));
else
#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
return ((*(unsigned char *)(a) ^ *(unsigned char *)(b))
| (*(unsigned char *)(a+1) ^ *(unsigned char *)(b+1))
| (*(unsigned char *)(a+2) ^ *(unsigned char *)(b+2))
| (*(unsigned char *)(a+3) ^ *(unsigned char *)(b+3))
| (*(unsigned char *)(a+4) ^ *(unsigned char *)(b+4))
| (*(unsigned char *)(a+5) ^ *(unsigned char *)(b+5))
| (*(unsigned char *)(a+6) ^ *(unsigned char *)(b+6))
| (*(unsigned char *)(a+7) ^ *(unsigned char *)(b+7))
| (*(unsigned char *)(a+8) ^ *(unsigned char *)(b+8))
| (*(unsigned char *)(a+9) ^ *(unsigned char *)(b+9))
| (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10))
| (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11))
| (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12))
| (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13))
| (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14))
| (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15)));
}
/* Compare two areas of memory without leaking timing information,
* and with special optimizations for common sizes. Users should
* not call this function directly, but should instead use
* crypto_memneq defined in crypto/algapi.h.
*/
noinline unsigned long __crypto_memneq(const void *a, const void *b,
size_t size)
{
switch (size) {
case 16:
return __crypto_memneq_16(a, b);
default:
return __crypto_memneq_generic(a, b, size);
}
}
EXPORT_SYMBOL(__crypto_memneq);
#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */
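As the header comment notes, callers use crypto_memneq() from
<crypto/algapi.h> rather than __crypto_memneq() directly; the rest of this
series converts authentication-tag checks from memcmp(), which may return
early and leak timing, to that helper. A minimal sketch of the resulting
pattern follows, with verify_tag() and its argument names purely hypothetical.

#include <linux/errno.h>
#include <linux/types.h>
#include <crypto/algapi.h>

/* Hypothetical helper: compare a computed MAC against the received one.
 * crypto_memneq() returns nonzero if the regions differ and inspects every
 * byte regardless of where the first mismatch occurs. */
static int verify_tag(const u8 *computed, const u8 *received,
		      unsigned int authsize)
{
	/* was: return memcmp(computed, received, authsize) ? -EBADMSG : 0; */
	return crypto_memneq(computed, received, authsize) ? -EBADMSG : 0;
}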
......@@ -184,7 +184,7 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
struct acpi_buffer *output_buffer);
acpi_status
acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
acpi_rs_create_aml_resources(struct acpi_buffer *resource_list,
struct acpi_buffer *output_buffer);
acpi_status
......@@ -227,8 +227,8 @@ acpi_rs_get_list_length(u8 * aml_buffer,
u32 aml_buffer_length, acpi_size * size_needed);
acpi_status
acpi_rs_get_aml_length(struct acpi_resource *linked_list_buffer,
acpi_size * size_needed);
acpi_rs_get_aml_length(struct acpi_resource *resource_list,
acpi_size resource_list_size, acpi_size * size_needed);
acpi_status
acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
......
......@@ -106,6 +106,7 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name)
void acpi_ns_delete_node(struct acpi_namespace_node *node)
{
union acpi_operand_object *obj_desc;
union acpi_operand_object *next_desc;
ACPI_FUNCTION_NAME(ns_delete_node);
......@@ -114,12 +115,13 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
acpi_ns_detach_object(node);
/*
* Delete an attached data object if present (an object that was created
* and attached via acpi_attach_data). Note: After any normal object is
* detached above, the only possible remaining object is a data object.
* Delete an attached data object list if present (objects that were
* attached via acpi_attach_data). Note: After any normal object is
* detached above, the only possible remaining object(s) are data
* objects, in a linked list.
*/
obj_desc = node->object;
if (obj_desc && (obj_desc->common.type == ACPI_TYPE_LOCAL_DATA)) {
while (obj_desc && (obj_desc->common.type == ACPI_TYPE_LOCAL_DATA)) {
/* Invoke the attached data deletion handler if present */
......@@ -127,7 +129,15 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
obj_desc->data.handler(node, obj_desc->data.pointer);
}
next_desc = obj_desc->common.next_object;
acpi_ut_remove_reference(obj_desc);
obj_desc = next_desc;
}
/* Special case for the statically allocated root node */
if (node == acpi_gbl_root_node) {
return;
}
/* Now we can delete the node */
......
......@@ -593,24 +593,26 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
void acpi_ns_terminate(void)
{
union acpi_operand_object *obj_desc;
acpi_status status;
ACPI_FUNCTION_TRACE(ns_terminate);
/*
* 1) Free the entire namespace -- all nodes and objects
*
* Delete all object descriptors attached to namepsace nodes
* Free the entire namespace -- all nodes and all objects
* attached to the nodes
*/
acpi_ns_delete_namespace_subtree(acpi_gbl_root_node);
/* Detach any objects attached to the root */
/* Delete any objects attached to the root node */
obj_desc = acpi_ns_get_attached_object(acpi_gbl_root_node);
if (obj_desc) {
acpi_ns_detach_object(acpi_gbl_root_node);
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return_VOID;
}
acpi_ns_delete_node(acpi_gbl_root_node);
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n"));
return_VOID;
}
......
......@@ -174,6 +174,7 @@ acpi_rs_stream_option_length(u32 resource_length,
* FUNCTION: acpi_rs_get_aml_length
*
* PARAMETERS: resource - Pointer to the resource linked list
* resource_list_size - Size of the resource linked list
* size_needed - Where the required size is returned
*
* RETURN: Status
......@@ -185,16 +186,20 @@ acpi_rs_stream_option_length(u32 resource_length,
******************************************************************************/
acpi_status
acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
acpi_rs_get_aml_length(struct acpi_resource *resource,
acpi_size resource_list_size, acpi_size * size_needed)
{
acpi_size aml_size_needed = 0;
struct acpi_resource *resource_end;
acpi_rs_length total_size;
ACPI_FUNCTION_TRACE(rs_get_aml_length);
/* Traverse entire list of internal resource descriptors */
while (resource) {
resource_end =
ACPI_ADD_PTR(struct acpi_resource, resource, resource_list_size);
while (resource < resource_end) {
/* Validate the descriptor type */
......
......@@ -418,22 +418,21 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
*
* FUNCTION: acpi_rs_create_aml_resources
*
* PARAMETERS: linked_list_buffer - Pointer to the resource linked list
* output_buffer - Pointer to the user's buffer
* PARAMETERS: resource_list - Pointer to the resource list buffer
* output_buffer - Where the AML buffer is returned
*
* RETURN: Status AE_OK if okay, else a valid acpi_status code.
* If the output_buffer is too small, the error will be
* AE_BUFFER_OVERFLOW and output_buffer->Length will point
* to the size buffer needed.
*
* DESCRIPTION: Takes the linked list of device resources and
* creates a bytestream to be used as input for the
* _SRS control method.
* DESCRIPTION: Converts a list of device resources to an AML bytestream
* to be used as input for the _SRS control method.
*
******************************************************************************/
acpi_status
acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
acpi_rs_create_aml_resources(struct acpi_buffer *resource_list,
struct acpi_buffer *output_buffer)
{
acpi_status status;
......@@ -441,16 +440,16 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
ACPI_FUNCTION_TRACE(rs_create_aml_resources);
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "LinkedListBuffer = %p\n",
linked_list_buffer));
/* Params already validated, no need to re-validate here */
/*
* Params already validated, so we don't re-validate here
*
* Pass the linked_list_buffer into a module that calculates
* the buffer size needed for the byte stream.
*/
status = acpi_rs_get_aml_length(linked_list_buffer, &aml_size_needed);
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ResourceList Buffer = %p\n",
resource_list->pointer));
/* Get the buffer size needed for the AML byte stream */
status = acpi_rs_get_aml_length(resource_list->pointer,
resource_list->length,
&aml_size_needed);
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n",
(u32)aml_size_needed, acpi_format_exception(status)));
......@@ -467,10 +466,9 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
/* Do the conversion */
status =
acpi_rs_convert_resources_to_aml(linked_list_buffer,
aml_size_needed,
output_buffer->pointer);
status = acpi_rs_convert_resources_to_aml(resource_list->pointer,
aml_size_needed,
output_buffer->pointer);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
......