diff --git a/arch/x86/crypto/aes-helper_glue.h b/arch/x86/crypto/aes-helper_glue.h
index af422e2c9263..fbb5a70f1e1b 100644
--- a/arch/x86/crypto/aes-helper_glue.h
+++ b/arch/x86/crypto/aes-helper_glue.h
@@ -29,14 +29,14 @@ struct aes_xts_ctx {
 	struct crypto_aes_ctx crypt_ctx AES_ALIGN_ATTR;
 };
 
-static inline unsigned long aes_align_addr(unsigned long addr)
+static inline void *aes_align_addr(void *addr)
 {
-	return (crypto_tfm_ctx_alignment() >= AES_ALIGN) ? ALIGN(addr, 1) : ALIGN(addr, AES_ALIGN);
+	return (crypto_tfm_ctx_alignment() >= AES_ALIGN) ? addr : PTR_ALIGN(addr, AES_ALIGN);
 }
 
 static inline struct aes_xts_ctx *aes_xts_ctx(struct crypto_skcipher *tfm)
 {
-	return (struct aes_xts_ctx *)aes_align_addr((unsigned long)crypto_skcipher_ctx(tfm));
+	return (struct aes_xts_ctx *)aes_align_addr(crypto_skcipher_ctx(tfm));
 }
 
 static inline int
diff --git a/arch/x86/crypto/aeskl-intel_asm.S b/arch/x86/crypto/aeskl-intel_asm.S
index 402dd7796375..61addc61dd4e 100644
--- a/arch/x86/crypto/aeskl-intel_asm.S
+++ b/arch/x86/crypto/aeskl-intel_asm.S
@@ -45,9 +45,9 @@
 #define GF128MUL_MASK %xmm11
 
 /*
- * int aeskl_setkey(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len)
+ * int __aeskl_setkey(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len)
  */
-SYM_FUNC_START(aeskl_setkey)
+SYM_FUNC_START(__aeskl_setkey)
 	FRAME_BEGIN
 	movl %edx, 480(HANDLEP)
 	movdqu (UKEYP), STATE1
@@ -70,7 +70,7 @@ SYM_FUNC_START(aeskl_setkey)
 	xor AREG, AREG
 	FRAME_END
 	RET
-SYM_FUNC_END(aeskl_setkey)
+SYM_FUNC_END(__aeskl_setkey)
 
 /*
  * int __aeskl_enc(const void *ctx, u8 *dst, const u8 *src)
@@ -100,34 +100,6 @@ SYM_FUNC_START(__aeskl_enc)
 	RET
 SYM_FUNC_END(__aeskl_enc)
 
-/*
- * int __aeskl_dec(const void *ctx, u8 *dst, const u8 *src)
- */
-SYM_FUNC_START(__aeskl_dec)
-	FRAME_BEGIN
-	movdqu (INP), STATE
-	mov 480(HANDLEP), KLEN
-
-	cmp $16, KLEN
-	je .Ldec_128
-	aesdec256kl (HANDLEP), STATE
-	jz .Ldec_err
-	jmp .Ldec_noerr
-.Ldec_128:
-	aesdec128kl (HANDLEP), STATE
-	jz .Ldec_err
-
-.Ldec_noerr:
-	xor AREG, AREG
-	jmp .Ldec_end
-.Ldec_err:
-	mov $(-EINVAL), AREG
-.Ldec_end:
-	movdqu STATE, (OUTP)
-	FRAME_END
-	RET
-SYM_FUNC_END(__aeskl_dec)
-
 /*
  * XTS implementation
  */
diff --git a/arch/x86/crypto/aeskl-intel_glue.c b/arch/x86/crypto/aeskl-intel_glue.c
index c6824c50fc72..0b9a96de24dc 100644
--- a/arch/x86/crypto/aeskl-intel_glue.c
+++ b/arch/x86/crypto/aeskl-intel_glue.c
@@ -22,18 +22,27 @@
 #include "aes-helper_glue.h"
 #include "aesni-intel_glue.h"
 
-asmlinkage int aeskl_setkey(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int keylen);
+asmlinkage int __aeskl_setkey(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int keylen);
 
 asmlinkage int __aeskl_enc(const void *ctx, u8 *out, const u8 *in);
-asmlinkage int __aeskl_dec(const void *ctx, u8 *out, const u8 *in);
 
 asmlinkage int __aeskl_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in,
				   unsigned int len, u8 *iv);
 asmlinkage int __aeskl_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in,
				   unsigned int len, u8 *iv);
 
-static int aeskl_setkey_common(struct crypto_tfm *tfm, void *raw_ctx, const u8 *in_key,
-			       unsigned int keylen)
+/*
+ * In the event of hardware failure, the wrapping key can be lost
+ * after waking from a deep sleep state and is then no longer
+ * usable. The feature state can be queried via valid_keylocker().
+ *
+ * Such disabling can happen at any preemptible point. So, to avoid
+ * the race condition, check the availability on every use along
+ * with kernel_fpu_begin().
+ */
+
+static int aeskl_setkey(struct crypto_tfm *tfm, void *raw_ctx, const u8 *in_key,
+			unsigned int keylen)
 {
 	/* raw_ctx is an aligned address via xts_setkey_common() */
 	struct crypto_aes_ctx *ctx = (struct crypto_aes_ctx *)raw_ctx;
@@ -54,22 +63,13 @@ static int aeskl_setkey_common(struct crypto_tfm *tfm, void *raw_ctx, const u8 *
 		if (!valid_keylocker())
 			err = -ENODEV;
 		else
-			err = aeskl_setkey(ctx, in_key, keylen);
+			err = __aeskl_setkey(ctx, in_key, keylen);
 	}
 	kernel_fpu_end();
 
 	return err;
 }
 
-/*
- * The below wrappers for the encryption/decryption functions
- * incorporate the feature availability check:
- *
- * In the rare event of hardware failure, the wrapping key can be lost
- * after wake-up from a deep sleep state. Then, this check helps to
- * avoid any subsequent misuse with populating a proper error code.
- */
-
 static inline int aeskl_enc(const void *ctx, u8 *out, const u8 *in)
 {
 	if (!valid_keylocker())
@@ -78,14 +78,6 @@ static inline int aeskl_enc(const void *ctx, u8 *out, const u8 *in)
 	return __aeskl_enc(ctx, out, in);
 }
 
-static inline int aeskl_dec(const void *ctx, u8 *out, const u8 *in)
-{
-	if (!valid_keylocker())
-		return -ENODEV;
-
-	return __aeskl_dec(ctx, out, in);
-}
-
 static inline int aeskl_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in,
				    unsigned int len, u8 *iv)
 {
@@ -104,31 +96,22 @@ static inline int aeskl_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, c
 	return __aeskl_xts_decrypt(ctx, out, in, len, iv);
 }
 
-static int aeskl_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
+static int xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
 {
-	return xts_setkey_common(tfm, key, keylen, aeskl_setkey_common);
+	return xts_setkey_common(tfm, key, keylen, aeskl_setkey);
 }
 
-static inline int xts_keylen(struct skcipher_request *req, u32 *keylen)
+static inline u32 xts_keylen(struct skcipher_request *req)
 {
 	struct aes_xts_ctx *ctx = aes_xts_ctx(crypto_skcipher_reqtfm(req));
 
-	if (ctx->crypt_ctx.key_length != ctx->tweak_ctx.key_length)
-		return -EINVAL;
-
-	*keylen = ctx->crypt_ctx.key_length;
-	return 0;
+	return ctx->crypt_ctx.key_length;
 }
 
 static int xts_encrypt(struct skcipher_request *req)
 {
-	u32 keylen;
-	int err;
-
-	err = xts_keylen(req, &keylen);
-	if (err)
-		return err;
+	u32 keylen = xts_keylen(req);
 
 	if (likely(keylen != AES_KEYSIZE_192))
 		return xts_crypt_common(req, aeskl_xts_encrypt, aeskl_enc);
@@ -138,12 +121,7 @@ static int xts_encrypt(struct skcipher_request *req)
 
 static int xts_decrypt(struct skcipher_request *req)
 {
-	u32 keylen;
-	int rc;
-
-	rc = xts_keylen(req, &keylen);
-	if (rc)
-		return rc;
+	u32 keylen = xts_keylen(req);
 
 	if (likely(keylen != AES_KEYSIZE_192))
 		return xts_crypt_common(req, aeskl_xts_decrypt, aeskl_enc);
@@ -166,7 +144,7 @@ static struct skcipher_alg aeskl_skciphers[] = {
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.walksize	= 2 * AES_BLOCK_SIZE,
-		.setkey		= aeskl_xts_setkey,
+		.setkey		= xts_setkey,
		.encrypt	= xts_encrypt,
		.decrypt	= xts_decrypt,
	}
@@ -177,7 +155,6 @@ static struct simd_skcipher_alg *aeskl_simd_skciphers[ARRAY_SIZE(aeskl_skciphers
 static int __init aeskl_init(void)
 {
 	u32 eax, ebx, ecx, edx;
-	int err;
 
 	if (!valid_keylocker())
 		return -ENODEV;
@@ -194,12 +171,8 @@ static int __init aeskl_init(void)
 	if (!boot_cpu_has(X86_FEATURE_AES))
 		return -ENODEV;
 
-	err = simd_register_skciphers_compat(aeskl_skciphers, ARRAY_SIZE(aeskl_skciphers),
-					     aeskl_simd_skciphers);
-	if (err)
-		return err;
-
-	return 0;
+	return simd_register_skciphers_compat(aeskl_skciphers, ARRAY_SIZE(aeskl_skciphers),
+					      aeskl_simd_skciphers);
 }
 
 static void __exit aeskl_exit(void)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 3aaf5504e349..774e3a78b662 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -108,15 +108,6 @@ int aesni_enc(const void *ctx, u8 *out, const u8 *in)
 EXPORT_SYMBOL_GPL(aesni_enc);
 #endif
 
-int aesni_dec(const void *ctx, u8 *out, const u8 *in)
-{
-	__aesni_dec(ctx, out, in);
-	return 0;
-}
-#if IS_MODULE(CONFIG_CRYPTO_AES_KL)
-EXPORT_SYMBOL_GPL(aesni_dec);
-#endif
-
 #define AVX_GEN2_OPTSIZE 640
 #define AVX_GEN4_OPTSIZE 4096
 
@@ -241,27 +232,19 @@ static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx2);
 static inline struct
 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
 {
-	unsigned long align = AES_ALIGN;
-
-	if (align <= crypto_tfm_ctx_alignment())
-		align = 1;
-	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
+	return (struct aesni_rfc4106_gcm_ctx *)aes_align_addr(crypto_aead_ctx(tfm));
 }
 
 static inline struct
 generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
 {
-	unsigned long align = AES_ALIGN;
-
-	if (align <= crypto_tfm_ctx_alignment())
-		align = 1;
-	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
+	return (struct generic_gcmaes_ctx *)aes_align_addr(crypto_aead_ctx(tfm));
 }
 #endif
 
 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 {
-	return (struct crypto_aes_ctx *)aes_align_addr((unsigned long)raw_ctx);
+	return (struct crypto_aes_ctx *)aes_align_addr(raw_ctx);
 }
 
 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
diff --git a/arch/x86/crypto/aesni-intel_glue.h b/arch/x86/crypto/aesni-intel_glue.h
index 81ecacb4e54c..5b1919f49efe 100644
--- a/arch/x86/crypto/aesni-intel_glue.h
+++ b/arch/x86/crypto/aesni-intel_glue.h
@@ -8,7 +8,6 @@
 int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len);
 
 int aesni_enc(const void *ctx, u8 *out, const u8 *in);
-int aesni_dec(const void *ctx, u8 *out, const u8 *in);
 
 int aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in,
		      unsigned int len, u8 *iv);