Skip to content
Snippets Groups Projects
Commit d6933f2e authored by Eric Biggers, committed by Greg Kroah-Hartman
Browse files

crypto: x86/aegis128 - access 32-bit arguments as 32-bit


commit 3b2f2d22fb424e9bebda4dbf6676cbfc7f9f62cd upstream.

Fix the AEGIS assembly code to access 'unsigned int' arguments as 32-bit
values instead of 64-bit, since the upper bits of the corresponding
64-bit registers are not guaranteed to be zero.

Note: there haven't been any reports of this bug actually causing
incorrect behavior.  Neither gcc nor clang guarantee zero-extension to
64 bits, but zero-extension is likely to happen in practice because most
instructions that operate on 32-bit registers zero-extend to 64 bits.

Fixes: 1d373d4e ("crypto: x86 - Add optimized AEGIS implementations")
Cc: stable@vger.kernel.org
Reviewed-by: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent bc9b40fa
No related branches found
No related tags found
1 merge request: !176 🤖 Sync Bot: Update v6.12-ktn to Latest Stable Kernel (v6.12.4)
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
#define T1 %xmm7 #define T1 %xmm7
#define STATEP %rdi #define STATEP %rdi
#define LEN %rsi #define LEN %esi
#define SRC %rdx #define SRC %rdx
#define DST %rcx #define DST %rcx
...@@ -76,32 +76,32 @@ SYM_FUNC_START_LOCAL(__load_partial) ...@@ -76,32 +76,32 @@ SYM_FUNC_START_LOCAL(__load_partial)
xor %r9d, %r9d xor %r9d, %r9d
pxor MSG, MSG pxor MSG, MSG
mov LEN, %r8 mov LEN, %r8d
and $0x1, %r8 and $0x1, %r8
jz .Lld_partial_1 jz .Lld_partial_1
mov LEN, %r8 mov LEN, %r8d
and $0x1E, %r8 and $0x1E, %r8
add SRC, %r8 add SRC, %r8
mov (%r8), %r9b mov (%r8), %r9b
.Lld_partial_1: .Lld_partial_1:
mov LEN, %r8 mov LEN, %r8d
and $0x2, %r8 and $0x2, %r8
jz .Lld_partial_2 jz .Lld_partial_2
mov LEN, %r8 mov LEN, %r8d
and $0x1C, %r8 and $0x1C, %r8
add SRC, %r8 add SRC, %r8
shl $0x10, %r9 shl $0x10, %r9
mov (%r8), %r9w mov (%r8), %r9w
.Lld_partial_2: .Lld_partial_2:
mov LEN, %r8 mov LEN, %r8d
and $0x4, %r8 and $0x4, %r8
jz .Lld_partial_4 jz .Lld_partial_4
mov LEN, %r8 mov LEN, %r8d
and $0x18, %r8 and $0x18, %r8
add SRC, %r8 add SRC, %r8
shl $32, %r9 shl $32, %r9
...@@ -111,11 +111,11 @@ SYM_FUNC_START_LOCAL(__load_partial) ...@@ -111,11 +111,11 @@ SYM_FUNC_START_LOCAL(__load_partial)
.Lld_partial_4: .Lld_partial_4:
movq %r9, MSG movq %r9, MSG
mov LEN, %r8 mov LEN, %r8d
and $0x8, %r8 and $0x8, %r8
jz .Lld_partial_8 jz .Lld_partial_8
mov LEN, %r8 mov LEN, %r8d
and $0x10, %r8 and $0x10, %r8
add SRC, %r8 add SRC, %r8
pslldq $8, MSG pslldq $8, MSG
...@@ -139,7 +139,7 @@ SYM_FUNC_END(__load_partial) ...@@ -139,7 +139,7 @@ SYM_FUNC_END(__load_partial)
* %r10 * %r10
*/ */
SYM_FUNC_START_LOCAL(__store_partial) SYM_FUNC_START_LOCAL(__store_partial)
mov LEN, %r8 mov LEN, %r8d
mov DST, %r9 mov DST, %r9
movq T0, %r10 movq T0, %r10
...@@ -677,7 +677,7 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec_tail) ...@@ -677,7 +677,7 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec_tail)
call __store_partial call __store_partial
/* mask with byte count: */ /* mask with byte count: */
movq LEN, T0 movd LEN, T0
punpcklbw T0, T0 punpcklbw T0, T0
punpcklbw T0, T0 punpcklbw T0, T0
punpcklbw T0, T0 punpcklbw T0, T0
...@@ -702,7 +702,8 @@ SYM_FUNC_END(crypto_aegis128_aesni_dec_tail) ...@@ -702,7 +702,8 @@ SYM_FUNC_END(crypto_aegis128_aesni_dec_tail)
/* /*
* void crypto_aegis128_aesni_final(void *state, void *tag_xor, * void crypto_aegis128_aesni_final(void *state, void *tag_xor,
* u64 assoclen, u64 cryptlen); * unsigned int assoclen,
* unsigned int cryptlen);
*/ */
SYM_FUNC_START(crypto_aegis128_aesni_final) SYM_FUNC_START(crypto_aegis128_aesni_final)
FRAME_BEGIN FRAME_BEGIN
...@@ -715,8 +716,8 @@ SYM_FUNC_START(crypto_aegis128_aesni_final) ...@@ -715,8 +716,8 @@ SYM_FUNC_START(crypto_aegis128_aesni_final)
movdqu 0x40(STATEP), STATE4 movdqu 0x40(STATEP), STATE4
/* prepare length block: */ /* prepare length block: */
movq %rdx, MSG movd %edx, MSG
movq %rcx, T0 movd %ecx, T0
pslldq $8, T0 pslldq $8, T0
pxor T0, MSG pxor T0, MSG
psllq $3, MSG /* multiply by 8 (to get bit count) */ psllq $3, MSG /* multiply by 8 (to get bit count) */
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment