From 833ca409e17c10f4affb5879e22a03fdf1933439 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel <ardb@kernel.org>
Date: Wed, 8 Jul 2020 12:11:18 +0300
Subject: [PATCH 059/124] crypto: x86/chacha-sse3 - use unaligned loads for
 state array

commit e79a31715193686e92dadb4caedfbb1f5de3659c upstream.

Due to the fact that the x86 port does not support allocating objects
on the stack with an alignment that exceeds 8 bytes, we have a rather
ugly hack in the x86 code for ChaCha to ensure that the state array is
aligned to 16 bytes, allowing the SSE3 implementation of the algorithm
to use aligned loads.

Given that the performance benefit of using aligned loads appears to
be limited (~0.25% for 1k blocks using tcrypt on a Corei7-8650U), and
the fact that this hack has leaked into generic ChaCha code, let's just
remove this optimization and use unaligned loads instead.
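
As an illustration (drawn from the code removed below, not part of the
original commit message), the hack in question over-allocates the state
array on the stack and aligns the pointer by hand:

	u32 *state, state_buf[16 + 2] __aligned(8);

	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);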

Cc: Martin Willi <martin@strongswan.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Martin Willi <martin@strongswan.org>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 arch/x86/crypto/chacha-ssse3-x86_64.S | 16 ++++++++--------
 arch/x86/crypto/chacha_glue.c         | 17 ++---------------
 include/crypto/chacha.h               |  4 ----
 3 files changed, 10 insertions(+), 27 deletions(-)

--- a/arch/x86/crypto/chacha-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha-ssse3-x86_64.S
@@ -120,10 +120,10 @@ ENTRY(chacha_block_xor_ssse3)
-	movdqa		0x00(%rdi),%xmm0
-	movdqa		0x10(%rdi),%xmm1
-	movdqa		0x20(%rdi),%xmm2
-	movdqa		0x30(%rdi),%xmm3
+	movdqu		0x00(%rdi),%xmm0
+	movdqu		0x10(%rdi),%xmm1
+	movdqu		0x20(%rdi),%xmm2
+	movdqu		0x30(%rdi),%xmm3
@@ -205,10 +205,10 @@ ENTRY(hchacha_block_ssse3)
-	movdqa		0x00(%rdi),%xmm0
-	movdqa		0x10(%rdi),%xmm1
-	movdqa		0x20(%rdi),%xmm2
-	movdqa		0x30(%rdi),%xmm3
+	movdqu		0x00(%rdi),%xmm0
+	movdqu		0x10(%rdi),%xmm1
+	movdqu		0x20(%rdi),%xmm2
+	movdqu		0x30(%rdi),%xmm3
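
For illustration only (this snippet is not part of the patch): in C
intrinsics terms, the change above swaps the aligned 128-bit load, which
compiles to movdqa and faults if the state pointer is not 16-byte aligned,
for the unaligned load, which compiles to movdqu and accepts any address:

	#include <immintrin.h>
	#include <stdint.h>

	/* Load one 16-byte row of the ChaCha state matrix. */
	static inline __m128i load_state_row(const uint32_t *p)
	{
		/* Before: _mm_load_si128()  -> movdqa, needs p % 16 == 0.
		 * After:  _mm_loadu_si128() -> movdqu, any alignment. */
		return _mm_loadu_si128((const __m128i *)p);
	}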

--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
 #include <linux/module.h>
 #include <asm/simd.h>
 
-#define CHACHA_STATE_ALIGN 16
-
 asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
				       unsigned int len, int nrounds);
 asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
@@ -125,8 +123,6 @@ static void chacha_dosimd(u32 *state, u8
 
 void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
 {
-	state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
 	if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) {
 		hchacha_block_generic(state, stream, nrounds);
 	} else {
@@ -139,8 +135,6 @@ EXPORT_SYMBOL(hchacha_block_arch);
 
 void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
 {
-	state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
 	chacha_init_generic(state, key, iv);
 }
 EXPORT_SYMBOL(chacha_init_arch);
@@ -148,8 +142,6 @@ EXPORT_SYMBOL(chacha_init_arch);
 void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
		       int nrounds)
 {
-	state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
 	if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() ||
	    bytes <= CHACHA_BLOCK_SIZE)
		return chacha_crypt_generic(state, dst, src, bytes, nrounds);
@@ -171,15 +163,12 @@ EXPORT_SYMBOL(chacha_crypt_arch);
 static int chacha_simd_stream_xor(struct skcipher_request *req,
				  const struct chacha_ctx *ctx, const u8 *iv)
 {
-	u32 *state, state_buf[16 + 2] __aligned(8);
+	u32 state[CHACHA_STATE_WORDS] __aligned(8);
 	struct skcipher_walk walk;
 	int err;
 
 	err = skcipher_walk_virt(&walk, req, false);
 
-	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
-	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
-
 	chacha_init_generic(state, ctx->key, iv);
 
 	while (walk.nbytes > 0) {
@@ -218,12 +207,10 @@ static int xchacha_simd(struct skcipher_
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-	u32 *state, state_buf[16 + 2] __aligned(8);
+	u32 state[CHACHA_STATE_WORDS] __aligned(8);
 	struct chacha_ctx subctx;
 	u8 real_iv[16];
 
-	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
-	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
 	chacha_init_generic(state, ctx->key, req->iv);
 
 	if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) {
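
Why the old buffer was [16 + 2] words (illustrative arithmetic, not part
of the patch): an __aligned(8) stack buffer starts either exactly on a
16-byte boundary or exactly 8 bytes (two u32s) short of the next one, so
two spare words always give PTR_ALIGN() enough headroom:

	/* addr % 16 == 0  ->  state = state_buf      (no words skipped)  */
	/* addr % 16 == 8  ->  state = state_buf + 2  (two words skipped) */

With unaligned loads this headroom is unnecessary, and the exact 16-word
state array can be declared directly, as the hunks above do.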

--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
 #define CHACHA_BLOCK_SIZE	64
 #define CHACHAPOLY_IV_SIZE	12
 
-#ifdef CONFIG_X86_64
-#define CHACHA_STATE_WORDS	((CHACHA_BLOCK_SIZE + 12) / sizeof(u32))
-#else
 #define CHACHA_STATE_WORDS	(CHACHA_BLOCK_SIZE / sizeof(u32))
-#endif
 
 /* 192-bit nonce, then 64-bit stream position */
 #define XCHACHA_IV_SIZE	32
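
For reference (not part of the patch), the x86_64 state size works out as:

	old: (CHACHA_BLOCK_SIZE + 12) / sizeof(u32) = (64 + 12) / 4 = 19 words
	new:  CHACHA_BLOCK_SIZE / sizeof(u32)       =  64 / 4      = 16 words

The three extra words were padding for the alignment hack; the generic
definition is the exact size of the ChaCha state matrix.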