crypto: arm64/aes-ccm - switch to AES library
author Ard Biesheuvel <ard.biesheuvel@linaro.org>
Tue, 2 Jul 2019 19:41:30 +0000 (21:41 +0200)
committer Herbert Xu <herbert@gondor.apana.org.au>
Fri, 26 Jul 2019 04:56:05 +0000 (14:56 +1000)
The CCM code calls directly into the scalar table-based AES cipher for
arm64 from the fallback path, and since that implementation is known
not to be time invariant, calling it from a time-invariant SIMD cipher
is a bit nasty.

So let's switch to the AES library - this makes the code more robust,
and drops the dependency on the generic AES cipher, allowing us to
omit it entirely in the future.
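
For reference, this is roughly what the fallback's per-block CBC-MAC
update maps onto in terms of the <crypto/aes.h> library interface (a
minimal sketch only; the helper name ccm_mac_block_sketch is
illustrative and not part of the patch):

	#include <crypto/aes.h>		/* aes_expandkey(), aes_encrypt() */
	#include <crypto/algapi.h>	/* crypto_xor() */

	/*
	 * One CBC-MAC block update, as done by the fallback path.
	 * The key schedule in struct crypto_aes_ctx is prepared once via
	 * aes_expandkey(); aes_encrypt() takes no round count argument,
	 * it derives it from ctx->key_length.
	 */
	static void ccm_mac_block_sketch(const struct crypto_aes_ctx *ctx,
					 u8 mac[], const u8 in[])
	{
		aes_encrypt(ctx, mac, mac);
		crypto_xor(mac, in, AES_BLOCK_SIZE);
	}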

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
arch/arm64/crypto/Kconfig
arch/arm64/crypto/aes-ce-ccm-glue.c

diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 1762055e70936ac539e210bd59152376e32973ca..c6032bfb44fb5106f7e68daa650eb3670ce1bb22 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -80,8 +80,8 @@ config CRYPTO_AES_ARM64_CE_CCM
        depends on ARM64 && KERNEL_MODE_NEON
        select CRYPTO_ALGAPI
        select CRYPTO_AES_ARM64_CE
-       select CRYPTO_AES_ARM64
        select CRYPTO_AEAD
+       select CRYPTO_LIB_AES
 
 config CRYPTO_AES_ARM64_CE_BLK
        tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 827e5473e5de7601dc0637755502b70a999df4cf..541cf9165748f31c722b4508121890ddc779f715 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -43,8 +43,6 @@ asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
 asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[],
                                 u32 rounds);
 
-asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
-
 static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
                      unsigned int key_len)
 {
@@ -124,8 +122,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
                }
 
                while (abytes >= AES_BLOCK_SIZE) {
-                       __aes_arm64_encrypt(key->key_enc, mac, mac,
-                                           num_rounds(key));
+                       aes_encrypt(key, mac, mac);
                        crypto_xor(mac, in, AES_BLOCK_SIZE);
 
                        in += AES_BLOCK_SIZE;
@@ -133,8 +130,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
                }
 
                if (abytes > 0) {
-                       __aes_arm64_encrypt(key->key_enc, mac, mac,
-                                           num_rounds(key));
+                       aes_encrypt(key, mac, mac);
                        crypto_xor(mac, in, abytes);
                        *macp = abytes;
                }
@@ -206,10 +202,8 @@ static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
                                bsize = nbytes;
 
                        crypto_inc(walk->iv, AES_BLOCK_SIZE);
-                       __aes_arm64_encrypt(ctx->key_enc, buf, walk->iv,
-                                           num_rounds(ctx));
-                       __aes_arm64_encrypt(ctx->key_enc, mac, mac,
-                                           num_rounds(ctx));
+                       aes_encrypt(ctx, buf, walk->iv);
+                       aes_encrypt(ctx, mac, mac);
                        if (enc)
                                crypto_xor(mac, src, bsize);
                        crypto_xor_cpy(dst, src, buf, bsize);
@@ -224,8 +218,8 @@ static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
        }
 
        if (!err) {
-               __aes_arm64_encrypt(ctx->key_enc, buf, iv0, num_rounds(ctx));
-               __aes_arm64_encrypt(ctx->key_enc, mac, mac, num_rounds(ctx));
+               aes_encrypt(ctx, buf, iv0);
+               aes_encrypt(ctx, mac, mac);
                crypto_xor(mac, buf, AES_BLOCK_SIZE);
        }
        return err;
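
For clarity, the last hunk is the standard CCM (RFC 3610) tag
finalization: the original counter block iv0 (A_0) is encrypted to
obtain S_0, which is XORed into the finally encrypted CBC-MAC. A
standalone sketch under the same library API (the helper name is
illustrative, not part of the patch):

	/*
	 * Sketch of the tag finalization performed in the last hunk:
	 * S_0 = E(K, A_0) is derived from the original counter block iv0
	 * and XORed into the encrypted CBC-MAC, per RFC 3610.
	 */
	static void ccm_final_sketch(const struct crypto_aes_ctx *ctx,
				     u8 mac[], const u8 iv0[])
	{
		u8 s0[AES_BLOCK_SIZE];

		aes_encrypt(ctx, s0, iv0);		/* S_0 = E(K, A_0) */
		aes_encrypt(ctx, mac, mac);		/* final CBC-MAC encryption */
		crypto_xor(mac, s0, AES_BLOCK_SIZE);	/* tag = CBC-MAC ^ S_0 */
	}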