KS1 .req v9
INP0 .req v10
INP1 .req v11
+ HH .req v12
+ XL2 .req v13
+ XM2 .req v14
+ XH2 .req v15
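+ // HH holds the twisted H^2. XL2/XM2/XH2 hold the unreduced Karatsuba
+ // partial products of the second input block and H, so that each pair of
+ // blocks updates the digest as (X ^ B0) * H^2 ^ B1 * H with only a single
+ // modular reduction (2-way aggregation).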
.macro load_round_keys, rounds, rk
cmp \rounds, #12
.endm
.macro pmull_gcm_do_crypt, enc
+ ld1 {HH.2d}, [x4], #16 // load twisted H^2
ld1 {SHASH.2d}, [x4]
ld1 {XL.2d}, [x1]
ldr x8, [x5, #8] // load lower counter
load_round_keys w7, x6
movi MASK.16b, #0xe1
- ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
+ trn1 SHASH2.2d, SHASH.2d, HH.2d
+ trn2 T1.2d, SHASH.2d, HH.2d
CPU_LE( rev x8, x8 )
shl MASK.2d, MASK.2d, #57
- eor SHASH2.16b, SHASH2.16b, SHASH.16b
+ eor SHASH2.16b, SHASH2.16b, T1.16b
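+ // SHASH2.d[0] = SHASH.d[0] ^ SHASH.d[1], SHASH2.d[1] = HH.d[0] ^ HH.d[1]:
+ // the Karatsuba middle-term operands for H and H^2 respectively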
.if \enc == 1
ldr x10, [sp]
ins KS0.d[1], x9 // set lower counter
ins KS1.d[1], x11
- rev64 T1.16b, INP0.16b
+ rev64 T1.16b, INP1.16b
cmp w7, #12
b.ge 2f // AES-192/256?
1: enc_round KS0, v21
-
- ext T2.16b, XL.16b, XL.16b, #8
ext IN1.16b, T1.16b, T1.16b, #8
enc_round KS1, v21
-
- eor T1.16b, T1.16b, T2.16b
- eor XL.16b, XL.16b, IN1.16b
+ pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1
enc_round KS0, v22
-
- pmull2 XH.1q, SHASH.2d, XL.2d // a1 * b1
- eor T1.16b, T1.16b, XL.16b
+ eor T1.16b, T1.16b, IN1.16b
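+ // only the block's own halves are folded here; the running digest is
+ // folded into the first block (INP0) below, ahead of the multiply by H^2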
enc_round KS1, v22
-
- pmull XL.1q, SHASH.1d, XL.1d // a0 * b0
- pmull XM.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
+ pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0
enc_round KS0, v23
-
- ext T1.16b, XL.16b, XH.16b, #8
- eor T2.16b, XL.16b, XH.16b
- eor XM.16b, XM.16b, T1.16b
+ pmull XM2.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
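+ // XL2/XM2/XH2 now hold the unreduced product of INP1 and H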
enc_round KS1, v23
-
- eor XM.16b, XM.16b, T2.16b
- pmull T2.1q, XL.1d, MASK.1d
+ rev64 T1.16b, INP0.16b
+ ext T2.16b, XL.16b, XL.16b, #8
enc_round KS0, v24
-
- mov XH.d[0], XM.d[1]
- mov XM.d[1], XL.d[0]
+ ext IN1.16b, T1.16b, T1.16b, #8
+ eor T1.16b, T1.16b, T2.16b
enc_round KS1, v24
-
- eor XL.16b, XM.16b, T2.16b
+ eor XL.16b, XL.16b, IN1.16b
enc_round KS0, v25
-
- ext T2.16b, XL.16b, XL.16b, #8
+ eor T1.16b, T1.16b, XL.16b
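+ // XL now holds the running digest folded into the (byte-reversed) first
+ // block; it is multiplied by H^2 below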
enc_round KS1, v25
-
- pmull XL.1q, XL.1d, MASK.1d
- eor T2.16b, T2.16b, XH.16b
+ pmull2 XH.1q, HH.2d, XL.2d // a1 * b1
enc_round KS0, v26
-
- eor XL.16b, XL.16b, T2.16b
- rev64 T1.16b, INP1.16b
+ pmull XL.1q, HH.1d, XL.1d // a0 * b0
enc_round KS1, v26
-
- ext T2.16b, XL.16b, XL.16b, #8
- ext IN1.16b, T1.16b, T1.16b, #8
+ pmull2 XM.1q, SHASH2.2d, T1.2d // (a1 + a0)(b1 + b0)
enc_round KS0, v27
-
- eor T1.16b, T1.16b, T2.16b
- eor XL.16b, XL.16b, IN1.16b
+ eor XL.16b, XL.16b, XL2.16b
+ eor XH.16b, XH.16b, XH2.16b
enc_round KS1, v27
-
- pmull2 XH.1q, SHASH.2d, XL.2d // a1 * b1
- eor T1.16b, T1.16b, XL.16b
+ eor XM.16b, XM.16b, XM2.16b
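+ // the partial products of both blocks are now combined into XL/XM/XH;
+ // the reduction below runs only once per two blocks of input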
+ ext T1.16b, XL.16b, XH.16b, #8
enc_round KS0, v28
-
- pmull XL.1q, SHASH.1d, XL.1d // a0 * b0
- pmull XM.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
-
- enc_round KS1, v28
-
- ext T1.16b, XL.16b, XH.16b, #8
eor T2.16b, XL.16b, XH.16b
eor XM.16b, XM.16b, T1.16b
- enc_round KS0, v29
-
+ enc_round KS1, v28
eor XM.16b, XM.16b, T2.16b
+
+ enc_round KS0, v29
pmull T2.1q, XL.1d, MASK.1d
enc_round KS1, v29
-
mov XH.d[0], XM.d[1]
mov XM.d[1], XL.d[0]
aese KS0.16b, v30.16b
-
eor XL.16b, XM.16b, T2.16b
aese KS1.16b, v30.16b
-
ext T2.16b, XL.16b, XL.16b, #8
eor KS0.16b, KS0.16b, v31.16b
-
pmull XL.1q, XL.1d, MASK.1d
eor T2.16b, T2.16b, XH.16b
eor KS1.16b, KS1.16b, v31.16b
-
eor XL.16b, XL.16b, T2.16b
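+ // XL now holds the updated running digest covering both input blocks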
.if \enc == 0
struct gcm_aes_ctx {
struct crypto_aes_ctx aes_key;
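+ /* twisted H^2; keep directly before ghash_key, since the asm loads both
+ * with two consecutive 16-byte loads starting at this address */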
+ u64 h2[2];
struct ghash_key ghash_key;
};
const char *head);
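+ /* 'k' points at gcm_aes_ctx::h2, i.e. the twisted H^2 directly followed
+ * by the twisted H of the ghash_key */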
asmlinkage void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[],
- const u8 src[], struct ghash_key const *k,
- u8 ctr[], u32 const rk[], int rounds,
- u8 ks[]);
+ const u8 src[], u64 const *k, u8 ctr[],
+ u32 const rk[], int rounds, u8 ks[]);
asmlinkage void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[],
- const u8 src[], struct ghash_key const *k,
+ const u8 src[], u64 const *k,
u8 ctr[], u32 const rk[], int rounds);
asmlinkage void pmull_gcm_encrypt_block(u8 dst[], u8 const src[],
unsigned int keylen)
{
struct gcm_aes_ctx *ctx = crypto_aead_ctx(tfm);
- u8 key[GHASH_BLOCK_SIZE];
+ be128 h1, h2;
+ u8 *key = (u8 *)&h1;
int ret;
ret = crypto_aes_expand_key(&ctx->aes_key, inkey, keylen);
__aes_arm64_encrypt(ctx->aes_key.key_enc, key, (u8[AES_BLOCK_SIZE]){},
num_rounds(&ctx->aes_key));
- return __ghash_setkey(&ctx->ghash_key, key, sizeof(key));
+ __ghash_setkey(&ctx->ghash_key, key, sizeof(be128));
+
+ /* calculate H^2 (used for 2-way aggregation) */
+ h2 = h1;
+ gf128mul_lle(&h2, &h1);
+
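+ /*
+ * store H^2 in the same 'twisted' format that __ghash_setkey() applies
+ * to H itself: multiply by x in GF(2^128) and fold in the reduction
+ * polynomial if the top bit was set
+ */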
+ ctx->h2[0] = (be64_to_cpu(h2.b) << 1) | (be64_to_cpu(h2.a) >> 63);
+ ctx->h2[1] = (be64_to_cpu(h2.a) << 1) | (be64_to_cpu(h2.b) >> 63);
+
+ if (be64_to_cpu(h2.a) >> 63)
+ ctx->h2[1] ^= 0xc200000000000000UL;
+
+ return 0;
}
static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
kernel_neon_begin();
pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
- walk.src.virt.addr, &ctx->ghash_key,
- iv, ctx->aes_key.key_enc, nrounds,
- ks);
+ walk.src.virt.addr, ctx->h2, iv,
+ ctx->aes_key.key_enc, nrounds, ks);
kernel_neon_end();
err = skcipher_walk_done(&walk,
kernel_neon_begin();
pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr,
- walk.src.virt.addr, &ctx->ghash_key,
- iv, ctx->aes_key.key_enc, nrounds);
+ walk.src.virt.addr, ctx->h2, iv,
+ ctx->aes_key.key_enc, nrounds);
kernel_neon_end();
err = skcipher_walk_done(&walk,