selftests: bpf: add tests for shifts by zero
author    Luke Nelson <lukenels@cs.washington.edu>
          Sat, 29 Jun 2019 05:57:51 +0000 (22:57 -0700)
committer Daniel Borkmann <daniel@iogearbox.net>
          Wed, 3 Jul 2019 09:14:28 +0000 (11:14 +0200)
There are currently no tests for ALU64 shift operations when the shift
amount is 0. This adds six new tests to make sure such shifts are
equivalent to a no-op. The x32 JIT had bugs of this kind that would
have been caught by these tests.
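
For context, a 32-bit JIT has to synthesize each 64-bit shift from two
32-bit register halves. The sketch below is illustrative only (it is
not taken from the x32 JIT); it shows why the zero-shift case needs
explicit handling rather than falling through to the generic sub-32
path, which is exactly the edge these tests pin down:

  #include <stdint.h>

  /* Illustrative emulation of a 64-bit left shift on a 32-bit
   * machine. The naive n < 32 path computes lo >> (32 - n); for
   * n == 0 that is a shift by 32, which is undefined behavior in C
   * and acts as a shift by 0 on x86 (the count is masked to 5 bits),
   * so the low half would be OR'ed into the high half.
   */
  static uint64_t lsh64_pair(uint32_t lo, uint32_t hi, unsigned int n)
  {
          uint32_t new_lo, new_hi;

          if (n == 0) {            /* shift by zero must be a no-op */
                  new_lo = lo;
                  new_hi = hi;
          } else if (n < 32) {
                  new_lo = lo << n;
                  new_hi = (hi << n) | (lo >> (32 - n));
          } else {                 /* n in [32, 63] */
                  new_lo = 0;
                  new_hi = lo << (n - 32);
          }
          return ((uint64_t)new_hi << 32) | new_lo;
  }

The six tests cover LSH, RSH, and ARSH with both immediate and
register shift amounts, so both JIT code paths are exercised.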

Cc: Xi Wang <xi.wang@gmail.com>
Signed-off-by: Luke Nelson <luke.r.nels@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
tools/testing/selftests/bpf/verifier/basic_instr.c

index ed91a7b9a4566b90c4ef99193411b5e3a3bc3185..071dbc889e8c6fbffc8a7c9e80edc81f0233d72c 100644
        },
        .result = ACCEPT,
 },
+{
+       "lsh64 by 0 imm",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 1),
+       BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "rsh64 by 0 imm",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+       BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 0),
+       BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "arsh64 by 0 imm",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+       BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 0),
+       BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "lsh64 by 0 reg",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 1),
+       BPF_LD_IMM64(BPF_REG_2, 0),
+       BPF_ALU64_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "rsh64 by 0 reg",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+       BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+       BPF_LD_IMM64(BPF_REG_3, 0),
+       BPF_ALU64_REG(BPF_RSH, BPF_REG_1, BPF_REG_3),
+       BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "arsh64 by 0 reg",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+       BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+       BPF_LD_IMM64(BPF_REG_3, 0),
+       BPF_ALU64_REG(BPF_ARSH, BPF_REG_1, BPF_REG_3),
+       BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
 {
        "invalid 64-bit BPF_END",
        .insns = {