bpf, riscv: clear high 32 bits for ALU32 add/sub/neg/lsh/rsh/arsh
author Luke Nelson <luke.r.nels@gmail.com>
Thu, 30 May 2019 22:29:22 +0000 (15:29 -0700)
committer Alexei Starovoitov <ast@kernel.org>
Sat, 1 Jun 2019 00:08:53 +0000 (17:08 -0700)
In BPF, 32-bit ALU operations should zero-extend their results into
the 64-bit registers.

The current BPF JIT on RISC-V incorrectly emits sign-extending
instructions (e.g., addw, subw) for 32-bit add, sub, lsh, rsh, arsh,
and neg, leaving the upper 32 bits of the destination register
sign-extended rather than cleared. This behavior diverges from the
interpreter and from the JITs for other architectures.

This patch fixes the bugs by performing zero extension on the destination
register of 32-bit ALU operations.
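
With the fix, the 32-bit BPF_ADD case, for example, ends up emitting a
sequence along these lines (a sketch; emit_zext_32() is the existing
helper in this file, which zero-extends via a shift pair):

	addw	rd, rd, rs	/* 32-bit add, sign-extends bit 31 upward */
	slli	rd, rd, 32	/* emit_zext_32(rd): drop bits 63:32 ...   */
	srli	rd, rd, 32	/* ... and refill them with zeroes         */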

Fixes: 2353ecc6f91f ("bpf, riscv: add BPF JIT for RV64G")
Cc: Xi Wang <xi.wang@gmail.com>
Signed-off-by: Luke Nelson <luke.r.nels@gmail.com>
Acked-by: Song Liu <songliubraving@fb.com>
Acked-by: Björn Töpel <bjorn.topel@gmail.com>
Reviewed-by: Palmer Dabbelt <palmer@sifive.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
arch/riscv/net/bpf_jit_comp.c

index e5c8d675bd6ebf2ddaf2454897956f2e1e1b3750..426d5c33ea9037e10fd7706dddb8ac18d5649c46 100644
@@ -751,10 +751,14 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        case BPF_ALU | BPF_ADD | BPF_X:
        case BPF_ALU64 | BPF_ADD | BPF_X:
                emit(is64 ? rv_add(rd, rd, rs) : rv_addw(rd, rd, rs), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_SUB | BPF_X:
        case BPF_ALU64 | BPF_SUB | BPF_X:
                emit(is64 ? rv_sub(rd, rd, rs) : rv_subw(rd, rd, rs), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_AND | BPF_X:
        case BPF_ALU64 | BPF_AND | BPF_X:
@@ -795,14 +799,20 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        case BPF_ALU | BPF_LSH | BPF_X:
        case BPF_ALU64 | BPF_LSH | BPF_X:
                emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_RSH | BPF_X:
        case BPF_ALU64 | BPF_RSH | BPF_X:
                emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_ARSH | BPF_X:
        case BPF_ALU64 | BPF_ARSH | BPF_X:
                emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
 
        /* dst = -dst */
@@ -810,6 +820,8 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        case BPF_ALU64 | BPF_NEG:
                emit(is64 ? rv_sub(rd, RV_REG_ZERO, rd) :
                     rv_subw(rd, RV_REG_ZERO, rd), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
 
        /* dst = BSWAP##imm(dst) */
@@ -964,14 +976,20 @@ out_be:
        case BPF_ALU | BPF_LSH | BPF_K:
        case BPF_ALU64 | BPF_LSH | BPF_K:
                emit(is64 ? rv_slli(rd, rd, imm) : rv_slliw(rd, rd, imm), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_RSH | BPF_K:
        case BPF_ALU64 | BPF_RSH | BPF_K:
                emit(is64 ? rv_srli(rd, rd, imm) : rv_srliw(rd, rd, imm), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_ARSH | BPF_K:
        case BPF_ALU64 | BPF_ARSH | BPF_K:
                emit(is64 ? rv_srai(rd, rd, imm) : rv_sraiw(rd, rd, imm), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
 
        /* JUMP off */