}
}
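+/* Load a 64-bit immediate into the destination register pair; if the
+ * destination is stacked, the value is built in the temporary registers
+ * and spilled back by arm_bpf_put_reg64().
+ */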
+static void emit_a32_mov_i64(const s8 dst[], u64 val, struct jit_ctx *ctx)
+{
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
+
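+ /* rd[1] receives the low 32 bits of val, rd[0] the high 32 bits */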
+ emit_mov_i(rd[1], (u32)val, ctx);
+ emit_mov_i(rd[0], val >> 32, ctx);
+
+ arm_bpf_put_reg64(dst, rd, ctx);
+}
+
/* Sign extended move */
-static inline void emit_a32_mov_i64(const bool is64, const s8 dst[],
- const u32 val, struct jit_ctx *ctx) {
+static inline void emit_a32_mov_se_i64(const bool is64, const s8 dst[],
+ const u32 val, struct jit_ctx *ctx) {
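+ /* Sign-extend the 32-bit immediate into the high word when is64 is set */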
u32 hi = 0;
if (is64 && (val & (1<<31)))
break;
case BPF_K:
/* Sign-extend immediate value to destination reg */
- emit_a32_mov_i64(is64, dst, imm, ctx);
+ emit_a32_mov_se_i64(is64, dst, imm, ctx);
break;
}
break;
* value into temporary reg and then it would be
* safe to do the operation on it.
*/
- emit_a32_mov_i64(is64, tmp2, imm, ctx);
+ emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
emit_a32_alu_r64(is64, dst, tmp2, ctx, BPF_OP(code));
break;
}
* reg then it would be safe to do the operation
* on it.
*/
- emit_a32_mov_i64(is64, tmp2, imm, ctx);
+ emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
emit_a32_mul_r64(dst, tmp2, ctx);
break;
}
/* dst = imm64 */
case BPF_LD | BPF_IMM | BPF_DW:
{
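+ /* insn[1].imm carries the upper 32 bits of the 64-bit immediate */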
- const struct bpf_insn insn1 = insn[1];
- u32 hi, lo = imm;
- hi = insn1.imm;
- emit_a32_mov_i(dst_lo, lo, ctx);
- emit_a32_mov_i(dst_hi, hi, ctx);
+ u64 val = (u32)imm | (u64)insn[1].imm << 32;
+ emit_a32_mov_i64(dst, val, ctx);
return 1;
}
switch (BPF_SIZE(code)) {
case BPF_DW:
/* Sign-extend immediate value into temp reg */
- emit_a32_mov_i64(true, tmp2, imm, ctx);
+ emit_a32_mov_se_i64(true, tmp2, imm, ctx);
emit_str_r(dst_lo, tmp2[1], off, ctx, BPF_W);
emit_str_r(dst_lo, tmp2[0], off+4, ctx, BPF_W);
break;
rm = tmp2[0];
rn = tmp2[1];
/* Sign-extend immediate value */
- emit_a32_mov_i64(true, tmp2, imm, ctx);
+ emit_a32_mov_se_i64(true, tmp2, imm, ctx);
go_jmp:
/* Setup destination register */
rd = arm_bpf_get_reg64(dst, tmp, ctx);