arm64: KVM: Consistently advance singlestep when emulating instructions
author    Mark Rutland <mark.rutland@arm.com>
          Fri, 9 Nov 2018 15:07:11 +0000 (15:07 +0000)
committer Marc Zyngier <marc.zyngier@arm.com>
          Tue, 18 Dec 2018 14:11:37 +0000 (14:11 +0000)
When we emulate a guest instruction, we don't advance the hardware
singlestep state machine, and thus the guest will receive a software
step exception only after the next instruction that is not emulated by
the host.

We bodge around this in an ad-hoc fashion. Sometimes we explicitly check
whether userspace requested a single step, and fake a debug exception
from within the kernel. Other times, we advance the HW singlestep state
machine and rely on the HW to generate the exception for us. Thus, the
observed step behaviour differs for host-emulated and guest-executed
instructions.

Let's make this simpler and consistent by always advancing the HW
singlestep state machine when we skip an instruction. This way we can
rely on the hardware to generate the singlestep exception for us, and
never need to explicitly check for an active-pending step, nor fake a
debug exception from the guest.

Cc: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
arch/arm/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/debug.c
arch/arm64/kvm/handle_exit.c
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
virt/kvm/arm/arm.c
virt/kvm/arm/hyp/vgic-v3-sr.c

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 5ca5d9af0c26eade774f336aa7159b55ec06d5cd..c5634c6ffcea9808475a5544ba67901c7eae04bf 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -296,11 +296,6 @@ static inline void kvm_arm_init_debug(void) {}
 static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
-static inline bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu,
-                                            struct kvm_run *run)
-{
-       return false;
-}
 
 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 21247870def7bf77d34ce403255b9d73c71542f1..506386a3eddecd2e12decd7a19ab203932e2293c 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -24,6 +24,7 @@
 
 #include <linux/kvm_host.h>
 
+#include <asm/debug-monitors.h>
 #include <asm/esr.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_hyp.h>
@@ -147,14 +148,6 @@ static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
        return true;
 }
 
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
-{
-       if (vcpu_mode_is_32bit(vcpu))
-               kvm_skip_instr32(vcpu, is_wide_instr);
-       else
-               *vcpu_pc(vcpu) += 4;
-}
-
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 {
        *vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
@@ -424,4 +417,30 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
        return data;            /* Leave LE untouched */
 }
 
+static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+       if (vcpu_mode_is_32bit(vcpu))
+               kvm_skip_instr32(vcpu, is_wide_instr);
+       else
+               *vcpu_pc(vcpu) += 4;
+
+       /* advance the singlestep state machine */
+       *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
+}
+
+/*
+ * Skip an instruction which has been emulated at hyp while most guest sysregs
+ * are live.
+ */
+static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
+{
+       *vcpu_pc(vcpu) = read_sysreg_el2(elr);
+       vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
+
+       kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
+       write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
+       write_sysreg_el2(*vcpu_pc(vcpu), elr);
+}
+
 #endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 52fbc823ff8c7f52dcd924fe94bb0be603150e1f..7a5035f9c5c3b0a5beade49a5f2cf2775ca24467 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -445,7 +445,6 @@ void kvm_arm_init_debug(void);
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
-bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
 int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index 00d422336a45225ea8f9b16c6a99ebf042ebf68c..f39801e4136cd0e27c3ba718a461d476de805992 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -236,24 +236,3 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
                }
        }
 }
-
-
-/*
- * After successfully emulating an instruction, we might want to
- * return to user space with a KVM_EXIT_DEBUG. We can only do this
- * once the emulation is complete, though, so for userspace emulations
- * we have to wait until we have re-entered KVM before calling this
- * helper.
- *
- * Return true (and set exit_reason) to return to userspace or false
- * if no further action is required.
- */
-bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-               run->exit_reason = KVM_EXIT_DEBUG;
-               run->debug.arch.hsr = ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT;
-               return true;
-       }
-       return false;
-}
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 35a81bebd02bcf30738b3121fc980e663097c17d..b0643f9c48736965f87953dca3385656f397cfd6 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -229,13 +229,6 @@ static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
                handled = exit_handler(vcpu, run);
        }
 
-       /*
-        * kvm_arm_handle_step_debug() sets the exit_reason on the kvm_run
-        * structure if we need to return to userspace.
-        */
-       if (handled > 0 && kvm_arm_handle_step_debug(vcpu, run))
-               handled = 0;
-
        return handled;
 }
 
@@ -269,12 +262,7 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
        case ARM_EXCEPTION_IRQ:
                return 1;
        case ARM_EXCEPTION_EL1_SERROR:
-               /* We may still need to return for single-step */
-               if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
-                       && kvm_arm_handle_step_debug(vcpu, run))
-                       return 0;
-               else
-                       return 1;
+               return 1;
        case ARM_EXCEPTION_TRAP:
                return handle_trap_exceptions(vcpu, run);
        case ARM_EXCEPTION_HYP_GONE:
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 7cc175c88a37e229d5f410b0d04886b35c4e5e97..4282f05771c16b1cf17233e33402e99bccbccf4c 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -305,33 +305,6 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
        return true;
 }
 
-/* Skip an instruction which has been emulated. Returns true if
- * execution can continue or false if we need to exit hyp mode because
- * single-step was in effect.
- */
-static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
-{
-       *vcpu_pc(vcpu) = read_sysreg_el2(elr);
-
-       if (vcpu_mode_is_32bit(vcpu)) {
-               vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
-               kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
-               write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
-       } else {
-               *vcpu_pc(vcpu) += 4;
-       }
-
-       write_sysreg_el2(*vcpu_pc(vcpu), elr);
-
-       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-               vcpu->arch.fault.esr_el2 =
-                       (ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
-               return false;
-       } else {
-               return true;
-       }
-}
-
 static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu)
 {
        struct user_fpsimd_state *host_fpsimd = vcpu->arch.host_fpsimd_state;
@@ -420,20 +393,12 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
                if (valid) {
                        int ret = __vgic_v2_perform_cpuif_access(vcpu);
 
-                       if (ret ==  1 && __skip_instr(vcpu))
+                       if (ret == 1)
                                return true;
 
-                       if (ret == -1) {
-                               /* Promote an illegal access to an
-                                * SError. If we would be returning
-                                * due to single-step clear the SS
-                                * bit so handle_exit knows what to
-                                * do after dealing with the error.
-                                */
-                               if (!__skip_instr(vcpu))
-                                       *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
+                       /* Promote an illegal access to an SError. */
+                       if (ret == -1)
                                *exit_code = ARM_EXCEPTION_EL1_SERROR;
-                       }
 
                        goto exit;
                }
@@ -444,7 +409,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
             kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
                int ret = __vgic_v3_perform_cpuif_access(vcpu);
 
-               if (ret == 1 && __skip_instr(vcpu))
+               if (ret == 1)
                        return true;
        }
 
diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
index 215c7c0eb3b00c8fb165dea0d1aebc5220780a93..9cbdd034a5637da0a3845ffa25a9b88a9b08544e 100644
--- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
+++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
@@ -41,7 +41,7 @@ static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
  * Returns:
  *  1: GICV access successfully performed
  *  0: Not a GICV access
- * -1: Illegal GICV access
+ * -1: Illegal GICV access successfully performed
  */
 int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
 {
@@ -61,12 +61,16 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
                return 0;
 
        /* Reject anything but a 32bit access */
-       if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32))
+       if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32)) {
+               __kvm_skip_instr(vcpu);
                return -1;
+       }
 
        /* Not aligned? Don't bother */
-       if (fault_ipa & 3)
+       if (fault_ipa & 3) {
+               __kvm_skip_instr(vcpu);
                return -1;
+       }
 
        rd = kvm_vcpu_dabt_get_rd(vcpu);
        addr  = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va;
@@ -88,5 +92,7 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
                vcpu_set_reg(vcpu, rd, data);
        }
 
+       __kvm_skip_instr(vcpu);
+
        return 1;
 }
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 23774970c9df66fb771210df9374bf10c0abbffa..4adcee5fc12619bfbd372577cb9c6d5468386f67 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -674,8 +674,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                ret = kvm_handle_mmio_return(vcpu, vcpu->run);
                if (ret)
                        return ret;
-               if (kvm_arm_handle_step_debug(vcpu, vcpu->run))
-                       return 0;
        }
 
        if (run->immediate_exit)
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 616e5a433ab0f76adfb0a12e0f9f436912352f38..9652c453480f55eda8b76515aa5501d11674fd1d 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -1012,8 +1012,10 @@ int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
 
        esr = kvm_vcpu_get_hsr(vcpu);
        if (vcpu_mode_is_32bit(vcpu)) {
-               if (!kvm_condition_valid(vcpu))
+               if (!kvm_condition_valid(vcpu)) {
+                       __kvm_skip_instr(vcpu);
                        return 1;
+               }
 
                sysreg = esr_cp15_to_sysreg(esr);
        } else {
@@ -1123,6 +1125,8 @@ int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
        rt = kvm_vcpu_sys_get_rt(vcpu);
        fn(vcpu, vmcr, rt);
 
+       __kvm_skip_instr(vcpu);
+
        return 1;
 }