KVM: Fix leaking vCPU's VMCS value into other pCPU
author     Wanpeng Li <wanpengli@tencent.com>
           Mon, 5 Aug 2019 02:03:19 +0000 (10:03 +0800)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Mon, 5 Aug 2019 10:55:47 +0000 (12:55 +0200)
After commit d73eb57b80b (KVM: Boost vCPUs that are delivering interrupts),
a five-year-old bug was exposed. Running the ebizzy benchmark in three
80-vCPU VMs on one 80-pCPU Skylake server triggers a lot of rcu_sched
stall warnings in the VMs after stress testing:

 INFO: rcu_sched detected stalls on CPUs/tasks: { 4 41 57 62 77} (detected by 15, t=60004 jiffies, g=899, c=898, q=15073)
 Call Trace:
   flush_tlb_mm_range+0x68/0x140
   tlb_flush_mmu.part.75+0x37/0xe0
   tlb_finish_mmu+0x55/0x60
   zap_page_range+0x142/0x190
   SyS_madvise+0x3cd/0x9c0
   system_call_fastpath+0x1c/0x21

swait_active() remains true until finish_swait() is called in
kvm_vcpu_block(), and, after the commit above, voluntarily preempted
vCPUs are also taken into account by the kvm_vcpu_on_spin() loop. This
greatly increases the probability that kvm_arch_vcpu_runnable(vcpu) is
evaluated for a yield candidate and returns true. When APICv is
enabled, that evaluation leaks the yield-candidate vCPU's VMCS RVI
field (via vmx_sync_pir_to_irr()) into the current VMCS of the vCPU
that is spinning on a taken lock.
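
For clarity, a condensed sketch of the offending path (reconstructed
from the kernel sources of that era; intermediate helpers abbreviated,
not part of this patch):

 /*
  * vCPU A spins on a taken lock, so A's VMCS is the one loaded on this
  * pCPU; B is the yield candidate being probed without a vcpu_load():
  *
  *   kvm_vcpu_on_spin(A)
  *     -> kvm_arch_vcpu_runnable(B)
  *          -> kvm_vcpu_has_events(B)
  *               -> kvm_cpu_has_interrupt(B)
  *                    -> kvm_apic_has_interrupt(B)
  *                         -> kvm_x86_ops->sync_pir_to_irr(B)   (vmx_sync_pir_to_irr())
  *                              -> vmx_hwapic_irr_update() -> vmx_set_rvi()
  *                                   -> vmcs_write16(GUEST_INTR_STATUS, ...)
  *
  * The final vmcs_write16() updates RVI in whichever VMCS is currently
  * loaded on this pCPU, i.e. A's VMCS rather than B's.
  */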

This patch fixes it by conservatively checking only a subset of events.
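
The subset consists only of state that can be read from memory: the
pv_unhalted flag, pending NMI/SMI/EVENT requests, and, for APICv, the
"ON" bit of the posted-interrupt descriptor via the new
dy_apicv_has_pending_interrupt() hook; none of these touches a VMCS.
For reference, the VMX-side bit test used below is roughly the
following helper from arch/x86/kvm/vmx/vmx.h of that era:

 static inline int pi_test_on(struct pi_desc *pi_desc)
 {
         /* Plain test_bit() on the in-memory PI descriptor, no VMCS access. */
         return test_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
 }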

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Marc Zyngier <Marc.Zyngier@arm.com>
Cc: stable@vger.kernel.org
Fixes: 98f4a1467 (KVM: add kvm_arch_vcpu_runnable() test to kvm_vcpu_on_spin() loop)
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/powerpc/kvm/powerpc.c
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c

arch/powerpc/kvm/powerpc.c
index 0dba7eb24f92072616460ae7cee6f90205025917..3e34d5fa6708a2ffb44080f915b4ecab351447c6 100644 (file)
@@ -50,6 +50,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
        return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
 }
 
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+       return kvm_arch_vcpu_runnable(vcpu);
+}
+
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
        return false;
arch/x86/include/asm/kvm_host.h
index e74f0711eaaf34e2bef60e5225be23ebbd1f5e08..fc046ca89d32014b23bdfea1a016788c2283a420 100644 (file)
@@ -1175,6 +1175,7 @@ struct kvm_x86_ops {
        int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
                              uint32_t guest_irq, bool set);
        void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
+       bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
 
        int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
                            bool *expired);
arch/x86/kvm/svm.c
index 7eafc6907861e6d0f6b9ccfac1b2b6071238cd82..d685491fce4d1091f0b0bf0382cfa5d0fe77796e 100644 (file)
@@ -5190,6 +5190,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
                kvm_vcpu_wake_up(vcpu);
 }
 
+static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+
 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
 {
        unsigned long flags;
@@ -7314,6 +7319,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 
        .pmu_ops = &amd_pmu_ops,
        .deliver_posted_interrupt = svm_deliver_avic_intr,
+       .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
        .update_pi_irte = svm_update_pi_irte,
        .setup_mce = svm_setup_mce,
 
arch/x86/kvm/vmx/vmx.c
index 074385c86c094be7c79606392dd840118331b9ef..42ed3faa6af8364733b4900895e4b503934538ec 100644 (file)
@@ -6117,6 +6117,11 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
        return max_irr;
 }
 
+static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+       return pi_test_on(vcpu_to_pi_desc(vcpu));
+}
+
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
        if (!kvm_vcpu_apicv_active(vcpu))
@@ -7726,6 +7731,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
        .sync_pir_to_irr = vmx_sync_pir_to_irr,
        .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
+       .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
 
        .set_tss_addr = vmx_set_tss_addr,
        .set_identity_map_addr = vmx_set_identity_map_addr,
arch/x86/kvm/x86.c
index c6d951cbd76ce794866495b8658b37c751421f7e..93b0bd45ac738f6fefea3c7a834758d2e5d91340 100644 (file)
@@ -9698,6 +9698,22 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
        return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+       if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
+               return true;
+
+       if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+               kvm_test_request(KVM_REQ_SMI, vcpu) ||
+                kvm_test_request(KVM_REQ_EVENT, vcpu))
+               return true;
+
+       if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
+               return true;
+
+       return false;
+}
+
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.preempted_in_kernel;
include/linux/kvm_host.h
index 5c5b5867024cd80b69b96f29721faa6871c64dc5..9e4c2bb90297fe84019a4f71ff5ccf5975fa2fc1 100644 (file)
@@ -872,6 +872,7 @@ int kvm_arch_check_processor_compat(void);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
 
 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
 /*
virt/kvm/kvm_main.c
index ed061d8a457c8b84f64b14cc55faabaa044f6e7c..1f05aeb9da276cb66e20a9376cfca5446951b771 100644 (file)
@@ -2477,6 +2477,29 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 #endif
 }
 
+/*
+ * Unlike kvm_arch_vcpu_runnable, this function is called outside
+ * a vcpu_load/vcpu_put pair.  However, for most architectures
+ * kvm_arch_vcpu_runnable does not require vcpu_load.
+ */
+bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+       return kvm_arch_vcpu_runnable(vcpu);
+}
+
+static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
+{
+       if (kvm_arch_dy_runnable(vcpu))
+               return true;
+
+#ifdef CONFIG_KVM_ASYNC_PF
+       if (!list_empty_careful(&vcpu->async_pf.done))
+               return true;
+#endif
+
+       return false;
+}
+
 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 {
        struct kvm *kvm = me->kvm;
@@ -2506,7 +2529,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
                                continue;
                        if (vcpu == me)
                                continue;
-                       if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
+                       if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
                                continue;
                        if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
                                !kvm_arch_vcpu_in_kernel(vcpu))