KVM: x86: hyperv: keep track of mismatched VP indexes
author: Vitaly Kuznetsov <vkuznets@redhat.com>
Wed, 26 Sep 2018 17:02:56 +0000 (19:02 +0200)
committer: Paolo Bonzini <pbonzini@redhat.com>
Tue, 16 Oct 2018 22:29:45 +0000 (00:29 +0200)
In most common cases VP index of a vcpu matches its vcpu index. Userspace
is, however, free to set any mapping it wishes and we need to account for
that when we need to find a vCPU with a particular VP index. To keep search
algorithms optimal in both cases introduce 'num_mismatched_vp_indexes'
counter showing how many vCPUs with mismatching VP index we have. In case
the counter is zero we can assume vp_index == vcpu_idx.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/hyperv.c

index ed4d7848ebf27a80f609fd1f7276d307c9a211f1..d81f536d024a4176345bb47f228e293f29c4a743 100644 (file)
@@ -790,6 +790,9 @@ struct kvm_hv {
        u64 hv_reenlightenment_control;
        u64 hv_tsc_emulation_control;
        u64 hv_tsc_emulation_status;
+
+       /* How many vCPUs have VP index != vCPU index */
+       atomic_t num_mismatched_vp_indexes;
 };
 
 enum kvm_irqchip_mode {
index c8764faf783bab544971f002dbdab15a0d5b48c7..f94dedd7ae6d95c1c7d0015e201e082d20675024 100644 (file)
@@ -1045,11 +1045,31 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
        struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
 
        switch (msr) {
-       case HV_X64_MSR_VP_INDEX:
-               if (!host || (u32)data >= KVM_MAX_VCPUS)
+       case HV_X64_MSR_VP_INDEX: {
+               struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+               int vcpu_idx = kvm_vcpu_get_idx(vcpu);
+               u32 new_vp_index = (u32)data;
+
+               if (!host || new_vp_index >= KVM_MAX_VCPUS)
                        return 1;
-               hv_vcpu->vp_index = (u32)data;
+
+               if (new_vp_index == hv_vcpu->vp_index)
+                       return 0;
+
+               /*
+                * The VP index is initialized to vcpu_index by
+                * kvm_hv_vcpu_postcreate so they initially match.  Now the
+                * VP index is changing, adjust num_mismatched_vp_indexes if
+                * it now matches or no longer matches vcpu_idx.
+                */
+               if (hv_vcpu->vp_index == vcpu_idx)
+                       atomic_inc(&hv->num_mismatched_vp_indexes);
+               else if (new_vp_index == vcpu_idx)
+                       atomic_dec(&hv->num_mismatched_vp_indexes);
+
+               hv_vcpu->vp_index = new_vp_index;
                break;
+       }
        case HV_X64_MSR_VP_ASSIST_PAGE: {
                u64 gfn;
                unsigned long addr;