KVM: x86: avoid incorrect writes to host MSR_IA32_SPEC_CTRL
Author:     Paolo Bonzini <pbonzini@redhat.com>
AuthorDate: Mon, 20 Jan 2020 15:33:06 +0000 (16:33 +0100)
Committer:  Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Fri, 24 Jan 2020 08:18:47 +0000 (09:18 +0100)
If the guest is configured to have SPEC_CTRL but the host does not
(a nonsensical configuration, but one that is not explicitly
forbidden) then a host-initiated MSR write can write vmx->spec_ctrl
(respectively svm->spec_ctrl) and trigger a #GP when KVM tries to
restore the host value of the MSR.  Add a more comprehensive check
for valid bits of SPEC_CTRL, covering host CPUID flags and,
while we are at it and it is more correct that way, guest CPUID
flags too.

For AMD, remove the unnecessary is_guest_mode check around setting
the MSR interception bitmap, so that the code looks the same as
for Intel.

Cc: Jim Mattson <jmattson@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h

index b7c5369c79989029bb2d20d9a8b0daa328578c11..235a7e51de96272e271261ce8bf88289312a2a69 100644 (file)
@@ -4324,12 +4324,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
                        return 1;
 
-               /* The STIBP bit doesn't fault even if it's not advertised */
-               if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
+               if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
                        return 1;
 
                svm->spec_ctrl = data;
-
                if (!data)
                        break;
 
@@ -4353,13 +4351,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 
                if (data & ~PRED_CMD_IBPB)
                        return 1;
-
+               if (!boot_cpu_has(X86_FEATURE_AMD_IBPB))
+                       return 1;
                if (!data)
                        break;
 
                wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
-               if (is_guest_mode(vcpu))
-                       break;
                set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
                break;
        case MSR_AMD64_VIRT_SPEC_CTRL:
index bdbf27e92851d0956be24408f9c11ab07748815e..112d2314231df7872ddd590bf83b06902f35c8e9 100644 (file)
@@ -1998,12 +1998,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
                        return 1;
 
-               /* The STIBP bit doesn't fault even if it's not advertised */
-               if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
+               if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
                        return 1;
 
                vmx->spec_ctrl = data;
-
                if (!data)
                        break;
 
@@ -2037,7 +2035,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
                if (data & ~PRED_CMD_IBPB)
                        return 1;
-
+               if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+                       return 1;
                if (!data)
                        break;
 
index 9f24f5d168545cb508f75e6aa51ab77beec12125..b690c0d707931495ee86c801c23c40781f98af96 100644 (file)
@@ -10389,6 +10389,28 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
 
+u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu)
+{
+       uint64_t bits = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;
+
+       /* The STIBP bit doesn't fault even if it's not advertised */
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+           !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
+               bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
+       if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
+           !boot_cpu_has(X86_FEATURE_AMD_IBRS))
+               bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
+
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL_SSBD) &&
+           !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
+               bits &= ~SPEC_CTRL_SSBD;
+       if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
+           !boot_cpu_has(X86_FEATURE_AMD_SSBD))
+               bits &= ~SPEC_CTRL_SSBD;
+
+       return bits;
+}
+EXPORT_SYMBOL_GPL(kvm_spec_ctrl_valid_bits);
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
index ab715cee36533432085bbf8b9d38ba9c06b3bd67..dd6e34d0a8811034a2ae219b2202c9c97a584108 100644 (file)
@@ -367,5 +367,6 @@ static inline bool kvm_pat_valid(u64 data)
 
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
+u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
 
 #endif