KVM: VMX: enable guest access to LMCE related MSRs
author: Ashok Raj <ashok.raj@intel.com>
Wed, 22 Jun 2016 06:59:56 +0000 (14:59 +0800)
committer: Paolo Bonzini <pbonzini@redhat.com>
Thu, 23 Jun 2016 17:17:29 +0000 (19:17 +0200)
On Intel platforms, this patch adds LMCE (Local Machine Check Exception)
to KVM's supported MCE capabilities and handles guest access to the
LMCE-related MSRs (IA32_MCG_EXT_CTL and the LMCE enable bit in
IA32_FEATURE_CONTROL).

Signed-off-by: Ashok Raj <ashok.raj@intel.com>
[Haozhong: macro KVM_MCE_CAP_SUPPORTED => variable kvm_mce_cap_supported
           Only enable LMCE on Intel platform
           Check MSR_IA32_FEATURE_CONTROL when handling guest
             access to MSR_IA32_MCG_EXT_CTL]
Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

index 360c5171ea1a89412d0c308f5c16bfbe36e39e0e..7a628fb6a2c2b072d624f156a4f5ad9a98a76b4d 100644 (file)
@@ -598,6 +598,7 @@ struct kvm_vcpu_arch {
        u64 mcg_cap;
        u64 mcg_status;
        u64 mcg_ctl;
+       u64 mcg_ext_ctl;
        u64 *mce_banks;
 
        /* Cache MMIO info */
@@ -1008,6 +1009,8 @@ struct kvm_x86_ops {
 
        int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
        void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
+
+       void (*setup_mce)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_arch_async_pf {
@@ -1082,6 +1085,8 @@ extern u64  kvm_max_tsc_scaling_ratio;
 /* 1ull << kvm_tsc_scaling_ratio_frac_bits */
 extern u64  kvm_default_tsc_scaling_ratio;
 
+extern u64 kvm_mce_cap_supported;
+
 enum emulation_result {
        EMULATE_DONE,         /* no further processing */
        EMULATE_USER_EXIT,    /* kvm_run ready for userspace exit */
index 0a3ccb073bb4523955362425a8bf7f192662ea8b..943609f06c90c0da824bffb5e57bc3c5b63f914c 100644 (file)
@@ -2984,6 +2984,13 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        return 1;
                msr_info->data = vmcs_read64(GUEST_BNDCFGS);
                break;
+       case MSR_IA32_MCG_EXT_CTL:
+               if (!msr_info->host_initiated &&
+                   !(to_vmx(vcpu)->msr_ia32_feature_control &
+                     FEATURE_CONTROL_LMCE))
+                       return 1;
+               msr_info->data = vcpu->arch.mcg_ext_ctl;
+               break;
        case MSR_IA32_FEATURE_CONTROL:
                msr_info->data = to_vmx(vcpu)->msr_ia32_feature_control;
                break;
@@ -3075,6 +3082,14 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_IA32_TSC_ADJUST:
                ret = kvm_set_msr_common(vcpu, msr_info);
                break;
+       case MSR_IA32_MCG_EXT_CTL:
+               if ((!msr_info->host_initiated &&
+                    !(to_vmx(vcpu)->msr_ia32_feature_control &
+                      FEATURE_CONTROL_LMCE)) ||
+                   (data & ~MCG_EXT_CTL_LMCE_EN))
+                       return 1;
+               vcpu->arch.mcg_ext_ctl = data;
+               break;
        case MSR_IA32_FEATURE_CONTROL:
                if (!vmx_feature_control_msr_valid(vcpu, data) ||
                    (to_vmx(vcpu)->msr_ia32_feature_control &
@@ -6484,6 +6499,8 @@ static __init int hardware_setup(void)
 
        kvm_set_posted_intr_wakeup_handler(wakeup_handler);
 
+       kvm_mce_cap_supported |= MCG_LMCE_P;
+
        return alloc_kvm_area();
 
 out8:
@@ -11109,6 +11126,16 @@ out:
        return ret;
 }
 
+static void vmx_setup_mce(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.mcg_cap & MCG_LMCE_P)
+               to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
+                       FEATURE_CONTROL_LMCE;
+       else
+               to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
+                       ~FEATURE_CONTROL_LMCE;
+}
+
 static struct kvm_x86_ops vmx_x86_ops = {
        .cpu_has_kvm_support = cpu_has_kvm_support,
        .disabled_by_bios = vmx_disabled_by_bios,
@@ -11238,6 +11265,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .set_hv_timer = vmx_set_hv_timer,
        .cancel_hv_timer = vmx_cancel_hv_timer,
 #endif
+
+       .setup_mce = vmx_setup_mce,
 };
 
 static int __init vmx_init(void)
index 299219630c9470e5e87c04553834b250b7d44755..0a42fc729ff39c1c908af175df1192f2aaa9c106 100644 (file)
@@ -70,7 +70,8 @@
 
 #define MAX_IO_MSRS 256
 #define KVM_MAX_MCE_BANKS 32
-#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
+u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
+EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);
 
 #define emul_to_vcpu(ctxt) \
        container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
@@ -984,6 +985,7 @@ static u32 emulated_msrs[] = {
        MSR_IA32_MISC_ENABLE,
        MSR_IA32_MCG_STATUS,
        MSR_IA32_MCG_CTL,
+       MSR_IA32_MCG_EXT_CTL,
        MSR_IA32_SMBASE,
 };
 
@@ -2685,11 +2687,9 @@ long kvm_arch_dev_ioctl(struct file *filp,
                break;
        }
        case KVM_X86_GET_MCE_CAP_SUPPORTED: {
-               u64 mce_cap;
-
-               mce_cap = KVM_MCE_CAP_SUPPORTED;
                r = -EFAULT;
-               if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
+               if (copy_to_user(argp, &kvm_mce_cap_supported,
+                                sizeof(kvm_mce_cap_supported)))
                        goto out;
                r = 0;
                break;
@@ -2872,7 +2872,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
        r = -EINVAL;
        if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
                goto out;
-       if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
+       if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
                goto out;
        r = 0;
        vcpu->arch.mcg_cap = mcg_cap;
@@ -2882,6 +2882,9 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
        /* Init IA32_MCi_CTL to all 1s */
        for (bank = 0; bank < bank_num; bank++)
                vcpu->arch.mce_banks[bank*4] = ~(u64)0;
+
+       if (kvm_x86_ops->setup_mce)
+               kvm_x86_ops->setup_mce(vcpu);
 out:
        return r;
 }