x86: Remove redundant K6 MSRs
author	Brian Gerst <brgerst@gmail.com>
Sat, 17 Jul 2010 13:03:26 +0000 (09:03 -0400)
committer	H. Peter Anvin <hpa@zytor.com>
Thu, 22 Jul 2010 04:23:05 +0000 (21:23 -0700)
MSR_K6_EFER is unused, and MSR_K6_STAR is redundant with MSR_STAR.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
LKML-Reference: <1279371808-24804-1-git-send-email-brgerst@gmail.com>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
arch/x86/include/asm/msr-index.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 8c7ae4318629445f16b588e9535b0dc2a105836c..6068e0e06e00dfd22cd2718a0714100e61fc8b01 100644
 #define MSR_K7_FID_VID_STATUS          0xc0010042
 
 /* K6 MSRs */
-#define MSR_K6_EFER                    0xc0000080
-#define MSR_K6_STAR                    0xc0000081
 #define MSR_K6_WHCR                    0xc0000082
 #define MSR_K6_UWCCR                   0xc0000085
 #define MSR_K6_EPMR                    0xc0000086
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ce438e0fdd268f394995d604070b8b2aad783e4d..24a2206962986dcb82326c4c810281a120831acd 100644
@@ -130,7 +130,7 @@ static struct svm_direct_access_msrs {
        u32 index;   /* Index of the MSR */
        bool always; /* True if intercept is always on */
 } direct_access_msrs[] = {
-       { .index = MSR_K6_STAR,                         .always = true  },
+       { .index = MSR_STAR,                            .always = true  },
        { .index = MSR_IA32_SYSENTER_CS,                .always = true  },
 #ifdef CONFIG_X86_64
        { .index = MSR_GS_BASE,                         .always = true  },
@@ -2431,7 +2431,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
                *data = tsc_offset + native_read_tsc();
                break;
        }
-       case MSR_K6_STAR:
+       case MSR_STAR:
                *data = svm->vmcb->save.star;
                break;
 #ifdef CONFIG_X86_64
@@ -2555,7 +2555,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 
                break;
        }
-       case MSR_K6_STAR:
+       case MSR_STAR:
                svm->vmcb->save.star = data;
                break;
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ee03679efe788d61e73d76f818931d8c1a45fad2..b42ad25d56479840082243b8d4e6d6f688f6f84e 100644
@@ -231,14 +231,14 @@ static u64 host_efer;
 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
 
 /*
- * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
+ * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
  * away by decrementing the array size.
  */
 static const u32 vmx_msr_index[] = {
 #ifdef CONFIG_X86_64
        MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
 #endif
-       MSR_EFER, MSR_TSC_AUX, MSR_K6_STAR,
+       MSR_EFER, MSR_TSC_AUX, MSR_STAR,
 };
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
@@ -1057,10 +1057,10 @@ static void setup_msrs(struct vcpu_vmx *vmx)
                if (index >= 0 && vmx->rdtscp_enabled)
                        move_msr_up(vmx, index, save_nmsrs++);
                /*
-                * MSR_K6_STAR is only needed on long mode guests, and only
+                * MSR_STAR is only needed on long mode guests, and only
                 * if efer.sce is enabled.
                 */
-               index = __find_msr_index(vmx, MSR_K6_STAR);
+               index = __find_msr_index(vmx, MSR_STAR);
                if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
                        move_msr_up(vmx, index, save_nmsrs++);
        }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 05d571f6f19615cab8a90dec413e0a39537e914a..6127468ebbd25e27b2f7f86e175060272242b970 100644
@@ -671,7 +671,7 @@ static u32 msrs_to_save[] = {
        HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
        HV_X64_MSR_APIC_ASSIST_PAGE,
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
-       MSR_K6_STAR,
+       MSR_STAR,
 #ifdef CONFIG_X86_64
        MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
 #endif