X86/KVM: Decrypt shared per-cpu variables when SEV is active
authorBrijesh Singh <brijesh.singh@amd.com>
Fri, 20 Oct 2017 14:30:58 +0000 (09:30 -0500)
committerThomas Gleixner <tglx@linutronix.de>
Tue, 7 Nov 2017 14:36:00 +0000 (15:36 +0100)
When SEV is active, guest memory is encrypted with a guest-specific key, so a
guest memory region shared with the hypervisor must be mapped as decrypted
before it can be shared.

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Tested-by: Borislav Petkov <bp@suse.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: kvm@vger.kernel.org
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Link: https://lkml.kernel.org/r/20171020143059.3291-17-brijesh.singh@amd.com
arch/x86/kernel/kvm.c

index 8bb9594d076166ee0f6bd4f70350fe3ecf8c3d8b..3df743b60c809cc680a8f3da5bf84605b0d94d44 100644 (file)
@@ -75,8 +75,8 @@ static int parse_no_kvmclock_vsyscall(char *arg)
 
 early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
 
-static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
-static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
+static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
+static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
 static int has_steal_clock = 0;
 
 /*
@@ -312,7 +312,7 @@ static void kvm_register_steal_time(void)
                cpu, (unsigned long long) slow_virt_to_phys(st));
 }
 
-static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
+static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
 
 static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
 {
@@ -426,9 +426,42 @@ void kvm_disable_steal_time(void)
        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
 }
 
+static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
+{
+       early_set_memory_decrypted((unsigned long) ptr, size);
+}
+
+/*
+ * Iterate through all possible CPUs and map the memory regions pointed
+ * to by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
+ *
+ * Note: we iterate through all possible CPUs to ensure that CPUs
+ * hotplugged later will have their per-cpu variables already mapped
+ * as decrypted.
+ */
+static void __init sev_map_percpu_data(void)
+{
+       int cpu;
+
+       if (!sev_active())
+               return;
+
+       for_each_possible_cpu(cpu) {
+               __set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
+               __set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
+               __set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
+       }
+}
+
 #ifdef CONFIG_SMP
 static void __init kvm_smp_prepare_boot_cpu(void)
 {
+       /*
+        * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
+        * shares the guest physical address with the hypervisor.
+        */
+       sev_map_percpu_data();
+
        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
        kvm_spinlock_init();
@@ -496,6 +529,7 @@ void __init kvm_guest_init(void)
                                      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
                pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
 #else
+       sev_map_percpu_data();
        kvm_guest_cpu_init();
 #endif