bool (*cpu_has_high_real_mode_segbase)(void);
void (*cpuid_update)(struct kvm_vcpu *vcpu);
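+ /*
+ * Vendor hooks that back kvm_arch_alloc_vm()/kvm_arch_free_vm() below:
+ * each of VMX and SVM supplies its own allocator for the VM structure.
+ */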
+ struct kvm *(*vm_alloc)(void);
+ void (*vm_free)(struct kvm *kvm);
int (*vm_init)(struct kvm *kvm);
void (*vm_destroy)(struct kvm *kvm);
extern struct kvm_x86_ops *kvm_x86_ops;
+#define __KVM_HAVE_ARCH_VM_ALLOC
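+/*
+ * Defining __KVM_HAVE_ARCH_VM_ALLOC makes common code (kvm_create_vm())
+ * call these helpers instead of its default kzalloc()/kfree() of struct
+ * kvm, so the vendor module controls how the VM structure is allocated.
+ */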
+static inline struct kvm *kvm_arch_alloc_vm(void)
+{
+ return kvm_x86_ops->vm_alloc();
+}
+
+static inline void kvm_arch_free_vm(struct kvm *kvm)
+{
+ kvm_x86_ops->vm_free(kvm);
+}
+
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);
kfree(region);
}
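+
+/*
+ * SVM has no vendor-specific per-VM payload yet, so the VM container is a
+ * bare zeroed struct kvm; routing allocation through this hook lets SVM
+ * grow the allocation later without touching common code.
+ */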
+static struct kvm *svm_vm_alloc(void)
+{
+ return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+}
+
+static void svm_vm_free(struct kvm *kvm)
+{
+ kfree(kvm);
+}
+
static void sev_vm_destroy(struct kvm *kvm)
{
struct kvm_sev_info *sev = &kvm->arch.sev_info;
.vcpu_free = svm_free_vcpu,
.vcpu_reset = svm_vcpu_reset,
+ .vm_alloc = svm_vm_alloc,
+ .vm_free = svm_vm_free,
.vm_init = avic_vm_init,
.vm_destroy = svm_vm_destroy,
}
STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
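+
+/*
+ * As with SVM: no VMX-specific per-VM state exists yet, so allocate and
+ * free a bare zeroed struct kvm via the new hooks.
+ */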
+static struct kvm *vmx_vm_alloc(void)
+{
+ return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+}
+
+static void vmx_vm_free(struct kvm *kvm)
+{
+ kfree(kvm);
+}
+
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
.cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
+ .vm_alloc = vmx_vm_alloc,
+ .vm_free = vmx_vm_free,
.vm_init = vmx_vm_init,
.vcpu_create = vmx_create_vcpu,
.vcpu_free = vmx_free_vcpu,