IA64 support forces us to abstract the allocation of the kvm structure.
But instead of mixing this up with arch-specific initialization and
doing the same on destruction, split both steps. This allows us to move
the generic destruction calls into generic code.

It also fixes error cleanup on failures of kvm_create_vm for IA64: the
generic error path used to kfree() a kvm structure that IA64 allocates
via __get_free_pages().

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
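
For reference, a condensed sketch of the VM lifecycle that results from
this split, assembled from the generic hunks below. Locking and most
error handling are omitted and the bodies are illustrative, not the
literal kernel source:

/*
 * kvm_arch_alloc_vm()/kvm_arch_free_vm() default to kzalloc()/kfree()
 * unless the arch defines __KVM_HAVE_ARCH_VM_ALLOC (as IA64 does below).
 */
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_alloc_vm();	/* allocation only */
	int r;

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	r = kvm_arch_init_vm(kvm);	/* arch-specific init only */
	if (r)
		goto out_err;		/* ends in kvm_arch_free_vm() */

	/* ... generic setup: memslots, buses, srcu, vm_list ... */
	return kvm;

out_err:
	kvm_arch_free_vm(kvm);
	return ERR_PTR(r);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	/* ... generic teardown ... */
	kvm_arch_destroy_vm(kvm);	/* arch-specific teardown only */
	kvm_free_physmem(kvm);		/* now called from generic code */
	cleanup_srcu_struct(&kvm->srcu);	/* ditto */
	kvm_arch_free_vm(kvm);		/* kfree() or the IA64 page free */
}
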
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
void kvm_sal_emul(struct kvm_vcpu *vcpu);
+#define __KVM_HAVE_ARCH_VM_ALLOC 1
+struct kvm *kvm_arch_alloc_vm(void);
+void kvm_arch_free_vm(struct kvm *kvm);
+
#endif /* __ASSEMBLY__*/
#endif
return r;
}
-static struct kvm *kvm_alloc_kvm(void)
+struct kvm *kvm_arch_alloc_vm(void)
{
struct kvm *kvm;
vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
if (!vm_base)
- return ERR_PTR(-ENOMEM);
+ return NULL;
memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
kvm = (struct kvm *)(vm_base +
#define GUEST_PHYSICAL_RR4 0x2739
#define VMM_INIT_RR 0x1660
-static void kvm_init_vm(struct kvm *kvm)
+int kvm_arch_init_vm(struct kvm *kvm)
{
BUG_ON(!kvm);
+ kvm->arch.is_sn2 = ia64_platform_is("sn2");
+
kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
kvm->arch.vmm_init_rr = VMM_INIT_RR;
/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
-}
-
-struct kvm *kvm_arch_create_vm(void)
-{
- struct kvm *kvm = kvm_alloc_kvm();
-
- if (IS_ERR(kvm))
- return ERR_PTR(-ENOMEM);
-
- kvm->arch.is_sn2 = ia64_platform_is("sn2");
-
- kvm_init_vm(kvm);
-
- return kvm;
+ return 0;
}
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
return -EINVAL;
}
-static void free_kvm(struct kvm *kvm)
+void kvm_arch_free_vm(struct kvm *kvm)
{
unsigned long vm_base = kvm->arch.vm_base;
#endif
kfree(kvm->arch.vioapic);
kvm_release_vm_pages(kvm);
- kvm_free_physmem(kvm);
- cleanup_srcu_struct(&kvm->srcu);
- free_kvm(kvm);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
*(int *)rtn = kvmppc_core_check_processor_compat();
}
-struct kvm *kvm_arch_create_vm(void)
+int kvm_arch_init_vm(struct kvm *kvm)
{
- struct kvm *kvm;
-
- kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
- if (!kvm)
- return ERR_PTR(-ENOMEM);
-
- return kvm;
+ return 0;
}
-static void kvmppc_free_vcpus(struct kvm *kvm)
+void kvm_arch_destroy_vm(struct kvm *kvm)
{
unsigned int i;
struct kvm_vcpu *vcpu;
{
}
-void kvm_arch_destroy_vm(struct kvm *kvm)
-{
- kvmppc_free_vcpus(kvm);
- kvm_free_physmem(kvm);
- cleanup_srcu_struct(&kvm->srcu);
- kfree(kvm);
-}
-
int kvm_dev_ioctl_check_extension(long ext)
{
int r;
return r;
}
-struct kvm *kvm_arch_create_vm(void)
+int kvm_arch_init_vm(struct kvm *kvm)
{
- struct kvm *kvm;
int rc;
char debug_name[16];
rc = s390_enable_sie();
if (rc)
- goto out_nokvm;
-
- rc = -ENOMEM;
- kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
- if (!kvm)
- goto out_nokvm;
+ goto out_err;
kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
if (!kvm->arch.sca)
- goto out_nosca;
+ goto out_err;
sprintf(debug_name, "kvm-%u", current->pid);
debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
VM_EVENT(kvm, 3, "%s", "vm created");
- return kvm;
+ return 0;
out_nodbf:
free_page((unsigned long)(kvm->arch.sca));
-out_nosca:
- kfree(kvm);
-out_nokvm:
- return ERR_PTR(rc);
+out_err:
+ return rc;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvm_free_vcpus(kvm);
- kvm_free_physmem(kvm);
free_page((unsigned long)(kvm->arch.sca));
debug_unregister(kvm->arch.dbf);
- cleanup_srcu_struct(&kvm->srcu);
- kfree(kvm);
}
/* Section: vcpu related */
free_page((unsigned long)vcpu->arch.pio_data);
}
-struct kvm *kvm_arch_create_vm(void)
+int kvm_arch_init_vm(struct kvm *kvm)
{
- struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
-
- if (!kvm)
- return ERR_PTR(-ENOMEM);
-
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
spin_lock_init(&kvm->arch.tsc_write_lock);
- return kvm;
+ return 0;
}
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
kfree(kvm->arch.vpic);
kfree(kvm->arch.vioapic);
kvm_free_vcpus(kvm);
- kvm_free_physmem(kvm);
if (kvm->arch.apic_access_page)
put_page(kvm->arch.apic_access_page);
if (kvm->arch.ept_identity_pagetable)
put_page(kvm->arch.ept_identity_pagetable);
- cleanup_srcu_struct(&kvm->srcu);
- kfree(kvm);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/msi.h>
+#include <linux/slab.h>
#include <asm/signal.h>
#include <linux/kvm.h>
void kvm_free_physmem(struct kvm *kvm);
-struct kvm *kvm_arch_create_vm(void);
+#ifndef __KVM_HAVE_ARCH_VM_ALLOC
+static inline struct kvm *kvm_arch_alloc_vm(void)
+{
+ return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+}
+
+static inline void kvm_arch_free_vm(struct kvm *kvm)
+{
+ kfree(kvm);
+}
+#endif
+
+int kvm_arch_init_vm(struct kvm *kvm);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);
static struct kvm *kvm_create_vm(void)
{
- int r = 0, i;
- struct kvm *kvm = kvm_arch_create_vm();
+ int r, i;
+ struct kvm *kvm = kvm_arch_alloc_vm();
- if (IS_ERR(kvm))
- goto out;
+ if (!kvm)
+ return ERR_PTR(-ENOMEM);
+
+ r = kvm_arch_init_vm(kvm);
+ if (r)
+ goto out_err_nodisable;
r = hardware_enable_all();
if (r)
spin_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
spin_unlock(&kvm_lock);
-out:
+
return kvm;
out_err:
for (i = 0; i < KVM_NR_BUSES; i++)
kfree(kvm->buses[i]);
kfree(kvm->memslots);
- kfree(kvm);
+ kvm_arch_free_vm(kvm);
return ERR_PTR(r);
}
kvm_arch_flush_shadow(kvm);
#endif
kvm_arch_destroy_vm(kvm);
+ kvm_free_physmem(kvm);
+ cleanup_srcu_struct(&kvm->srcu);
+ kvm_arch_free_vm(kvm);
hardware_disable_all();
mmdrop(mm);
}