Split kvm_arch_vcpu_create() into kvm_arch_vcpu_create() and
kvm_arch_vcpu_setup(), so that the preemption notifier can be initialized
between the two. This means we can now call vcpu_load() within
kvm_arch_vcpu_setup().
Signed-off-by: Avi Kivity <avi@qumranet.com>
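
For context, vcpu_load() is the common-code helper that registers the
vcpu's preempt notifier before handing the vcpu to the arch, which is why
it must not run until preempt_notifier_init() has been called. A
simplified sketch of the helper pair as it looks in this era (locking and
exact details are approximate and not part of this patch):

void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	/* requires preempt_notifier_init() to have run already */
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}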
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
+	r = kvm_arch_vcpu_setup(vcpu);
+	if (r)
+		goto vcpu_destroy;
+
	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
					unsigned int id)
{
-	int r;
-	struct kvm_vcpu *vcpu = kvm_x86_ops->vcpu_create(kvm, id);
+	return kvm_x86_ops->vcpu_create(kvm, id);
+}
-	if (IS_ERR(vcpu)) {
-		r = -ENOMEM;
-		goto fail;
-	}
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+	int r;
	/* We do fxsave: this must be aligned. */
	BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
	if (r < 0)
		goto free_vcpu;
-	return vcpu;
+	return 0;
free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
-fail:
-	return ERR_PTR(r);
+	return r;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
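
Putting the pieces together, the create-vcpu path now looks roughly like
this (a condensed sketch of kvm_vm_ioctl_create_vcpu() based on the hunks
above; the vcpus[] bookkeeping and fd creation are elided):

static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	struct kvm_vcpu *vcpu;
	int r;

	vcpu = kvm_arch_vcpu_create(kvm, n);	/* no vcpu_load() allowed yet */
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);		/* may use vcpu_load()/vcpu_put() */
	if (r)
		goto vcpu_destroy;

	/* ... install vcpu in kvm->vcpus[n] under kvm->lock, create its fd ... */
	return 0;

vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}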