KVM: add kvm_arch_sched_in
author Radim Krčmář <rkrcmar@redhat.com>
Thu, 21 Aug 2014 16:08:05 +0000 (18:08 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 21 Aug 2014 16:45:21 +0000 (18:45 +0200)
Introduce a preempt-notifier hook, kvm_arch_sched_in(), for
architecture-specific code.

The advantage over creating a new notifier in every arch is slightly
simpler code and a guaranteed call order with respect to kvm_sched_in.
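
For illustration only: this patch adds empty stubs, but an architecture
could later fill the hook in along these lines (a sketch, not code from
this series; the trace_printk() body is purely an example):

	void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
	{
		/*
		 * Example: react to the vCPU coming back on a different
		 * physical CPU than it last ran on.  vcpu->cpu is only
		 * updated afterwards, in kvm_arch_vcpu_load(), which runs
		 * after this hook (see the kvm_main.c hunk below).
		 */
		if (vcpu->cpu != cpu)
			trace_printk("vCPU %d moved to pCPU %d\n",
				     vcpu->vcpu_id, cpu);
	}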

Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/arm/kvm/arm.c
arch/mips/kvm/mips.c
arch/powerpc/kvm/powerpc.c
arch/s390/kvm/kvm-s390.c
arch/x86/kvm/x86.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c

arch/arm/kvm/arm.c
index a99e0cdf8ba2f3c1799b3a7c2013f8e024b8056e..9f788ebac55bec7222b703bcb8f258c26c9334bc 100644
@@ -288,6 +288,10 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
 }
 
+void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        vcpu->cpu = cpu;
arch/mips/kvm/mips.c
index cd7114147ae777f9a5bf7063acfd0cbd388cd944..2362df2a79f9327c054a1d599011851f7cdd904b 100644
@@ -1002,6 +1002,10 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
 }
 
+void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
 {
arch/powerpc/kvm/powerpc.c
index 4c79284b58be9d0870eebbf62a772dbdf701598c..cbc432f4f0a6579a2d453d15adde1aac00f102fe 100644
@@ -720,6 +720,10 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
        kvmppc_subarch_vcpu_uninit(vcpu);
 }
 
+void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 #ifdef CONFIG_BOOKE
arch/s390/kvm/kvm-s390.c
index ce81eb2ab76a207128069119acaac4e92918fa99..a3c324ec43704fcee21973a3b521a38a22ada371 100644
@@ -555,6 +555,10 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
        /* Nothing todo */
 }
 
+void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
arch/x86/kvm/x86.c
index cd718c01cdf1b1a9af35f9fd975ed30dbc29387f..7d43dc7bb906aef057879685cc78f503ce1a4cf8 100644
@@ -7171,6 +7171,10 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
                static_key_slow_dec(&kvm_no_apic_vcpu);
 }
 
+void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
        if (type)
include/linux/kvm_host.h
index a4c33b34fe3f0c7f3366a320c6df4534001c24b7..ebd723676633a4adeb39bff3c46189308a7e1ac1 100644
@@ -624,6 +624,8 @@ void kvm_arch_exit(void);
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
 
+void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
+
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
virt/kvm/kvm_main.c
index 39b16035386f91db489c919174f24e0951a0a816..5a0817ee996ec058e1164dd77fb7404c1d6740a1 100644
@@ -3124,6 +3124,8 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
        if (vcpu->preempted)
                vcpu->preempted = false;
 
+       kvm_arch_sched_in(vcpu, cpu);
+
        kvm_arch_vcpu_load(vcpu, cpu);
 }
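
For context, the preempt notifier that ends up calling kvm_sched_in() is
registered elsewhere in virt/kvm/kvm_main.c (unchanged by this patch);
roughly:

	/* kvm_vcpu_init(): each vCPU registers the common notifier ops. */
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	/*
	 * kvm_init(): the ops point at the generic handlers, so after this
	 * patch kvm_arch_sched_in() runs, followed by kvm_arch_vcpu_load(),
	 * every time the scheduler puts a vCPU thread back on a CPU.
	 */
	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;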