}
}
-static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
-{
-	if (static_branch_unlikely(&vmx_l1d_should_flush))
-		vmx_l1d_flush(vcpu);
-
-	if (vcpu->arch.cr2 != read_cr2())
-		write_cr2(vcpu->arch.cr2);
-
-	asm(
-		"call ____vmx_vcpu_run \n\t"
-	      : ASM_CALL_CONSTRAINT, "=b"(vmx->fail),
-#ifdef CONFIG_X86_64
-		"=D"((int){0}), "=S"((int){0})
-	      : "D"(vmx), "S"(&vcpu->arch.regs),
-#else
-		"=a"((int){0}), "=d"((int){0})
-	      : "a"(vmx), "d"(&vcpu->arch.regs),
-#endif
-		"b"(vmx->loaded_vmcs->launched)
-	      : "cc", "memory"
-#ifdef CONFIG_X86_64
-		, "rax", "rcx", "rdx"
-		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
-#else
-		, "ecx", "edi", "esi"
-#endif
-	);
-
-	vcpu->arch.cr2 = read_cr2();
-}
-
static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	 */
	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
-	__vmx_vcpu_run(vcpu, vmx);
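+	/* Flush L1D if needed (L1TF mitigation) before entering the guest. */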
+	if (static_branch_unlikely(&vmx_l1d_should_flush))
+		vmx_l1d_flush(vcpu);
+
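+	/*
+	 * CR2 isn't switched by VM-Enter/VM-Exit, so restore the guest's
+	 * value by hand, skipping the write when it already matches.
+	 */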
+	if (vcpu->arch.cr2 != read_cr2())
+		write_cr2(vcpu->arch.cr2);
+
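+	/*
+	 * Call the VM-Enter asm blob.  ____vmx_vcpu_run() takes @vmx and a
+	 * pointer to the guest register array in RDI/RSI (EAX/EDX on 32-bit)
+	 * and the "launched" flag in EBX, and returns the VM-Fail flag in
+	 * EBX.  The dummy outputs mark the input registers as clobbered, as
+	 * GCC doesn't allow inputs to appear in the clobber list.
+	 */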
+	asm(
+		"call ____vmx_vcpu_run \n\t"
+	      : ASM_CALL_CONSTRAINT, "=b"(vmx->fail),
+#ifdef CONFIG_X86_64
+		"=D"((int){0}), "=S"((int){0})
+	      : "D"(vmx), "S"(&vcpu->arch.regs),
+#else
+		"=a"((int){0}), "=d"((int){0})
+	      : "a"(vmx), "d"(&vcpu->arch.regs),
+#endif
+		"b"(vmx->loaded_vmcs->launched)
+	      : "cc", "memory"
+#ifdef CONFIG_X86_64
+		, "rax", "rcx", "rdx"
+		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+#else
+		, "ecx", "edi", "esi"
+#endif
+	);
+
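+	/* VM-Exit leaves the guest's CR2 live in the register; save it. */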
+	vcpu->arch.cr2 = read_cr2();
	/*
	 * We do not use IBRS in the kernel. If this vCPU has used the