KVM: selftests: state_test: test bare VMXON migration
author		Vitaly Kuznetsov <vkuznets@redhat.com>
		Tue, 16 Oct 2018 16:50:08 +0000 (18:50 +0200)
committer	Paolo Bonzini <pbonzini@redhat.com>
		Tue, 16 Oct 2018 22:30:18 +0000 (00:30 +0200)
Split prepare_for_vmx_operation() into prepare_for_vmx_operation() and
load_vmcs() so we can inject GUEST_SYNC() in between.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
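
The guest ends up with the following calling sequence (a sketch put
together from the hunks below, not a verbatim quote of either test):

	/* Enter VMX root operation only; no VMCS is current yet. */
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));

	/* Sync with the host here so state can be saved and restored
	 * while the vCPU is in "bare VMXON": VMX enabled, no VMCS loaded.
	 */
	GUEST_SYNC(3);

	/* VMCLEAR + VMPTRLD the VMCS, as prepare_for_vmx_operation()
	 * used to do internally before the split.
	 */
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
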
tools/testing/selftests/kvm/include/x86_64/vmx.h
tools/testing/selftests/kvm/lib/x86_64/vmx.c
tools/testing/selftests/kvm/x86_64/state_test.c
tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c

diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index 12ebd836f7ef46b55fa22ba343f67981f2367c3e..4bbee8560292eded544e0f9539a44837d7aba4c1 100644
@@ -548,5 +548,6 @@ struct vmx_pages {
 struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
 bool prepare_for_vmx_operation(struct vmx_pages *vmx);
 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
+bool load_vmcs(struct vmx_pages *vmx);
 
 #endif /* SELFTEST_KVM_VMX_H */
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index d7c4014722473ac3981e80d3356260bd4ccb63d2..cc356da9b3d80186511887e47ff30dc174345394 100644
@@ -107,6 +107,11 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx)
        if (vmxon(vmx->vmxon_gpa))
                return false;
 
+       return true;
+}
+
+bool load_vmcs(struct vmx_pages *vmx)
+{
        /* Load a VMCS. */
        *(uint32_t *)(vmx->vmcs) = vmcs_revision();
        if (vmclear(vmx->vmcs_gpa))
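
For reference, the lines that used to follow in prepare_for_vmx_operation()
simply become the body of load_vmcs(). A sketch of the resulting function,
assuming the remainder (unchanged context not shown in the hunk) is the
usual vmptrld and shadow-VMCS setup from this library:

	bool load_vmcs(struct vmx_pages *vmx)
	{
		/* Load a VMCS: write the revision id, VMCLEAR, VMPTRLD. */
		*(uint32_t *)(vmx->vmcs) = vmcs_revision();
		if (vmclear(vmx->vmcs_gpa))
			return false;

		if (vmptrld(vmx->vmcs_gpa))
			return false;

		/* Assumed continuation: prepare (but do not load) the
		 * shadow VMCS, as the pre-split code did.
		 */
		*(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul;
		if (vmclear(vmx->shadow_vmcs_gpa))
			return false;

		return true;
	}
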
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 43df194a7c1eeec98aa785de2f61c723f421a469..03da41f0f7364d6d274e72bb056988020a8839f5 100644
@@ -26,20 +26,20 @@ static bool have_nested_state;
 
 void l2_guest_code(void)
 {
-       GUEST_SYNC(5);
+       GUEST_SYNC(6);
 
         /* Exit to L1 */
        vmcall();
 
        /* L1 has now set up a shadow VMCS for us.  */
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
-       GUEST_SYNC(9);
+       GUEST_SYNC(10);
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
        GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
-       GUEST_SYNC(10);
+       GUEST_SYNC(11);
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
        GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
-       GUEST_SYNC(11);
+       GUEST_SYNC(12);
 
        /* Done, exit to L1 and never come back.  */
        vmcall();
@@ -52,15 +52,17 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
 
        GUEST_ASSERT(vmx_pages->vmcs_gpa);
        GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+       GUEST_SYNC(3);
+       GUEST_ASSERT(load_vmcs(vmx_pages));
        GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
 
-       GUEST_SYNC(3);
+       GUEST_SYNC(4);
        GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
 
        prepare_vmcs(vmx_pages, l2_guest_code,
                     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
 
-       GUEST_SYNC(4);
+       GUEST_SYNC(5);
        GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
        GUEST_ASSERT(!vmlaunch());
        GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
@@ -72,7 +74,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
        GUEST_ASSERT(!vmresume());
        GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
 
-       GUEST_SYNC(6);
+       GUEST_SYNC(7);
        GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
 
        GUEST_ASSERT(!vmresume());
@@ -85,12 +87,12 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
 
        GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
        GUEST_ASSERT(vmlaunch());
-       GUEST_SYNC(7);
+       GUEST_SYNC(8);
        GUEST_ASSERT(vmlaunch());
        GUEST_ASSERT(vmresume());
 
        vmwrite(GUEST_RIP, 0xc0ffee);
-       GUEST_SYNC(8);
+       GUEST_SYNC(9);
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
 
        GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
@@ -101,7 +103,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
        GUEST_ASSERT(vmlaunch());
        GUEST_ASSERT(vmresume());
-       GUEST_SYNC(12);
+       GUEST_SYNC(13);
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
        GUEST_ASSERT(vmlaunch());
        GUEST_ASSERT(vmresume());
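
The renumbering above is mechanical: inserting GUEST_SYNC(3) between
VMXON and the VMCS load shifts every later sync point up by one. The
host half of state_test saves and restores the vCPU at each sync point,
so the new stage 3 is the one that migrates a vCPU with VMX enabled but
no current VMCS. A rough sketch of the shape of that host loop (helper
names are taken from the kvm selftest harness of this era and should be
treated as illustrative, not authoritative):

	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_SYNC:
			/* uc.args[1] == stage; stage 3 is now "bare VMXON". */
			break;
		case UCALL_DONE:
			goto done;
		}

		/* Save everything (including nested state), tear the VM
		 * down, rebuild it and load the state back before the
		 * next iteration.
		 */
		state = vcpu_save_state(vm, VCPU_ID);
		kvm_vm_release(vm);
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID, 0, 0);
		vcpu_load_state(vm, VCPU_ID, state);
		free(state);
	}
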
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
index 38a91a5f04acfa132545603430dd2d239383a4a6..18fa64db0d7a5315a02e1f9c8a1c488180539ffc 100644
@@ -94,6 +94,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
        check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
 
        GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+       GUEST_ASSERT(load_vmcs(vmx_pages));
 
        /* Prepare the VMCS for L2 execution. */
        prepare_vmcs(vmx_pages, l2_guest_code,