kvm: x86: mmu: Move pgtbl walk inside retry loop in fast_page_fault
author Junaid Shahid <junaids@google.com>
Thu, 22 Dec 2016 04:29:30 +0000 (20:29 -0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 27 Jan 2017 14:46:40 +0000 (15:46 +0100)
Redo the page table walk in fast_page_fault when retrying so that we are
working on the latest PTE even if the hierarchy changes.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
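
The hunks below show the point of the change: before the patch, a failed fix-up only re-read the same iterator.sptep (the re-read removed in the second hunk), so a retry could act on a stale entry if the paging hierarchy had been replaced underneath it; after the patch, every retry restarts the lockless walk from the root and therefore sees the latest PTE. The following is a minimal, self-contained C sketch of that "walk inside the retry loop" pattern, not KVM code: leaf_entry, walk_to_leaf(), fix_leaf_fast() and WRITABLE_BIT are illustrative stand-ins, and C11 atomics play the role of the lockless read and cmpxchg.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for a page-table leaf entry (not a real SPTE). */
    static _Atomic uint64_t leaf_entry = 0x1000;    /* starts read-only */

    #define WRITABLE_BIT (1ull << 1)

    /*
     * Hypothetical walk: real code would descend the shadow page table from
     * the root; here it simply re-reads the (possibly updated) leaf.
     */
    static uint64_t walk_to_leaf(void)
    {
            return atomic_load(&leaf_entry);
    }

    /*
     * Retry loop with the walk *inside* the loop: each attempt starts from a
     * fresh walk, so a concurrent change to the hierarchy (here, to the leaf)
     * is observed before we try to fix it up.
     */
    static bool fix_leaf_fast(void)
    {
            for (int attempt = 0; attempt < 4; attempt++) {
                    uint64_t old = walk_to_leaf();        /* re-walk each try */
                    uint64_t new = old | WRITABLE_BIT;    /* desired fix-up   */

                    /* cmpxchg-style update: succeeds only if nobody raced us. */
                    if (atomic_compare_exchange_strong(&leaf_entry, &old, new))
                            return true;
                    /* Lost the race: loop and walk again on the latest entry. */
            }
            return false;
    }

    int main(void)
    {
            printf("fixed: %d, leaf: %#llx\n", fix_leaf_fast(),
                   (unsigned long long)atomic_load(&leaf_entry));
            return 0;
    }

The design choice mirrors the patch: rather than carefully re-reading just the one entry after a lost race, the whole walk is repeated, which naturally picks up changes at any level of the hierarchy at the cost of a slightly longer retry path.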
arch/x86/kvm/mmu.c

index e13041ac7cdf67bb021b004748beb45c287b549e..437d16274701b4f7a4a84e93f0bd3eb343c40b71 100644
@@ -3088,14 +3088,16 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
                return false;
 
        walk_shadow_page_lockless_begin(vcpu);
-       for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)
-               if (!is_shadow_present_pte(spte) || iterator.level < level)
-                       break;
 
        do {
                bool remove_write_prot = false;
                bool remove_acc_track;
 
+               for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)
+                       if (!is_shadow_present_pte(spte) ||
+                           iterator.level < level)
+                               break;
+
                sp = page_header(__pa(iterator.sptep));
                if (!is_last_spte(spte, sp->role.level))
                        break;
@@ -3176,8 +3178,6 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
                        break;
                }
 
-               spte = mmu_spte_get_lockless(iterator.sptep);
-
        } while (true);
 
        trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,