 * - true: let the vcpu access the same address again.
 * - false: let the real page fault path fix it.
 */
-static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int level,
+static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    u32 error_code)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 new_spte;

	for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
-		if (!is_shadow_present_pte(spte) ||
-		    iterator.level < level)
+		if (!is_shadow_present_pte(spte))
			break;

	sp = page_header(__pa(iterator.sptep));

	if (level > PT_PAGE_TABLE_LEVEL)
		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);

-	if (fast_page_fault(vcpu, gpa, level, error_code))
+	if (fast_page_fault(vcpu, gpa, error_code))
		return RET_PF_RETRY;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
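
The effect of dropping the level parameter is easiest to see in isolation: the old walk bailed out as soon as iterator.level fell below the caller-supplied level, while the new walk simply descends until it reaches a non-present SPTE. The stand-alone C sketch below models that difference; it is not KVM code, and every name in it (sptes, walk, the toy PRESENT bit) is invented for illustration.

#include <stdbool.h>
#include <stdio.h>

#define NR_LEVELS 4		/* toy 4-level paging: level 4 (root) .. 1 (leaf) */
#define PRESENT   0x1ULL	/* toy "present" bit, not the real SPTE layout */

/* One toy SPTE per level, indexed by level - 1; every level is present. */
static const unsigned long long sptes[NR_LEVELS] = {
	[0] = PRESENT, [1] = PRESENT, [2] = PRESENT, [3] = PRESENT,
};

static bool is_present(unsigned long long spte)
{
	return spte & PRESENT;
}

/*
 * Walk from the root toward the leaf and return the level at which the
 * walk stopped.  target_level models the old cutoff; passing 1 never
 * triggers it, which models the new, levelless walk.
 */
static int walk(int target_level)
{
	int level, stopped = NR_LEVELS;

	for (level = NR_LEVELS; level >= 1; level--) {
		stopped = level;
		if (!is_present(sptes[level - 1]) || level < target_level)
			break;
	}
	return stopped;
}

int main(void)
{
	printf("old walk, target level 3: stopped at level %d\n", walk(3));
	printf("new walk, no level cutoff: stopped at level %d\n", walk(1));
	return 0;
}

With every level marked present, the old-style walk given a target level of 3 stops early at level 2, while the levelless walk reaches the leaf at level 1, which is the entry the reworked fast_page_fault now inspects.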