 	return r;
 }
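+/*
+ * Zap a shadow pte that the guest is about to overwrite: drop the
+ * reverse-mapping entry for a leaf pte, or unlink the child shadow
+ * page table for a nonleaf pte, then clear the slot.
+ */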
+static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu,
+				  struct kvm_mmu_page *page,
+				  u64 *spte)
+{
+	u64 pte;
+	struct kvm_mmu_page *child;
+
+	pte = *spte;
+	if (is_present_pte(pte)) {
+		if (page->role.level == PT_PAGE_TABLE_LEVEL)
+			rmap_remove(vcpu, spte);
+		else {
+			child = page_header(pte & PT64_BASE_ADDR_MASK);
+			mmu_page_remove_parent_pte(vcpu, child, spte);
+		}
+	}
+	*spte = 0;
+}
+
 void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
 {
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct kvm_mmu_page *page;
-	struct kvm_mmu_page *child;
 	struct hlist_node *node, *n;
 	struct hlist_head *bucket;
 	unsigned index;
 	u64 *spte;
-	u64 pte;
 	unsigned offset = offset_in_page(gpa);
 	unsigned pte_size;
 	unsigned page_offset;
 	unsigned misaligned;
 	int level;
 	int flooded = 0;
+	int npte;
 
 	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
 	if (gfn == vcpu->last_pt_write_gfn) {
@@ ... @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
 		}
 		page_offset = offset;
 		level = page->role.level;
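+		/* Assume the write touches a single shadow pte. */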
+		npte = 1;
 		if (page->role.glevels == PT32_ROOT_LEVEL) {
-			page_offset <<= 1;	/* 32->64 */
+			page_offset <<= 1;	/* 32->64 */
+			/*
+			 * A 32-bit pde maps 4MB while the shadow pdes map
+			 * only 2MB.  So we need to double the offset again
+			 * and zap two pdes instead of one.
+			 */
+			if (level == PT32_ROOT_LEVEL) {
+				page_offset <<= 1;
+				npte = 2;
+			}
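+			/*
+			 * Worked example: a guest write to the nonpae pde
+			 * at byte offset 8 (index 2, mapping 8MB-12MB)
+			 * doubles twice to shadow byte offset 32, i.e.
+			 * shadow pdes 4 and 5, mapping 8MB-10MB and
+			 * 10MB-12MB.
+			 */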
 			page_offset &= ~PAGE_MASK;
 		}
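+		/*
+		 * page->page_hpa is the host-physical address of the shadow
+		 * page table; __va() converts it to a kernel virtual pointer
+		 * so the affected sptes can be indexed directly.
+		 */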
 		spte = __va(page->page_hpa);
 		spte += page_offset / sizeof(*spte);
-		pte = *spte;
-		if (is_present_pte(pte)) {
-			if (level == PT_PAGE_TABLE_LEVEL)
-				rmap_remove(vcpu, spte);
-			else {
-				child = page_header(pte & PT64_BASE_ADDR_MASK);
-				mmu_page_remove_parent_pte(vcpu, child, spte);
-			}
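+		/*
+		 * Zap each shadow pte covered by the write; the entries are
+		 * rebuilt from the updated guest page table on the next
+		 * page fault.
+		 */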
+		while (npte--) {
+			mmu_pre_write_zap_pte(vcpu, page, spte);
+			++spte;
 		}
-		*spte = 0;
 	}
 }