hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
-extern hpa_t bad_page_address;
+extern struct page *bad_page;
+int is_error_page(struct page *page);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
return r;
}
+int is_error_page(struct page *page)
+{
+ return page == bad_page;
+}
+EXPORT_SYMBOL_GPL(is_error_page);
+
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
int i;
gfn = unalias_gfn(kvm, gfn);
slot = __gfn_to_memslot(kvm, gfn);
if (!slot)
- return NULL;
+ return bad_page;
return slot->phys_mem[gfn - slot->base_gfn];
}
EXPORT_SYMBOL_GPL(gfn_to_page);
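
With this change gfn_to_page() no longer returns NULL; an unbacked gfn yields the zeroed bad_page sentinel, which callers detect with is_error_page() before mapping the page. A minimal caller-side sketch of that convention (illustrative only, not part of the patch; example_read_guest_byte is a hypothetical helper name):

/* Illustrative caller: gfn_to_page() always returns a valid struct page
 * after this patch, so the error check is is_error_page(), not NULL. */
static int example_read_guest_byte(struct kvm *kvm, gfn_t gfn, u8 *val)
{
	struct page *page = gfn_to_page(kvm, gfn);
	void *page_virt;

	if (is_error_page(page))
		return -EFAULT;		/* gfn not backed by any memslot */
	page_virt = kmap_atomic(page, KM_USER0);
	*val = *(u8 *)page_virt;
	kunmap_atomic(page_virt, KM_USER0);
	return 0;
}
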
struct page *page;
page = gfn_to_page(kvm, gfn);
- if (!page)
+ if (is_error_page(page))
return -EFAULT;
page_virt = kmap_atomic(page, KM_USER0);
struct page *page;
page = gfn_to_page(kvm, gfn);
- if (!page)
+ if (is_error_page(page))
return -EFAULT;
page_virt = kmap_atomic(page, KM_USER0);
struct page *page;
page = gfn_to_page(kvm, gfn);
- if (!page)
+ if (is_error_page(page))
return -EFAULT;
page_virt = kmap_atomic(page, KM_USER0);
pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
page = gfn_to_page(kvm, pgoff);
- if (!page)
+ if (is_error_page(page))
return NOPAGE_SIGBUS;
get_page(page);
if (type != NULL)
.cls = &kvm_sysdev_class,
};
-hpa_t bad_page_address;
+struct page *bad_page;
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
static __init int kvm_init(void)
{
- static struct page *bad_page;
int r;
r = kvm_mmu_module_init();
kvm_arch_init();
- bad_page = alloc_page(GFP_KERNEL);
+ bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (bad_page == NULL) {
r = -ENOMEM;
goto out;
}
- bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
- memset(__va(bad_page_address), 0, PAGE_SIZE);
-
return 0;
out:
static __exit void kvm_exit(void)
{
kvm_exit_debug();
- __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
+ __free_page(bad_page);
kvm_mmu_module_exit();
}
__set_bit(slot, &page_head->slot_bitmap);
}
-hpa_t safe_gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
-{
- hpa_t hpa = gpa_to_hpa(kvm, gpa);
-
- return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK): hpa;
-}
-
hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
{
struct page *page;
+ hpa_t hpa;
ASSERT((gpa & HPA_ERR_MASK) == 0);
page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);
- if (!page)
- return gpa | HPA_ERR_MASK;
- return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
- | (gpa & (PAGE_SIZE-1));
+ hpa = ((hpa_t)page_to_pfn(page) << PAGE_SHIFT) | (gpa & (PAGE_SIZE-1));
+ if (is_error_page(page))
+ return hpa | HPA_ERR_MASK;
+ return hpa;
}
hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
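
In the reworked gpa_to_hpa() above, the hpa is now computed from whatever page gfn_to_page() returned, so a failed translation still resolves to the pfn of the zeroed bad_page and only the HPA_ERR_MASK bit marks it as an error. A caller-side sketch under that convention (illustrative only; example_probe_gpa is a hypothetical name, is_error_hpa() is the existing error test referenced by the removed safe_gpa_to_hpa()):

/* Illustrative caller: the returned hpa is always backed by a real page,
 * but is_error_hpa() tells the caller whether the translation succeeded. */
static int example_probe_gpa(struct kvm *kvm, gpa_t gpa)
{
	hpa_t hpa = gpa_to_hpa(kvm, gpa);

	if (is_error_hpa(hpa))
		return -EFAULT;	/* gpa unbacked; hpa points into bad_page */
	/* hpa is a usable host physical address for this gpa */
	return 0;
}
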
struct kvm_vcpu *vcpu, gva_t addr,
int write_fault, int user_fault, int fetch_fault)
{
- hpa_t hpa;
- struct kvm_memory_slot *slot;
struct page *page;
pt_element_t *table;
pt_element_t pte;
pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
walker->level - 1, table_gfn);
- slot = gfn_to_memslot(vcpu->kvm, table_gfn);
- hpa = safe_gpa_to_hpa(vcpu->kvm, pte & PT64_BASE_ADDR_MASK);
- page = pfn_to_page(hpa >> PAGE_SHIFT);
+ page = gfn_to_page(vcpu->kvm, (pte & PT64_BASE_ADDR_MASK)
+ >> PAGE_SHIFT);
table = kmap_atomic(page, KM_USER0);
pte = table[index];