From 8f74d8e16812d63639871b4e56409b08bdcb66fc Mon Sep 17 00:00:00 2001
From: Takuya Yoshikawa
Date: Thu, 28 Apr 2011 07:08:36 +0900
Subject: [PATCH] KVM: MMU: Fix 64-bit paging breakage on x86_32

Fix a regression introduced by commit e30d2a170506830d5eef5e9d7990c5aedf1b0a51
("KVM: MMU: Optimize guest page table walk").

On x86_32, get_user() does not support 64-bit values, so the KVM build
fails for the 64-bit paging case.

Fix this by using get_user() twice for that case.

Signed-off-by: Takuya Yoshikawa
Reported-by: Jan Kiszka
Signed-off-by: Avi Kivity
---
 arch/x86/kvm/paging_tmpl.h | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 652d56c081f7..52450a6b784f 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -115,6 +115,20 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
 	return access;
 }
 
+static int FNAME(read_gpte)(pt_element_t *pte, pt_element_t __user *ptep_user)
+{
+#if defined(CONFIG_X86_32) && (PTTYPE == 64)
+	u32 *p = (u32 *)pte;
+	u32 __user *p_user = (u32 __user *)ptep_user;
+
+	if (unlikely(get_user(*p, p_user)))
+		return -EFAULT;
+	return get_user(*(p + 1), p_user + 1);
+#else
+	return get_user(*pte, ptep_user);
+#endif
+}
+
 /*
  * Fetch a guest pte for a guest virtual address
  */
@@ -185,7 +199,7 @@ walk:
 	}
 
 	ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
-	if (unlikely(get_user(pte, ptep_user))) {
+	if (unlikely(FNAME(read_gpte)(&pte, ptep_user))) {
 		present = false;
 		break;
 	}
-- 
2.30.2