KVM: do not release the error pfn
Author:     Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
AuthorDate: Fri, 3 Aug 2012 07:42:10 +0000 (15:42 +0800)
Commit:     Avi Kivity <avi@redhat.com>
CommitDate: Mon, 6 Aug 2012 13:04:57 +0000 (16:04 +0300)
After commit a2766325cf9f9, the error pfn is replaced by an error
code, so it no longer needs to be released.
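
For context, an error pfn here is an out-of-range value that encodes an
error code rather than a real page frame number, so there is no struct
page behind it and nothing to put_page().  A minimal sketch of that
convention (the mask value, helper bodies and example_caller() are
assumptions for illustration, not the exact definitions from
include/linux/kvm_host.h):

	/* Assumed encoding: high bits mark an error code, not a frame number */
	typedef unsigned long long pfn_t;	/* stand-in for the kernel type */

	#define KVM_PFN_ERR_MASK	(0xfffULL << 52)
	#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
	#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)

	static inline int is_error_pfn(pfn_t pfn)
	{
		return !!(pfn & KVM_PFN_ERR_MASK);
	}

	/* The pattern this patch moves callers to: test for the error code
	 * and bail out; there is no page reference to drop on this path. */
	static void example_caller(pfn_t pfn)
	{
		if (is_error_pfn(pfn))
			return;		/* error code only, nothing to release */
		/* ... use the page, then kvm_release_pfn_clean(pfn) ... */
	}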

[ The patch has been compile-tested for powerpc ]

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/powerpc/kvm/e500_tlb.c
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu_audit.c
arch/x86/kvm/paging_tmpl.h
virt/kvm/iommu.c
virt/kvm/kvm_main.c

diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index c8f6c58267426309ff979a7ed2a0d473006adda1..09ce5ac128f84a5142afa13da620c6aaef6ea873 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -524,7 +524,6 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                if (is_error_pfn(pfn)) {
                        printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
                                        (long)gfn);
-                       kvm_release_pfn_clean(pfn);
                        return;
                }
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d3cdf69da513da0ed1eb25f1518eeb3678d351e4..9651c2cd000505ee69bd526b5b63d03cc4bb505b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2496,7 +2496,9 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                                rmap_recycle(vcpu, sptep, gfn);
                }
        }
-       kvm_release_pfn_clean(pfn);
+
+       if (!is_error_pfn(pfn))
+               kvm_release_pfn_clean(pfn);
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -2648,7 +2650,6 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 
 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
 {
-       kvm_release_pfn_clean(pfn);
        if (pfn == KVM_PFN_ERR_HWPOISON) {
                kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
                return 0;
@@ -3273,8 +3274,6 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
        if (!async)
                return false; /* *pfn has correct page already */
 
-       kvm_release_pfn_clean(*pfn);
-
        if (!prefault && can_do_async_pf(vcpu)) {
                trace_kvm_try_async_get_page(gva, gfn);
                if (kvm_find_async_pf_gfn(vcpu, gfn)) {
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index ca403f9bb0f2d69c7a34a99a7385094d987d2019..daff69e21150d054a109a889630f730702088b76 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -116,10 +116,8 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
        gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
        pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
 
-       if (is_error_pfn(pfn)) {
-               kvm_release_pfn_clean(pfn);
+       if (is_error_pfn(pfn))
                return;
-       }
 
        hpa =  pfn << PAGE_SHIFT;
        if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index bb7cf01cae76ea735f759ef3c3d2428c271a7151..bf8c42bf50fe6f6d3b45494413d6fd16bb2c8520 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -370,10 +370,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
        pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
        pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
        pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
-       if (mmu_invalid_pfn(pfn)) {
-               kvm_release_pfn_clean(pfn);
+       if (mmu_invalid_pfn(pfn))
                return;
-       }
 
        /*
         * we call mmu_set_spte() with host_writable = true because that
@@ -448,10 +446,8 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
                gfn = gpte_to_gfn(gpte);
                pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
                                      pte_access & ACC_WRITE_MASK);
-               if (mmu_invalid_pfn(pfn)) {
-                       kvm_release_pfn_clean(pfn);
+               if (mmu_invalid_pfn(pfn))
                        break;
-               }
 
                mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
                             NULL, PT_PAGE_TABLE_LEVEL, gfn,
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 6a67bea4019c000af722bf81369c0793b66b3386..037cb6730e68eef3171b0e9660d1139b423d96de 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -107,7 +107,6 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
                 */
                pfn = kvm_pin_pages(slot, gfn, page_size);
                if (is_error_pfn(pfn)) {
-                       kvm_release_pfn_clean(pfn);
                        gfn += 1;
                        continue;
                }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 93d3c6e063c88c7d515dff7cb529b5602bb4ddef..eafba99d1070920600de48d49ccf1c344f524303 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -102,9 +102,6 @@ static bool largepages_enabled = true;
 
 bool kvm_is_mmio_pfn(pfn_t pfn)
 {
-       if (is_error_pfn(pfn))
-               return false;
-
        if (pfn_valid(pfn)) {
                int reserved;
                struct page *tail = pfn_to_page(pfn);
@@ -1165,10 +1162,13 @@ EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
 
 static struct page *kvm_pfn_to_page(pfn_t pfn)
 {
-       WARN_ON(kvm_is_mmio_pfn(pfn));
+       if (is_error_pfn(pfn))
+               return KVM_ERR_PTR_BAD_PAGE;
 
-       if (is_error_pfn(pfn) || kvm_is_mmio_pfn(pfn))
+       if (kvm_is_mmio_pfn(pfn)) {
+               WARN_ON(1);
                return KVM_ERR_PTR_BAD_PAGE;
+       }
 
        return pfn_to_page(pfn);
 }
@@ -1193,7 +1193,9 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-       if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+       WARN_ON(is_error_pfn(pfn));
+
+       if (!kvm_is_mmio_pfn(pfn))
                put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);