xen: refactor xen_{alloc,release}_{pt,pd}()
author    Mark McLoughlin <markmc@redhat.com>
          Wed, 2 Apr 2008 14:36:36 +0000 (15:36 +0100)
committer Ingo Molnar <mingo@elte.hu>
          Fri, 4 Apr 2008 16:36:48 +0000 (18:36 +0200)
Signed-off-by: Mark McLoughlin <markmc@redhat.com>
Cc: xen-devel@lists.xensource.com
Cc: Mark McLoughlin <markmc@redhat.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
arch/x86/xen/mmu.h

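Read together, the enlighten.c hunks below turn xen_alloc_pt()/xen_alloc_pd() and xen_release_pt()/xen_release_pd() into thin wrappers that pass the abstract enum pt_level down to a shared, level-aware helper; only xen_alloc_ptpage() translates that level back into a raw MMUEXT pin command, and pin_pagetable_pfn() now takes the command itself rather than a "level". As a reading aid, here is a sketch of xen_alloc_ptpage() as it reads after the patch, reconstructed only from lines visible in the hunks; anything the diff elides is marked with a comment rather than guessed at:

/*
 * Sketch, not a verbatim copy: xen_alloc_ptpage() after this patch,
 * with lines the diff does not show reduced to "..." comments.
 */
static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
{
        struct page *page = pfn_to_page(pfn);   /* assumed, as in xen_release_ptpage() */

        /* ... preceding checks elided in the diff ... */

        if (!PageHighMem(page)) {
                make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
                if (level == PT_PTE)
                        pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
                else if (level == PT_PMD)
                        pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE, pfn);
        } else {
                /* make sure there are no stray mappings of this page
                   (highmem handling elided in the diff) */
        }
}

On the release side, xen_release_pt() and the new xen_release_pd() become matching wrappers around the renamed xen_release_ptpage(), which also lets pv_mmu_ops.release_pd stop reusing xen_release_pt.
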
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index de4e6f05840b11a4b89cd91da51aa411648f14de..16e2f8096a1ac68332d479ea25158cfe4def0e4c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -667,10 +667,10 @@ static void xen_release_pt_init(u32 pfn)
        make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
 }
 
-static void pin_pagetable_pfn(unsigned level, unsigned long pfn)
+static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
 {
        struct mmuext_op op;
-       op.cmd = level;
+       op.cmd = cmd;
        op.arg1.mfn = pfn_to_mfn(pfn);
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
                BUG();
@@ -687,7 +687,10 @@ static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
 
                if (!PageHighMem(page)) {
                        make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
-                       pin_pagetable_pfn(level, pfn);
+                       if (level == PT_PTE)
+                               pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+                       else if (level == PT_PMD)
+                               pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE, pfn);
                } else
                        /* make sure there are no stray mappings of
                           this page */
@@ -697,16 +700,16 @@ static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
 
 static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
 {
-       xen_alloc_ptpage(mm, pfn, MMUEXT_PIN_L1_TABLE);
+       xen_alloc_ptpage(mm, pfn, PT_PTE);
 }
 
 static void xen_alloc_pd(struct mm_struct *mm, u32 pfn)
 {
-       xen_alloc_ptpage(mm, pfn, MMUEXT_PIN_L2_TABLE);
+       xen_alloc_ptpage(mm, pfn, PT_PMD);
 }
 
 /* This should never happen until we're OK to use struct page */
-static void xen_release_pt(u32 pfn)
+static void xen_release_ptpage(u32 pfn, unsigned level)
 {
        struct page *page = pfn_to_page(pfn);
 
@@ -718,6 +721,16 @@ static void xen_release_pt(u32 pfn)
        }
 }
 
+static void xen_release_pt(u32 pfn)
+{
+       xen_release_ptpage(pfn, PT_PTE);
+}
+
+static void xen_release_pd(u32 pfn)
+{
+       xen_release_ptpage(pfn, PT_PMD);
+}
+
 #ifdef CONFIG_HIGHPTE
 static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
 {
@@ -838,7 +851,7 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
        pv_mmu_ops.alloc_pt = xen_alloc_pt;
        pv_mmu_ops.alloc_pd = xen_alloc_pd;
        pv_mmu_ops.release_pt = xen_release_pt;
-       pv_mmu_ops.release_pd = xen_release_pt;
+       pv_mmu_ops.release_pd = xen_release_pd;
        pv_mmu_ops.set_pte = xen_set_pte;
 
        setup_shared_info();
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 0144395448ae8775c2609ecfb0c0072197e9b35b..2a054ef2a3dab3ac6fc3cebd380a21b8b84dd54a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -310,13 +310,6 @@ pgd_t xen_make_pgd(unsigned long pgd)
 }
 #endif /* CONFIG_X86_PAE */
 
-enum pt_level {
-       PT_PGD,
-       PT_PUD,
-       PT_PMD,
-       PT_PTE
-};
-
 /*
   (Yet another) pagetable walker.  This one is intended for pinning a
   pagetable.  This means that it walks a pagetable and calls the
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index c9ff27f3ac3a451a6c57f102c1e4767295cbfa15..b5e189b1519dcc6bab65590620b9aeabb056282a 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -3,6 +3,13 @@
 #include <linux/linkage.h>
 #include <asm/page.h>
 
+enum pt_level {
+       PT_PGD,
+       PT_PUD,
+       PT_PMD,
+       PT_PTE
+};
+
 /*
  * Page-directory addresses above 4GB do not fit into architectural %cr3.
  * When accessing %cr3, or equivalent field in vcpu_guest_context, guests