return pte;
}
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *page;
#else
page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
- if (page)
+ if (page) {
clear_highpage(page);
- flush_dcache_page(page);
+ pgtable_page_ctor(page);
+ flush_dcache_page(page);
+ }
return page;
}
return pte;
}
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *ptepage;
#ifdef CONFIG_HIGHPTE
- gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+ gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT | __GFP_ZERO;
#else
- gfp_t flags = GFP_KERNEL | __GFP_REPEAT;
+ gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
#endif
ptepage = alloc_pages(flags, 0);
- if (ptepage)
- clear_highpage(ptepage);
+ if (!ptepage)
+ return NULL;
+ pgtable_page_ctor(ptepage);
return ptepage;
}
free_page((unsigned long)pte);
}
-void pte_free(struct mm_struct *mm, struct page *ptepage)
+void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
#ifdef CONFIG_SMP
hash_page_sync();
#endif
+ pgtable_page_dtor(ptepage);
__free_page(ptepage);
}
return pte;
}
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *ptepage;
#endif
ptepage = alloc_pages(flags, 0);
- if (ptepage)
+ if (ptepage) {
clear_highpage(ptepage);
+ pgtable_page_ctor(ptepage);
+ }
return ptepage;
}
free_page((unsigned long)pte);
}
-void pte_free(struct mm_struct *mm, struct page *ptepage)
+void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
#ifdef CONFIG_SMP
hash_page_sync();
#endif
+ pgtable_page_dtor(ptepage);
__free_page(ptepage);
}
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
page->index = (addr_t) table;
}
+ pgtable_page_ctor(page);
table = (unsigned long *) page_to_phys(page);
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
return table;
{
unsigned long *shadow = get_shadow_pte(table);
+ pgtable_page_dtor(virt_to_page(table));
if (shadow)
free_page((unsigned long) shadow);
free_page((unsigned long) table);
return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
}
-static struct page *
+static pgtable_t
srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
unsigned long pte;
+ struct page *page;
if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
return NULL;
- return pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
+ page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
+ pgtable_page_ctor(page);
+ return page;
}
static void srmmu_free_pte_fast(pte_t *pte)
srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
}
-static void srmmu_pte_free(struct page *pte)
+static void srmmu_pte_free(pgtable_t pte)
{
unsigned long p;
+ pgtable_page_dtor(pte);
p = (unsigned long)page_address(pte); /* Cached address (for test) */
if (p == 0)
BUG();
return pte;
}
-static struct page *sun4c_pte_alloc_one(struct mm_struct *mm, unsigned long address)
+static pgtable_t sun4c_pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = sun4c_pte_alloc_one_kernel(mm, address);
+ pte_t *pte;
+ struct page *page;
+
+ pte = sun4c_pte_alloc_one_kernel(mm, address);
if (pte == NULL)
return NULL;
- return virt_to_page(pte);
+ page = virt_to_page(pte);
+ pgtable_page_ctor(page);
+ return page;
}
static inline void sun4c_free_pte_fast(pte_t *pte)
pgtable_cache_size++;
}
-static void sun4c_pte_free(struct page *pte)
+static void sun4c_pte_free(pgtable_t pte)
{
+ pgtable_page_dtor(pte);
sun4c_free_pte_fast(page_address(pte));
}
return pte;
}
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *pte;
pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ if (pte)
+ pgtable_page_ctor(pte);
return pte;
}
return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *pte;
#else
pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
+ if (pte)
+ pgtable_page_ctor(pte);
return pte;
}
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
+ pgtable_page_dtor(pte);
paravirt_release_pt(page_to_pfn(pte));
tlb_remove_page(tlb, pte);
}
#endif /* STRICT_MM_TYPECHECKS */
+typedef struct page *pgtable_t;
+
#ifdef USE_48_BIT_KSEG
#define PAGE_OFFSET 0xffff800000000000UL
#else
*/
static inline void
-pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
+pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
{
pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
free_page((unsigned long)pte);
}
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pgtable_t
+pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = pte_alloc_one_kernel(mm, addr);
- if (pte)
- return virt_to_page(pte);
- return NULL;
+ pte_t *pte = pte_alloc_one_kernel(mm, address);
+ struct page *page;
+
+ if (!pte)
+ return NULL;
+ page = virt_to_page(pte);
+ pgtable_page_ctor(page);
+ return page;
}
static inline void
-pte_free(struct mm_struct *mm, struct page *page)
+pte_free(struct mm_struct *mm, pgtable_t page)
{
+ pgtable_page_dtor(page);
__free_page(page);
}
#endif /* STRICT_MM_TYPECHECKS */
+typedef struct page *pgtable_t;
+
#endif /* CONFIG_MMU */
#include <asm/memory.h>
return pte;
}
-static inline struct page *
+static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
struct page *pte;
if (pte) {
void *page = page_address(pte);
clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
+ pgtable_page_ctor(pte);
}
return pte;
}
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
+ pgtable_page_dtor(pte);
__free_page(pte);
}
}
static inline void
-pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
+pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
__pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
#endif /* CONFIG_MMU */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
static __inline__ void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
- struct page *pte)
+ pgtable_t pte)
{
set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte)));
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Allocate and free page tables
struct page *pte;
pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
-
+ if (!pte)
+ return NULL;
+ pgtable_page_ctor(pte);
return pte;
}
free_page((unsigned long)pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
+ pgtable_page_dtor(pte);
__free_page(pte);
}
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb), pte); \
+} while (0)
#define check_pgt_cache() do { } while(0)
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
#endif
#define pte_val(x) ((x).pte)
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte)
#define pmd_populate(mm, pmd, pte) pmd_set(pmd, page_address(pte))
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Allocate and free page tables.
return pte;
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *pte;
pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+	if (pte)
+		pgtable_page_ctor(pte);
return pte;
}
free_page((unsigned long)pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
+ pgtable_page_dtor(pte);
__free_page(pte);
}
-
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb), pte); \
+} while (0)
#define check_pgt_cache() do { } while (0)
typedef struct { pmd_t pue[1]; } pud_t;
typedef struct { pud_t pge[1]; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).ste[0])
do { \
__set_pmd((PMD), page_to_pfn(PAGE) << PAGE_SHIFT | _PAGE_TABLE); \
} while(0)
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Allocate and free page tables.
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
+extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
+ pgtable_page_dtor(pte);
__free_page(pte);
}
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb),(pte)); \
+} while (0)
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
#endif
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
+ typedef struct page *pgtable_t;
# define pte_val(x) ((x).pte)
# define pmd_val(x) ((x).pmd)
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;
+ typedef struct page *pgtable_t;
# endif
# define pte_val(x) (x)
#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
static inline void
-pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
+pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
{
pmd_val(*pmd_entry) = page_to_phys(pte);
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
pmd_val(*pmd_entry) = __pa(pte);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long addr)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- void *pg = quicklist_alloc(0, GFP_KERNEL, NULL);
- return pg ? virt_to_page(pg) : NULL;
+ struct page *page;
+ void *pg;
+
+ pg = quicklist_alloc(0, GFP_KERNEL, NULL);
+ if (!pg)
+ return NULL;
+ page = virt_to_page(pg);
+ pgtable_page_ctor(page);
+ return page;
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
+ pgtable_page_dtor(pte);
quicklist_free_page(0, NULL, pte);
}
#define PTE_MASK PAGE_MASK
typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
#define pmd_val(x) ((x).pmd)
#define pgd_val(x) ((x).pgd)
set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
static __inline__ void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
- struct page *pte)
+ pgtable_t pte)
{
set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte)));
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Allocate and free page tables.
return pte;
}
-static __inline__ struct page *pte_alloc_one(struct mm_struct *mm,
+static __inline__ pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
-
+	if (pte)
+		pgtable_page_ctor(pte);
return pte;
}
free_page((unsigned long)pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
+ pgtable_page_dtor(pte);
__free_page(pte);
}
extern pmd_t *get_pointer_table(void);
extern int free_pointer_table(pmd_t *);
-
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
free_page((unsigned long) pte);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
pte_t *pte;
nocache_page(pte);
}
kunmap(pte);
-
+ pgtable_page_ctor(page);
return page;
}
-static inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, pgtable_t page)
{
+ pgtable_page_dtor(page);
cache_page(kmap(page));
kunmap(page);
__free_page(page);
}
-static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page)
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page)
{
+ pgtable_page_dtor(page);
cache_page(kmap(page));
kunmap(page);
__free_page(page);
pmd_set(pmd, pte);
}
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
{
pmd_set(pmd, page_address(page));
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
typedef struct { unsigned long pmd[16]; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((&x)->pmd[0])
free_page((unsigned long) pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, pgtable_t page)
{
+ pgtable_page_dtor(page);
__free_page(page);
}
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb), pte); \
+} while (0)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
return (pte_t *) (page);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
{
struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
return NULL;
clear_highpage(page);
+ pgtable_page_ctor(page);
return page;
}
pmd_val(*pmd) = __pa((unsigned long)pte);
}
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
{
pmd_val(*pmd) = __pa((unsigned long)page_address(page));
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
#define pte_val(x) ((x).pte)
#define __pte(x) ((pte_t) { (x) } )
#endif
+typedef struct page *pgtable_t;
/*
* For 3-level pagetables we defines these ourselves, for 2-level the
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
- struct page *pte)
+ pgtable_t pte)
{
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Initialize a new pmd table with invalid pointers.
struct page *pte;
pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
- if (pte)
+ if (pte) {
clear_highpage(pte);
-
+ pgtable_page_ctor(pte);
+ }
return pte;
}
free_pages((unsigned long)pte, PTE_ORDER);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
+ pgtable_page_dtor(pte);
__free_pages(pte, PTE_ORDER);
}
-#define __pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte))
+#define __pte_free_tlb(tlb,pte) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb), pte); \
+} while (0)
#ifdef CONFIG_32BIT
#endif /* STRICT_MM_TYPECHECKS */
+typedef struct page *pgtable_t;
typedef struct __physmem_range {
unsigned long start_pfn;
#define pmd_populate(mm, pmd, pte_page) \
pmd_populate_kernel(mm, pmd, page_address(pte_page))
+#define pmd_pgtable(pmd) pmd_page(pmd)
-static inline struct page *
+static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ if (page)
+ pgtable_page_ctor(page);
return page;
}
free_page((unsigned long)pte);
}
-#define pte_free(mm, page) pte_free_kernel(page_address(page))
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+	pgtable_page_dtor(pte);
+	pte_free_kernel(mm, page_address(pte));
+}
#define check_pgt_cache() do { } while (0)
struct vm_area_struct;
+typedef struct page *pgtable_t;
+
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
(pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT)
#define pmd_populate(mm, pmd, pte) \
(pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT)
+#define pmd_pgtable(pmd) pmd_page(pmd)
#else
#define pmd_populate_kernel(mm, pmd, pte) \
(pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT)
#define pmd_populate(mm, pmd, pte) \
(pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT)
+#define pmd_pgtable(pmd) pmd_page(pmd)
#endif
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
-extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
-extern void pte_free(struct mm_struct *mm, struct page *pte);
+extern void pte_free(struct mm_struct *mm, pgtable_t pte);
#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
#define pmd_populate(mm, pmd, pte_page) \
pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
+#define pmd_pgtable(pmd) pmd_page(pmd)
#else /* CONFIG_PPC_64K_PAGES */
#define pmd_populate(mm, pmd, pte_page) \
pmd_populate_kernel(mm, pmd, page_address(pte_page))
+#define pmd_pgtable(pmd) pmd_page(pmd)
#endif /* CONFIG_PPC_64K_PAGES */
return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
{
- pte_t *pte = pte_alloc_one_kernel(mm, address);
- return pte ? virt_to_page(pte) : NULL;
+ struct page *page;
+ pte_t *pte;
+
+ pte = pte_alloc_one_kernel(mm, address);
+ if (!pte)
+ return NULL;
+ page = virt_to_page(pte);
+ pgtable_page_ctor(page);
+ return page;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
free_page((unsigned long)pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
+ pgtable_page_dtor(ptepage);
__free_page(ptepage);
}
extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
-#define __pte_free_tlb(tlb, ptepage) \
+#define __pte_free_tlb(tlb,ptepage) \
+do { \
+ pgtable_page_dtor(ptepage); \
pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
- PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1))
+ PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
+} while (0)
#define __pmd_free_tlb(tlb, pmd) \
pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
(pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT)
#define pmd_populate(mm, pmd, pte) \
(pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT)
+#define pmd_pgtable(pmd) pmd_page(pmd)
#else
#define pmd_populate_kernel(mm, pmd, pte) \
(pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT)
#define pmd_populate(mm, pmd, pte) \
(pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT)
+#define pmd_pgtable(pmd) pmd_page(pmd)
#endif
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
-extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
-extern void pte_free(struct mm_struct *mm, struct page *pte);
+extern void pte_free(struct mm_struct *mm, pgtable_t pte);
#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
#endif /* __s390x__ */
+typedef struct page *pgtable_t;
+
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
}
static inline void
-pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
+pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
{
pte_t *pte = (pte_t *)page_to_phys(page);
pmd_t *shadow_pmd = get_shadow_table(pmd);
if (shadow_pmd && shadow_pte)
pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* page table entry allocation/free routines.
* pte_free_tlb frees a pte table and clears the CRSTE for the
* page table from the tlb.
*/
-static inline void pte_free_tlb(struct mmu_gather *tlb, struct page *page)
+static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t page)
{
if (!tlb->fullmm) {
tlb->array[tlb->nr_ptes++] = page;
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
+typedef struct page *pgtable_t;
+
#endif /* !__ASSEMBLY__ */
/*
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
- struct page *pte)
+ pgtable_t pte)
{
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void pgd_ctor(void *x)
{
return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
{
- void *pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
- return pg ? virt_to_page(pg) : NULL;
+ struct page *page;
+ void *pg;
+
+ pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+ if (!pg)
+ return NULL;
+ page = virt_to_page(pg);
+ pgtable_page_ctor(page);
+ return page;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
quicklist_free(QUICK_PT, NULL, pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
+ pgtable_page_dtor(pte);
quicklist_free_page(QUICK_PT, NULL, pte);
}
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb), (pte)); \
+} while (0)
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
#endif
+typedef struct page *pgtable_t;
+
extern unsigned long sparc_unmapped_base;
BTFIXUPDEF_SETHI(sparc_unmapped_base)
BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
#define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE)
+#define pmd_pgtable(pmd) pmd_page(pmd)
BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)
#define pmd_populate_kernel(MM, PMD, PTE) BTFIXUP_CALL(pmd_set)(PMD, PTE)
-BTFIXUPDEF_CALL(struct page *, pte_alloc_one, struct mm_struct *, unsigned long)
+BTFIXUPDEF_CALL(pgtable_t, pte_alloc_one, struct mm_struct *, unsigned long)
#define pte_alloc_one(mm, address) BTFIXUP_CALL(pte_alloc_one)(mm, address)
BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_kernel, struct mm_struct *, unsigned long)
#define pte_alloc_one_kernel(mm, addr) BTFIXUP_CALL(pte_alloc_one_kernel)(mm, addr)
BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
#define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte)
-BTFIXUPDEF_CALL(void, pte_free, struct page *)
+BTFIXUPDEF_CALL(void, pte_free, pgtable_t)
#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte)
#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
#endif /* (STRICT_MM_TYPECHECKS) */
+typedef struct page *pgtable_t;
+
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
(_AC(0x0000000070000000,UL)) : \
(_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
{
- void *pg = quicklist_alloc(0, GFP_KERNEL, NULL);
- return pg ? virt_to_page(pg) : NULL;
+ struct page *page;
+ void *pg;
+
+ pg = quicklist_alloc(0, GFP_KERNEL, NULL);
+ if (!pg)
+ return NULL;
+ page = virt_to_page(pg);
+ pgtable_page_ctor(page);
+ return page;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
quicklist_free(0, NULL, pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
+ pgtable_page_dtor(ptepage);
quicklist_free_page(0, NULL, ptepage);
}
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
#define pmd_populate(MM,PMD,PTE_PAGE) \
pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
+#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void check_pgt_cache(void)
{
typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
+
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
set_pmd(pmd, __pmd(_PAGE_TABLE + \
((unsigned long long)page_to_pfn(pte) << \
(unsigned long long) PAGE_SHIFT)))
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Allocate and free page tables.
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
+extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long) pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
+ pgtable_page_dtor(pte);
__free_page(pte);
}
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb),(pte)); \
+} while (0)
#ifdef CONFIG_3_LEVEL_PGTABLES
typedef union { pteval_t pte, pte_low; } pte_t;
typedef pte_t boot_pte_t;
+typedef struct page *pgtable_t;
+
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_X86_PAE */
typedef unsigned long pgprotval_t;
typedef unsigned long phys_addr_t;
+typedef struct page *pgtable_t;
+
typedef struct { pteval_t pte; } pte_t;
#define vmemmap ((struct page *)VMEMMAP_START)
paravirt_alloc_pt(mm, pfn);
set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Allocate and free page tables.
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
+extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
+ pgtable_page_dtor(pte);
__free_page(pte);
}
#define pgd_populate(mm, pgd, pud) \
set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ struct page *page;
+ void *p;
+
+ p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
if (!p)
return NULL;
- return virt_to_page(p);
+ page = virt_to_page(p);
+ pgtable_page_ctor(page);
+ return page;
}
/* Should really implement gc for free page table pages. This could be
free_page((unsigned long)pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
+ pgtable_page_dtor(pte);
__free_page(pte);
}
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) \
+do { \
+ pgtable_page_dtor((pte)); \
+ tlb_remove_page((tlb), (pte)); \
+} while (0)
#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
typedef struct { unsigned long pte; } pte_t; /* page table entry */
typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
(pmd_val(*(pmdp)) = ((unsigned long)ptep))
#define pmd_populate(mm, pmdp, page) \
(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
+#define pmd_pgtable(pmd) pmd_page(pmd)
static inline pgd_t*
pgd_alloc(struct mm_struct *mm)
return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long addr)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+					unsigned long addr)
{
- return virt_to_page(pte_alloc_one_kernel(mm, addr));
+	pte_t *pte;
+	struct page *page;
+
+	pte = pte_alloc_one_kernel(mm, addr);
+	if (!pte)
+		return NULL;
+	page = virt_to_page(pte);
+	pgtable_page_ctor(page);
+	return page;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
kmem_cache_free(pgtable_cache, pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
- kmem_cache_free(pgtable_cache, page_address(page));
+ pgtable_page_dtor(pte);
+ kmem_cache_free(pgtable_cache, page_address(pte));
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
#endif /* __KERNEL__ */
#endif /* _XTENSA_PGALLOC_H */
#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+static inline void pgtable_page_ctor(struct page *page)
+{
+ pte_lock_init(page);
+ inc_zone_page_state(page, NR_PAGETABLE);
+}
+
+static inline void pgtable_page_dtor(struct page *page)
+{
+ pte_lock_deinit(page);
+ dec_zone_page_state(page, NR_PAGETABLE);
+}
+
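These two helpers define the contract that every architecture hunk above implements: pte_alloc_one() must call pgtable_page_ctor() on each successfully allocated pte page, and pte_free() must call pgtable_page_dtor() before the page goes back. A minimal sketch of a conforming implementation for a hypothetical architecture (names illustrative, not part of this patch):

	pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
	{
		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

		if (!page)
			return NULL;
		pgtable_page_ctor(page);	/* init split ptlock, bump NR_PAGETABLE */
		return page;
	}

	void pte_free(struct mm_struct *mm, pgtable_t page)
	{
		pgtable_page_dtor(page);	/* tear down ptlock state, drop NR_PAGETABLE */
		__free_page(page);
	}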
#define pte_offset_map_lock(mm, pmd, address, ptlp) \
({ \
spinlock_t *__ptl = pte_lockptr(mm, pmd); \
#define FOLL_GET 0x04 /* do get_page on page */
#define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */
-typedef int (*pte_fn_t)(pte_t *pte, struct page *pmd_page, unsigned long addr,
+typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
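With pte_fn_t taking a pgtable_t token, apply_to_page_range() callbacks receive whatever pmd_pgtable() yields rather than a bare struct page. A hypothetical callback under the new signature (count_ptes and its use of the data cookie are illustrative only):

	static int count_ptes(pte_t *pte, pgtable_t token, unsigned long addr,
			      void *data)
	{
		unsigned long *count = data;

		/* tally entries that are actually populated */
		if (!pte_none(*pte))
			(*count)++;
		return 0;
	}

invoked as apply_to_page_range(mm, start, size, count_ptes, &count).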
*/
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
{
- struct page *page = pmd_page(*pmd);
+ pgtable_t token = pmd_pgtable(*pmd);
pmd_clear(pmd);
- pte_lock_deinit(page);
- pte_free_tlb(tlb, page);
- dec_zone_page_state(page, NR_PAGETABLE);
+ pte_free_tlb(tlb, token);
tlb->mm->nr_ptes--;
}
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
- struct page *new = pte_alloc_one(mm, address);
+ pgtable_t new = pte_alloc_one(mm, address);
if (!new)
return -ENOMEM;
- pte_lock_init(new);
spin_lock(&mm->page_table_lock);
- if (pmd_present(*pmd)) { /* Another has populated it */
- pte_lock_deinit(new);
- pte_free(mm, new);
- } else {
+ if (!pmd_present(*pmd)) { /* Has another populated it ? */
mm->nr_ptes++;
- inc_zone_page_state(new, NR_PAGETABLE);
pmd_populate(mm, pmd, new);
+ new = NULL;
}
spin_unlock(&mm->page_table_lock);
+ if (new)
+ pte_free(mm, new);
return 0;
}
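The rewritten __pte_alloc() follows a publish-or-discard idiom: allocate without the lock held, install under page_table_lock only if the pmd is still empty, and free the losing allocation after unlocking, so that pte_free() (which now runs the dtor and arch hooks such as hash_page_sync()) is never called with page_table_lock held. A generic sketch of the same idiom, with hypothetical names:

	struct node *install_child(struct parent *p)
	{
		struct node *new = alloc_node();	/* hypothetical allocator */

		if (!new)
			return NULL;
		spin_lock(&p->lock);
		if (!p->child) {
			p->child = new;		/* won the race: publish */
			new = NULL;
		}
		spin_unlock(&p->lock);
		if (new)
			free_node(new);		/* lost the race: discard outside the lock */
		return p->child;
	}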
return -ENOMEM;
spin_lock(&init_mm.page_table_lock);
- if (pmd_present(*pmd)) /* Another has populated it */
- pte_free_kernel(&init_mm, new);
- else
+ if (!pmd_present(*pmd)) { /* Has another populated it ? */
pmd_populate_kernel(&init_mm, pmd, new);
+ new = NULL;
+ }
spin_unlock(&init_mm.page_table_lock);
+ if (new)
+ pte_free_kernel(&init_mm, new);
return 0;
}
{
pte_t *pte;
int err;
- struct page *pmd_page;
+ pgtable_t token;
spinlock_t *uninitialized_var(ptl);
pte = (mm == &init_mm) ?
BUG_ON(pmd_huge(*pmd));
- pmd_page = pmd_page(*pmd);
+ token = pmd_pgtable(*pmd);
do {
- err = fn(pte, pmd_page, addr, data);
+ err = fn(pte, token, addr, data);
if (err)
break;
} while (pte++, addr += PAGE_SIZE, addr != end);
}
-static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
+static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
/* apply_to_page_range() does all the hard work. */
return 0;