{
}
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
#endif /* _ASM_IA64_HUGETLB_H */
{
}
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
#endif /* __ASM_HUGETLB_H */
{
}
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
}
#define hugetlb_prefault_arch_hook(mm) do { } while (0)
+#define arch_clear_hugepage_flags(page) do { } while (0)
int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page);
#ifndef _ASM_SH_HUGETLB_H
#define _ASM_SH_HUGETLB_H
+#include <asm/cacheflush.h>
#include <asm/page.h>
{
}
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
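+	/*
+	 * Pages recycled through the hugepage pool bypass the page
+	 * allocator's flag clearing, so drop the dcache-clean hint here.
+	 */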
+	clear_bit(PG_dcache_clean, &page->flags);
+}
+
#endif /* _ASM_SH_HUGETLB_H */
{
}
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
#endif /* _ASM_SPARC64_HUGETLB_H */
{
}
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
#ifdef CONFIG_HUGETLB_SUPER_PAGES
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
}
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
#endif /* _ASM_X86_HUGETLB_H */
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
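+		/* let the arch clear stale per-page state (e.g. dcache-clean) */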
+		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);