From: Christophe Leroy
Date: Thu, 29 Nov 2018 14:06:59 +0000 (+0000)
Subject: powerpc/mm: add helpers to get/set mm.context->pte_frag
X-Git-Url: http://git.lede-project.org./?a=commitdiff_plain;h=a74791dd98332435550bdc57761969ba72b74769;p=openwrt%2Fstaging%2Fblogic.git

powerpc/mm: add helpers to get/set mm.context->pte_frag

In order to handle the pte_fragment functions with a single fragment,
without adding pte_frag to every mm_context_t, this patch creates two
helpers which do nothing on platforms using a single fragment.

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
---

diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 9679b7519a35..314a2890a972 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -110,6 +110,31 @@ void mark_initmem_nx(void);
 static inline void mark_initmem_nx(void) { }
 #endif
 
+/*
+ * When used, PTE_FRAG_NR is defined in subarch pgtable.h
+ * so we are sure it is included when arriving here.
+ */
+#ifdef PTE_FRAG_NR
+static inline void *pte_frag_get(mm_context_t *ctx)
+{
+	return ctx->pte_frag;
+}
+
+static inline void pte_frag_set(mm_context_t *ctx, void *p)
+{
+	ctx->pte_frag = p;
+}
+#else
+static inline void *pte_frag_get(mm_context_t *ctx)
+{
+	return NULL;
+}
+
+static inline void pte_frag_set(mm_context_t *ctx, void *p)
+{
+}
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
index 7544d0d7177d..af23a587f019 100644
--- a/arch/powerpc/mm/pgtable-frag.c
+++ b/arch/powerpc/mm/pgtable-frag.c
@@ -38,7 +38,7 @@ static pte_t *get_pte_from_cache(struct mm_struct *mm)
 		return NULL;
 
 	spin_lock(&mm->page_table_lock);
-	ret = mm->context.pte_frag;
+	ret = pte_frag_get(&mm->context);
 	if (ret) {
 		pte_frag = ret + PTE_FRAG_SIZE;
 		/*
@@ -46,7 +46,7 @@ static pte_t *get_pte_from_cache(struct mm_struct *mm)
 		 */
 		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
 			pte_frag = NULL;
-		mm->context.pte_frag = pte_frag;
+		pte_frag_set(&mm->context, pte_frag);
 	}
 	spin_unlock(&mm->page_table_lock);
 	return (pte_t *)ret;
@@ -86,9 +86,9 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 	 * the allocated page with single fragement
 	 * count.
 	 */
-	if (likely(!mm->context.pte_frag)) {
+	if (likely(!pte_frag_get(&mm->context))) {
 		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
-		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
+		pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
 	}
 	spin_unlock(&mm->page_table_lock);
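
For illustration only (not part of the patch): a minimal standalone sketch of the pattern the helpers implement. When PTE_FRAG_NR is defined, the accessors read and write ctx->pte_frag; otherwise they compile down to no-ops, so callers such as pgtable-frag.c never need an #ifdef. The mm_context_t stand-in, its dummy field, the PTE_FRAG_NR value and main() below are hypothetical, just enough to build and run outside the kernel.

/*
 * Standalone sketch of the pte_frag_get()/pte_frag_set() pattern.
 * Comment out the #define to model a single-fragment platform,
 * where both helpers become no-ops.
 */
#include <stdio.h>
#include <stddef.h>

#define PTE_FRAG_NR 16	/* hypothetical value, for illustration only */

typedef struct {
#ifdef PTE_FRAG_NR
	void *pte_frag;	/* cached partially-used PTE page, as in mm_context_t */
#endif
	int dummy;	/* stand-in for the rest of the context */
} mm_context_t;

#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;	/* no cached fragment on single-fragment platforms */
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	/* nothing to record */
}
#endif

int main(void)
{
	mm_context_t ctx = { 0 };
	char frag[64];

	/* Callers use the helpers unconditionally; the #ifdef lives in one place. */
	pte_frag_set(&ctx, frag);
	printf("cached fragment: %p\n", pte_frag_get(&ctx));
	return 0;
}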