#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
+#include <asm/tlbflush.h>
/*
 * VMALLOC range.
        return __ptep_test_and_clear_young(ptep);
}
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
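+/*
+ * Clear the young (accessed) bit and, if it was set, invalidate the
+ * now-stale TLB entry for the address. The invalidation is issued but
+ * not waited on; see the comment in the body below.
+ */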
+static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+                                         unsigned long address, pte_t *ptep)
+{
+        int young = ptep_test_and_clear_young(vma, address, ptep);
+
+        if (young) {
+                /*
+                 * We can elide the trailing DSB here since the worst that can
+                 * happen is that a CPU continues to use the young entry in its
+                 * TLB and we mistakenly reclaim the associated page. The
+                 * window for such an event is bounded by the next
+                 * context-switch, which provides a DSB to complete the TLB
+                 * invalidation.
+                 */
+                flush_tlb_page_nosync(vma, address);
+        }
+
+        return young;
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
#ifndef __ASSEMBLY__

+#include <linux/mm_types.h>
#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>
        dsb(ish);
}

-static inline void flush_tlb_page(struct vm_area_struct *vma,
-                                  unsigned long uaddr)
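+/*
+ * Invalidate the last-level TLB entry for @uaddr across the inner
+ * shareable domain, without the trailing DSB: the invalidation is
+ * issued but not guaranteed complete on return. Callers that need
+ * completion must use flush_tlb_page() or issue a dsb(ish) themselves.
+ */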
+static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
+                                         unsigned long uaddr)
{
        unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));

        dsb(ishst);
        __tlbi(vale1is, addr);
        __tlbi_user(vale1is, addr);
+}
+
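+/*
+ * As flush_tlb_page_nosync(), but wait (dsb ish) for the invalidation
+ * to complete before returning, preserving the old flush_tlb_page()
+ * behaviour.
+ */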
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+                                  unsigned long uaddr)
+{
+        flush_tlb_page_nosync(vma, uaddr);
        dsb(ish);
}
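
For context, defining __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH tells the generic MM code to use the arch override above instead of its own fallback. A minimal sketch of that fallback (roughly as it appears in mm/pgtable-generic.c of this era, reproduced from memory rather than from this patch):

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        int young;

        young = ptep_test_and_clear_young(vma, address, ptep);
        if (young)
                flush_tlb_page(vma, address);   /* on arm64, waits via dsb(ish) */

        return young;
}
#endif

The arch version is identical in shape but swaps flush_tlb_page() for flush_tlb_page_nosync(), eliding the wait for TLB invalidation on the page-aging path.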