#include <linux/sched.h>
#include <linux/vmstat.h>
-struct mmu_gather;
-
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}
-/*
- * For KSM to handle OOM without deadlock when it's breaking COW in a
- * likely victim of the OOM killer, exit_mmap() has to serialize with
- * ksm_exit() after freeing mm's pages but before freeing its page tables.
- * That leaves a window in which KSM might refault pages which have just
- * been finally unmapped: guard against that with ksm_test_exit(), and
- * use it after getting mmap_sem in ksm.c, to check if mm is exiting.
- */
-static inline bool ksm_test_exit(struct mm_struct *mm)
-{
-	return atomic_read(&mm->mm_users) == 0;
-}
-
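/*
 * Illustrative sketch (not taken from the patch itself) of the ordering the
 * removed comment above describes: within exit_mmap(), schematically,
 *
 *	unmap and free mm's pages;
 *	ksm_exit(mm, ...);		serialize with KSM here
 *	free mm's page tables;
 *
 * The window being guarded lies between those last two steps, and
 * ksm_test_exit() is how KSM backs out if it refaults inside that window.
 */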
static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}
-static inline bool ksm_test_exit(struct mm_struct *mm)
-{
-	return 0;
-}
-
static inline void ksm_exit(struct mm_struct *mm)
{
}
#include <linux/mmu_notifier.h>
#include <linux/ksm.h>
-#include <asm/tlb.h>
#include <asm/tlbflush.h>
static inline int in_stable_tree(struct rmap_item *rmap_item)
{
	return rmap_item->address & STABLE_FLAG;
}
+/*
+ * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
+ * page tables after it has passed through ksm_exit() - which, if necessary,
+ * takes mmap_sem briefly to serialize against them. ksm_exit() does not set
+ * a special flag: they can just back out as soon as mm_users goes to zero.
+ * ksm_test_exit() is used throughout to make this test for exit: in some
+ * places for correctness, in some places just to avoid unnecessary work.
+ */
+static inline bool ksm_test_exit(struct mm_struct *mm)
+{
+	return atomic_read(&mm->mm_users) == 0;
+}
+
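/*
 * Illustrative sketch (not taken from the patch itself) of the back-out
 * pattern the comment above describes.  ksm_test_exit(), down_read()/
 * up_read() and mm->mmap_sem are the real interfaces; the function name
 * and return convention are invented for the example.
 */
static int ksm_walk_mm_example(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm)) {
		/*
		 * mm_users has reached zero: the process is exiting and its
		 * page tables may already be going away, so do not touch
		 * them - drop mmap_sem and back out of this mm.
		 */
		up_read(&mm->mmap_sem);
		return 0;
	}
	/* safe to examine this mm's vmas and page tables here */
	up_read(&mm->mmap_sem);
	return 1;
}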
/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *	if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
 *		put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 */
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
-#include <linux/ksm.h>
#include <linux/mmu_notifier.h>
#include <linux/perf_event.h>