* any memory is quite low.
*/
if (!is_sysrq_oom(oc) && atomic_read(&task->signal->oom_victims)) {
- struct task_struct *p = find_lock_task_mm(task);
- bool reaped = false;
-
- if (p) {
- reaped = test_bit(MMF_OOM_REAPED, &p->mm->flags);
- task_unlock(p);
- }
- if (reaped)
+ if (test_bit(MMF_OOM_REAPED, &task->signal->oom_mm->flags))
goto next;
goto abort;
}
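
The rewritten check can dereference task->signal->oom_mm directly because, once published, oom_mm is never cleared and carries its own mm_count reference, so it stays valid even after the victim has passed exit_mm() and cleared task->mm. That is what makes the find_lock_task_mm()/task_unlock() dance unnecessary here. A minimal userspace sketch of the reader side, assuming C11 atomics and illustrative sketch_* stand-ins (none of this is kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct sketch_mm {
		atomic_ulong flags;		/* stands in for mm->flags */
		atomic_int mm_count;		/* stands in for mm->mm_count */
	};

	struct sketch_signal {
		/* Set once by the first OOM kill, never cleared. */
		_Atomic(struct sketch_mm *) oom_mm;
	};

	#define SKETCH_MMF_OOM_REAPED	0x1UL

	/* Reader: no per-task lock needed, the pointer is stable once set. */
	static bool sketch_task_is_reaped(struct sketch_signal *sig)
	{
		struct sketch_mm *mm = atomic_load(&sig->oom_mm);

		return mm && (atomic_load(&mm->flags) & SKETCH_MMF_OOM_REAPED);
	}

The NULL check in the sketch is only defensive; in the hunk above, oom_victims being non-zero already implies oom_mm has been published.
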
K(get_mm_counter(mm, MM_SHMEMPAGES)));
up_read(&mm->mmap_sem);
- /*
- * This task can be safely ignored because we cannot do much more
- * to release its memory.
- */
- set_bit(MMF_OOM_REAPED, &mm->flags);
/*
* Drop our reference but make sure the mmput slow path is called from a
* different context because we shouldn't risk we get stuck there and
static void oom_reap_task(struct task_struct *tsk)
{
int attempts = 0;
- struct mm_struct *mm = NULL;
- struct task_struct *p = find_lock_task_mm(tsk);
-
- /*
- * Make sure we find the associated mm_struct even when the particular
- * thread has already terminated and cleared its mm.
- * We might have race with exit path so consider our work done if there
- * is no mm.
- */
- if (!p)
- goto done;
- mm = p->mm;
- atomic_inc(&mm->mm_count);
- task_unlock(p);
+ struct mm_struct *mm = tsk->signal->oom_mm;
/* Retry the down_read_trylock(mmap_sem) a few times */
while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
schedule_timeout_idle(HZ/10);

if (attempts <= MAX_OOM_REAP_RETRIES)
goto done;
- /* Ignore this mm because somebody can't call up_write(mmap_sem). */
- set_bit(MMF_OOM_REAPED, &mm->flags);
pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
task_pid_nr(tsk), tsk->comm);

done:
tsk->oom_reaper_list = NULL;
exit_oom_victim(tsk);
+ /*
+ * Hide this mm from the OOM killer because it has either been reaped
+ * or somebody can't call up_write(mmap_sem).
+ */
+ set_bit(MMF_OOM_REAPED, &mm->flags);
+
/* Drop a reference taken by wake_oom_reaper */
put_task_struct(tsk);
- /* Drop a reference taken above. */
- if (mm)
- mmdrop(mm);
}
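
oom_reap_task() now boils down to a bounded trylock-and-back-off: if the victim's mmap_sem is held for write, __oom_reap_task_mm() fails its down_read_trylock(), and the reaper retries up to MAX_OOM_REAP_RETRIES times with a short sleep in between before giving up; either way the mm ends up marked MMF_OOM_REAPED. A rough userspace analogue using POSIX rwlocks, where MAX_REAP_RETRIES and the 100 ms sleep mirror MAX_OOM_REAP_RETRIES and schedule_timeout_idle(HZ/10) and everything else is illustrative:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>

	#define MAX_REAP_RETRIES	10

	static bool try_reap(pthread_rwlock_t *lock)
	{
		if (pthread_rwlock_tryrdlock(lock))
			return false;	/* a writer holds it, try again later */
		/* ... tear down whatever is safe under the read lock ... */
		pthread_rwlock_unlock(lock);
		return true;
	}

	static void reap_with_retries(pthread_rwlock_t *lock)
	{
		struct timespec tick = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };
		int attempts = 0;

		while (attempts++ < MAX_REAP_RETRIES && !try_reap(lock))
			nanosleep(&tick, NULL);

		if (attempts > MAX_REAP_RETRIES)
			fprintf(stderr, "unable to reap: lock still held\n");
		/* Mark the target as handled in both cases, as the patch does. */
	}

Note the same off-by-one convention as the kernel loop: attempts ends up at MAX_REAP_RETRIES + 1 only when every try failed, which is exactly what the `attempts <= MAX_OOM_REAP_RETRIES` success test above relies on.
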
*
* Has to be called with oom_lock held and never after
* oom has been disabled already.
+ *
+ * tsk->mm has to be non-NULL and the caller has to guarantee it is stable
+ * (either by holding task_lock or by operating on current).
*/
static void mark_oom_victim(struct task_struct *tsk)
{
+ struct mm_struct *mm = tsk->mm;
+
WARN_ON(oom_killer_disabled);
/* OOM killer might race with memcg OOM */
if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
return;
+
atomic_inc(&tsk->signal->oom_victims);
+
+ /* oom_mm is bound to the signal struct lifetime. */
+ if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
+ atomic_inc(&tsk->signal->oom_mm->mm_count);
+
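
The cmpxchg() makes the publication of oom_mm a one-shot, race-free operation: among all threads racing through mark_oom_victim() for the same signal struct, only the winner sees NULL and takes the mm_count reference, so the pin is taken exactly once and, per the comment, lives as long as the signal struct; the matching mmdrop happens when the signal struct is freed, outside this excerpt. A minimal userspace sketch of the publish-once idiom with C11 atomics (sketch_* names are illustrative):

	#include <stdatomic.h>
	#include <stddef.h>

	struct sketch_mm {
		atomic_int mm_count;	/* stands in for mm->mm_count */
	};

	struct sketch_signal {
		_Atomic(struct sketch_mm *) oom_mm;
	};

	static void sketch_mark_victim(struct sketch_signal *sig,
				       struct sketch_mm *mm)
	{
		struct sketch_mm *expected = NULL;

		/*
		 * Publish mm at most once per signal struct. Only the
		 * winning compare-exchange takes the reference, so it can
		 * be dropped exactly once when the signal struct dies.
		 */
		if (atomic_compare_exchange_strong(&sig->oom_mm, &expected, mm))
			atomic_fetch_add(&mm->mm_count, 1);
	}

This is also why the new kernel-doc requirement above matters: tsk->mm must be stable (task_lock held, or tsk == current) while it is being published, otherwise the pointer handed to the cmpxchg could be freed mid-flight.
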
/*
* Make sure that the task is woken up from uninterruptible sleep
* if it is frozen because OOM killer wouldn't be able to free