From d2e3ee9b29f5de5b01e611b04e6fb29760589b01 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Fri, 17 Jul 2009 09:09:36 +1000
Subject: [PATCH] kernel: fix is_single_threaded

- Fix the comment: is_single_threaded(p) actually means that nobody shares
  ->mm with p.  I think this helper should be renamed, and it should not
  have arguments.  With or without this patch it must not be used unless
  p == current, otherwise we can't safely use p->signal or p->mm.

- The "if (atomic_read(&p->signal->count) != 1)" check is not right when
  we have a zombie group leader; use signal->live instead.

- Add a PF_KTHREAD check to skip kernel threads, which may borrow p->mm;
  otherwise we can wrongly return false.

- Use for_each_process() instead of do_each_thread(); all threads in a
  group must use the same ->mm, so it is enough to check one live thread
  per process.

- Use down_write(mm->mmap_sem) + rcu_read_lock() instead of tasklist_lock
  to iterate over the process list.  If there is another CLONE_VM process
  it can't pass exit_mm(), which takes the same mm->mmap_sem.  We can miss
  a freshly forked CLONE_VM task, but this doesn't matter because we must
  see its parent and return false.

Signed-off-by: Oleg Nesterov
Cc: David Howells
Cc: James Morris
Cc: Roland McGrath
Cc: Stephen Smalley
Signed-off-by: Andrew Morton
Signed-off-by: James Morris
---
 lib/is_single_threaded.c | 62 +++++++++++++++++++++++-----------------
 1 file changed, 36 insertions(+), 26 deletions(-)

diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
index f1ed2fe76c65..2762516e0a5e 100644
--- a/lib/is_single_threaded.c
+++ b/lib/is_single_threaded.c
@@ -12,34 +12,44 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 
-/**
- * is_single_threaded - Determine if a thread group is single-threaded or not
- * @p: A task in the thread group in question
- *
- * This returns true if the thread group to which a task belongs is single
- * threaded, false if it is not.
+/*
+ * Returns true if the task does not share ->mm with another thread/process.
  */
-bool is_single_threaded(struct task_struct *p)
+bool is_single_threaded(struct task_struct *task)
 {
-	struct task_struct *g, *t;
-	struct mm_struct *mm = p->mm;
-
-	if (atomic_read(&p->signal->count) != 1)
-		goto no;
-
-	if (atomic_read(&p->mm->mm_users) != 1) {
-		read_lock(&tasklist_lock);
-		do_each_thread(g, t) {
-			if (t->mm == mm && t != p)
-				goto no_unlock;
-		} while_each_thread(g, t);
-		read_unlock(&tasklist_lock);
-	}
+	struct mm_struct *mm = task->mm;
+	struct task_struct *p, *t;
+	bool ret;
+
+	might_sleep();
+
+	if (atomic_read(&task->signal->live) != 1)
+		return false;
 
-	return true;
+	if (atomic_read(&mm->mm_users) == 1)
+		return true;
+
+	ret = false;
+	down_write(&mm->mmap_sem);
+	rcu_read_lock();
+	for_each_process(p) {
+		if (unlikely(p->flags & PF_KTHREAD))
+			continue;
+		if (unlikely(p == task->group_leader))
+			continue;
+
+		t = p;
+		do {
+			if (unlikely(t->mm == mm))
+				goto found;
+			if (likely(t->mm))
+				break;
+		} while_each_thread(p, t);
+	}
+	ret = true;
+found:
+	rcu_read_unlock();
+	up_write(&mm->mmap_sem);
 
-no_unlock:
-	read_unlock(&tasklist_lock);
-no:
-	return false;
+	return ret;
 }
-- 
2.30.2
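
A minimal caller-side sketch, only to illustrate the constraint from the first
point above: the helper is meaningful only when called on the current task,
since only then can ->signal and ->mm be used without further locking.  The
function name change_creds_if_unshared() is hypothetical and purely
illustrative; it assumes the is_single_threaded() declaration from
<linux/sched.h>:

	#include <linux/sched.h>
	#include <linux/errno.h>

	/* Hypothetical caller: proceed only if nobody else shares ->mm. */
	static int change_creds_if_unshared(void)
	{
		if (!is_single_threaded(current))
			return -EAGAIN;	/* another task shares current->mm */

		/* At this point no other task uses current->mm. */
		return 0;
	}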