From: Oleg Nesterov
Date: Wed, 26 May 2010 21:43:16 +0000 (-0700)
Subject: signals: make task_struct->signal immutable/refcountable
X-Git-Url: http://git.lede-project.org./?a=commitdiff_plain;h=ea6d290ca34c4fd91b7348338c0cc7bdeff94a35;p=openwrt%2Fstaging%2Fblogic.git

signals: make task_struct->signal immutable/refcountable

We have a lot of problems with accessing task_struct->signal; it can
"disappear" at any moment.  Even current can't use its ->signal safely
after exit_notify().  ->siglock helps, but it is not convenient, not
always possible, and sometimes it makes sense to use task->signal even
after this task is already dead.

This patch adds a reference counter, sigcnt, to signal_struct.  This
reference is owned by task_struct and is dropped in __put_task_struct().
Perhaps it makes sense to export get/put_signal_struct() later, but
currently I don't see the immediate reason.

Rename __cleanup_signal() to free_signal_struct() and unexport it.  With
the previous changes it does nothing except kmem_cache_free().

Change __exit_signal() to not clear/free ->signal; it will be freed when
the last reference to any thread in the thread group goes away.

Note:

 - when the last thread exits, signal->tty can point to nowhere; see
   the next patch.

 - with or without this patch, signal_struct->count should go away,
   or at least it should be "int nr_threads" for fs/proc.  This will
   be addressed later.

Signed-off-by: Oleg Nesterov
Cc: Alan Cox
Cc: Ingo Molnar
Cc: Peter Zijlstra
Acked-by: Roland McGrath
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/include/linux/sched.h b/include/linux/sched.h
index a95a2455cebe..32e309df408c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -527,6 +527,7 @@ struct thread_group_cputimer {
  * the locking of signal_struct.
  */
 struct signal_struct {
+	atomic_t		sigcnt;
 	atomic_t		count;
 	atomic_t		live;
 
@@ -2101,7 +2102,6 @@ extern void flush_thread(void);
 extern void exit_thread(void);
 
 extern void exit_files(struct task_struct *);
-extern void __cleanup_signal(struct signal_struct *);
 extern void __cleanup_sighand(struct sighand_struct *);
 extern void exit_itimers(struct signal_struct *);
 
diff --git a/kernel/exit.c b/kernel/exit.c
index 4a72f1753edb..92af5cde9bbe 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -134,8 +134,6 @@ static void __exit_signal(struct task_struct *tsk)
 	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
 	 */
 	flush_sigqueue(&tsk->pending);
-
-	tsk->signal = NULL;
 	tsk->sighand = NULL;
 	spin_unlock(&sighand->siglock);
 
@@ -150,7 +148,6 @@ static void __exit_signal(struct task_struct *tsk)
 		 */
 		task_rq_unlock_wait(tsk);
 		tty_kref_put(sig->tty);
-		__cleanup_signal(sig);
 	}
 }
 
diff --git a/kernel/fork.c b/kernel/fork.c
index b7879ef6e7cd..e08e3012cd6b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -165,6 +165,18 @@ void free_task(struct task_struct *tsk)
 }
 EXPORT_SYMBOL(free_task);
 
+static inline void free_signal_struct(struct signal_struct *sig)
+{
+	thread_group_cputime_free(sig);
+	kmem_cache_free(signal_cachep, sig);
+}
+
+static inline void put_signal_struct(struct signal_struct *sig)
+{
+	if (atomic_dec_and_test(&sig->sigcnt))
+		free_signal_struct(sig);
+}
+
 void __put_task_struct(struct task_struct *tsk)
 {
 	WARN_ON(!tsk->exit_state);
@@ -173,6 +185,7 @@ void __put_task_struct(struct task_struct *tsk)
 
 	exit_creds(tsk);
 	delayacct_tsk_free(tsk);
+	put_signal_struct(tsk->signal);
 
 	if (!profile_handoff_task(tsk))
 		free_task(tsk);
@@ -864,6 +877,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	if (!sig)
 		return -ENOMEM;
 
+	atomic_set(&sig->sigcnt, 1);
 	atomic_set(&sig->count, 1);
 	atomic_set(&sig->live, 1);
 	init_waitqueue_head(&sig->wait_chldexit);
@@ -889,12 +903,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	return 0;
 }
 
-void __cleanup_signal(struct signal_struct *sig)
-{
-	thread_group_cputime_free(sig);
-	kmem_cache_free(signal_cachep, sig);
-}
-
 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long new_flags = p->flags;
@@ -1248,6 +1256,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	}
 
 	if (clone_flags & CLONE_THREAD) {
+		atomic_inc(&current->signal->sigcnt);
 		atomic_inc(&current->signal->count);
 		atomic_inc(&current->signal->live);
 		p->group_leader = current->group_leader;
@@ -1294,7 +1303,7 @@ bad_fork_cleanup_mm:
 		mmput(p->mm);
 bad_fork_cleanup_signal:
 	if (!(clone_flags & CLONE_THREAD))
-		__cleanup_signal(p->signal);
+		free_signal_struct(p->signal);
 bad_fork_cleanup_sighand:
 	__cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs:
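
For illustration only, here is a minimal user-space sketch of the lifetime rule the
patch establishes: every thread in the group owns one reference to the shared
structure, and the structure is freed only when the last owner drops its reference
(as put_signal_struct() does from __put_task_struct()). The names below
(demo_signal_struct, get_demo_signal, put_demo_signal) are hypothetical stand-ins,
not kernel API, and C11 atomics stand in for the kernel's atomic_t helpers.

/*
 * Sketch of the sigcnt-style refcounting pattern, assuming a user-space
 * analogue of signal_struct.  Build with: cc -std=c11 demo.c
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_signal_struct {
	atomic_int sigcnt;		/* analogue of signal_struct->sigcnt */
	int some_shared_state;
};

static struct demo_signal_struct *alloc_demo_signal(void)
{
	struct demo_signal_struct *sig = malloc(sizeof(*sig));

	if (!sig)
		return NULL;
	atomic_init(&sig->sigcnt, 1);	/* the first thread owns one reference */
	sig->some_shared_state = 42;
	return sig;
}

static void get_demo_signal(struct demo_signal_struct *sig)
{
	/* a new thread joins the group, like CLONE_THREAD in copy_process() */
	atomic_fetch_add(&sig->sigcnt, 1);
}

static void put_demo_signal(struct demo_signal_struct *sig)
{
	/*
	 * Drop one reference; free only when the previous value was 1,
	 * i.e. we were the last owner.
	 */
	if (atomic_fetch_sub(&sig->sigcnt, 1) == 1) {
		printf("last reference dropped, freeing\n");
		free(sig);
	}
}

int main(void)
{
	struct demo_signal_struct *sig = alloc_demo_signal();

	if (!sig)
		return 1;
	get_demo_signal(sig);	/* second "thread" joins the group */
	put_demo_signal(sig);	/* first thread exits: structure survives */
	put_demo_signal(sig);	/* last thread exits: structure is freed */
	return 0;
}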