No functional changes, just s/atomic_t count/int nr_threads/.
With the recent changes this counter has a single user, get_nr_threads(),
and none of its callers needs a really accurate number of threads, not to
mention that each caller obviously races with fork/exit. The value is only
used to report the thread count to user space, except that first_tid()
uses it to avoid an unnecessary while_each_thread() loop in the unlikely
case.
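For illustration, a reader that merely samples the counter is served fine
by a plain load; the sketch below is hypothetical (the function and its
seq_printf() reporting are assumptions, not part of this patch):

	/*
	 * Hypothetical caller: the snapshot may already be stale by the
	 * time it is printed, which is exactly as accurate as the old
	 * atomic_read(&sig->count) was.
	 */
	static void report_threads(struct seq_file *m, struct task_struct *tsk)
	{
		int nr = get_nr_threads(tsk);	/* racy by design */

		seq_printf(m, "Threads:\t%d\n", nr);
	}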
It is a bit sad that we need a word in struct signal_struct for this;
perhaps we can later change get_nr_threads() to approximate the number of
threads using signal->live and kill ->nr_threads.
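A hedged sketch of that possible follow-up (an assumption about a future
cleanup, not something this patch does):

	/* Possible later change: approximate via ->live, drop ->nr_threads */
	static inline int get_nr_threads(struct task_struct *tsk)
	{
		return atomic_read(&tsk->signal->live);
	}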
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Acked-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
extern struct fs_struct init_fs;
#define INIT_SIGNALS(sig) { \
- .count = ATOMIC_INIT(1), \
+ .nr_threads = 1, \
.wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
.shared_pending = { \
.list = LIST_HEAD_INIT(sig.shared_pending.list), \
*/
struct signal_struct {
atomic_t sigcnt;
- atomic_t count;
atomic_t live;
+ int nr_threads;
wait_queue_head_t wait_chldexit; /* for wait4() */
static inline int get_nr_threads(struct task_struct *tsk)
{
- return atomic_read(&tsk->signal->count);
+ return tsk->signal->nr_threads;
}
/* de_thread depends on thread_group_leader not being a pid based check */
struct sighand_struct *sighand;
struct tty_struct *uninitialized_var(tty);
- BUG_ON(!sig);
- BUG_ON(!atomic_read(&sig->count));
-
sighand = rcu_dereference_check(tsk->sighand,
rcu_read_lock_held() ||
lockdep_tasklist_lock_is_held());
spin_lock(&sighand->siglock);
- atomic_dec(&sig->count);
posix_cpu_timers_exit(tsk);
if (group_dead) {
sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
}
+ sig->nr_threads--;
__unhash_process(tsk, group_dead);
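The decrement sits inside the ->siglock section taken a few lines above,
which is what lets the counter be a plain int. A condensed sketch of the
exit-side write discipline this relies on (context elided):

	spin_lock(&sighand->siglock);
	...
	sig->nr_threads--;		/* writer: serialized by ->siglock */
	__unhash_process(tsk, group_dead);
	...
	spin_unlock(&sighand->siglock);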
/*
if (!sig)
return -ENOMEM;
- atomic_set(&sig->sigcnt, 1);
- atomic_set(&sig->count, 1);
+ sig->nr_threads = 1;
atomic_set(&sig->live, 1);
+ atomic_set(&sig->sigcnt, 1);
init_waitqueue_head(&sig->wait_chldexit);
if (clone_flags & CLONE_NEWPID)
sig->flags |= SIGNAL_UNKILLABLE;
}
if (clone_flags & CLONE_THREAD) {
- atomic_inc(&current->signal->sigcnt);
- atomic_inc(&current->signal->count);
+ current->signal->nr_threads++;
atomic_inc(&current->signal->live);
+ atomic_inc(&current->signal->sigcnt);
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
}
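For completeness, the fork-side increment runs under the same lock; a
condensed sketch of the locking around this hunk in copy_process()
(simplified, and assuming the usual tasklist_lock -> siglock nesting of
this era):

	write_lock_irq(&tasklist_lock);
	spin_lock(&current->sighand->siglock);
	if (clone_flags & CLONE_THREAD)
		current->signal->nr_threads++;	/* serialized by ->siglock */
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);

With every writer holding ->siglock, the plain int needs no atomics, and
the lockless read in get_nr_threads() sees a value no less accurate than
the old atomic_read(&sig->count).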