 #define EXIT_DEAD            32
 /* in tsk->state again */
 #define TASK_NONINTERACTIVE  64
+#define TASK_DEAD            128
 #define __set_task_state(tsk, state_value)              \
         do { (tsk)->state = (state_value); } while (0)
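With the new value, "dead" lives in tsk->state next to the other scheduler states, while EXIT_ZOMBIE and EXIT_DEAD remain tsk->exit_state values. As a rough sketch (the function name is made up for illustration, this is not patch text), the exit path is expected to use the new state just before its final schedule(), compare the tail of do_exit():

/* Sketch only: the dying task marks itself TASK_DEAD and schedules away
 * for the last time; finish_task_switch() running on the next task then
 * drops the final task_struct reference. */
static void exit_path_sketch(void)
{
        struct task_struct *tsk = current;

        preempt_disable();
        tsk->state = TASK_DEAD;         /* causes final put_task_struct() */
        schedule();
        BUG();                          /* the final schedule() never returns */
}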
         /*
          * A task struct has one reference for the use as "current".
-         * If a task dies, then it sets EXIT_DEAD in tsk->state and calls
+         * If a task dies, then it sets TASK_DEAD in tsk->state and calls
          * schedule one last time. The schedule call will never return, and
          * the scheduled task must drop that reference.
-         * The test for EXIT_DEAD must occur while the runqueue locks are
+         * The test for TASK_DEAD must occur while the runqueue locks are
          * still held, otherwise prev could be scheduled on another cpu, die
          * there before we look at prev->state, and then the reference would
          * be dropped twice.
         finish_lock_switch(rq, prev);
         if (mm)
                 mmdrop(mm);
-        if (unlikely(prev_state == EXIT_DEAD)) {
+        if (unlikely(prev_state == TASK_DEAD)) {
                 /*
                  * Remove function-return probe instances associated with this
                  * task and put them back on the free list.
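Condensed for illustration (not verbatim, intermediate lines elided), the whole of finish_task_switch() after this change looks roughly like the sketch below. The point of the comment above is that prev->state is sampled into prev_state while the runqueue lock is still held; otherwise prev could already be running and dying on another CPU, and the final reference would be dropped twice:

static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
        __releases(rq->lock)
{
        struct mm_struct *mm = rq->prev_mm;
        long prev_state;

        rq->prev_mm = NULL;

        /* Sample ->state before the lock is dropped; afterwards prev may
         * run, die, and be freed on another CPU. */
        prev_state = prev->state;
        finish_arch_switch(prev);
        finish_lock_switch(rq, prev);
        if (mm)
                mmdrop(mm);
        if (unlikely(prev_state == TASK_DEAD)) {
                kprobe_flush_task(prev);        /* free function-return probes */
                put_task_struct(prev);          /* the reference held as "current" */
        }
}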
         BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
         /* Cannot have done final schedule yet: would have vanished. */
-        BUG_ON(p->state == EXIT_DEAD);
+        BUG_ON(p->state == TASK_DEAD);
         get_task_struct(p);
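The two BUG_ON()s above check different fields, which is exactly the split this patch makes explicit: ->exit_state still carries EXIT_ZOMBIE/EXIT_DEAD for the reaping protocol, while "has entered its final schedule()" is now the separate TASK_DEAD value in ->state. A hypothetical assertion helper (illustration only, not in the patch) spelling out the invariant checked before get_task_struct():

/* Illustration only: a task we may still safely take a reference on must
 * already be exiting (->exit_state set), but must not have done its final
 * schedule() yet; if it had, the task_struct could already have been freed
 * and we could not have found it here. */
static inline void assert_exiting_but_not_vanished(struct task_struct *p)
{
        BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
        BUG_ON(p->state == TASK_DEAD);
}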
                                                 p->flags & PF_EXITING;
                 if (releasing) {
                         /* TASK_DEAD tasks have already released their mm */
-                        if (p->state == EXIT_DEAD)
+                        if (p->state == TASK_DEAD)
                                 continue;
                         if (p->flags & PF_EXITING && p == current) {
                                 chosen = p;
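For the OOM killer the distinction matters because a TASK_DEAD task has already released its mm, so killing it cannot free any memory. A hypothetical helper (illustration only, names are not from the patch) capturing the decision the hunk above makes for tasks that are already on their way out:

enum oom_exit_action { OOM_NONE, OOM_SKIP, OOM_SELF, OOM_WAIT };

/* Illustration only: how the victim-selection loop treats an exiting task. */
static enum oom_exit_action oom_check_exiting(struct task_struct *p)
{
        /* neither exiting nor already chosen as an OOM victim: score it normally */
        if (!(test_tsk_thread_flag(p, TIF_MEMDIE) || (p->flags & PF_EXITING)))
                return OOM_NONE;
        /* TASK_DEAD: its mm is already gone, killing it frees nothing */
        if (p->state == TASK_DEAD)
                return OOM_SKIP;
        /* the exiting task is us: just let the current exit finish */
        if ((p->flags & PF_EXITING) && p == current)
                return OOM_SELF;
        /* someone else is releasing memory: wait rather than kill another task */
        return OOM_WAIT;
}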