 static void freezer_destroy(struct cgroup_subsys *ss,
			     struct cgroup *cgroup)
 {
-	kfree(cgroup_freezer(cgroup));
+	struct freezer *freezer = cgroup_freezer(cgroup);
+
+	if (freezer->state != CGROUP_THAWED)
+		atomic_dec(&system_freezing_cnt);
+	kfree(freezer);
 }
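/*
 * Why freezer_destroy() drops system_freezing_cnt: the counter is taken
 * whenever a freezer cgroup leaves CGROUP_THAWED, so a cgroup destroyed
 * while still freezing/frozen must release its reference.  A minimal
 * sketch of that counterpart increment, reusing the freezer->state and
 * system_freezing_cnt names seen in this patch; the helper name is
 * illustrative, not the upstream function body.
 */
static void example_start_freezing(struct freezer *freezer)
{
	/* take the global freezing reference only on THAWED -> FREEZING */
	if (freezer->state == CGROUP_THAWED)
		atomic_inc(&system_freezing_cnt);
	freezer->state = CGROUP_FREEZING;
}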
+/* task is frozen or will freeze immediately when next it gets woken */
+static bool is_task_frozen_enough(struct task_struct *task)
+{
+	return frozen(task) ||
+		(task_is_stopped_or_traced(task) && freezing(task));
+}
+
 /*
  * The call to cgroup_lock() in the freezer.state write method prevents
  * a write to that file racing against an attach, and hence the
 	cgroup_iter_start(cgroup, &it);
 	while ((task = cgroup_iter_next(cgroup, &it))) {
 		ntotal++;
-		if (freezing(task) && frozen(task))
-		if (is_task_frozen_enough(task))
+		if (freezing(task) && is_task_frozen_enough(task))
 			nfrozen++;
 	}
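/*
 * The ntotal/nfrozen counters gathered in the loop above drive the
 * cgroup's reported state: only when every member task counts as frozen
 * does FREEZING advance to the fully frozen state.  A minimal sketch of
 * that decision, assuming a CGROUP_FROZEN value alongside the
 * CGROUP_THAWED/CGROUP_FREEZING states seen in this patch; the helper
 * name is illustrative only.
 */
static void example_settle_state(struct freezer *freezer,
				 unsigned int nfrozen, unsigned int ntotal)
{
	/* promote FREEZING to FROZEN once all member tasks are frozen */
	if (freezer->state == CGROUP_FREEZING && nfrozen == ntotal)
		freezer->state = CGROUP_FROZEN;
}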
 	struct task_struct *task;
 	unsigned int num_cant_freeze_now = 0;
-	freezer->state = CGROUP_FREEZING;
 	cgroup_iter_start(cgroup, &it);
 	while ((task = cgroup_iter_next(cgroup, &it))) {
-		if (!freeze_task(task, true))
+		if (!freeze_task(task))
 			continue;
-		if (frozen(task))
+		if (is_task_frozen_enough(task))
 			continue;
 		if (!freezing(task) && !freezer_should_skip(task))
 			num_cant_freeze_now++;