cgroup: protect cgroup->nr_(dying_)descendants by css_set_lock
authorRoman Gushchin <guro@fb.com>
Fri, 19 Apr 2019 17:03:03 +0000 (10:03 -0700)
committerTejun Heo <tj@kernel.org>
Fri, 19 Apr 2019 18:26:48 +0000 (11:26 -0700)
The number of descendant cgroups and the number of dying
descendant cgroups are currently synchronized using the cgroup_mutex.

The number of descendant cgroups will be required by the cgroup v2
freezer, which will use it to determine if a cgroup is frozen
(depending on total number of descendants and number of frozen
descendants). It's not always acceptable to grab the cgroup_mutex,
especially from quite hot paths (e.g. exit()).

To avoid this, let's additionally synchronize these counters using
the css_set_lock.

So, it's safe to read these counters with either cgroup_mutex or
css_set_lock held; to change them, both locks must be acquired.

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: kernel-team@fb.com
include/linux/cgroup-defs.h
kernel/cgroup/cgroup.c

index 1c70803e9f77056873e18aad6e1f3ce7195a25a1..7d57890cec671885d4bae9a15a59d240e9a36867 100644 (file)
@@ -349,6 +349,11 @@ struct cgroup {
         * Dying cgroups are cgroups which were deleted by a user,
         * but are still existing because someone else is holding a reference.
         * max_descendants is a maximum allowed number of descent cgroups.
+        *
+        * nr_descendants and nr_dying_descendants are protected
+        * by cgroup_mutex and css_set_lock. It's fine to read them holding
+        * either cgroup_mutex or css_set_lock; for writing, both locks
+        * should be held.
         */
        int nr_descendants;
        int nr_dying_descendants;
index 3008ea684aa0c31e6d92541534d1fd16ccea8374..786ceef2f222e8ca8c556d3526017fd061f3a137 100644 (file)
@@ -4811,9 +4811,11 @@ static void css_release_work_fn(struct work_struct *work)
                if (cgroup_on_dfl(cgrp))
                        cgroup_rstat_flush(cgrp);
 
+               spin_lock_irq(&css_set_lock);
                for (tcgrp = cgroup_parent(cgrp); tcgrp;
                     tcgrp = cgroup_parent(tcgrp))
                        tcgrp->nr_dying_descendants--;
+               spin_unlock_irq(&css_set_lock);
 
                cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
                cgrp->id = -1;
@@ -5031,12 +5033,14 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
        if (ret)
                goto out_psi_free;
 
+       spin_lock_irq(&css_set_lock);
        for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
                cgrp->ancestor_ids[tcgrp->level] = tcgrp->id;
 
                if (tcgrp != cgrp)
                        tcgrp->nr_descendants++;
        }
+       spin_unlock_irq(&css_set_lock);
 
        if (notify_on_release(parent))
                set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
@@ -5321,10 +5325,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
        if (parent && cgroup_is_threaded(cgrp))
                parent->nr_threaded_children--;
 
+       spin_lock_irq(&css_set_lock);
        for (tcgrp = cgroup_parent(cgrp); tcgrp; tcgrp = cgroup_parent(tcgrp)) {
                tcgrp->nr_descendants--;
                tcgrp->nr_dying_descendants++;
        }
+       spin_unlock_irq(&css_set_lock);
 
        cgroup1_check_for_release(parent);