radix-tree: account nodes to memcg only if explicitly requested
authorVladimir Davydov <vdavydov@virtuozzo.com>
Tue, 2 Aug 2016 21:03:01 +0000 (14:03 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 2 Aug 2016 21:31:41 +0000 (17:31 -0400)
Radix trees may be used not only for storing page cache pages, so
unconditionally accounting radix tree nodes to the current memory cgroup
is bad: if a radix tree node is used for storing data shared among
different cgroups we risk pinning dead memory cgroups forever.

So let's only account radix tree nodes if accounting was explicitly
requested by passing __GFP_ACCOUNT to INIT_RADIX_TREE.  Currently, we only
want to account page cache entries, so mark mapping->page_tree accordingly.
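
The rule for radix tree users after this change is therefore opt-in per
tree: nodes are charged to a memory cgroup only when the root's gfp mask
carries __GFP_ACCOUNT.  A minimal sketch of the two cases (the tree names
below are illustrative, not part of the patch):

	#include <linux/radix-tree.h>
	#include <linux/gfp.h>

	/*
	 * Nodes charged to the allocating task's memory cgroup, like
	 * mapping->page_tree after this patch:
	 */
	static RADIX_TREE(accounted_tree, GFP_ATOMIC | __GFP_ACCOUNT);

	/*
	 * Nodes that may hold data shared across cgroups stay unaccounted:
	 */
	static RADIX_TREE(shared_tree, GFP_ATOMIC);

The same applies to trees set up at runtime with INIT_RADIX_TREE().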

Fixes: 58e698af4c63 ("radix-tree: account radix_tree_node to memory cgroup")
Link: http://lkml.kernel.org/r/1470057188-7864-1-git-send-email-vdavydov@virtuozzo.com
Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: <stable@vger.kernel.org> [4.6+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/inode.c
lib/radix-tree.c

index 9cef4e16aedab53b5ff1272a5936de251ccddc58..ad445542c2858f5d622386bb58e725c4c11ae38d 100644 (file)
@@ -345,7 +345,7 @@ EXPORT_SYMBOL(inc_nlink);
 void address_space_init_once(struct address_space *mapping)
 {
        memset(mapping, 0, sizeof(*mapping));
-       INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+       INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC | __GFP_ACCOUNT);
        spin_lock_init(&mapping->tree_lock);
        init_rwsem(&mapping->i_mmap_rwsem);
        INIT_LIST_HEAD(&mapping->private_list);
index 61b8fb529ceff9f3762c9b92f0d9b742dd1303cf..1b7bf73141418f5f1427b14e806c9a9d4c6e7582 100644 (file)
@@ -277,10 +277,11 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 
                /*
                 * Even if the caller has preloaded, try to allocate from the
-                * cache first for the new node to get accounted.
+                * cache first for the new node to get accounted to the memory
+                * cgroup.
                 */
                ret = kmem_cache_alloc(radix_tree_node_cachep,
-                                      gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN);
+                                      gfp_mask | __GFP_NOWARN);
                if (ret)
                        goto out;
 
@@ -303,8 +304,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
                kmemleak_update_trace(ret);
                goto out;
        }
-       ret = kmem_cache_alloc(radix_tree_node_cachep,
-                              gfp_mask | __GFP_ACCOUNT);
+       ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 out:
        BUG_ON(radix_tree_is_internal_node(ret));
        return ret;
@@ -351,6 +351,12 @@ static int __radix_tree_preload(gfp_t gfp_mask, int nr)
        struct radix_tree_node *node;
        int ret = -ENOMEM;
 
+       /*
+        * Nodes preloaded by one cgroup can be used by another cgroup, so
+        * they should never be accounted to any particular memory cgroup.
+        */
+       gfp_mask &= ~__GFP_ACCOUNT;
+
        preempt_disable();
        rtp = this_cpu_ptr(&radix_tree_preloads);
        while (rtp->nr < nr) {
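
Taken together with the fs/inode.c hunk, the two lib/radix-tree.c changes
still let page cache insertions charge their nodes: preloaded nodes are
allocated without __GFP_ACCOUNT, but because mapping->page_tree now carries
__GFP_ACCOUNT in its root gfp mask, radix_tree_node_alloc() tries the
accounted slab allocation first and only then falls back to the unaccounted
preload.  A rough sketch of such a caller, loosely modelled on
__add_to_page_cache_locked() in mm/filemap.c (hypothetical helper name,
locking simplified, error handling trimmed):

	#include <linux/fs.h>
	#include <linux/pagemap.h>
	#include <linux/radix-tree.h>

	static int page_tree_add(struct address_space *mapping,
				 struct page *page, pgoff_t index)
	{
		int err;

		/* Preloaded nodes are never charged to a memory cgroup. */
		err = radix_tree_maybe_preload(mapping_gfp_mask(mapping) &
					       ~__GFP_HIGHMEM);
		if (err)
			return err;

		spin_lock_irq(&mapping->tree_lock);
		/*
		 * mapping->page_tree was initialized with __GFP_ACCOUNT, so
		 * any node allocated for this insertion is charged to the
		 * current cgroup when the accounted slab allocation succeeds;
		 * otherwise the unaccounted preload is consumed.
		 */
		err = radix_tree_insert(&mapping->page_tree, index, page);
		spin_unlock_irq(&mapping->tree_lock);
		radix_tree_preload_end();

		return err;
	}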