slub: clean up code for kmem cgroup support to kmem_cache_free_bulk
author Jesper Dangaard Brouer <brouer@redhat.com>
Tue, 15 Mar 2016 21:53:32 +0000 (14:53 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 15 Mar 2016 23:55:16 +0000 (16:55 -0700)
This change is primarily an attempt to make it easier to see the
optimizations the compiler performs in case CONFIG_MEMCG_KMEM is not
enabled.

Performance-wise, even when CONFIG_MEMCG_KMEM is compiled in, the
overhead is zero.  This is because, as long as no process has enabled
kmem cgroup accounting, the assignment is replaced by asm-NOP
operations.  This is possible because memcg_kmem_enabled() uses a
static_key_false() construct.
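
For illustration, here is a simplified sketch of the cache_from_obj()
helper (mm/slab.h) that performs this assignment; the real helper also
sanity-checks the object's cache under SLAB_DEBUG_FREE, elided here:

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	/*
	 * memcg_kmem_enabled() is backed by a static key: with kmem
	 * cgroup accounting inactive, this branch is patched into a
	 * NOP and the page lookup below is never executed.
	 */
	if (!memcg_kmem_enabled())
		return s;

	/* Accounting active: object may belong to a per-memcg clone */
	return virt_to_head_page(x)->slab_cache;
}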

It also helps readability, as it avoids accessing the p[] array like
p[size - 1], which "exposes" that the array is processed backwards
inside the helper function build_detached_freelist().

Lastly, this also makes the code more robust in error cases, like
passing NULL pointers in the array.  Such cases were handled before
commit 033745189b1b ("slub: add missing kmem cgroup support to
kmem_cache_free_bulk") and are handled again with this change.

Fixes: 033745189b1b ("slub: add missing kmem cgroup support to kmem_cache_free_bulk")
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/slub.c

index d8fbd4a6ed599882489143665ca750eb7613fd65..2a722e14195803a90f2477b27316544fc58e99f7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2815,6 +2815,7 @@ struct detached_freelist {
        void *tail;
        void *freelist;
        int cnt;
+       struct kmem_cache *s;
 };
 
 /*
@@ -2829,8 +2830,9 @@ struct detached_freelist {
  * synchronization primitive.  Look ahead in the array is limited due
  * to performance reasons.
  */
-static int build_detached_freelist(struct kmem_cache *s, size_t size,
-                                  void **p, struct detached_freelist *df)
+static inline
+int build_detached_freelist(struct kmem_cache *s, size_t size,
+                           void **p, struct detached_freelist *df)
 {
        size_t first_skipped_index = 0;
        int lookahead = 3;
@@ -2846,8 +2848,11 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
        if (!object)
                return 0;
 
+       /* Support for memcg, compiler can optimize this out */
+       df->s = cache_from_obj(s, object);
+
        /* Start new detached freelist */
-       set_freepointer(s, object, NULL);
+       set_freepointer(df->s, object, NULL);
        df->page = virt_to_head_page(object);
        df->tail = object;
        df->freelist = object;
@@ -2862,7 +2867,7 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
                /* df->page is always set at this point */
                if (df->page == virt_to_head_page(object)) {
                        /* Opportunity build freelist */
-                       set_freepointer(s, object, df->freelist);
+                       set_freepointer(df->s, object, df->freelist);
                        df->freelist = object;
                        df->cnt++;
                        p[size] = NULL; /* mark object processed */
@@ -2881,25 +2886,20 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
        return first_skipped_index;
 }
 
-
 /* Note that interrupts must be enabled when calling this function. */
-void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 {
        if (WARN_ON(!size))
                return;
 
        do {
                struct detached_freelist df;
-               struct kmem_cache *s;
-
-               /* Support for memcg */
-               s = cache_from_obj(orig_s, p[size - 1]);
 
                size = build_detached_freelist(s, size, p, &df);
                if (unlikely(!df.page))
                        continue;
 
-               slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
+               slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
        } while (likely(size));
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
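
For reference, a minimal hypothetical caller pairing the bulk APIs
could look as follows ("my_cache" is a placeholder for a cache created
elsewhere with kmem_cache_create()):

	void *objs[16];

	/* kmem_cache_alloc_bulk() returns 0 on failure */
	if (!kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
		return -ENOMEM;

	/* ... use the objects ... */

	/* Interrupts must be enabled; NULL entries in objs[] are tolerated */
	kmem_cache_free_bulk(my_cache, ARRAY_SIZE(objs), objs);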