mm/slab: move SLUB alloc hooks to common mm/slab.h
author Jesper Dangaard Brouer <brouer@redhat.com>
Tue, 15 Mar 2016 21:53:35 +0000 (14:53 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 15 Mar 2016 23:55:16 +0000 (16:55 -0700)
First step towards sharing the alloc hooks between the SLUB and SLAB
allocators.  Move the SLUB allocator's *_alloc_hook functions to the
common mm/slab.h for internal slab definitions.  (An illustrative
call-site sketch follows the changed-files list below.)

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/slab.h
mm/slub.c
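
For orientation, here is a minimal sketch of how an allocator entry point
is expected to bracket its fast path with these now-shared hooks.  The
names example_slab_alloc() and example_fastpath() are hypothetical
stand-ins for illustration, not code from this patch:

/* Hypothetical stand-in for the allocator-specific fast/slow path. */
void *example_fastpath(struct kmem_cache *s, gfp_t gfpflags);

static __always_inline void *example_slab_alloc(struct kmem_cache *s,
						gfp_t gfpflags)
{
	void *object;

	/*
	 * The pre hook masks off disallowed gfp flags, may fail the
	 * allocation outright (should_failslab) and may hand back a
	 * per-memcg child cache (memcg_kmem_get_cache), so the returned
	 * cache pointer must be the one used from here on.
	 */
	s = slab_pre_alloc_hook(s, gfpflags);
	if (unlikely(!s))
		return NULL;

	object = example_fastpath(s, gfpflags);

	/*
	 * Runs the kmemcheck/kmemleak/kasan hooks on the new object and
	 * drops the memcg reference taken by the pre hook.
	 */
	slab_post_alloc_hook(s, gfpflags, 1, &object);
	return object;
}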

index 2eedacea439de698bdf2103f54ae936fe40de48d..fd231c9f5f93dc1afe818b3ceddbdec53d125e6d 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -38,6 +38,10 @@ struct kmem_cache {
 #endif
 
 #include <linux/memcontrol.h>
+#include <linux/fault-inject.h>
+#include <linux/kmemcheck.h>
+#include <linux/kasan.h>
+#include <linux/kmemleak.h>
 
 /*
  * State of the slab allocator.
@@ -321,6 +325,64 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
        return s;
 }
 
+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifndef CONFIG_SLUB
+       return s->object_size;
+
+#else /* CONFIG_SLUB */
+# ifdef CONFIG_SLUB_DEBUG
+       /*
+        * Debugging requires use of the padding between object
+        * and whatever may come after it.
+        */
+       if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+               return s->object_size;
+# endif
+       /*
+        * If we have the need to store the freelist pointer
+        * back there or track user information then we can
+        * only use the space before that information.
+        */
+       if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+               return s->inuse;
+       /*
+        * Else we can use all the padding etc for the allocation
+        */
+       return s->size;
+#endif
+}
+
+static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
+                                                    gfp_t flags)
+{
+       flags &= gfp_allowed_mask;
+       lockdep_trace_alloc(flags);
+       might_sleep_if(gfpflags_allow_blocking(flags));
+
+       if (should_failslab(s->object_size, flags, s->flags))
+               return NULL;
+
+       return memcg_kmem_get_cache(s, flags);
+}
+
+static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
+                                       size_t size, void **p)
+{
+       size_t i;
+
+       flags &= gfp_allowed_mask;
+       for (i = 0; i < size; i++) {
+               void *object = p[i];
+
+               kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
+               kmemleak_alloc_recursive(object, s->object_size, 1,
+                                        s->flags, flags);
+               kasan_slab_alloc(s, object);
+       }
+       memcg_kmem_put_cache(s);
+}
+
 #ifndef CONFIG_SLOB
 /*
  * The slab lists for all objects.
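
A side note on slab_ksize() above: whichever branch is taken, the value
stays between the caller-visible object size and the full per-object
stride.  A hedged sanity-check sketch (slab_ksize_sanity() is invented
here to make the invariant concrete; it is not part of this patch):

static inline void slab_ksize_sanity(const struct kmem_cache *s)
{
	size_t n = slab_ksize(s);

	WARN_ON(n < s->object_size);	/* caller-visible bytes always fit */
	WARN_ON(n > s->size);		/* bounded by the per-object stride */
}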
index 2a722e14195803a90f2477b27316544fc58e99f7..6dd04c0465c584d6ad42a31cc6c46f95104e84eb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -284,30 +284,6 @@ static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
        return (p - addr) / s->size;
 }
 
-static inline size_t slab_ksize(const struct kmem_cache *s)
-{
-#ifdef CONFIG_SLUB_DEBUG
-       /*
-        * Debugging requires use of the padding between object
-        * and whatever may come after it.
-        */
-       if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
-               return s->object_size;
-
-#endif
-       /*
-        * If we have the need to store the freelist pointer
-        * back there or track user information then we can
-        * only use the space before that information.
-        */
-       if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
-               return s->inuse;
-       /*
-        * Else we can use all the padding etc for the allocation
-        */
-       return s->size;
-}
-
 static inline int order_objects(int order, unsigned long size, int reserved)
 {
        return ((PAGE_SIZE << order) - reserved) / size;
@@ -1281,36 +1257,6 @@ static inline void kfree_hook(const void *x)
        kasan_kfree_large(x);
 }
 
-static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
-                                                    gfp_t flags)
-{
-       flags &= gfp_allowed_mask;
-       lockdep_trace_alloc(flags);
-       might_sleep_if(gfpflags_allow_blocking(flags));
-
-       if (should_failslab(s->object_size, flags, s->flags))
-               return NULL;
-
-       return memcg_kmem_get_cache(s, flags);
-}
-
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
-                                       size_t size, void **p)
-{
-       size_t i;
-
-       flags &= gfp_allowed_mask;
-       for (i = 0; i < size; i++) {
-               void *object = p[i];
-
-               kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
-               kmemleak_alloc_recursive(object, s->object_size, 1,
-                                        s->flags, flags);
-               kasan_slab_alloc(s, object);
-       }
-       memcg_kmem_put_cache(s);
-}
-
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
 {
        kmemleak_free_recursive(x, s->flags);
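
The array signature of the post hook is what lets a bulk allocation path
run the per-object debugging hooks in a single pass.  A minimal sketch of
a kmem_cache_alloc_bulk()-style caller under that assumption; the names
example_alloc_bulk() and example_refill_bulk() are hypothetical
stand-ins, not from this patch:

/* Hypothetical stand-in: fills p[0..size-1] or fails cleanly. */
bool example_refill_bulk(struct kmem_cache *s, gfp_t flags,
			 size_t size, void **p);

static bool example_alloc_bulk(struct kmem_cache *s, gfp_t flags,
			       size_t size, void **p)
{
	s = slab_pre_alloc_hook(s, flags);
	if (unlikely(!s))
		return false;

	if (!example_refill_bulk(s, flags, size, p)) {
		memcg_kmem_put_cache(s);	/* balance the pre hook */
		return false;
	}

	/*
	 * One pass over p[]: per-object kmemcheck/kmemleak/kasan hooks,
	 * then the memcg reference from the pre hook is dropped.
	 */
	slab_post_alloc_hook(s, flags, size, p);
	return true;
}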