struct kmem_cache **cache;
const char *name;
size_t size;
- unsigned long flags;
+ slab_flags_t flags;
void (*ctor)(void *obj);
} ecryptfs_cache_infos[] = {
{
}
static inline kmem_zone_t *
-kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
+kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
void (*construct)(void *))
{
return kmem_cache_create(zone_name, size, 0, flags, construct);
void kasan_free_pages(struct page *page, unsigned int order);
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
- unsigned long *flags);
+ slab_flags_t *flags);
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
static inline void kasan_cache_create(struct kmem_cache *cache,
size_t *size,
- unsigned long *flags) {}
+ slab_flags_t *flags) {}
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
extern void kmemleak_ignore_phys(phys_addr_t phys) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
- int min_count, unsigned long flags,
+ int min_count, slab_flags_t flags,
gfp_t gfp)
{
if (!(flags & SLAB_NOLEAKTRACE))
kmemleak_alloc(ptr, size, min_count, gfp);
}
-static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags)
{
if (!(flags & SLAB_NOLEAKTRACE))
kmemleak_free(ptr);
{
}
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
- int min_count, unsigned long flags,
+ int min_count, slab_flags_t flags,
gfp_t gfp)
{
}
static inline void kmemleak_free_part(const void *ptr, size_t size)
{
}
-static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags)
{
}
static inline void kmemleak_free_percpu(const void __percpu *ptr)
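These "recursive" wrappers only matter when called from inside the slab allocators themselves, which is why their flags argument becomes slab_flags_t. A hedged sketch of such a call site, simplified from what the allocator-internal post-allocation hooks do (the hook name here is made up; only the wrapper call reflects the API above):

#include <linux/kmemleak.h>
#include <linux/slab.h>

/* Illustrative hook only: a cache created with SLAB_NOLEAKTRACE is filtered
 * out inside the wrapper, which is how kmemleak avoids tracking (and
 * recursing into) its own metadata caches. */
static inline void example_post_alloc_hook(struct kmem_cache *s, gfp_t gfp,
					   void *object)
{
	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, gfp);
}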
* Flags to pass to kmem_cache_create().
* The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
*/
-#define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
-#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
-#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
-#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
-#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
-#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
+/* DEBUG: Perform (expensive) checks on alloc/free */
+#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100UL)
+/* DEBUG: Red zone objs in a cache */
+#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400UL)
+/* DEBUG: Poison objects */
+#define SLAB_POISON ((slab_flags_t __force)0x00000800UL)
+/* Align objs on cache lines */
+#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000UL)
+/* Use GFP_DMA memory */
+#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000UL)
+/* DEBUG: Store the last owner for bug hunting */
+#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000UL)
+/* Panic if kmem_cache_create() fails */
+#define SLAB_PANIC ((slab_flags_t __force)0x00040000UL)
/*
* SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
*
*
* Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
*/
-#define SLAB_TYPESAFE_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
-#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
-#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
+/* Defer freeing slabs to RCU */
+#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000UL)
+/* Spread some memory over cpuset */
+#define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000UL)
+/* Trace allocations and frees */
+#define SLAB_TRACE ((slab_flags_t __force)0x00200000UL)
/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS 0x00400000UL
+# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000UL)
#else
-# define SLAB_DEBUG_OBJECTS 0x00000000UL
+# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00000000UL)
#endif
-#define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */
+/* Avoid kmemleak tracing */
+#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000UL)
/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
-# define SLAB_NOTRACK 0x01000000UL
+# define SLAB_NOTRACK ((slab_flags_t __force)0x01000000UL)
#else
-# define SLAB_NOTRACK 0x00000000UL
+# define SLAB_NOTRACK ((slab_flags_t __force)0x00000000UL)
#endif
+/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */
+# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000UL)
#else
-# define SLAB_FAILSLAB 0x00000000UL
+# define SLAB_FAILSLAB ((slab_flags_t __force)0x00000000UL)
#endif
+/* Account to memcg */
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
-# define SLAB_ACCOUNT 0x04000000UL /* Account to memcg */
+# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000UL)
#else
-# define SLAB_ACCOUNT 0x00000000UL
+# define SLAB_ACCOUNT ((slab_flags_t __force)0x00000000UL)
#endif
#ifdef CONFIG_KASAN
-#define SLAB_KASAN 0x08000000UL
+#define SLAB_KASAN ((slab_flags_t __force)0x08000000UL)
#else
-#define SLAB_KASAN 0x00000000UL
+#define SLAB_KASAN ((slab_flags_t __force)0x00000000UL)
#endif
/* The following flags affect the page allocator grouping pages by mobility */
-#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
+/* Objects are reclaimable */
+#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000UL)
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
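SLAB_TYPESAFE_BY_RCU (defined above) only defers freeing of whole slabs, so an object found under rcu_read_lock() may already have been recycled as a new object of the same type; the warning comment above it requires readers to take a reference and re-check their search key. A minimal sketch of that reader-side pattern, with a hypothetical object type and hypothetical table_lookup()/cached_obj_put() helpers:

#include <linux/rcupdate.h>
#include <linux/refcount.h>

struct cached_obj {
	refcount_t		ref;
	unsigned long		key;
	struct hlist_node	node;
};

/* Hypothetical helpers, not defined here. */
struct cached_obj *table_lookup(unsigned long key);
void cached_obj_put(struct cached_obj *obj);

/* Reader side for a cache created with SLAB_TYPESAFE_BY_RCU. */
static struct cached_obj *cached_obj_lookup(unsigned long key)
{
	struct cached_obj *p;

	rcu_read_lock();
again:
	p = table_lookup(key);
	if (p) {
		/* The slot may belong to a freed or recycled object. */
		if (!refcount_inc_not_zero(&p->ref))
			goto again;
		if (p->key != key) {
			cached_obj_put(p);
			goto again;
		}
	}
	rcu_read_unlock();
	return p;
}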
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
bool slab_is_available(void);
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
- unsigned long,
+ slab_flags_t,
void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
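Because every SLAB_* macro above now carries the slab_flags_t type, existing callers of kmem_cache_create() keep compiling unchanged with no casts. A hedged usage sketch with a made-up structure and cache name:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct foo {
	int			id;
	struct list_head	list;
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	/* SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT is already a slab_flags_t value */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

static void foo_cache_exit(void)
{
	kmem_cache_destroy(foo_cachep);
}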
struct reciprocal_value reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */
- unsigned int flags; /* constant flags */
+ slab_flags_t flags; /* constant flags */
unsigned int num; /* # of objs per slab */
/* 3) cache_grow/shrink */
struct kmem_cache {
struct kmem_cache_cpu __percpu *cpu_slab;
/* Used for retriving partial slabs etc */
- unsigned long flags;
+ slab_flags_t flags;
unsigned long min_partial;
int size; /* The size of an object including meta data */
int object_size; /* The size of an object without meta data */
#endif
typedef unsigned __bitwise gfp_t;
+typedef unsigned long __bitwise slab_flags_t;
typedef unsigned __bitwise fmode_t;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
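slab_flags_t follows the gfp_t pattern: __bitwise makes the typedef opaque to sparse, and the __force casts in the SLAB_* definitions are the only sanctioned way to manufacture values of it. A standalone sketch (not kernel code) of what sparse then catches, using illustrative flag values; run it through sparse directly, which predefines __CHECKER__:

#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int  __bitwise gfp_t;
typedef unsigned long __bitwise slab_flags_t;

/* illustrative values only */
#define DEMO_GFP_KERNEL		((gfp_t __force)0x01U)
#define DEMO_SLAB_POISON	((slab_flags_t __force)0x800UL)

static void demo_create(slab_flags_t flags) { (void)flags; }

int main(void)
{
	demo_create(DEMO_SLAB_POISON);	/* ok: types match */
	demo_create(DEMO_GFP_KERNEL);	/* sparse warns: different base types */
	demo_create(0x800);		/* sparse warns: plain integer used as bitwise type */
	return 0;
}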
struct kmem_cache *slab;
unsigned int obj_size;
- int slab_flags;
+ slab_flags_t slab_flags;
struct percpu_counter *orphan_count;
}
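For the networking hunk: struct proto's slab_flags is, roughly speaking, OR'd into the flags that proto_register() passes to kmem_cache_create() for the per-protocol socket cache, so the field has to change type along with everything else. A hedged sketch with a hypothetical protocol; some real protocols use this field to request SLAB_TYPESAFE_BY_RCU semantics for their socket lookups:

#include <linux/module.h>
#include <net/sock.h>

struct foo_sock {
	struct sock	sk;	/* must be the first member */
	int		foo_state;
};

static struct proto foo_proto = {
	.name		= "FOO",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct foo_sock),
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};

/* proto_register(&foo_proto, 1) then creates the cache, approximately as:
 *	prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
 *				       SLAB_HWCACHE_ALIGN | prot->slab_flags,
 *				       NULL);
 */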
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
- unsigned long *flags)
+ slab_flags_t *flags)
{
int redzone_adjust;
int orig_size = *size;
MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
} while (0)
-#define CFLGS_OBJFREELIST_SLAB (0x40000000UL)
-#define CFLGS_OFF_SLAB (0x80000000UL)
+#define CFLGS_OBJFREELIST_SLAB ((slab_flags_t __force)0x40000000UL)
+#define CFLGS_OFF_SLAB ((slab_flags_t __force)0x80000000UL)
#define OBJFREELIST_SLAB(x) ((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
* Calculate the number of objects and left-over bytes for a given buffer size.
*/
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
- unsigned long flags, size_t *left_over)
+ slab_flags_t flags, size_t *left_over)
{
unsigned int num;
size_t slab_size = PAGE_SIZE << gfporder;
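A worked example of the estimate this helper performs, as a standalone sketch assuming a 4 KiB page and the off-slab/objfreelist case where the whole slab holds objects (the on-slab case additionally charges each object one freelist index entry, shrinking the count slightly):

static unsigned int example_estimate(unsigned long gfporder, size_t buffer_size,
				     size_t *left_over)
{
	size_t slab_size = 4096UL << gfporder;	/* PAGE_SIZE assumed 4 KiB */
	unsigned int num = slab_size / buffer_size;

	*left_over = slab_size % buffer_size;
	return num;	/* e.g. order 0, 192-byte objects: num = 21, left_over = 64 */
}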
* towards high-order requests, this should be changed.
*/
static size_t calculate_slab_order(struct kmem_cache *cachep,
- size_t size, unsigned long flags)
+ size_t size, slab_flags_t flags)
{
size_t left_over = 0;
int gfporder;
return 0;
}
-unsigned long kmem_cache_flags(unsigned long object_size,
- unsigned long flags, const char *name,
+slab_flags_t kmem_cache_flags(unsigned long object_size,
+ slab_flags_t flags, const char *name,
void (*ctor)(void *))
{
return flags;
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *))
+ slab_flags_t flags, void (*ctor)(void *))
{
struct kmem_cache *cachep;
}
static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
- size_t size, unsigned long flags)
+ size_t size, slab_flags_t flags)
{
size_t left;
}
static bool set_off_slab_cache(struct kmem_cache *cachep,
- size_t size, unsigned long flags)
+ size_t size, slab_flags_t flags)
{
size_t left;
}
static bool set_on_slab_cache(struct kmem_cache *cachep,
- size_t size, unsigned long flags)
+ size_t size, slab_flags_t flags)
{
size_t left;
* cacheline. This can be beneficial if you're counting cycles as closely
* as davem.
*/
-int
-__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
{
size_t ralign = BYTES_PER_WORD;
gfp_t gfp;
unsigned int object_size;/* The original size of the object */
unsigned int size; /* The aligned/padded/added on size */
unsigned int align; /* Alignment as calculated */
- unsigned long flags; /* Active flags on the slab */
+ slab_flags_t flags; /* Active flags on the slab */
const char *name; /* Slab name for sysfs */
int refcount; /* Use counter */
void (*ctor)(void *); /* Called on object slot creation */
unsigned long size;
} kmalloc_info[];
-unsigned long calculate_alignment(unsigned long flags,
+unsigned long calculate_alignment(slab_flags_t flags,
unsigned long align, unsigned long size);
#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
-void create_kmalloc_caches(unsigned long);
+void create_kmalloc_caches(slab_flags_t);
/* Find the kmalloc slab corresponding for a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
/* Functions provided by the slab allocators */
-extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
+int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
- unsigned long flags);
+ slab_flags_t flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
- size_t size, unsigned long flags);
+ size_t size, slab_flags_t flags);
int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
- unsigned long flags, const char *name, void (*ctor)(void *));
+ slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *));
+ slab_flags_t flags, void (*ctor)(void *));
-unsigned long kmem_cache_flags(unsigned long object_size,
- unsigned long flags, const char *name,
+slab_flags_t kmem_cache_flags(unsigned long object_size,
+ slab_flags_t flags, const char *name,
void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *))
+ slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }
-static inline unsigned long kmem_cache_flags(unsigned long object_size,
- unsigned long flags, const char *name,
+static inline slab_flags_t kmem_cache_flags(unsigned long object_size,
+ slab_flags_t flags, const char *name,
void (*ctor)(void *))
{
return flags;
}
struct kmem_cache *find_mergeable(size_t size, size_t align,
- unsigned long flags, const char *name, void (*ctor)(void *))
+ slab_flags_t flags, const char *name, void (*ctor)(void *))
{
struct kmem_cache *s;
* Figure out what the alignment of the objects will be given a set of
* flags, a user specified alignment and the size of the objects.
*/
-unsigned long calculate_alignment(unsigned long flags,
+unsigned long calculate_alignment(slab_flags_t flags,
unsigned long align, unsigned long size)
{
/*
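A standalone sketch of the kind of computation calculate_alignment() performs, assuming a 64-byte cache line and ignoring the architecture minimum-alignment clamp the kernel helper also applies: SLAB_HWCACHE_ALIGN asks for cache-line alignment, but a small object only gets the largest power-of-two fraction of a line it actually fills, and the result never drops below the caller-supplied alignment or pointer alignment.

#define DEMO_CACHE_LINE	64UL			/* assumed cache_line_size() */

static unsigned long demo_alignment(int hwcache_align,
				    unsigned long align, unsigned long size)
{
	if (hwcache_align) {
		unsigned long ralign = DEMO_CACHE_LINE;

		/* halve until the object no longer fits in half the line */
		while (size <= ralign / 2)
			ralign /= 2;
		if (ralign > align)
			align = ralign;
	}
	/* round up to a multiple of sizeof(void *) */
	return (align + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
}
/* demo_alignment(1, 0, 24) == 32 with a 64-byte line: a 24-byte object gets
 * 32-byte alignment rather than a full cache line. */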
static struct kmem_cache *create_cache(const char *name,
size_t object_size, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *),
+ slab_flags_t flags, void (*ctor)(void *),
struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
struct kmem_cache *s;
*/
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *))
+ slab_flags_t flags, void (*ctor)(void *))
{
struct kmem_cache *s = NULL;
const char *cache_name;
#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
- unsigned long flags)
+ slab_flags_t flags)
{
int err;
}
struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
- unsigned long flags)
+ slab_flags_t flags)
{
struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
}
}
-static void __init new_kmalloc_cache(int idx, unsigned long flags)
+static void __init new_kmalloc_cache(int idx, slab_flags_t flags)
{
kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
kmalloc_info[idx].size, flags);
* may already have been created because they were needed to
* enable allocations for slab creation.
*/
-void __init create_kmalloc_caches(unsigned long flags)
+void __init create_kmalloc_caches(slab_flags_t flags)
{
int i;
}
EXPORT_SYMBOL(ksize);
-int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
+int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
{
if (flags & SLAB_TYPESAFE_BY_RCU) {
/* leave room for rcu footer at the end of object */
#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
/* Internal SLUB flags */
-#define __OBJECT_POISON 0x80000000UL /* Poison object */
-#define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */
+/* Poison object */
+#define __OBJECT_POISON ((slab_flags_t __force)0x80000000UL)
+/* Use cmpxchg_double */
+#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000UL)
/*
* Tracking user of a slab.
* Debug settings:
*/
#if defined(CONFIG_SLUB_DEBUG_ON)
-static int slub_debug = DEBUG_DEFAULT_FLAGS;
+static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
-static int slub_debug;
+static slab_flags_t slub_debug;
#endif
static char *slub_debug_slabs;
__setup("slub_debug", setup_slub_debug);
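For context, slub_debug holds the flags parsed from the slub_debug= boot option by setup_slub_debug() above; booting with, say, slub_debug=FZP,dentry maps the letters F, Z and P to SLAB_CONSISTENCY_CHECKS, SLAB_RED_ZONE and SLAB_POISON and restricts them to the named cache, so the variable naturally becomes a slab_flags_t rather than an int.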
-unsigned long kmem_cache_flags(unsigned long object_size,
- unsigned long flags, const char *name,
+slab_flags_t kmem_cache_flags(unsigned long object_size,
+ slab_flags_t flags, const char *name,
void (*ctor)(void *))
{
/*
struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
struct page *page) {}
-unsigned long kmem_cache_flags(unsigned long object_size,
- unsigned long flags, const char *name,
+slab_flags_t kmem_cache_flags(unsigned long object_size,
+ slab_flags_t flags, const char *name,
void (*ctor)(void *))
{
return flags;
*/
static int calculate_sizes(struct kmem_cache *s, int forced_order)
{
- unsigned long flags = s->flags;
+ slab_flags_t flags = s->flags;
size_t size = s->object_size;
int order;
return !!oo_objects(s->oo);
}
-static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
+static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
{
s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
s->reserved = 0;
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *))
+ slab_flags_t flags, void (*ctor)(void *))
{
struct kmem_cache *s, *c;
return s;
}
-int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
+int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
{
int err;