{
struct spufs_inode_info *ei = p;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
inode_init_once(&ei->vfs_inode);
}
}
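The change repeated throughout these hunks is a pure flag simplification: once SLAB_CTOR_VERIFY can no longer be passed, (flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR reduces to flags & SLAB_CTOR_CONSTRUCTOR, and the negated != form reduces to !(flags & SLAB_CTOR_CONSTRUCTOR). A minimal stand-alone sketch, not part of the patch, with the flag values copied from the include/linux/slab.h hunk below, that exhaustively checks the equivalence:

#include <assert.h>

#define SLAB_CTOR_CONSTRUCTOR	0x001UL
#define SLAB_CTOR_VERIFY	0x004UL	/* removed by this patch */

int main(void)
{
	unsigned long flags;

	/* After the patch SLAB_CTOR_VERIFY is never set, so only flag
	 * combinations with that bit clear need to stay equivalent. */
	for (flags = 0; flags < 0x008UL; flags++) {
		if (flags & SLAB_CTOR_VERIFY)
			continue;
		assert(((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR))
			== SLAB_CTOR_CONSTRUCTOR)
		       == !!(flags & SLAB_CTOR_CONSTRUCTOR));
	}
	return 0;
}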
{
struct ltree_entry *le = obj;
- if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) !=
- SLAB_CTOR_CONSTRUCTOR)
+ if (!(flags & SLAB_CTOR_CONSTRUCTOR))
return;
le->users = 0;
{
struct adfs_inode_info *ei = (struct adfs_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&ei->vfs_inode);
}
{
struct affs_inode_info *ei = (struct affs_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
init_MUTEX(&ei->i_link_lock);
init_MUTEX(&ei->i_ext_lock);
inode_init_once(&ei->vfs_inode);
{
struct afs_vnode *vnode = _vnode;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
memset(vnode, 0, sizeof(*vnode));
inode_init_once(&vnode->vfs_inode);
init_waitqueue_head(&vnode->update_waitq);
{
struct befs_inode_info *bi = (struct befs_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
inode_init_once(&bi->vfs_inode);
}
}
{
struct bfs_inode_info *bi = foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&bi->vfs_inode);
}
struct bdev_inode *ei = (struct bdev_inode *) foo;
struct block_device *bdev = &ei->bdev;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
- {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
memset(bdev, 0, sizeof(*bdev));
mutex_init(&bdev->bd_mutex);
sema_init(&bdev->bd_mount_sem, 1);
static void
init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
{
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
struct buffer_head * bh = (struct buffer_head *)data;
memset(bh, 0, sizeof(*bh));
{
struct cifsInodeInfo *cifsi = inode;
- if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
inode_init_once(&cifsi->vfs_inode);
INIT_LIST_HEAD(&cifsi->lockList);
}
{
struct coda_inode_info *ei = (struct coda_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&ei->vfs_inode);
}
{
struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr;
- if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&ei->vfs_inode);
}
{
struct efs_inode_info *ei = (struct efs_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&ei->vfs_inode);
}
{
struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
rwlock_init(&ei->i_meta_lock);
#ifdef CONFIG_EXT2_FS_XATTR
init_rwsem(&ei->xattr_sem);
{
struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
INIT_LIST_HEAD(&ei->i_orphan);
#ifdef CONFIG_EXT3_FS_XATTR
init_rwsem(&ei->xattr_sem);
{
struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
INIT_LIST_HEAD(&ei->i_orphan);
#ifdef CONFIG_EXT4DEV_FS_XATTR
init_rwsem(&ei->xattr_sem);
{
struct fat_cache *cache = (struct fat_cache *)foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
INIT_LIST_HEAD(&cache->cache_list);
}
{
struct msdos_inode_info *ei = (struct msdos_inode_info *)foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
spin_lock_init(&ei->cache_lru_lock);
ei->nr_caches = 0;
ei->cache_valid_id = FAT_CACHE_VALID + 1;
{
struct inode * inode = foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(inode);
}
static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
struct gfs2_inode *ip = foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
inode_init_once(&ip->i_inode);
spin_lock_init(&ip->i_spin);
init_rwsem(&ip->i_rw_mutex);
static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
struct gfs2_glock *gl = foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
INIT_HLIST_NODE(&gl->gl_list);
spin_lock_init(&gl->gl_spin);
INIT_LIST_HEAD(&gl->gl_holders);
{
struct hfs_inode_info *i = p;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&i->vfs_inode);
}
{
struct hfsplus_inode_info *i = p;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&i->vfs_inode);
}
{
struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
mutex_init(&ei->i_mutex);
mutex_init(&ei->i_parent_mutex);
inode_init_once(&ei->vfs_inode);
{
struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&ei->vfs_inode);
}
{
struct inode * inode = (struct inode *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(inode);
}
{
struct iso_inode_info *ei = foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&ei->vfs_inode);
}
{
struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
init_MUTEX(&ei->sem);
inode_init_once(&ei->vfs_inode);
}
{
struct metapage *mp = (struct metapage *)foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
mp->lid = 0;
mp->lsn = 0;
mp->flag = 0;
{
struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
init_rwsem(&jfs_ip->rdwrlock);
{
struct file_lock *lock = (struct file_lock *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) !=
- SLAB_CTOR_CONSTRUCTOR)
+ if (!(flags & SLAB_CTOR_CONSTRUCTOR))
return;
locks_init_lock(lock);
{
struct minix_inode_info *ei = (struct minix_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&ei->vfs_inode);
}
{
struct ncp_inode_info *ei = (struct ncp_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
mutex_init(&ei->open_mutex);
inode_init_once(&ei->vfs_inode);
}
{
struct nfs_inode *nfsi = (struct nfs_inode *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
inode_init_once(&nfsi->vfs_inode);
spin_lock_init(&nfsi->req_lock);
INIT_LIST_HEAD(&nfsi->dirty);
{
ntfs_inode *ni = (ntfs_inode *)foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(VFS_I(ni));
}
struct dlmfs_inode_private *ip =
(struct dlmfs_inode_private *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
ip->ip_dlm = NULL;
ip->ip_parent = NULL;
{
struct ocfs2_inode_info *oi = data;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
oi->ip_flags = 0;
oi->ip_open_count = 0;
spin_lock_init(&oi->ip_lock);
{
struct op_inode_info *oi = (struct op_inode_info *) data;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&oi->vfs_inode);
}
{
struct proc_inode *ei = (struct proc_inode *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&ei->vfs_inode);
}
{
struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&ei->vfs_inode);
}
{
struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo;
- if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
INIT_LIST_HEAD(&ei->i_prealloc_list);
inode_init_once(&ei->vfs_inode);
#ifdef CONFIG_REISERFS_FS_POSIX_ACL
{
struct romfs_inode_info *ei = (struct romfs_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&ei->vfs_inode);
}
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct smb_inode_info *ei = (struct smb_inode_info *) foo;
- unsigned long flagmask = SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR;
- if ((flags & flagmask) == SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&ei->vfs_inode);
}
{
struct sysv_inode_info *si = (struct sysv_inode_info *)p;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&si->vfs_inode);
}
{
struct udf_inode_info *ei = (struct udf_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
- {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
ei->i_ext.i_data = NULL;
inode_init_once(&ei->vfs_inode);
}
{
struct ufs_inode_info *ei = (struct ufs_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&ei->vfs_inode);
}
kmem_zone_t *zonep,
unsigned long flags)
{
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
}
* The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
*/
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
-#define SLAB_DEBUG_INITIAL 0x00000200UL /* DEBUG: Call constructor (as verifier) */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
/* Flags passed to a constructor function */
#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then destructor */
#define SLAB_CTOR_ATOMIC 0x002UL /* Tell constructor it can't sleep */
-#define SLAB_CTOR_VERIFY 0x004UL /* Tell constructor it's a verify call */
/*
* struct kmem_cache related prototypes
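With the verify flag gone, a constructor sees at most SLAB_CTOR_CONSTRUCTOR (plus possibly SLAB_CTOR_ATOMIC), so a single bit test is the whole protocol. A sketch of the resulting idiom for a hypothetical foofs filesystem (all foofs_* names are invented for illustration; the callback signature and the six-argument kmem_cache_create() match the converted call sites above):

struct foofs_inode_info {
	struct inode vfs_inode;
};

static struct kmem_cache *foofs_inode_cachep;

static void foofs_init_once(void *foo, struct kmem_cache *cachep,
			    unsigned long flags)
{
	struct foofs_inode_info *ei = foo;

	/* The only remaining question: is this a construction call? */
	if (flags & SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(&ei->vfs_inode);
}

static int __init foofs_init_inodecache(void)
{
	foofs_inode_cachep = kmem_cache_create("foofs_inode_cache",
					sizeof(struct foofs_inode_info), 0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					foofs_init_once, NULL);
	if (foofs_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}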
{
struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&p->vfs_inode);
}
{
struct sighand_struct *sighand = data;
- if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
spin_lock_init(&sighand->siglock);
}
static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
unsigned long flags)
{
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
struct anon_vma *anon_vma = data;
spin_lock_init(&anon_vma->lock);
{
struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
inode_init_once(&p->vfs_inode);
#ifdef CONFIG_TMPFS_POSIX_ACL
p->i_acl = NULL;
#include <asm/page.h>
/*
- * DEBUG - 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
- * SLAB_RED_ZONE & SLAB_POISON.
+ * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
* 0 for faster, smaller code (especially in the critical paths).
*
* STATS - 1 to collect stats for /proc/slabinfo.
/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
-# define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
+# define CREATE_MASK (SLAB_RED_ZONE | \
SLAB_POISON | SLAB_HWCACHE_ALIGN | \
SLAB_CACHE_DMA | \
SLAB_STORE_USER | \
#if DEBUG
WARN_ON(strchr(name, ' ')); /* It confuses parsers */
- if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
- /* No constructor, but inital state check requested */
- printk(KERN_ERR "%s: No con, but init state check "
- "requested - %s\n", __FUNCTION__, name);
- flags &= ~SLAB_DEBUG_INITIAL;
- }
#if FORCED_DEBUG
/*
* Enable redzoning and last user accounting, except for caches with
BUG_ON(objnr >= cachep->num);
BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
- if (cachep->flags & SLAB_DEBUG_INITIAL) {
- /*
- * Need to call the slab's constructor so the caller can
- * perform a verify of its state (debugging). Called without
- * the cache-lock held.
- */
- cachep->ctor(objp + obj_offset(cachep),
- cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
- }
if (cachep->flags & SLAB_POISON && cachep->dtor) {
/* we want to cache poison the object,
* call the destruction callback
*
* - Support PAGE_ALLOC_DEBUG. Should be easy to do.
*
- * - SLAB_DEBUG_INITIAL is not supported but I have never seen a use of
- * it.
- *
* - Variable sizing of the per node arrays
*/
#endif
-/*
- * Flags from the regular SLAB that SLUB does not support:
- */
-#define SLUB_UNIMPLEMENTED (SLAB_DEBUG_INITIAL)
-
/*
* Minimum number of partial slabs. These will be left on the partial
* lists even if they are empty. kmem_cache_shrink may reclaim them.
s->flags = flags;
s->align = align;
- BUG_ON(flags & SLUB_UNIMPLEMENTED);
-
/*
* The page->offset field is only 16 bit wide. This is an offset
* in units of words from the beginning of an object. If the slab
{
struct socket_alloc *ei = (struct socket_alloc *)foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR))
- == SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&ei->vfs_inode);
}
{
struct rpc_inode *rpci = (struct rpc_inode *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
inode_init_once(&rpci->vfs_inode);
rpci->private = NULL;
rpci->nreaders = 0;