dma_kmem = kmem_cache_create("dma_desc",
sizeof(struct s3c2410_dma_buf), 0,
SLAB_HWCACHE_ALIGN,
- s3c2410_dma_cache_ctor, NULL);
+ s3c2410_dma_cache_ctor);
if (dma_kmem == NULL) {
printk(KERN_ERR "dma failed to make kmem cache\n");
{
pte_cache = kmem_cache_create("pte-cache",
sizeof(pte_t) * PTRS_PER_PTE,
- 0, SLAB_PANIC, pte_cache_ctor, NULL);
+ 0, SLAB_PANIC, pte_cache_ctor);
pgd_cache = kmem_cache_create("pgd-cache", MEMC_TABLE_SIZE +
sizeof(pgd_t) * PTRS_PER_PGD,
- 0, SLAB_PANIC, pgd_cache_ctor, NULL);
+ 0, SLAB_PANIC, pgd_cache_ctor);
}
PTRS_PER_PMD*sizeof(pmd_t),
PTRS_PER_PMD*sizeof(pmd_t),
SLAB_PANIC,
- pmd_ctor,
- NULL);
+ pmd_ctor);
if (!SHARED_KERNEL_PMD) {
/* If we're in PAE mode and have a non-shared
kernel pmd, then the pgd size must be a
partial_page_cachep = kmem_cache_create("partial_page_cache",
sizeof(struct partial_page),
- 0, SLAB_PANIC, NULL, NULL);
+ 0, SLAB_PANIC, NULL);
}
#endif
return 0;
flash_block_cache = kmem_cache_create("rtas_flash_cache",
RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
- rtas_block_ctor, NULL);
+ rtas_block_ctor);
if (!flash_block_cache) {
printk(KERN_ERR "%s: failed to create block cache\n",
__FUNCTION__);
HUGEPTE_TABLE_SIZE,
HUGEPTE_TABLE_SIZE,
0,
- zero_ctor, NULL);
+ zero_ctor);
if (! huge_pgtable_cache)
panic("hugetlbpage_init(): could not create hugepte cache\n");
pgtable_cache[i] = kmem_cache_create(name,
size, size,
SLAB_PANIC,
- zero_ctor,
- NULL);
+ zero_ctor);
}
}
ret = -ENOMEM;
spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
sizeof(struct spufs_inode_info), 0,
- SLAB_HWCACHE_ALIGN, spufs_init_once, NULL);
+ SLAB_HWCACHE_ALIGN, spufs_init_once);
if (!spufs_inode_cache)
goto out;
printk(KERN_NOTICE "sq: Registering store queue API.\n");
sq_cache = kmem_cache_create("store_queue_cache",
- sizeof(struct sq_mapping), 0, 0,
- NULL, NULL);
+ sizeof(struct sq_mapping), 0, 0, NULL);
if (unlikely(!sq_cache))
return ret;
BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
- SLAB_PANIC, pmb_cache_ctor, NULL);
+ SLAB_PANIC, pmb_cache_ctor);
jump_to_P2();
tsb_caches[i] = kmem_cache_create(name,
size, size,
- 0,
- NULL, NULL);
+ 0, NULL);
if (!tsb_caches[i]) {
prom_printf("Could not create %s cache\n", name);
prom_halt();
dev_t devid;
bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
- sizeof(struct bsg_command), 0, 0, NULL, NULL);
+ sizeof(struct bsg_command), 0, 0, NULL);
if (!bsg_cmd_cachep) {
printk(KERN_ERR "bsg: failed creating slab cache\n");
return -ENOMEM;
panic("Failed to create kblockd\n");
request_cachep = kmem_cache_create("blkdev_requests",
- sizeof(struct request), 0, SLAB_PANIC, NULL, NULL);
+ sizeof(struct request), 0, SLAB_PANIC, NULL);
requestq_cachep = kmem_cache_create("blkdev_queue",
- sizeof(request_queue_t), 0, SLAB_PANIC, NULL, NULL);
+ sizeof(request_queue_t), 0, SLAB_PANIC, NULL);
iocontext_cachep = kmem_cache_create("blkdev_ioc",
- sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
+ sizeof(struct io_context), 0, SLAB_PANIC, NULL);
for_each_possible_cpu(i)
INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
- *cache = kmem_cache_create(name, size, 0, 0, NULL, NULL);
+ *cache = kmem_cache_create(name, size, 0, 0, NULL);
if (*cache == NULL)
return AE_ERROR;
else
int __init
aoeblk_init(void)
{
- buf_pool_cache = kmem_cache_create("aoe_bufs",
+ buf_pool_cache = kmem_cache_create("aoe_bufs",
sizeof(struct buf),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (buf_pool_cache == NULL)
return -ENOMEM;
packet_task_cache = kmem_cache_create("packet_task",
sizeof(struct packet_task),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!packet_task_cache)
return -ENOMEM;
sizeof(struct ib_mad_private),
0,
SLAB_HWCACHE_ALIGN,
- NULL,
NULL);
if (!ib_mad_cache) {
printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
(char) ('0' + c2dev->devnum));
c2dev->host_msg_cache =
kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (c2dev->host_msg_cache == NULL) {
return -ENOMEM;
}
av_cache = kmem_cache_create("ehca_cache_av",
sizeof(struct ehca_av), 0,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!av_cache)
return -ENOMEM;
return 0;
cq_cache = kmem_cache_create("ehca_cache_cq",
sizeof(struct ehca_cq), 0,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!cq_cache)
return -ENOMEM;
return 0;
ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
EHCA_PAGESIZE, H_CB_ALIGNMENT,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!ctblk_cache) {
ehca_gen_err("Cannot create ctblk SLAB cache.");
ehca_cleanup_mrmw_cache();
mr_cache = kmem_cache_create("ehca_cache_mr",
sizeof(struct ehca_mr), 0,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!mr_cache)
return -ENOMEM;
mw_cache = kmem_cache_create("ehca_cache_mw",
sizeof(struct ehca_mw), 0,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!mw_cache) {
kmem_cache_destroy(mr_cache);
mr_cache = NULL;
pd_cache = kmem_cache_create("ehca_cache_pd",
sizeof(struct ehca_pd), 0,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!pd_cache)
return -ENOMEM;
return 0;
qp_cache = kmem_cache_create("ehca_cache_qp",
sizeof(struct ehca_qp), 0,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!qp_cache)
return -ENOMEM;
return 0;
ig.desc_cache = kmem_cache_create("iser_descriptors",
sizeof (struct iser_desc),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (ig.desc_cache == NULL)
return -ENOMEM;
{
pte_chain_cache = kmem_cache_create("kvm_pte_chain",
sizeof(struct kvm_pte_chain),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!pte_chain_cache)
goto nomem;
rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
sizeof(struct kvm_rmap_desc),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!rmap_desc_cache)
goto nomem;
mmu_page_cache = kmem_cache_create("kvm_mmu_page",
PAGE_SIZE,
- PAGE_SIZE, 0, NULL, NULL);
+ PAGE_SIZE, 0, NULL);
if (!mmu_page_cache)
goto nomem;
mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
sizeof(struct kvm_mmu_page),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!mmu_page_header_cache)
goto nomem;
conf->active_name = 0;
sc = kmem_cache_create(conf->cache_name[conf->active_name],
sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!sc)
return 1;
conf->slab_cache = sc;
/* Step 1 */
sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!sc)
return -ENOMEM;
/* Allocate request mempool and slab */
size = sizeof(struct i2o_block_request);
i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
- SLAB_HWCACHE_ALIGN, NULL,
- NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (!i2o_blk_req_pool.slab) {
osm_err("can't init request slab\n");
rc = -ENOMEM;
if (ubi_devices_cnt == 0) {
ltree_slab = kmem_cache_create("ubi_ltree_slab",
sizeof(struct ltree_entry), 0,
- 0, &ltree_entry_ctor, NULL);
+ 0, &ltree_entry_ctor);
if (!ltree_slab)
return -ENOMEM;
}
if (ubi_devices_cnt == 0) {
wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
sizeof(struct ubi_wl_entry),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!wl_entries_slab)
return -ENOMEM;
}
dasd_page_cache =
kmem_cache_create("dasd_page_cache", PAGE_SIZE,
PAGE_SIZE, SLAB_CACHE_DMA,
- NULL, NULL );
+ NULL);
if (!dasd_page_cache)
MESSAGE(KERN_WARNING, "%s", "Failed to create slab, "
"fixed buffer mode disabled.");
size = sizeof(struct zfcp_fsf_req_qtcb);
align = calc_alignment(size);
zfcp_data.fsf_req_qtcb_cache =
- kmem_cache_create("zfcp_fsf", size, align, 0, NULL, NULL);
+ kmem_cache_create("zfcp_fsf", size, align, 0, NULL);
if (!zfcp_data.fsf_req_qtcb_cache)
goto out;
size = sizeof(struct fsf_status_read_buffer);
align = calc_alignment(size);
zfcp_data.sr_buffer_cache =
- kmem_cache_create("zfcp_sr", size, align, 0, NULL, NULL);
+ kmem_cache_create("zfcp_sr", size, align, 0, NULL);
if (!zfcp_data.sr_buffer_cache)
goto out_sr_cache;
size = sizeof(struct zfcp_gid_pn_data);
align = calc_alignment(size);
zfcp_data.gid_pn_cache =
- kmem_cache_create("zfcp_gid", size, align, 0, NULL, NULL);
+ kmem_cache_create("zfcp_gid", size, align, 0, NULL);
if (!zfcp_data.gid_pn_cache)
goto out_gid_cache;
sizeof(struct asd_dma_tok),
0,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!asd_dma_token_cache) {
asd_printk("couldn't create dma token cache\n");
return -ENOMEM;
sizeof(struct asd_ascb),
0,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!asd_ascb_cache) {
asd_printk("couldn't create ascb cache\n");
goto Err;
static int __init sas_class_init(void)
{
sas_task_cache = kmem_cache_create("sas_task", sizeof(struct sas_task),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (!sas_task_cache)
return -ENOMEM;
/* Allocate cache for SRBs. */
srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (srb_cachep == NULL) {
printk(KERN_ERR
"qla2xxx: Unable to allocate SRB cache...Failing load!\n");
/* Allocate cache for SRBs. */
srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (srb_cachep == NULL) {
printk(KERN_ERR
"%s: Unable to allocate SRB cache..."
if (!pool->users) {
pool->slab = kmem_cache_create(pool->name,
sizeof(struct scsi_cmnd), 0,
- pool->slab_flags, NULL, NULL);
+ pool->slab_flags, NULL);
if (!pool->slab)
goto fail;
}
scsi_io_context_cache = kmem_cache_create("scsi_io_context",
sizeof(struct scsi_io_context),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!scsi_io_context_cache) {
printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
return -ENOMEM;
int size = sgp->size * sizeof(struct scatterlist);
sgp->slab = kmem_cache_create(sgp->name, size, 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (!sgp->slab) {
printk(KERN_ERR "SCSI: can't init sg slab %s\n",
sgp->name);
scsi_tgt_cmd_cache = kmem_cache_create("scsi_tgt_cmd",
sizeof(struct scsi_tgt_cmd),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!scsi_tgt_cmd_cache)
return -ENOMEM;
}
uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
- sizeof(struct urb_priv), 0, 0, NULL, NULL);
+ sizeof(struct urb_priv), 0, 0, NULL);
if (!uhci_up_cachep)
goto up_failed;
snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp);
rp->e_slab = kmem_cache_create(rp->slab_name,
sizeof(struct mon_event_text), sizeof(long), 0,
- mon_text_ctor, NULL);
+ mon_text_ctor);
if (rp->e_slab == NULL) {
rc = -ENOMEM;
goto err_slab;
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
adfs_inode_cachep = kmem_cache_create("adfs_inode_cache",
sizeof(struct adfs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (adfs_inode_cachep == NULL)
return -ENOMEM;
return 0;
sizeof(struct affs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (affs_inode_cachep == NULL)
return -ENOMEM;
return 0;
sizeof(struct afs_vnode),
0,
SLAB_HWCACHE_ALIGN,
- afs_i_init_once,
- NULL);
+ afs_i_init_once);
if (!afs_inode_cachep) {
printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n");
return ret;
}
/* Initialize the inode cache. Called at fs setup.
- *
+ *
* Taken from NFS implementation by Al Viro.
*/
static int
sizeof (struct befs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (befs_inode_cachep == NULL) {
printk(KERN_ERR "befs_init_inodecache: "
"Couldn't initalize inode slabcache\n");
inode_init_once(&bi->vfs_inode);
}
-
+
static int init_inodecache(void)
{
bfs_inode_cachep = kmem_cache_create("bfs_inode_cache",
sizeof(struct bfs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (bfs_inode_cachep == NULL)
return -ENOMEM;
return 0;
size = bvs->nr_vecs * sizeof(struct bio_vec);
bvs->slab = kmem_cache_create(bvs->name, size, 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}
}
bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD|SLAB_PANIC),
- init_once, NULL);
+ init_once);
err = register_filesystem(&bd_type);
if (err)
panic("Cannot register bdev pseudo-fs");
sizeof (struct cifsInodeInfo),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- cifs_init_once, NULL);
+ cifs_init_once);
if (cifs_inode_cachep == NULL)
return -ENOMEM;
cifs_req_cachep = kmem_cache_create("cifs_request",
CIFSMaxBufSize +
MAX_CIFS_HDR_SIZE, 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (cifs_req_cachep == NULL)
return -ENOMEM;
alloc of large cifs buffers even when page debugging is on */
cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (cifs_sm_req_cachep == NULL) {
mempool_destroy(cifs_req_poolp);
kmem_cache_destroy(cifs_req_cachep);
{
cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
sizeof (struct mid_q_entry), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (cifs_mid_cachep == NULL)
return -ENOMEM;
cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
sizeof (struct oplock_q_entry), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (cifs_oplock_cachep == NULL) {
mempool_destroy(cifs_mid_poolp);
kmem_cache_destroy(cifs_mid_cachep);
inode_init_once(&ei->vfs_inode);
}
-
+
int coda_init_inodecache(void)
{
coda_inode_cachep = kmem_cache_create("coda_inode_cache",
sizeof(struct coda_inode_info),
0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
- init_once, NULL);
+ init_once);
if (coda_inode_cachep == NULL)
return -ENOMEM;
return 0;
configfs_dir_cachep = kmem_cache_create("configfs_dir_cache",
sizeof(struct configfs_dirent),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!configfs_dir_cachep)
goto out;
mempages -= reserve;
names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
dcache_init(mempages);
inode_init(mempages);
dcookie_cache = kmem_cache_create("dcookie_cache",
sizeof(struct dcookie_struct),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!dcookie_cache)
goto out;
error = -ENOMEM;
con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
__alignof__(struct connection), 0,
- NULL, NULL);
+ NULL);
if (!con_cache)
goto out;
int ret = 0;
lkb_cache = kmem_cache_create("dlm_lkb", sizeof(struct dlm_lkb),
- __alignof__(struct dlm_lkb), 0, NULL, NULL);
+ __alignof__(struct dlm_lkb), 0, NULL);
if (!lkb_cache)
ret = -ENOMEM;
return ret;
static int __init dnotify_init(void)
{
dn_cache = kmem_cache_create("dnotify_cache",
- sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL, NULL);
+ sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL);
return 0;
}
register_sysctl_table(sys_table);
- dquot_cachep = kmem_cache_create("dquot",
+ dquot_cachep = kmem_cache_create("dquot",
sizeof(struct dquot), sizeof(unsigned long) * 4,
(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD|SLAB_PANIC),
- NULL, NULL);
+ NULL);
order = 0;
dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
info = &ecryptfs_cache_infos[i];
*(info->cache) = kmem_cache_create(info->name, info->size,
- 0, SLAB_HWCACHE_ALIGN, info->ctor, NULL);
+ 0, SLAB_HWCACHE_ALIGN, info->ctor);
if (!*(info->cache)) {
ecryptfs_free_kmem_caches();
ecryptfs_printk(KERN_WARNING, "%s: "
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
efs_inode_cachep = kmem_cache_create("efs_inode_cache",
sizeof(struct efs_inode_info),
0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
- init_once, NULL);
+ init_once);
if (efs_inode_cachep == NULL)
return -ENOMEM;
return 0;
/* Allocates slab cache used to allocate "struct epitem" items */
epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC,
- NULL, NULL);
+ NULL);
/* Allocates slab cache used to allocate "struct eppoll_entry" */
pwq_cache = kmem_cache_create("eventpoll_pwq",
sizeof(struct eppoll_entry), 0,
- EPI_SLAB_DEBUG|SLAB_PANIC, NULL, NULL);
+ EPI_SLAB_DEBUG|SLAB_PANIC, NULL);
return 0;
}
#endif
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
ext2_inode_cachep = kmem_cache_create("ext2_inode_cache",
sizeof(struct ext2_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (ext2_inode_cachep == NULL)
return -ENOMEM;
return 0;
sizeof(struct ext3_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (ext3_inode_cachep == NULL)
return -ENOMEM;
return 0;
sizeof(struct ext4_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (ext4_inode_cachep == NULL)
return -ENOMEM;
return 0;
fat_cache_cachep = kmem_cache_create("fat_cache",
sizeof(struct fat_cache),
0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
- init_once, NULL);
+ init_once);
if (fat_cache_cachep == NULL)
return -ENOMEM;
return 0;
sizeof(struct msdos_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (fat_inode_cachep == NULL)
return -ENOMEM;
return 0;
static int __init fasync_init(void)
{
fasync_cache = kmem_cache_create("fasync_cache",
- sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
+ sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
return 0;
}
int rv;
vxfs_inode_cachep = kmem_cache_create("vxfs_inode",
- sizeof(struct vxfs_inode_info), 0,
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL, NULL);
+ sizeof(struct vxfs_inode_info), 0,
+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
if (!vxfs_inode_cachep)
return -ENOMEM;
rv = register_filesystem(&vxfs_fs_type);
int err = -ENOMEM;
fuse_req_cachep = kmem_cache_create("fuse_request",
sizeof(struct fuse_req),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!fuse_req_cachep)
goto out;
fuse_inode_cachep = kmem_cache_create("fuse_inode",
sizeof(struct fuse_inode),
0, SLAB_HWCACHE_ALIGN,
- fuse_inode_init_once, NULL);
+ fuse_inode_init_once);
err = -ENOMEM;
if (!fuse_inode_cachep)
goto out_unreg2;
gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
sizeof(struct gfs2_glock),
0, 0,
- gfs2_init_glock_once, NULL);
+ gfs2_init_glock_once);
if (!gfs2_glock_cachep)
goto fail;
sizeof(struct gfs2_inode),
0, SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD,
- gfs2_init_inode_once, NULL);
+ gfs2_init_inode_once);
if (!gfs2_inode_cachep)
goto fail;
gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata",
sizeof(struct gfs2_bufdata),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!gfs2_bufdata_cachep)
goto fail;
hfs_inode_cachep = kmem_cache_create("hfs_inode_cache",
sizeof(struct hfs_inode_info), 0, SLAB_HWCACHE_ALIGN,
- hfs_init_once, NULL);
+ hfs_init_once);
if (!hfs_inode_cachep)
return -ENOMEM;
err = register_filesystem(&hfs_fs_type);
hfsplus_inode_cachep = kmem_cache_create("hfsplus_icache",
HFSPLUS_INODE_SIZE, 0, SLAB_HWCACHE_ALIGN,
- hfsplus_init_once, NULL);
+ hfsplus_init_once);
if (!hfsplus_inode_cachep)
return -ENOMEM;
err = register_filesystem(&hfsplus_fs_type);
mutex_init(&ei->i_parent_mutex);
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
hpfs_inode_cachep = kmem_cache_create("hpfs_inode_cache",
sizeof(struct hpfs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (hpfs_inode_cachep == NULL)
return -ENOMEM;
return 0;
hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
sizeof(struct hugetlbfs_inode_info),
- 0, 0, init_once, NULL);
+ 0, 0, init_once);
if (hugetlbfs_inode_cachep == NULL)
return -ENOMEM;
0,
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
SLAB_MEM_SPREAD),
- init_once,
- NULL);
+ init_once);
register_shrinker(&icache_shrinker);
/* Hash may have been set up in inode_init_early */
watch_cachep = kmem_cache_create("inotify_watch_cache",
sizeof(struct inotify_user_watch),
- 0, SLAB_PANIC, NULL, NULL);
+ 0, SLAB_PANIC, NULL);
event_cachep = kmem_cache_create("inotify_event_cache",
sizeof(struct inotify_kernel_event),
- 0, SLAB_PANIC, NULL, NULL);
+ 0, SLAB_PANIC, NULL);
return 0;
}
sizeof(struct iso_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (isofs_inode_cachep == NULL)
return -ENOMEM;
return 0;
* boundary.
*/
jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
- slab_size, slab_size, 0, NULL, NULL);
+ slab_size, slab_size, 0, NULL);
if (!jbd_slab[i]) {
printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
return -ENOMEM;
sizeof(struct journal_head),
0, /* offset */
0, /* flags */
- NULL, /* ctor */
- NULL); /* dtor */
+ NULL); /* ctor */
retval = 0;
if (journal_head_cache == 0) {
retval = -ENOMEM;
sizeof(handle_t),
0, /* offset */
0, /* flags */
- NULL, /* ctor */
- NULL); /* dtor */
+ NULL); /* ctor */
if (jbd_handle_cache == NULL) {
printk(KERN_EMERG "JBD: failed to create handle cache\n");
return -ENOMEM;
{
revoke_record_cache = kmem_cache_create("revoke_record",
sizeof(struct jbd_revoke_record_s),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (revoke_record_cache == 0)
return -ENOMEM;
revoke_table_cache = kmem_cache_create("revoke_table",
sizeof(struct jbd_revoke_table_s),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (revoke_table_cache == 0) {
kmem_cache_destroy(revoke_record_cache);
revoke_record_cache = NULL;
* boundary.
*/
jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
- slab_size, slab_size, 0, NULL, NULL);
+ slab_size, slab_size, 0, NULL);
if (!jbd_slab[i]) {
printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
return -ENOMEM;
sizeof(struct journal_head),
0, /* offset */
0, /* flags */
- NULL, /* ctor */
- NULL); /* dtor */
+ NULL); /* ctor */
retval = 0;
if (jbd2_journal_head_cache == 0) {
retval = -ENOMEM;
sizeof(handle_t),
0, /* offset */
0, /* flags */
- NULL, /* ctor */
- NULL); /* dtor */
+ NULL); /* ctor */
if (jbd2_handle_cache == NULL) {
printk(KERN_EMERG "JBD: failed to create handle cache\n");
return -ENOMEM;
{
jbd2_revoke_record_cache = kmem_cache_create("jbd2_revoke_record",
sizeof(struct jbd2_revoke_record_s),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (jbd2_revoke_record_cache == 0)
return -ENOMEM;
jbd2_revoke_table_cache = kmem_cache_create("jbd2_revoke_table",
sizeof(struct jbd2_revoke_table_s),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (jbd2_revoke_table_cache == 0) {
kmem_cache_destroy(jbd2_revoke_record_cache);
jbd2_revoke_record_cache = NULL;
{
full_dnode_slab = kmem_cache_create("jffs2_full_dnode",
sizeof(struct jffs2_full_dnode),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!full_dnode_slab)
goto err;
raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent",
sizeof(struct jffs2_raw_dirent),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!raw_dirent_slab)
goto err;
raw_inode_slab = kmem_cache_create("jffs2_raw_inode",
sizeof(struct jffs2_raw_inode),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!raw_inode_slab)
goto err;
tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode",
sizeof(struct jffs2_tmp_dnode_info),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!tmp_dnode_info_slab)
goto err;
raw_node_ref_slab = kmem_cache_create("jffs2_refblock",
sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!raw_node_ref_slab)
goto err;
node_frag_slab = kmem_cache_create("jffs2_node_frag",
sizeof(struct jffs2_node_frag),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!node_frag_slab)
goto err;
inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
sizeof(struct jffs2_inode_cache),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!inode_cache_slab)
goto err;
#ifdef CONFIG_JFFS2_FS_XATTR
xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum",
sizeof(struct jffs2_xattr_datum),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!xattr_datum_cache)
goto err;
xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref",
sizeof(struct jffs2_xattr_ref),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!xattr_ref_cache)
goto err;
#endif
sizeof(struct jffs2_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- jffs2_i_init_once, NULL);
+ jffs2_i_init_once);
if (!jffs2_inode_cachep) {
printk(KERN_ERR "JFFS2 error: Failed to initialise inode cache\n");
return -ENOMEM;
* Allocate the metapage structures
*/
metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
- 0, 0, init_once, NULL);
+ 0, 0, init_once);
if (metapage_cache == NULL)
return -ENOMEM;
jfs_inode_cachep =
kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
- init_once, NULL);
+ init_once);
if (jfs_inode_cachep == NULL)
return -ENOMEM;
{
filelock_cache = kmem_cache_create("file_lock_cache",
sizeof(struct file_lock), 0, SLAB_PANIC,
- init_once, NULL);
+ init_once);
return 0;
}
INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
}
cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL, NULL);
+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
if (!cache->c_entry_cache)
goto fail;
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
minix_inode_cachep = kmem_cache_create("minix_inode_cache",
sizeof(struct minix_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (minix_inode_cachep == NULL)
return -ENOMEM;
return 0;
init_rwsem(&namespace_sem);
mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
- 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
mutex_init(&ei->open_mutex);
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
ncp_inode_cachep = kmem_cache_create("ncp_inode_cache",
sizeof(struct ncp_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (ncp_inode_cachep == NULL)
return -ENOMEM;
return 0;
sizeof(struct nfs_direct_req),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- NULL, NULL);
+ NULL);
if (nfs_direct_cachep == NULL)
return -ENOMEM;
nfsi->npages = 0;
nfs4_init_once(nfsi);
}
-
+
static int __init nfs_init_inodecache(void)
{
nfs_inode_cachep = kmem_cache_create("nfs_inode_cache",
sizeof(struct nfs_inode),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (nfs_inode_cachep == NULL)
return -ENOMEM;
nfs_page_cachep = kmem_cache_create("nfs_page",
sizeof(struct nfs_page),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (nfs_page_cachep == NULL)
return -ENOMEM;
nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
sizeof(struct nfs_read_data),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (nfs_rdata_cachep == NULL)
return -ENOMEM;
nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
sizeof(struct nfs_write_data),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (nfs_wdata_cachep == NULL)
return -ENOMEM;
nfsd4_init_slabs(void)
{
stateowner_slab = kmem_cache_create("nfsd4_stateowners",
- sizeof(struct nfs4_stateowner), 0, 0, NULL, NULL);
+ sizeof(struct nfs4_stateowner), 0, 0, NULL);
if (stateowner_slab == NULL)
goto out_nomem;
file_slab = kmem_cache_create("nfsd4_files",
- sizeof(struct nfs4_file), 0, 0, NULL, NULL);
+ sizeof(struct nfs4_file), 0, 0, NULL);
if (file_slab == NULL)
goto out_nomem;
stateid_slab = kmem_cache_create("nfsd4_stateids",
- sizeof(struct nfs4_stateid), 0, 0, NULL, NULL);
+ sizeof(struct nfs4_stateid), 0, 0, NULL);
if (stateid_slab == NULL)
goto out_nomem;
deleg_slab = kmem_cache_create("nfsd4_delegations",
- sizeof(struct nfs4_delegation), 0, 0, NULL, NULL);
+ sizeof(struct nfs4_delegation), 0, 0, NULL);
if (deleg_slab == NULL)
goto out_nomem;
return 0;
ntfs_index_ctx_cache = kmem_cache_create(ntfs_index_ctx_cache_name,
sizeof(ntfs_index_context), 0 /* offset */,
- SLAB_HWCACHE_ALIGN, NULL /* ctor */, NULL /* dtor */);
+ SLAB_HWCACHE_ALIGN, NULL /* ctor */);
if (!ntfs_index_ctx_cache) {
printk(KERN_CRIT "NTFS: Failed to create %s!\n",
ntfs_index_ctx_cache_name);
}
ntfs_attr_ctx_cache = kmem_cache_create(ntfs_attr_ctx_cache_name,
sizeof(ntfs_attr_search_ctx), 0 /* offset */,
- SLAB_HWCACHE_ALIGN, NULL /* ctor */, NULL /* dtor */);
+ SLAB_HWCACHE_ALIGN, NULL /* ctor */);
if (!ntfs_attr_ctx_cache) {
printk(KERN_CRIT "NTFS: Failed to create %s!\n",
ntfs_attr_ctx_cache_name);
ntfs_name_cache = kmem_cache_create(ntfs_name_cache_name,
(NTFS_MAX_NAME_LEN+1) * sizeof(ntfschar), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (!ntfs_name_cache) {
printk(KERN_CRIT "NTFS: Failed to create %s!\n",
ntfs_name_cache_name);
ntfs_inode_cache = kmem_cache_create(ntfs_inode_cache_name,
sizeof(ntfs_inode), 0,
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL, NULL);
+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
if (!ntfs_inode_cache) {
printk(KERN_CRIT "NTFS: Failed to create %s!\n",
ntfs_inode_cache_name);
ntfs_big_inode_cache = kmem_cache_create(ntfs_big_inode_cache_name,
sizeof(big_ntfs_inode), 0,
SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
- ntfs_big_inode_init_once, NULL);
+ ntfs_big_inode_init_once);
if (!ntfs_big_inode_cache) {
printk(KERN_CRIT "NTFS: Failed to create %s!\n",
ntfs_big_inode_cache_name);
sizeof(struct dlmfs_inode_private),
0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- dlmfs_init_once, NULL);
+ dlmfs_init_once);
if (!dlmfs_inode_cache)
return -ENOMEM;
cleanup_inode = 1;
dlm_mle_cache = kmem_cache_create("dlm_mle_cache",
sizeof(struct dlm_master_list_entry),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (dlm_mle_cache == NULL)
return -ENOMEM;
return 0;
0,
(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- ocfs2_inode_init_once, NULL);
+ ocfs2_inode_init_once);
if (!ocfs2_inode_cachep)
return -ENOMEM;
{
ocfs2_uptodate_cachep = kmem_cache_create("ocfs2_uptodate",
sizeof(struct ocfs2_meta_cache_item),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (!ocfs2_uptodate_cachep)
return -ENOMEM;
0,
(SLAB_RECLAIM_ACCOUNT |
SLAB_MEM_SPREAD),
- op_inode_init_once, NULL);
+ op_inode_init_once);
if (!op_inode_cachep)
return -ENOMEM;
inode_init_once(&ei->vfs_inode);
}
-
+
int __init proc_init_inodecache(void)
{
proc_inode_cachep = kmem_cache_create("proc_inode_cache",
sizeof(struct proc_inode),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (proc_inode_cachep == NULL)
return -ENOMEM;
return 0;
sizeof(struct qnx4_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (qnx4_inode_cachep == NULL)
return -ENOMEM;
return 0;
reiserfs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (reiserfs_inode_cachep == NULL)
return -ENOMEM;
return 0;
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
romfs_inode_cachep = kmem_cache_create("romfs_inode_cache",
sizeof(struct romfs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (romfs_inode_cachep == NULL)
return -ENOMEM;
return 0;
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
smb_inode_cachep = kmem_cache_create("smb_inode_cache",
sizeof(struct smb_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (smb_inode_cachep == NULL)
return -ENOMEM;
return 0;
req_cachep = kmem_cache_create("smb_request",
sizeof(struct smb_request), 0,
SMB_SLAB_DEBUG | SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (req_cachep == NULL)
return -ENOMEM;
sysfs_dir_cachep = kmem_cache_create("sysfs_dir_cache",
sizeof(struct sysfs_dirent),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!sysfs_dir_cachep)
goto out;
sysv_inode_cachep = kmem_cache_create("sysv_inode_cache",
sizeof(struct sysv_inode_info), 0,
SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
- init_once, NULL);
+ init_once);
if (!sysv_inode_cachep)
return -ENOMEM;
return 0;
sizeof(struct udf_inode_info),
0, (SLAB_RECLAIM_ACCOUNT |
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (udf_inode_cachep == NULL)
return -ENOMEM;
return 0;
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
ufs_inode_cachep = kmem_cache_create("ufs_inode_cache",
sizeof(struct ufs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (ufs_inode_cachep == NULL)
return -ENOMEM;
return 0;
static inline kmem_zone_t *
kmem_zone_init(int size, char *zone_name)
{
- return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL);
+ return kmem_cache_create(zone_name, size, 0, 0, NULL);
}
static inline kmem_zone_t *
kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
void (*construct)(void *, kmem_zone_t *, unsigned long))
{
- return kmem_cache_create(zone_name, size, 0, flags, construct, NULL);
+ return kmem_cache_create(zone_name, size, 0, flags, construct);
}
static inline void
strcpy(pool->name, name);
pool->slab =
- kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL,
- NULL);
+ kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL);
if (!pool->slab)
goto free_name;
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
- void (*)(void *, struct kmem_cache *, unsigned long),
void (*)(void *, struct kmem_cache *, unsigned long));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
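With the destructor gone, the exported prototype keeps a single constructor hook. A minimal sketch of a caller against the new five-argument form (struct foo, foo_ctor and foo_cache are hypothetical names for illustration, not part of this patch):

	struct foo {
		int refs;
	};

	static struct kmem_cache *foo_cache;

	/* Matches the remaining function-pointer parameter above. Per the
	 * kernel-doc below, the slab core runs the ctor when it allocates
	 * new pages for the cache, not on every kmem_cache_alloc(). */
	static void foo_ctor(void *obj, struct kmem_cache *cachep,
			     unsigned long flags)
	{
		struct foo *f = obj;
		f->refs = 0;
	}

	static int __init foo_init(void)
	{
		foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
					      0, SLAB_HWCACHE_ALIGN, foo_ctor);
		return foo_cache ? 0 : -ENOMEM;
	}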
*/
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
sizeof(struct __struct), __alignof__(struct __struct),\
- (__flags), NULL, NULL)
+ (__flags), NULL)
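KMEM_CACHE likewise shrinks to a single NULL ctor. A hedged usage sketch (struct foo_entry and foo_entry_cache are hypothetical):

	struct foo_entry {
		int id;
		struct list_head list;
	};

	static struct kmem_cache *foo_entry_cache;

	static int __init foo_entry_init(void)
	{
		/* name ("foo_entry"), object size and alignment all derive
		 * from the struct itself via the macro */
		foo_entry_cache = KMEM_CACHE(foo_entry, SLAB_PANIC);
		return 0;	/* SLAB_PANIC makes allocation failure fatal */
	}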
/*
* The largest kmalloc size supported by the slab allocators is
mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
sizeof(struct mqueue_inode_info), 0,
- SLAB_HWCACHE_ALIGN, init_once, NULL);
+ SLAB_HWCACHE_ALIGN, init_once);
if (mqueue_inode_cachep == NULL)
return -ENOMEM;
/* create a slab on which task_structs can be allocated */
task_struct_cachep =
kmem_cache_create("task_struct", sizeof(struct task_struct),
- ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
+ ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
#endif
/*
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
- sighand_ctor, NULL);
+ sighand_ctor);
signal_cachep = kmem_cache_create("signal_cache",
sizeof(struct signal_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
- files_cachep = kmem_cache_create("files_cache",
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ files_cachep = kmem_cache_create("files_cache",
sizeof(struct files_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
- fs_cachep = kmem_cache_create("fs_cache",
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ fs_cachep = kmem_cache_create("fs_cache",
sizeof(struct fs_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
vm_area_cachep = kmem_cache_create("vm_area_struct",
sizeof(struct vm_area_struct), 0,
- SLAB_PANIC, NULL, NULL);
+ SLAB_PANIC, NULL);
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}
/*
static int __init nsproxy_cache_init(void)
{
nsproxy_cachep = kmem_cache_create("nsproxy", sizeof(struct nsproxy),
- 0, SLAB_PANIC, NULL, NULL);
+ 0, SLAB_PANIC, NULL);
return 0;
}
register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
posix_timers_cache = kmem_cache_create("posix_timers_cache",
- sizeof (struct k_itimer), 0, 0, NULL, NULL);
+ sizeof (struct k_itimer), 0, 0, NULL);
idr_init(&posix_timers_id);
return 0;
}
int n;
uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
- 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
for(n = 0; n < UIDHASH_SZ; ++n)
INIT_LIST_HEAD(init_user_ns.uidhash_table + n);
{
if (!idr_layer_cache)
idr_layer_cache = kmem_cache_create("idr_layer_cache",
- sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
+ sizeof(struct idr_layer), 0, 0, idr_cache_ctor);
return 0;
}
{
radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
sizeof(struct radix_tree_node), 0,
- SLAB_PANIC, radix_tree_node_ctor, NULL);
+ SLAB_PANIC, radix_tree_node_ctor);
radix_tree_init_maxindex();
hotcpu_notifier(radix_tree_callback, 0);
}
policy_cache = kmem_cache_create("numa_policy",
sizeof(struct mempolicy),
- 0, SLAB_PANIC, NULL, NULL);
+ 0, SLAB_PANIC, NULL);
sn_cache = kmem_cache_create("shared_policy_node",
sizeof(struct sp_node),
- 0, SLAB_PANIC, NULL, NULL);
+ 0, SLAB_PANIC, NULL);
/*
* Set interleaving policy for system init. Interleaving is only
void __init anon_vma_init(void)
{
anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
}
/*
{
shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
sizeof(struct shmem_inode_info),
- 0, 0, init_once, NULL);
+ 0, 0, init_once);
if (shmem_inode_cachep == NULL)
return -ENOMEM;
return 0;
sizes[INDEX_AC].cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
- NULL, NULL);
+ NULL);
if (INDEX_AC != INDEX_L3) {
sizes[INDEX_L3].cs_cachep =
sizes[INDEX_L3].cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
- NULL, NULL);
+ NULL);
}
slab_early_init = 0;
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
- NULL, NULL);
+ NULL);
}
#ifdef CONFIG_ZONE_DMA
sizes->cs_dmacachep = kmem_cache_create(
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
SLAB_PANIC,
- NULL, NULL);
+ NULL);
#endif
sizes++;
names++;
* @align: The required alignment for the objects.
* @flags: SLAB flags
* @ctor: A constructor for the objects.
- * @dtor: A destructor for the objects (not implemented anymore).
*
* Returns a ptr to the cache on success, NULL on failure.
* Cannot be called within a int, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache
- * and the @dtor is run before the pages are handed back.
+ * The @ctor is run when new pages are allocated by the cache.
*
* @name must be valid until the cache is destroyed. This implies that
* the module calling this has to destroy the cache before getting unloaded.
struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
unsigned long flags,
- void (*ctor)(void*, struct kmem_cache *, unsigned long),
- void (*dtor)(void*, struct kmem_cache *, unsigned long))
+ void (*ctor)(void*, struct kmem_cache *, unsigned long))
{
size_t left_over, slab_size, ralign;
struct kmem_cache *cachep = NULL, *pc;
* Sanity checks... these are all serious usage bugs.
*/
if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
- size > KMALLOC_MAX_SIZE || dtor) {
+ size > KMALLOC_MAX_SIZE) {
printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
name);
BUG();
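Every call-site conversion in this patch then follows the same mechanical pattern, sketched here with a hypothetical cache (note that every removed sixth argument in this diff is NULL):

	/* before: six arguments, the trailing dtor always passed as NULL */
	cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
				   SLAB_HWCACHE_ALIGN, foo_ctor, NULL);

	/* after: the dtor argument is dropped; nothing else changes */
	cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
				   SLAB_HWCACHE_ALIGN, foo_ctor);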
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags,
- void (*ctor)(void*, struct kmem_cache *, unsigned long),
- void (*dtor)(void*, struct kmem_cache *, unsigned long))
+ void (*ctor)(void*, struct kmem_cache *, unsigned long))
{
struct kmem_cache *c;
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags,
- void (*ctor)(void *, struct kmem_cache *, unsigned long),
- void (*dtor)(void *, struct kmem_cache *, unsigned long))
+ void (*ctor)(void *, struct kmem_cache *, unsigned long))
{
struct kmem_cache *s;
- BUG_ON(dtor);
down_write(&slub_lock);
s = find_mergeable(size, align, flags, ctor);
if (s) {
br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
sizeof(struct net_bridge_fdb_entry),
0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (!br_fdb_cache)
return -ENOMEM;
flow_cachep = kmem_cache_create("flow_cache",
sizeof(struct flow_cache_entry),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
- NULL, NULL);
+ NULL);
flow_hash_shift = 10;
flow_lwm = 2 * flow_hash_size;
flow_hwm = 4 * flow_hash_size;
tbl->kmem_cachep =
kmem_cache_create(tbl->id, tbl->entry_size, 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
- NULL, NULL);
+ NULL);
tbl->stats = alloc_percpu(struct neigh_statistics);
if (!tbl->stats)
panic("cannot create neighbour cache statistics");
sizeof(struct sk_buff),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
- NULL, NULL);
+ NULL);
skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
(2*sizeof(struct sk_buff)) +
sizeof(atomic_t),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
- NULL, NULL);
+ NULL);
}
/**
if (alloc_slab) {
prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (prot->slab == NULL) {
printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
sprintf(request_sock_slab_name, mask, prot->name);
prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name,
prot->rsk_prot->obj_size, 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (prot->rsk_prot->slab == NULL) {
printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
kmem_cache_create(timewait_sock_slab_name,
prot->twsk_prot->twsk_obj_size,
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (prot->twsk_prot->twsk_slab == NULL)
goto out_free_timewait_sock_slab_name;
}
{
dccp_ackvec_slab = kmem_cache_create("dccp_ackvec",
sizeof(struct dccp_ackvec), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (dccp_ackvec_slab == NULL)
goto out_err;
dccp_ackvec_record_slab =
kmem_cache_create("dccp_ackvec_record",
sizeof(struct dccp_ackvec_record),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (dccp_ackvec_record_slab == NULL)
goto out_destroy_slab;
if (slab_name == NULL)
return NULL;
slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (slab == NULL)
kfree(slab_name);
return slab;
{
dccp_li_cachep = kmem_cache_create("dccp_li_hist",
sizeof(struct dccp_li_hist_entry),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
return dccp_li_cachep == NULL ? -ENOBUFS : 0;
}
hist->dccptxh_slab = kmem_cache_create(slab_name,
sizeof(struct dccp_tx_hist_entry),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (hist->dccptxh_slab == NULL)
goto out_free_slab_name;
out:
hist->dccprxh_slab = kmem_cache_create(slab_name,
sizeof(struct dccp_rx_hist_entry),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (hist->dccprxh_slab == NULL)
goto out_free_slab_name;
out:
dccp_hashinfo.bind_bucket_cachep =
kmem_cache_create("dccp_bind_bucket",
sizeof(struct inet_bind_bucket), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (!dccp_hashinfo.bind_bucket_cachep)
goto out;
dn_dst_ops.kmem_cachep =
kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
init_timer(&dn_route_timer);
dn_route_timer.function = dn_dst_check_expire;
dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
dn_hash_kmem = kmem_cache_create("dn_fib_info_cache",
sizeof(struct dn_fib_info),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
}
void __exit dn_fib_table_cleanup(void)
fn_hash_kmem = kmem_cache_create("ip_fib_hash",
sizeof(struct fib_node),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (fn_alias_kmem == NULL)
fn_alias_kmem = kmem_cache_create("ip_fib_alias",
sizeof(struct fib_alias),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
tb = kmalloc(sizeof(struct fib_table) + sizeof(struct fn_hash),
GFP_KERNEL);
fn_alias_kmem = kmem_cache_create("ip_fib_alias",
sizeof(struct fib_alias),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
GFP_KERNEL);
peer_cachep = kmem_cache_create("inet_peer_cache",
sizeof(struct inet_peer),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
- NULL, NULL);
+ NULL);
/* All the timers, started at system startup tend
to synchronize. Perturb it a bit.
mrt_cachep = kmem_cache_create("ip_mrt_cache",
sizeof(struct mfc_cache),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
- NULL, NULL);
+ NULL);
init_timer(&ipmr_expire_timer);
ipmr_expire_timer.function=ipmr_expire_process;
register_netdevice_notifier(&ip_mr_notifier);
/* Allocate ip_vs_conn slab cache */
ip_vs_conn_cachep = kmem_cache_create("ip_vs_conn",
sizeof(struct ip_vs_conn), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (!ip_vs_conn_cachep) {
vfree(ip_vs_conn_tab);
return -ENOMEM;
ipv4_dst_ops.kmem_cachep =
kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
tcp_hashinfo.bind_bucket_cachep =
kmem_cache_create("tcp_bind_bucket",
sizeof(struct inet_bind_bucket), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
/* Size and allocate the main established and bind bucket
* hash tables.
fib6_node_kmem = kmem_cache_create("fib6_nodes",
sizeof(struct fib6_node),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
- NULL, NULL);
+ NULL);
fib6_tables_init();
#endif
ip6_dst_ops.kmem_cachep =
kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops.kmem_cachep;
fib6_init();
xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
sizeof(struct xfrm6_tunnel_spi),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!xfrm6_tunnel_spi_kmem)
return -ENOMEM;
nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
sizeof(struct nf_conn),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!nf_conntrack_cachep) {
printk(KERN_ERR "Unable to create nf_conn slab cache\n");
goto err_free_hash;
nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
sizeof(struct nf_conntrack_expect),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!nf_ct_expect_cachep)
goto err2;
err = -ENOMEM;
hashlimit_cachep = kmem_cache_create("xt_hashlimit",
sizeof(struct dsthash_ent), 0, 0,
- NULL, NULL);
+ NULL);
if (!hashlimit_cachep) {
printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n");
goto err2;
ret = -ENOMEM;
rxrpc_call_jar = kmem_cache_create(
"rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (!rxrpc_call_jar) {
printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n");
goto error_call_jar;
sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket",
sizeof(struct sctp_bind_bucket),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!sctp_bucket_cachep)
goto out;
sctp_chunk_cachep = kmem_cache_create("sctp_chunk",
sizeof(struct sctp_chunk),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!sctp_chunk_cachep)
goto err_chunk_cachep;
(SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
SLAB_MEM_SPREAD),
- init_once,
- NULL);
+ init_once);
if (sock_inode_cachep == NULL)
return -ENOMEM;
return 0;
sizeof(struct rpc_inode),
0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (!rpc_inode_cachep)
return -ENOMEM;
err = register_filesystem(&rpc_pipe_fs_type);
rpc_task_slabp = kmem_cache_create("rpc_tasks",
sizeof(struct rpc_task),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!rpc_task_slabp)
goto err_nomem;
rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
RPC_BUFFER_MAXSIZE,
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!rpc_buffer_slabp)
goto err_nomem;
rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
{
tipc_queue_item_cache =
kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (!tipc_queue_item_cache)
return -ENOMEM;
secpath_cachep = kmem_cache_create("secpath_cache",
sizeof(struct sec_path),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
- NULL, NULL);
+ NULL);
}
xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
sizeof(struct xfrm_dst),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
- NULL, NULL);
+ NULL);
hmask = 8 - 1;
sz = (hmask+1) * sizeof(struct hlist_head);
{
/* allocate a slab in which we can store keys */
key_jar = kmem_cache_create("key_jar", sizeof(struct key),
- 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
/* add the special key types */
list_add_tail(&key_type_keyring.link, &key_types_list);
atomic_set(&avc_cache.lru_hint, 0);
avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
- 0, SLAB_PANIC, NULL, NULL);
+ 0, SLAB_PANIC, NULL);
audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n");
}
sel_inode_cache = kmem_cache_create("selinux_inode_security",
sizeof(struct inode_security_struct),
- 0, SLAB_PANIC, NULL, NULL);
+ 0, SLAB_PANIC, NULL);
avc_init();
original_ops = secondary_ops = security_ops;
{
avtab_node_cachep = kmem_cache_create("avtab_node",
sizeof(struct avtab_node),
- 0, SLAB_PANIC, NULL, NULL);
+ 0, SLAB_PANIC, NULL);
}
void avtab_cache_destroy(void)