kvfree(ptr); \
} while (0)
+/*
+ * Use #define rather than inline, as lnet_cpt_table() might
+ * not be defined yet
+ */
+#define kmalloc_cpt(size, flags, cpt) \
+ kmalloc_node(size, flags, cfs_cpt_spread_node(lnet_cpt_table(), cpt))
+
+#define kzalloc_cpt(size, flags, cpt) \
+ kmalloc_node(size, flags | __GFP_ZERO, \
+ cfs_cpt_spread_node(lnet_cpt_table(), cpt))
+
+#define kvmalloc_cpt(size, flags, cpt) \
+ kvmalloc_node(size, flags, \
+ cfs_cpt_spread_node(lnet_cpt_table(), cpt))
+
+#define kvzalloc_cpt(size, flags, cpt) \
+ kvmalloc_node(size, flags | __GFP_ZERO, \
+ cfs_cpt_spread_node(lnet_cpt_table(), cpt))
+
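For reference, a minimal usage sketch (a hypothetical caller, not part of the patch): each helper allocates on the NUMA node that cfs_cpt_spread_node() selects for the given CPT, and the result must be released with the free routine matching the underlying allocator: kmalloc_cpt()/kzalloc_cpt() pair with kfree(), kvmalloc_cpt()/kvzalloc_cpt() with kvfree().

static struct kib_conn *conn_alloc_sketch(int cpt)
{
	struct kib_conn *conn;

	/* zeroed allocation on the node chosen for @cpt */
	conn = kzalloc_cpt(sizeof(*conn), GFP_NOFS, cpt);
	if (!conn)
		return NULL;	/* caller reports -ENOMEM */
	return conn;		/* later released with kfree(conn) */
}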
/******************************************************************************/
void libcfs_debug_dumplog(void);
LASSERT(net);
LASSERT(nid != LNET_NID_ANY);
- LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
+ peer = kzalloc_cpt(sizeof(*peer), GFP_NOFS, cpt);
if (!peer) {
CERROR("Cannot allocate peer\n");
return -ENOMEM;
LASSERT(sched->ibs_nthreads > 0);
- LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
- sizeof(*init_qp_attr));
+ init_qp_attr = kzalloc_cpt(sizeof(*init_qp_attr), GFP_NOFS, cpt);
if (!init_qp_attr) {
CERROR("Can't allocate qp_attr for %s\n",
libcfs_nid2str(peer->ibp_nid));
goto failed_0;
}
- LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
+ conn = kzalloc_cpt(sizeof(*conn), GFP_NOFS, cpt);
if (!conn) {
CERROR("Can't allocate connection for %s\n",
libcfs_nid2str(peer->ibp_nid));
INIT_LIST_HEAD(&conn->ibc_active_txs);
spin_lock_init(&conn->ibc_lock);
- LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
- sizeof(*conn->ibc_connvars));
+ conn->ibc_connvars = kzalloc_cpt(sizeof(*conn->ibc_connvars),
+ GFP_NOFS, cpt);
if (!conn->ibc_connvars) {
CERROR("Can't allocate in-progress connection state\n");
goto failed_2;
write_unlock_irqrestore(glock, flags);
- LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
- IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
+ conn->ibc_rxs = kzalloc_cpt(IBLND_RX_MSGS(conn) * sizeof(struct kib_rx),
+ GFP_NOFS, cpt);
if (!conn->ibc_rxs) {
CERROR("Cannot allocate RX buffers\n");
goto failed_2;
if (conn->ibc_rx_pages)
kiblnd_unmap_rx_descs(conn);
- if (conn->ibc_rxs) {
- LIBCFS_FREE(conn->ibc_rxs,
- IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
- }
-
+ kfree(conn->ibc_rxs);
kfree(conn->ibc_connvars);
if (conn->ibc_hdev)
__free_page(p->ibp_pages[i]);
}
- LIBCFS_FREE(p, offsetof(struct kib_pages, ibp_pages[npages]));
+ kfree(p);
}
int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages)
struct kib_pages *p;
int i;
- LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
- offsetof(struct kib_pages, ibp_pages[npages]));
+ p = kzalloc_cpt(offsetof(struct kib_pages, ibp_pages[npages]),
+ GFP_NOFS, cpt);
if (!p) {
CERROR("Can't allocate descriptor for %d pages\n", npages);
return -ENOMEM;
}
- memset(p, 0, offsetof(struct kib_pages, ibp_pages[npages]));
p->ibp_npages = npages;
for (i = 0; i < npages; i++) {
INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
fpo->fast_reg.fpo_pool_size = 0;
for (i = 0; i < fps->fps_pool_size; i++) {
- LIBCFS_CPT_ALLOC(frd, lnet_cpt_table(), fps->fps_cpt,
- sizeof(*frd));
+ frd = kzalloc_cpt(sizeof(*frd), GFP_NOFS, fps->fps_cpt);
if (!frd) {
CERROR("Failed to allocate a new fast_reg descriptor\n");
rc = -ENOMEM;
struct kib_fmr_pool *fpo;
int rc;
- LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
+ fpo = kzalloc_cpt(sizeof(*fpo), GFP_NOFS, fps->fps_cpt);
if (!fpo)
return -ENOMEM;
struct kib_tx *tx = &tpo->tpo_tx_descs[i];
list_del(&tx->tx_list);
- if (tx->tx_pages)
- LIBCFS_FREE(tx->tx_pages,
- LNET_MAX_IOV *
- sizeof(*tx->tx_pages));
- if (tx->tx_frags)
- LIBCFS_FREE(tx->tx_frags,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_frags));
- if (tx->tx_wrq)
- LIBCFS_FREE(tx->tx_wrq,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_wrq));
- if (tx->tx_sge)
- LIBCFS_FREE(tx->tx_sge,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_sge));
- if (tx->tx_rd)
- LIBCFS_FREE(tx->tx_rd,
- offsetof(struct kib_rdma_desc,
- rd_frags[IBLND_MAX_RDMA_FRAGS]));
- }
-
- LIBCFS_FREE(tpo->tpo_tx_descs,
- pool->po_size * sizeof(struct kib_tx));
+ kfree(tx->tx_pages);
+ kfree(tx->tx_frags);
+ kfree(tx->tx_wrq);
+ kfree(tx->tx_sge);
+ kfree(tx->tx_rd);
+ }
+
+ kfree(tpo->tpo_tx_descs);
out:
kiblnd_fini_pool(pool);
kfree(tpo);
struct kib_pool *pool;
struct kib_tx_pool *tpo;
- LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
+ tpo = kzalloc_cpt(sizeof(*tpo), GFP_NOFS, ps->ps_cpt);
if (!tpo) {
CERROR("Failed to allocate TX pool\n");
return -ENOMEM;
return -ENOMEM;
}
- LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
- size * sizeof(struct kib_tx));
+ tpo->tpo_tx_descs = kzalloc_cpt(size * sizeof(struct kib_tx),
+ GFP_NOFS, ps->ps_cpt);
if (!tpo->tpo_tx_descs) {
CERROR("Can't allocate %d tx descriptors\n", size);
ps->ps_pool_destroy(pool);
tx->tx_pool = tpo;
if (ps->ps_net->ibn_fmr_ps) {
- LIBCFS_CPT_ALLOC(tx->tx_pages,
- lnet_cpt_table(), ps->ps_cpt,
- LNET_MAX_IOV * sizeof(*tx->tx_pages));
+ tx->tx_pages = kzalloc_cpt(LNET_MAX_IOV * sizeof(*tx->tx_pages),
+ GFP_NOFS, ps->ps_cpt);
if (!tx->tx_pages)
break;
}
- LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_frags));
+ tx->tx_frags = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_frags),
+ GFP_NOFS, ps->ps_cpt);
if (!tx->tx_frags)
break;
sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1);
- LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_wrq));
+ tx->tx_wrq = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_wrq),
+ GFP_NOFS, ps->ps_cpt);
if (!tx->tx_wrq)
break;
- LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_sge));
+ tx->tx_sge = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_sge),
+ GFP_NOFS, ps->ps_cpt);
if (!tx->tx_sge)
break;
- LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
- offsetof(struct kib_rdma_desc,
- rd_frags[IBLND_MAX_RDMA_FRAGS]));
+ tx->tx_rd = kzalloc_cpt(offsetof(struct kib_rdma_desc,
+ rd_frags[IBLND_MAX_RDMA_FRAGS]),
+ GFP_NOFS, ps->ps_cpt);
if (!tx->tx_rd)
break;
}
LASSERT(id.pid != LNET_PID_ANY);
LASSERT(!in_interrupt());
- LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
+ peer = kzalloc_cpt(sizeof(*peer), GFP_NOFS, cpt);
if (!peer)
return -ENOMEM;
struct ksock_sched_info *info;
int i;
- cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
- if (info->ksi_scheds) {
- LIBCFS_FREE(info->ksi_scheds,
- info->ksi_nthreads_max *
- sizeof(info->ksi_scheds[0]));
- }
- }
+ cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info)
+ kfree(info->ksi_scheds);
cfs_percpt_free(ksocknal_data.ksnd_sched_info);
}
info->ksi_nthreads_max = nthrs;
info->ksi_cpt = i;
- LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
- info->ksi_nthreads_max * sizeof(*sched));
+ info->ksi_scheds = kzalloc_cpt(info->ksi_nthreads_max * sizeof(*sched),
+ GFP_NOFS, i);
if (!info->ksi_scheds)
goto failed;
arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
- for (i = 0; i < arr->va_count; i++) {
- if (arr->va_ptrs[i])
- LIBCFS_FREE(arr->va_ptrs[i], arr->va_size);
- }
+ for (i = 0; i < arr->va_count; i++)
+ kfree(arr->va_ptrs[i]);
kvfree(arr);
}
arr->va_cptab = cptab;
for (i = 0; i < count; i++) {
- LIBCFS_CPT_ALLOC(arr->va_ptrs[i], cptab, i, size);
+ arr->va_ptrs[i] = kzalloc_node(size, GFP_KERNEL,
+ cfs_cpt_spread_node(cptab, i));
if (!arr->va_ptrs[i]) {
cfs_percpt_free((void *)&arr->va_ptrs[0]);
return NULL;
count, lnet_res_type2str(rec->rec_type));
}
- if (rec->rec_lh_hash) {
- LIBCFS_FREE(rec->rec_lh_hash,
- LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
- rec->rec_lh_hash = NULL;
- }
+ kvfree(rec->rec_lh_hash);
+ rec->rec_lh_hash = NULL;
rec->rec_type = 0; /* mark it as finalized */
}
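The pairing matters in the hunk above: rec_lh_hash now comes from kvmalloc_cpt(), and kvmalloc_node() may fall back to vmalloc for large allocations, so only kvfree() can release it. A sketch of the discipline, using hypothetical helpers built from names in this patch:

/* Hypothetical helpers illustrating the rule; not part of the patch. */
static int res_lh_hash_alloc(struct lnet_res_container *rec, int cpt)
{
	rec->rec_lh_hash = kvmalloc_cpt(LNET_LH_HASH_SIZE *
					sizeof(rec->rec_lh_hash[0]),
					GFP_KERNEL, cpt);
	return rec->rec_lh_hash ? 0 : -ENOMEM;
}

static void res_lh_hash_free(struct lnet_res_container *rec)
{
	kvfree(rec->rec_lh_hash);	/* not kfree(): may be vmalloc-backed */
	rec->rec_lh_hash = NULL;
}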
rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
/* Arbitrary choice of hash table size */
- LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
- LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
+ rec->rec_lh_hash = kvmalloc_cpt(LNET_LH_HASH_SIZE *
+ sizeof(rec->rec_lh_hash[0]),
+ GFP_KERNEL, cpt);
if (!rec->rec_lh_hash) {
rc = -ENOMEM;
goto out;
if (count > 0)
CERROR("%d active msg on exit\n", count);
- if (container->msc_finalizers) {
- LIBCFS_FREE(container->msc_finalizers,
- container->msc_nfinalizers *
- sizeof(*container->msc_finalizers));
- container->msc_finalizers = NULL;
- }
+ kvfree(container->msc_finalizers);
+ container->msc_finalizers = NULL;
container->msc_init = 0;
}
/* number of CPUs */
container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
- LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
- container->msc_nfinalizers *
- sizeof(*container->msc_finalizers));
+ container->msc_finalizers = kvzalloc_cpt(container->msc_nfinalizers *
+ sizeof(*container->msc_finalizers),
+ GFP_KERNEL, cpt);
if (!container->msc_finalizers) {
CERROR("Failed to allocate message finalizers\n");
}
}
/* the extra entry is for MEs with ignore bits */
- LIBCFS_FREE(mhash, sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
+ kvfree(mhash);
}
cfs_percpt_free(ptl->ptl_mtables);
spin_lock_init(&ptl->ptl_lock);
cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
/* the extra entry is for MEs with ignore bits */
- LIBCFS_CPT_ALLOC(mhash, lnet_cpt_table(), i,
- sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
+ mhash = kvzalloc_cpt(sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1),
+ GFP_KERNEL, i);
if (!mhash) {
CERROR("Failed to create match hash for portal %d\n",
index);
cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
INIT_LIST_HEAD(&ptable->pt_deathrow);
- LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
- LNET_PEER_HASH_SIZE * sizeof(*hash));
+ hash = kvmalloc_cpt(LNET_PEER_HASH_SIZE * sizeof(*hash),
+ GFP_KERNEL, i);
if (!hash) {
CERROR("Failed to create peer hash table\n");
lnet_peer_tables_destroy();
for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
LASSERT(list_empty(&hash[j]));
- LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
+ kvfree(hash);
}
cfs_percpt_free(the_lnet.ln_peer_tables);
if (lp)
memset(lp, 0, sizeof(*lp));
else
- LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), cpt2, sizeof(*lp));
+ lp = kzalloc_cpt(sizeof(*lp), GFP_NOFS, cpt2);
if (!lp) {
rc = -ENOMEM;
void
lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages)
{
- int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);
-
while (--npages >= 0)
__free_page(rb->rb_kiov[npages].bv_page);
- LIBCFS_FREE(rb, sz);
+ kfree(rb);
}
static struct lnet_rtrbuf *
struct lnet_rtrbuf *rb;
int i;
- LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
+ rb = kzalloc_cpt(sz, GFP_NOFS, cpt);
if (!rb)
return NULL;
while (--i >= 0)
__free_page(rb->rb_kiov[i].bv_page);
- LIBCFS_FREE(rb, sz);
+ kfree(rb);
return NULL;
}
__free_page(pg);
}
- LIBCFS_FREE(bk, offsetof(struct srpc_bulk, bk_iovs[bk->bk_niov]));
+ kfree(bk);
}
struct srpc_bulk *
LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);
- LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
- offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
+ bk = kzalloc_cpt(offsetof(struct srpc_bulk, bk_iovs[bulk_npg]),
+ GFP_KERNEL, cpt);
if (!bk) {
CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
return NULL;
}
for (j = 0; j < nrpcs; j++) {
- LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(),
- i, sizeof(*rpc));
+ rpc = kzalloc_cpt(sizeof(*rpc), GFP_NOFS, i);
if (!rpc) {
srpc_service_fini(svc);
return -ENOMEM;