Add request_cache, a dedicated slab cache for allocating ptlrpc_requests.
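
struct ptlrpc_request is allocated on every RPC, so allocate it from
a dedicated kmem_cache instead of with generic OBD_ALLOC()/OBD_FREE().
The "ptlrpc_cache" slab is hardware-cache aligned and is created in
ptlrpc_init() as a new cleanup phase 2 (the later phases are renumbered
accordingly) and destroyed on module cleanup. All alloc/free sites are
converted to ptlrpc_request_cache_alloc()/ptlrpc_request_cache_free():
the request pool, fake requests, incoming request descriptors, the sec
context refresh path and the early-reply paths.

For reference, a minimal stand-alone sketch of the slab-cache pattern
this patch applies (the foo_* names are illustrative only and are not
part of this patch):

	#include <linux/module.h>
	#include <linux/slab.h>

	struct foo_req {
		int fr_id;
	};

	static struct kmem_cache *foo_cache;

	static int __init foo_init(void)
	{
		struct foo_req *req;

		/* one cache per hot object type, HW cache aligned */
		foo_cache = kmem_cache_create("foo_cache",
					      sizeof(struct foo_req),
					      0, SLAB_HWCACHE_ALIGN, NULL);
		if (!foo_cache)
			return -ENOMEM;

		/* objects are allocated from and freed back to the cache */
		req = kmem_cache_alloc(foo_cache, GFP_KERNEL);
		if (req)
			kmem_cache_free(foo_cache, req);
		return 0;
	}

	static void __exit foo_exit(void)
	{
		kmem_cache_destroy(foo_cache);
	}

	module_init(foo_init);
	module_exit(foo_exit);
	MODULE_LICENSE("GPL");
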
Xyratex-bug-id: MRP-689
Signed-off-by: Andriy Skulysh <Andriy_Skulysh@xyratex.com>
Signed-off-by: Niu Yawei <yawei.niu@intel.com>
Reviewed-on: http://review.whamcloud.com/6874
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-2424
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Lai Siyao <lai.siyao@intel.com>
Signed-off-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	return rc;
}
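+/* dedicated slab cache for all struct ptlrpc_request allocations */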
+struct kmem_cache *request_cache;
+
+int ptlrpc_request_cache_init(void)
+{
+	request_cache = kmem_cache_create("ptlrpc_cache",
+					  sizeof(struct ptlrpc_request),
+					  0, SLAB_HWCACHE_ALIGN, NULL);
+	return request_cache == NULL ? -ENOMEM : 0;
+}
+
+void ptlrpc_request_cache_fini(void)
+{
+	kmem_cache_destroy(request_cache);
+}
+
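+/* Allocate a request from the cache. @flags is the GFP mask for the
+ * allocation; callers pass __GFP_IO from process context and
+ * ALLOC_ATOMIC_TRY from the LNet event callback. */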
+struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags)
+{
+	struct ptlrpc_request *req;
+
+	OBD_SLAB_ALLOC_PTR_GFP(req, request_cache, flags);
+	return req;
+}
+
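+/* Return a request to the cache. Requests that came from a request
+ * pool are instead recycled with __ptlrpc_free_req_to_pool(); callers
+ * check req->rq_pool first (see ptlrpc_request_free() below). */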
+void ptlrpc_request_cache_free(struct ptlrpc_request *req)
+{
+	OBD_SLAB_FREE_PTR(req, request_cache);
+}
+
/**
* Wind down request pool \a pool.
* Frees all requests from the pool too
		LASSERT(req->rq_reqbuf);
		LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
		OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
-		OBD_FREE(req, sizeof(*req));
+		ptlrpc_request_cache_free(req);
	}
	spin_unlock(&pool->prp_lock);
	OBD_FREE(pool, sizeof(*pool));
		struct lustre_msg *msg;
		spin_unlock(&pool->prp_lock);
-		OBD_ALLOC(req, sizeof(struct ptlrpc_request));
+		req = ptlrpc_request_cache_alloc(__GFP_IO);
		if (!req)
			return;
		OBD_ALLOC_LARGE(msg, size);
		if (!msg) {
-			OBD_FREE(req, sizeof(struct ptlrpc_request));
+			ptlrpc_request_cache_free(req);
			return;
		}
		req->rq_reqbuf = msg;
		request = ptlrpc_prep_req_from_pool(pool);
	if (!request)
-		OBD_ALLOC_PTR(request);
+		request = ptlrpc_request_cache_alloc(__GFP_IO);
	if (request) {
		LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
	if (request->rq_pool)
		__ptlrpc_free_req_to_pool(request);
	else
-		OBD_FREE_PTR(request);
+		ptlrpc_request_cache_free(request);
}
EXPORT_SYMBOL(ptlrpc_request_free);
	if (request->rq_pool)
		__ptlrpc_free_req_to_pool(request);
	else
-		OBD_FREE(request, sizeof(*request));
+		ptlrpc_request_cache_free(request);
}
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
		return ERR_PTR(-EINVAL);
	/* copy some code from deprecated fakereq. */
-	OBD_ALLOC_PTR(req);
+	req = ptlrpc_request_cache_alloc(__GFP_IO);
	if (req == NULL) {
		CERROR("ptlrpc: run out of memory!\n");
		return ERR_PTR(-ENOMEM);
		/* We moaned above already... */
		return;
	}
-	OBD_ALLOC_GFP(req, sizeof(*req), ALLOC_ATOMIC_TRY);
+	req = ptlrpc_request_cache_alloc(ALLOC_ATOMIC_TRY);
	if (req == NULL) {
		CERROR("Can't allocate incoming request descriptor: "
		       "Dropping %s RPC from %s\n",
/* client.c */
struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
					 unsigned type, unsigned portal);
+int ptlrpc_request_cache_init(void);
+void ptlrpc_request_cache_fini(void);
+struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags);
+void ptlrpc_request_cache_free(struct ptlrpc_request *req);
void ptlrpc_init_xid(void);
/* events.c */
		return rc;
	cleanup_phase = 1;
+	rc = ptlrpc_request_cache_init();
+	if (rc)
+		GOTO(cleanup, rc);
+	cleanup_phase = 2;
	rc = ptlrpc_init_portals();
	if (rc)
		GOTO(cleanup, rc);
-	cleanup_phase = 2;
+
+	cleanup_phase = 3;
	rc = ptlrpc_connection_init();
	if (rc)
		GOTO(cleanup, rc);
-	cleanup_phase = 3;
+	cleanup_phase = 4;
	ptlrpc_put_connection_superhack = ptlrpc_connection_put;
	rc = ptlrpc_start_pinger();
	if (rc)
		GOTO(cleanup, rc);
-	cleanup_phase = 4;
+	cleanup_phase = 5;
	rc = ldlm_init();
	if (rc)
		GOTO(cleanup, rc);
-	cleanup_phase = 5;
+	cleanup_phase = 6;
	rc = sptlrpc_init();
	if (rc)
		GOTO(cleanup, rc);
	switch (cleanup_phase) {
	case 8:
		ptlrpc_nrs_fini();
+		/* Fall through */
	case 7:
		sptlrpc_fini();
-	case 5:
+		/* Fall through */
+	case 6:
		ldlm_exit();
-	case 4:
+		/* Fall through */
+	case 5:
		ptlrpc_stop_pinger();
-	case 3:
+		/* Fall through */
+	case 4:
		ptlrpc_connection_fini();
-	case 2:
+		/* Fall through */
+	case 3:
		ptlrpc_exit_portals();
+		/* Fall through */
+	case 2:
+		ptlrpc_request_cache_fini();
+		/* Fall through */
	case 1:
		ptlrpc_hr_fini();
		req_layout_fini();
+		/* Fall through */
	default: ;
	}
	ldlm_exit();
	ptlrpc_stop_pinger();
	ptlrpc_exit_portals();
+	ptlrpc_request_cache_fini();
	ptlrpc_hr_fini();
	ptlrpc_connection_fini();
}
		return -EACCES;
	}
-	OBD_ALLOC_PTR(req);
+	req = ptlrpc_request_cache_alloc(__GFP_IO);
	if (!req)
		return -ENOMEM;
	rc = sptlrpc_req_refresh_ctx(req, 0);
	LASSERT(list_empty(&req->rq_ctx_chain));
	sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
-	OBD_FREE_PTR(req);
+	ptlrpc_request_cache_free(req);
	return rc;
}
	int early_bufsz, early_size;
	int rc;
-	OBD_ALLOC_PTR(early_req);
+	early_req = ptlrpc_request_cache_alloc(__GFP_IO);
	if (early_req == NULL)
		return -ENOMEM;
err_buf:
	OBD_FREE_LARGE(early_buf, early_bufsz);
err_req:
-	OBD_FREE_PTR(early_req);
+	ptlrpc_request_cache_free(early_req);
	return rc;
}
	sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
	OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
-	OBD_FREE_PTR(early_req);
+	ptlrpc_request_cache_free(early_req);
}
/**************************************************
				/* NB request buffers use an embedded
				 * req if the incoming req unlinked the
				 * MD; this isn't one of them! */
-				OBD_FREE(req, sizeof(*req));
+				ptlrpc_request_cache_free(req);
			}
		}
	}
	newdl = cfs_time_current_sec() + at_get(&svcpt->scp_at_estimate);
-	OBD_ALLOC(reqcopy, sizeof(*reqcopy));
+	reqcopy = ptlrpc_request_cache_alloc(__GFP_IO);
	if (reqcopy == NULL)
		return -ENOMEM;
	OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen);
-	if (!reqmsg) {
-		OBD_FREE(reqcopy, sizeof(*reqcopy));
-		return -ENOMEM;
-	}
+	if (!reqmsg)
+		GOTO(out_free, rc = -ENOMEM);
	*reqcopy = *req;
	reqcopy->rq_reply_state = NULL;
out:
	sptlrpc_svc_ctx_decref(reqcopy);
	OBD_FREE_LARGE(reqmsg, req->rq_reqlen);
-	OBD_FREE(reqcopy, sizeof(*reqcopy));
+out_free:
+	ptlrpc_request_cache_free(reqcopy);
	return rc;
}