{
struct ptlrpc_request_pool *pool;
- OBD_ALLOC(pool, sizeof (struct ptlrpc_request_pool));
+ OBD_ALLOC(pool, sizeof(struct ptlrpc_request_pool));
if (!pool)
return NULL;
if (list_empty(&pool->prp_req_list)) {
/* have not allocated a single request for the pool */
- OBD_FREE(pool, sizeof (struct ptlrpc_request_pool));
+ OBD_FREE(pool, sizeof(struct ptlrpc_request_pool));
pool = NULL;
}
return pool;
/* Requests on the set should either all be completed, or all be new */
expected_phase = (atomic_read(&set->set_remaining) == 0) ?
RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
- list_for_each (tmp, &set->set_requests) {
+ list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
list_entry(tmp, struct ptlrpc_request,
rq_set_chain);
req->rq_invalid_rqset = 0;
spin_unlock(&req->rq_lock);
- ptlrpc_req_finished (req);
+ ptlrpc_req_finished(req);
}
LASSERT(atomic_read(&set->set_remaining) == 0);
{
int delay = 0;
- LASSERT (status != NULL);
+ LASSERT(status != NULL);
*status = 0;
if (req->rq_ctx_init || req->rq_ctx_fini) {
/*
* A timeout expired. See which reqs it applies to...
*/
- list_for_each (tmp, &set->set_requests) {
+ list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
list_entry(tmp, struct ptlrpc_request,
rq_set_chain);
LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
- LASSERT (sizeof (*aa) <= sizeof (req->rq_async_args));
+ LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
memset(aa, 0, sizeof(*aa));
init_waitqueue_head(&req->rq_set_waitq);
atomic_set(&req->rq_refcount, 1);
- CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args));
+ CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
args = ptlrpc_req_async_args(req);
args->magic = PTLRPC_WORK_MAGIC;
args->cb = cb;
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_request *req = cbid->cbid_arg;
- LASSERT (ev->type == LNET_EVENT_SEND ||
- ev->type == LNET_EVENT_UNLINK);
- LASSERT (ev->unlinked);
+ LASSERT(ev->type == LNET_EVENT_SEND ||
+ ev->type == LNET_EVENT_UNLINK);
+ LASSERT(ev->unlinked);
DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
- LASSERT (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
- LASSERT (ev->md.start == req->rq_repbuf);
- LASSERT (ev->offset + ev->mlength <= req->rq_repbuf_len);
+ LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
+ LASSERT(ev->md.start == req->rq_repbuf);
+ LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
/* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
for adaptive timeouts' early reply. */
LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);
goto out_wake;
}
- if (ev->mlength < ev->rlength ) {
+ if (ev->mlength < ev->rlength) {
CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
req->rq_replen, ev->rlength, ev->offset);
req->rq_reply_truncate = 1;
/*
* Client's bulk has been written/read
*/
-void client_bulk_callback (lnet_event_t *ev)
+void client_bulk_callback(lnet_event_t *ev)
{
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
struct ptlrpc_request *req;
- LASSERT ((desc->bd_type == BULK_PUT_SINK &&
- ev->type == LNET_EVENT_PUT) ||
- (desc->bd_type == BULK_GET_SOURCE &&
- ev->type == LNET_EVENT_GET) ||
- ev->type == LNET_EVENT_UNLINK);
- LASSERT (ev->unlinked);
+ LASSERT((desc->bd_type == BULK_PUT_SINK &&
+ ev->type == LNET_EVENT_PUT) ||
+ (desc->bd_type == BULK_GET_SOURCE &&
+ ev->type == LNET_EVENT_GET) ||
+ ev->type == LNET_EVENT_UNLINK);
+ LASSERT(ev->unlinked);
if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
ev->status = -EIO;
struct ptlrpc_service *service = svcpt->scp_service;
struct ptlrpc_request *req;
- LASSERT (ev->type == LNET_EVENT_PUT ||
- ev->type == LNET_EVENT_UNLINK);
- LASSERT ((char *)ev->md.start >= rqbd->rqbd_buffer);
- LASSERT ((char *)ev->md.start + ev->offset + ev->mlength <=
- rqbd->rqbd_buffer + service->srv_buf_size);
+ LASSERT(ev->type == LNET_EVENT_PUT ||
+ ev->type == LNET_EVENT_UNLINK);
+ LASSERT((char *)ev->md.start >= rqbd->rqbd_buffer);
+ LASSERT((char *)ev->md.start + ev->offset + ev->mlength <=
+ rqbd->rqbd_buffer + service->srv_buf_size);
CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
"event type %d, status %d, service %s\n",
* we'd have to re-post the rqbd, which we can't do in this
* context. */
req = &rqbd->rqbd_req;
- memset(req, 0, sizeof (*req));
+ memset(req, 0, sizeof(*req));
} else {
- LASSERT (ev->type == LNET_EVENT_PUT);
+ LASSERT(ev->type == LNET_EVENT_PUT);
if (ev->status != 0) {
/* We moaned above already... */
return;
struct ptlrpc_reply_state *rs = cbid->cbid_arg;
struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
- LASSERT (ev->type == LNET_EVENT_SEND ||
- ev->type == LNET_EVENT_ACK ||
- ev->type == LNET_EVENT_UNLINK);
+ LASSERT(ev->type == LNET_EVENT_SEND ||
+ ev->type == LNET_EVENT_ACK ||
+ ev->type == LNET_EVENT_UNLINK);
if (!rs->rs_difficult) {
/* 'Easy' replies have no further processing so I drop the
* net's ref on 'rs' */
- LASSERT (ev->unlinked);
+ LASSERT(ev->unlinked);
ptlrpc_rs_decref(rs);
return;
}
- LASSERT (rs->rs_on_net);
+ LASSERT(rs->rs_on_net);
if (ev->unlinked) {
/* Last network callback. The net's ref on 'rs' stays put
void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;
/* Honestly, it's best to find out early. */
- LASSERT (cbid->cbid_arg != LP_POISON);
- LASSERT (callback == request_out_callback ||
- callback == reply_in_callback ||
- callback == client_bulk_callback ||
- callback == request_in_callback ||
- callback == reply_out_callback
- );
-
- callback (ev);
+ LASSERT(cbid->cbid_arg != LP_POISON);
+ LASSERT(callback == request_out_callback ||
+ callback == reply_in_callback ||
+ callback == client_bulk_callback ||
+ callback == request_in_callback ||
+ callback == reply_out_callback);
+
+ callback(ev);
}
-int ptlrpc_uuid_to_peer (struct obd_uuid *uuid,
+int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
lnet_process_id_t *peer, lnet_nid_t *self)
{
int best_dist = 0;
/* We're not passing any limits yet... */
rc = LNetNIInit(pid);
if (rc < 0) {
- CDEBUG (D_NET, "Can't init network interface: %d\n", rc);
+ CDEBUG(D_NET, "Can't init network interface: %d\n", rc);
return (-ENOENT);
}
if (rc == 0)
return 0;
- CERROR ("Failed to allocate event queue: %d\n", rc);
+ CERROR("Failed to allocate event queue: %d\n", rc);
LNetNIFini();
return (-ENOMEM);
init_timer(timer);
timer->expires = timeout;
- timer->data = (unsigned long ) ctx;
+ timer->data = (unsigned long)ctx;
timer->function = ctx_upcall_timeout_kr;
add_timer(timer);
arc4_out_key:
rawobj_free(&arc4_keye);
arc4_out:
- do {} while(0); /* just to avoid compile warning */
+ do {} while (0); /* just to avoid compile warning */
} else {
rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
3, data_desc, &cipher, 1);
memset(window, 0, win_size / 8);
*max_seq = seq_num;
} else {
- while(*max_seq < seq_num) {
+ while (*max_seq < seq_num) {
(*max_seq)++;
__clear_bit((*max_seq) % win_size, window);
}
if (swabbed)
gss_header_swabber(ghdr);
- switch(ghdr->gh_proc) {
+ switch (ghdr->gh_proc) {
case PTLRPC_GSS_PROC_INIT:
case PTLRPC_GSS_PROC_CONTINUE_INIT:
rc = gss_svc_handle_init(req, gw);
switch (rc) {
case SECSVC_OK:
- LASSERT (grctx->src_ctx);
+ LASSERT(grctx->src_ctx);
req->rq_auth_gss = 1;
req->rq_auth_remote = grctx->src_ctx->gsc_remote;
ptlrpc_import_state_name(state)); \
__import_set_state(imp, state); \
} \
-} while(0)
+} while (0)
#define IMPORT_SET_STATE(imp, state) \
do { \
spin_lock(&imp->imp_lock); \
IMPORT_SET_STATE_NOLOCK(imp, state); \
spin_unlock(&imp->imp_lock); \
-} while(0)
+} while (0)
static int ptlrpc_connect_interpret(const struct lu_env *env,
ptlrpc_request_set_replen(request);
request->rq_interpret_reply = ptlrpc_connect_interpret;
- CLASSERT(sizeof (*aa) <= sizeof (request->rq_async_args));
+ CLASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
aa = ptlrpc_req_async_args(request);
memset(aa, 0, sizeof(*aa));
if (MSG_CONNECT_RECONNECT & msg_flags) {
memset(&old_hdl, 0, sizeof(old_hdl));
if (!memcmp(&old_hdl, lustre_msg_get_handle(request->rq_repmsg),
- sizeof (old_hdl))) {
+ sizeof(old_hdl))) {
LCONSOLE_WARN("Reconnect to %s (at @%s) failed due "
"bad handle "LPX64"\n",
obd2cli_tgt(imp->imp_obd),
at->at_worst_time = now;
at->at_hist[0] = val;
at->at_binstart = now;
- } else if (now - at->at_binstart < binlimit ) {
+ } else if (now - at->at_binstart < binlimit) {
/* in bin 0 */
at->at_hist[0] = max(val, at->at_hist[0]);
at->at_current = max(val, at->at_current);
/* move bins over */
shift = (now - at->at_binstart) / binlimit;
LASSERT(shift > 0);
- for(i = AT_BINS - 1; i >= 0; i--) {
+ for (i = AT_BINS - 1; i >= 0; i--) {
if (i >= shift) {
at->at_hist[i] = at->at_hist[i - shift];
maxv = max(maxv, at->at_hist[i]);
const struct req_msg_field *field,
enum req_location loc,
int offset,
- void *value, int len, int dump, void (*swabber)( void *))
+ void *value, int len, int dump, void (*swabber)(void *))
{
void *p;
int i;
static void *__req_capsule_get(struct req_capsule *pill,
const struct req_msg_field *field,
enum req_location loc,
- void (*swabber)( void *),
+ void (*swabber)(void *),
int dump)
{
const struct req_format *fmt;
return (-EINVAL); \
} \
mutex_unlock(&ctxt->loc_mutex); \
-} while(0)
+} while (0)
#define LLOG_CLIENT_EXIT(ctxt, imp) do { \
mutex_lock(&ctxt->loc_mutex); \
ctxt->loc_imp, imp); \
class_import_put(imp); \
mutex_unlock(&ctxt->loc_mutex); \
-} while(0)
+} while (0)
/* This is a callback from the llog_* functions.
* Assumes caller has already pushed us into the kernel context. */
if (hdr == NULL)
- GOTO(out, rc =-EFAULT);
+ GOTO(out, rc = -EFAULT);
- memcpy(handle->lgh_hdr, hdr, sizeof (*hdr));
+ memcpy(handle->lgh_hdr, hdr, sizeof(*hdr));
handle->lgh_last_idx = handle->lgh_hdr->llh_tail.lrt_index;
/* sanity checks */
for (i = 0; i < EXTRA_LAST_OPC; i++) {
char *units;
- switch(i) {
+ switch (i) {
case BRW_WRITE_BYTES:
case BRW_READ_BYTES:
units = "bytes";
* over \a conn connection to portal \a portal.
* Returns 0 on success or error code.
*/
-static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
- lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
- struct ptlrpc_connection *conn, int portal, __u64 xid,
- unsigned int offset)
+static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len,
+ lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
+ struct ptlrpc_connection *conn, int portal, __u64 xid,
+ unsigned int offset)
{
int rc;
lnet_md_t md;
- LASSERT (portal != 0);
- LASSERT (conn != NULL);
- CDEBUG (D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
+ LASSERT(portal != 0);
+ LASSERT(conn != NULL);
+ CDEBUG(D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
md.start = base;
md.length = len;
md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
ack = LNET_NOACK_REQ;
}
- rc = LNetMDBind (md, LNET_UNLINK, mdh);
+ rc = LNetMDBind(md, LNET_UNLINK, mdh);
if (unlikely(rc != 0)) {
- CERROR ("LNetMDBind failed: %d\n", rc);
- LASSERT (rc == -ENOMEM);
+ CERROR("LNetMDBind failed: %d\n", rc);
+ LASSERT(rc == -ENOMEM);
return -ENOMEM;
}
CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64", offset %u\n",
len, portal, xid, offset);
- rc = LNetPut (conn->c_self, *mdh, ack,
- conn->c_peer, portal, xid, offset, 0);
+ rc = LNetPut(conn->c_self, *mdh, ack,
+ conn->c_peer, portal, xid, offset, 0);
if (unlikely(rc != 0)) {
int rc2;
/* We're going to get an UNLINK event when I unlink below,
* request, or a saved copy if this is a req saved in
* target_queue_final_reply().
*/
- LASSERT (req->rq_no_reply == 0);
- LASSERT (req->rq_reqbuf != NULL);
- LASSERT (rs != NULL);
- LASSERT ((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
- LASSERT (req->rq_repmsg != NULL);
- LASSERT (req->rq_repmsg == rs->rs_msg);
- LASSERT (rs->rs_cb_id.cbid_fn == reply_out_callback);
- LASSERT (rs->rs_cb_id.cbid_arg == rs);
+ LASSERT(req->rq_no_reply == 0);
+ LASSERT(req->rq_reqbuf != NULL);
+ LASSERT(rs != NULL);
+ LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
+ LASSERT(req->rq_repmsg != NULL);
+ LASSERT(req->rq_repmsg == rs->rs_msg);
+ LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
+ LASSERT(rs->rs_cb_id.cbid_arg == rs);
/* There may be no rq_export during failover */
req->rq_sent = cfs_time_current_sec();
- rc = ptl_send_buf (&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
- (rs->rs_difficult && !rs->rs_no_ack) ?
- LNET_ACK_REQ : LNET_NOACK_REQ,
- &rs->rs_cb_id, conn,
- ptlrpc_req2svc(req)->srv_rep_portal,
- req->rq_xid, req->rq_reply_off);
+ rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
+ (rs->rs_difficult && !rs->rs_no_ack) ?
+ LNET_ACK_REQ : LNET_NOACK_REQ,
+ &rs->rs_cb_id, conn,
+ ptlrpc_req2svc(req)->srv_rep_portal,
+ req->rq_xid, req->rq_reply_off);
out:
if (unlikely(rc != 0))
ptlrpc_req_drop_rs(req);
}
EXPORT_SYMBOL(ptlrpc_send_reply);
-int ptlrpc_reply (struct ptlrpc_request *req)
+int ptlrpc_reply(struct ptlrpc_request *req)
{
if (req->rq_no_reply)
return 0;
/* bulk register should be done after wrap_request() */
if (request->rq_bulk != NULL) {
- rc = ptlrpc_register_bulk (request);
+ rc = ptlrpc_register_bulk(request);
if (rc != 0)
GOTO(out, rc);
}
if (!noreply) {
- LASSERT (request->rq_replen != 0);
+ LASSERT(request->rq_replen != 0);
if (request->rq_repbuf == NULL) {
LASSERT(request->rq_repdata == NULL);
LASSERT(request->rq_repmsg == NULL);
LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
if (rc != 0) {
CERROR("LNetMEAttach failed: %d\n", rc);
- LASSERT (rc == -ENOMEM);
+ LASSERT(rc == -ENOMEM);
GOTO(cleanup_bulk, rc = -ENOMEM);
}
}
&request->rq_reply_md_h);
if (rc != 0) {
CERROR("LNetMDAttach failed: %d\n", rc);
- LASSERT (rc == -ENOMEM);
+ LASSERT(rc == -ENOMEM);
spin_lock(&request->rq_lock);
/* ...but the MD attach didn't succeed... */
request->rq_receiving_reply = 0;
* nobody apart from the PUT's target has the right nid+XID to
* access the reply buffer. */
rc2 = LNetMEUnlink(reply_me_h);
- LASSERT (rc2 == 0);
+ LASSERT(rc2 == 0);
/* UNLINKED callback called synchronously */
LASSERT(!request->rq_receiving_reply);
return (0);
CERROR("LNetMDAttach failed: %d; \n", rc);
- LASSERT (rc == -ENOMEM);
- rc = LNetMEUnlink (me_h);
- LASSERT (rc == 0);
+ LASSERT(rc == -ENOMEM);
+ rc = LNetMEUnlink(me_h);
+ LASSERT(rc == 0);
rqbd->rqbd_refcount = 0;
return (-ENOMEM);
struct ptlrpc_service *svc);
void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc);
void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount);
-void ptlrpc_lprocfs_do_request_stat (struct ptlrpc_request *req,
+void ptlrpc_lprocfs_do_request_stat(struct ptlrpc_request *req,
long q_usec, long work_usec);
#else
-#define ptlrpc_lprocfs_register_service(params...) do{}while(0)
-#define ptlrpc_lprocfs_unregister_service(params...) do{}while(0)
-#define ptlrpc_lprocfs_rpc_sent(params...) do{}while(0)
-#define ptlrpc_lprocfs_do_request_stat(params...) do{}while(0)
+#define ptlrpc_lprocfs_register_service(params...) do {} while (0)
+#define ptlrpc_lprocfs_unregister_service(params...) do {} while (0)
+#define ptlrpc_lprocfs_rpc_sent(params...) do {} while (0)
+#define ptlrpc_lprocfs_do_request_stat(params...) do {} while (0)
#endif /* LPROCFS */
/* NRS */
return 0;
cleanup:
- switch(cleanup_phase) {
+ switch (cleanup_phase) {
case 8:
ptlrpc_nrs_fini();
case 7:
{
LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
LASSERT(spin_is_locked(&rs->rs_lock));
- LASSERT (rs->rs_difficult);
+ LASSERT(rs->rs_difficult);
rs->rs_scheduled_ever = 1; /* flag any notification attempt */
if (rs->rs_scheduled) { /* being set up or already notified */
spin_lock(&exp->exp_uncommitted_replies_lock);
list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
rs_obd_list) {
- LASSERT (rs->rs_difficult);
+ LASSERT(rs->rs_difficult);
/* VBR: per-export last_committed */
LASSERT(rs->rs_export);
if (rs->rs_transno <= exp->exp_last_committed) {
LASSERT(rc == 0);
mutex_lock(&ptlrpc_all_services_mutex);
- list_add (&service->srv_list, &ptlrpc_all_services);
+ list_add(&service->srv_list, &ptlrpc_all_services);
mutex_unlock(&ptlrpc_all_services_mutex);
if (proc_entry != NULL)
rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
if (rc) {
- CERROR ("error unpacking ptlrpc body: ptl %d from %s x"
- LPU64"\n", svc->srv_req_portal,
- libcfs_id2str(req->rq_peer), req->rq_xid);
+ CERROR("error unpacking ptlrpc body: ptl %d from %s x"
+ LPU64"\n", svc->srv_req_portal,
+ libcfs_id2str(req->rq_peer), req->rq_xid);
goto err_req;
}
goto err_req;
}
- switch(lustre_msg_get_opc(req->rq_reqmsg)) {
+ switch (lustre_msg_get_opc(req->rq_reqmsg)) {
case MDS_WRITEPAGE:
case OST_WRITE:
req->rq_bulk_write = 1;
ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
- if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
libcfs_debug_dumplog();
do_gettimeofday(&work_start);
exp = rs->rs_export;
- LASSERT (rs->rs_difficult);
- LASSERT (rs->rs_scheduled);
- LASSERT (list_empty(&rs->rs_list));
+ LASSERT(rs->rs_difficult);
+ LASSERT(rs->rs_scheduled);
+ LASSERT(list_empty(&rs->rs_list));
spin_lock(&exp->exp_lock);
/* Noop if removed already */
- list_del_init (&rs->rs_exp_list);
+ list_del_init(&rs->rs_exp_list);
spin_unlock(&exp->exp_lock);
/* The disk commit callback holds exp_uncommitted_replies_lock while it
/* Off the net */
spin_unlock(&rs->rs_lock);
- class_export_put (exp);
+ class_export_put(exp);
rs->rs_export = NULL;
- ptlrpc_rs_decref (rs);
+ ptlrpc_rs_decref(rs);
if (atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
svc->srv_is_stopping)
wake_up_all(&svcpt->scp_waitq);