# error "Unkown compiler"
#endif /* __GNUC__ */
-#define ENTRY \
-do { \
- CDEBUG(D_TRACE, "Process entered\n"); \
-} while (0)
-
#define EXIT \
do { \
CDEBUG(D_TRACE, "Process leaving\n"); \
* Caller checks ksnc_rx_nob_wanted to determine
* progress/completion. */
int rc;
- ENTRY;
if (ksocknal_data.ksnd_stall_rx != 0) {
cfs_pause(cfs_time_seconds (ksocknal_data.ksnd_stall_rx));
{
lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
- ENTRY;
LASSERT(ni != NULL || tx->tx_conn != NULL);
void ksocknal_read_callback (ksock_conn_t *conn)
{
ksock_sched_t *sched;
- ENTRY;
sched = conn->ksnc_scheduler;
void ksocknal_write_callback (ksock_conn_t *conn)
{
ksock_sched_t *sched;
- ENTRY;
sched = conn->ksnc_scheduler;
ksocknal_data_ready (struct sock *sk, int n)
{
ksock_conn_t *conn;
- ENTRY;
/* interleave correctly with closing sockets... */
LASSERT(!in_irq());
int new_index = eq->eq_deq_seq & (eq->eq_size - 1);
lnet_event_t *new_event = &eq->eq_events[new_index];
int rc;
- ENTRY;
/* must be called with lnet_eq_wait_lock held */
if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence))
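Two idioms meet in this hunk: the event ring is indexed with seq & (size - 1), which equals seq % size whenever eq_size is a power of two, and ever-increasing sequence numbers are compared with LNET_SEQ_GT, which must stay correct when the counter wraps. A sketch of both, using the usual signed-difference trick (assumed here for illustration rather than copied from the LNet headers):

#include <assert.h>

/* wraparound-safe "a is after b": the signed view of the unsigned
 * difference is positive while a and b are less than half the counter
 * range apart */
#define SEQ_GT(a, b) ((long)((unsigned long)(a) - (unsigned long)(b)) > 0)

int main(void)
{
        unsigned long size = 8;                 /* must be a power of two */
        unsigned long seq = (unsigned long)-2;  /* about to wrap */

        /* masking replaces the modulo for power-of-two sizes */
        assert((seq & (size - 1)) == seq % size);

        assert(SEQ_GT(seq + 3, seq));           /* holds across the wrap */
        assert(!SEQ_GT(seq, seq + 3));
        return 0;
}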
int wait = 1;
int rc;
int i;
- ENTRY;
LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
void
lnet_build_unlink_event (lnet_libmd_t *md, lnet_event_t *ev)
{
- ENTRY;
-
memset(ev, 0, sizeof(*ev));
ev->status = 0;
init_lnet(void)
{
int rc;
- ENTRY;
mutex_init(&lnet_config_mutex);
__u32 *op;
unsigned int debug_mask;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
LUSTRE_MDS_VERSION, SEQ_QUERY);
const struct lu_env *env)
{
int rc;
- ENTRY;
mutex_lock(&seq->lcs_mutex);
struct lu_client_seq *seq)
{
int rc;
- ENTRY;
if (seq->lcs_srv) {
rc = 0;
struct lu_client_seq *seq, seqno_t *seqnr)
{
int rc;
- ENTRY;
LASSERT(range_is_sane(&seq->lcs_space));
{
wait_queue_t link;
int rc;
- ENTRY;
LASSERT(seq != NULL);
LASSERT(fid != NULL);
static void seq_client_proc_fini(struct lu_client_seq *seq)
{
#ifdef LPROCFS
- ENTRY;
if (seq->lcs_proc_dir) {
if (!IS_ERR(seq->lcs_proc_dir))
lprocfs_remove(&seq->lcs_proc_dir);
{
#ifdef LPROCFS
int rc;
- ENTRY;
seq->lcs_proc_dir = lprocfs_register(seq->lcs_name,
seq_type_proc_dir,
struct lu_server_seq *srv)
{
int rc;
- ENTRY;
LASSERT(seq != NULL);
LASSERT(prefix != NULL);
void seq_client_fini(struct lu_client_seq *seq)
{
- ENTRY;
-
seq_client_proc_fini(seq);
if (seq->lcs_exp != NULL) {
struct client_obd *cli = &obd->u.cli;
char *prefix;
int rc;
- ENTRY;
OBD_ALLOC_PTR(cli->cl_seq);
if (cli->cl_seq == NULL)
int client_fid_fini(struct obd_device *obd)
{
struct client_obd *cli = &obd->u.cli;
- ENTRY;
if (cli->cl_seq != NULL) {
seq_client_fini(cli->cl_seq);
{
struct lu_seq_range tmp;
int rc;
- ENTRY;
LASSERT(range != NULL);
{
struct lu_client_seq *seq = ((struct seq_file *)file->private_data)->private;
int rc;
- ENTRY;
LASSERT(seq != NULL);
{
struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
int rc;
- ENTRY;
LASSERT(seq != NULL);
struct lu_client_seq *seq = ((struct seq_file *)file->private_data)->private;
__u64 max;
int rc, val;
- ENTRY;
LASSERT(seq != NULL);
{
struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
int rc;
- ENTRY;
LASSERT(seq != NULL);
{
struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
int rc;
- ENTRY;
LASSERT(seq != NULL);
struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
struct client_obd *cli;
int rc;
- ENTRY;
LASSERT(seq != NULL);
int cache_size, int cache_threshold)
{
struct fld_cache *cache;
- ENTRY;
LASSERT(name != NULL);
LASSERT(cache_threshold < cache_size);
void fld_cache_fini(struct fld_cache *cache)
{
__u64 pct;
- ENTRY;
LASSERT(cache != NULL);
fld_cache_flush(cache);
struct lu_seq_range *c_range;
struct lu_seq_range *n_range;
struct list_head *head = &cache->fci_entries_head;
- ENTRY;
restart_fixup:
struct fld_cache_entry *flde;
struct list_head *curr;
int num = 0;
- ENTRY;
LASSERT(cache != NULL);
*/
void fld_cache_flush(struct fld_cache *cache)
{
- ENTRY;
-
write_lock(&cache->fci_lock);
cache->fci_cache_size = 0;
fld_cache_shrink(cache);
const seqno_t new_end = range->lsr_end;
struct fld_cache_entry *fldt;
- ENTRY;
OBD_ALLOC_GFP(fldt, sizeof *fldt, GFP_ATOMIC);
if (!fldt) {
OBD_FREE_PTR(f_new);
const seqno_t new_start = f_new->fce_range.lsr_start;
const seqno_t new_end = f_new->fce_range.lsr_end;
__u32 new_flags = f_new->fce_range.lsr_flags;
- ENTRY;
/*
* Duplicate entries are eliminated in insert op.
*fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range)
{
struct fld_cache_entry *got = NULL;
- ENTRY;
read_lock(&cache->fci_lock);
got = fld_cache_entry_lookup_nolock(cache, range);
struct fld_cache_entry *flde;
struct fld_cache_entry *prev = NULL;
struct list_head *head;
- ENTRY;
read_lock(&cache->fci_lock);
head = &cache->fci_entries_head;
static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
{
int rc;
- ENTRY;
+
client_obd_list_lock(&cli->cl_loi_list_lock);
rc = list_empty(&mcw->mcw_entry);
client_obd_list_unlock(&cli->cl_loi_list_lock);
{
struct lu_fld_target *target;
int hash;
- ENTRY;
/* Because almost all of the special sequences are located on MDT0,
* they should go to index 0 directly, instead of calculating
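The comment describes a fast path: reserved ("special") sequences live on MDT0, so the lookup returns index 0 without consulting the hash at all. A hedged sketch of that shape (the range test, names, and the modulo stand-in for the hash function are illustrative, not the FLD code):

#include <stdio.h>

#define SPECIAL_SEQ_LIMIT 0x400ULL      /* hypothetical reserved range */

/* pick a server index for a sequence: special sequences short-circuit
 * to MDT0, everything else is spread across the target count */
static unsigned int seq_to_target(unsigned long long seq, unsigned int count)
{
        if (seq < SPECIAL_SEQ_LIMIT)
                return 0;                       /* fast path: always MDT0 */
        return (unsigned int)(seq % count);     /* hash function stand-in */
}

int main(void)
{
        printf("seq 0x10  -> MDT%u\n", seq_to_target(0x10, 4));
        printf("seq 0x999 -> MDT%u\n", seq_to_target(0x999, 4));
        return 0;
}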
fld_client_get_target(struct lu_client_fld *fld, seqno_t seq)
{
struct lu_fld_target *target;
- ENTRY;
LASSERT(fld->lcf_hash != NULL);
{
const char *name;
struct lu_fld_target *target, *tmp;
- ENTRY;
LASSERT(tar != NULL);
name = fld_target_name(tar);
int fld_client_del_target(struct lu_client_fld *fld, __u64 idx)
{
struct lu_fld_target *target, *tmp;
- ENTRY;
spin_lock(&fld->lcf_lock);
list_for_each_entry_safe(target, tmp,
static int fld_client_proc_init(struct lu_client_fld *fld)
{
int rc;
- ENTRY;
fld->lcf_proc_dir = lprocfs_register(fld->lcf_name,
fld_type_proc_dir,
void fld_client_proc_fini(struct lu_client_fld *fld)
{
- ENTRY;
if (fld->lcf_proc_dir) {
if (!IS_ERR(fld->lcf_proc_dir))
lprocfs_remove(&fld->lcf_proc_dir);
{
int cache_size, cache_threshold;
int rc;
- ENTRY;
LASSERT(fld != NULL);
void fld_client_fini(struct lu_client_fld *fld)
{
struct lu_fld_target *target, *tmp;
- ENTRY;
spin_lock(&fld->lcf_lock);
list_for_each_entry_safe(target, tmp,
__u32 *op;
int rc;
struct obd_import *imp;
- ENTRY;
LASSERT(exp != NULL);
struct lu_seq_range res = { 0 };
struct lu_fld_target *target;
int rc;
- ENTRY;
fld->lcf_flags |= LUSTRE_FLD_RUN;
{
struct lu_client_fld *fld = (struct lu_client_fld *)m->private;
struct lu_fld_target *target;
- ENTRY;
LASSERT(fld != NULL);
fld_proc_hash_seq_show(struct seq_file *m, void *unused)
{
struct lu_client_fld *fld = (struct lu_client_fld *)m->private;
- ENTRY;
LASSERT(fld != NULL);
struct lu_client_fld *fld = ((struct seq_file *)file->private_data)->private;
struct lu_fld_hash *hash = NULL;
int i;
- ENTRY;
LASSERT(fld != NULL);
size_t count, loff_t *pos)
{
struct lu_client_fld *fld = file->private_data;
- ENTRY;
LASSERT(fld != NULL);
{
int i;
int len;
- ENTRY;
len = LCFG_HDR_SIZE(bufcount);
for (i = 0; i < bufcount; i++)
char *ptr;
int i;
- ENTRY;
-
OBD_ALLOC(lcfg, lustre_cfg_len(bufs->lcfg_bufcount,
bufs->lcfg_buflen));
if (!lcfg)
static inline int lustre_cfg_sanity_check(void *buf, int len)
{
struct lustre_cfg *lcfg = (struct lustre_cfg *)buf;
- ENTRY;
+
if (!lcfg)
RETURN(-EINVAL);
static inline void obd_ioctl_freedata(char *buf, int len)
{
- ENTRY;
-
OBD_FREE_LARGE(buf, len);
EXIT;
return;
struct llog_operations *lop;
int rc;
- ENTRY;
-
rc = llog_handle2ops(handle, &lop);
if (rc)
RETURN(rc);
struct llog_operations *lop;
int rc;
- ENTRY;
-
rc = llog_handle2ops(loghandle, &lop);
if (rc)
RETURN(rc);
struct llog_operations *lop;
int rc;
- ENTRY;
-
rc = llog_handle2ops(loghandle, &lop);
if (rc)
RETURN(rc);
struct llog_operations *lop;
int rc;
- ENTRY;
-
rc = llog_obd2ops(ctxt, &lop);
if (rc)
RETURN(rc);
static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck,
struct lookup_intent *it)
{
- ENTRY;
-
if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
it->it_op == IT_LAYOUT))
return;
struct lov_stripe_md *lsm)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, get_info);
EXP_COUNTER_INCREMENT(exp, get_info);
struct ptlrpc_request_set *set)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, set_info_async);
EXP_COUNTER_INCREMENT(exp, set_info_async);
{
int rc;
DECLARE_LU_VARS(ldt, d);
- ENTRY;
ldt = obd->obd_type->typ_lu;
if (ldt != NULL) {
{
int rc;
DECLARE_LU_VARS(ldt, d);
- ENTRY;
OBD_CHECK_DEV(obd);
ldt = obd->obd_type->typ_lu;
{
int rc;
DECLARE_LU_VARS(ldt, d);
- ENTRY;
OBD_CHECK_DEV(obd);
static inline void obd_cleanup_client_import(struct obd_device *obd)
{
- ENTRY;
-
/* If we set up but never connected, the
client import will not have been cleaned. */
down_write(&obd->u.cli.cl_sem);
{
int rc;
DECLARE_LU_VARS(ldt, d);
- ENTRY;
OBD_CHECK_DEV(obd);
struct lov_stripe_md *mem_src)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, packmd);
EXP_COUNTER_INCREMENT(exp, packmd);
int disk_len)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, unpackmd);
EXP_COUNTER_INCREMENT(exp, unpackmd);
static inline int obd_precreate(struct obd_export *exp)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, precreate);
OBD_COUNTER_INCREMENT(exp->exp_obd, precreate);
struct obd_trans_info *oti)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, create_async);
EXP_COUNTER_INCREMENT(exp, create_async);
struct obd_trans_info *oti)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, create);
EXP_COUNTER_INCREMENT(exp, create);
struct obd_export *md_exp, void *capa)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, destroy);
EXP_COUNTER_INCREMENT(exp, destroy);
struct obd_info *oinfo)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, getattr);
EXP_COUNTER_INCREMENT(exp, getattr);
struct ptlrpc_request_set *set)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, getattr_async);
EXP_COUNTER_INCREMENT(exp, getattr_async);
struct obd_trans_info *oti)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, setattr);
EXP_COUNTER_INCREMENT(exp, setattr);
{
struct ptlrpc_request_set *set = NULL;
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, setattr_async);
EXP_COUNTER_INCREMENT(exp, setattr_async);
struct ptlrpc_request_set *set)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, setattr_async);
EXP_COUNTER_INCREMENT(exp, setattr_async);
{
struct obd_device *obd = imp->imp_obd;
int rc;
- ENTRY;
OBD_CHECK_DEV_ACTIVE(obd);
OBD_CHECK_DT_OP(obd, add_conn, -EOPNOTSUPP);
{
struct obd_device *obd = imp->imp_obd;
int rc;
- ENTRY;
OBD_CHECK_DEV_ACTIVE(obd);
OBD_CHECK_DT_OP(obd, del_conn, -EOPNOTSUPP);
static inline struct obd_uuid *obd_get_uuid(struct obd_export *exp)
{
struct obd_uuid *uuid;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, get_uuid, NULL);
EXP_COUNTER_INCREMENT(exp, get_uuid);
int rc;
__u64 ocf = data ? data->ocd_connect_flags : 0; /* for post-condition
* check */
- ENTRY;
OBD_CHECK_DEV_ACTIVE(obd);
OBD_CHECK_DT_OP(obd, connect, -EOPNOTSUPP);
__u64 ocf = d ? d->ocd_connect_flags : 0; /* for post-condition
* check */
- ENTRY;
-
OBD_CHECK_DEV_ACTIVE(obd);
OBD_CHECK_DT_OP(obd, reconnect, 0);
OBD_COUNTER_INCREMENT(obd, reconnect);
static inline int obd_disconnect(struct obd_export *exp)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, disconnect);
EXP_COUNTER_INCREMENT(exp, disconnect);
enum lu_cli_type type)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(obd, fid_init, 0);
OBD_COUNTER_INCREMENT(obd, fid_init);
static inline int obd_fid_fini(struct obd_device *obd)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(obd, fid_fini, 0);
OBD_COUNTER_INCREMENT(obd, fid_fini);
struct md_op_data *op_data)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, fid_alloc);
EXP_COUNTER_INCREMENT(exp, fid_alloc);
static inline int obd_ping(const struct lu_env *env, struct obd_export *exp)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, ping, 0);
EXP_COUNTER_INCREMENT(exp, ping);
static inline int obd_pool_new(struct obd_device *obd, char *poolname)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(obd, pool_new, -EOPNOTSUPP);
OBD_COUNTER_INCREMENT(obd, pool_new);
static inline int obd_pool_del(struct obd_device *obd, char *poolname)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(obd, pool_del, -EOPNOTSUPP);
OBD_COUNTER_INCREMENT(obd, pool_del);
static inline int obd_pool_add(struct obd_device *obd, char *poolname, char *ostname)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(obd, pool_add, -EOPNOTSUPP);
OBD_COUNTER_INCREMENT(obd, pool_add);
static inline int obd_pool_rem(struct obd_device *obd, char *poolname, char *ostname)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(obd, pool_rem, -EOPNOTSUPP);
OBD_COUNTER_INCREMENT(obd, pool_rem);
static inline void obd_getref(struct obd_device *obd)
{
- ENTRY;
if (OBT(obd) && OBP(obd, getref)) {
OBD_COUNTER_INCREMENT(obd, getref);
OBP(obd, getref)(obd);
static inline void obd_putref(struct obd_device *obd)
{
- ENTRY;
if (OBT(obd) && OBP(obd, putref)) {
OBD_COUNTER_INCREMENT(obd, putref);
OBP(obd, putref)(obd);
{
int rc = 0;
- ENTRY;
if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) &&
OBP((exp)->exp_obd, init_export))
rc = OBP(exp->exp_obd, init_export)(exp);
static inline int obd_destroy_export(struct obd_export *exp)
{
- ENTRY;
if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) &&
OBP((exp)->exp_obd, destroy_export))
OBP(exp->exp_obd, destroy_export)(exp);
int cmd, obd_off *offset)
{
int rc;
- ENTRY;
+
EXP_CHECK_DT_OP(exp, extent_calc);
rc = OBP(exp->exp_obd, extent_calc)(exp, md, cmd, offset);
RETURN(rc);
{
int rc = 0;
struct obd_device *obd;
- ENTRY;
if (exp == NULL || exp->exp_obd == NULL)
RETURN(-EINVAL);
struct ptlrpc_request_set *set = NULL;
struct obd_info oinfo = { { { 0 } } };
int rc = 0;
- ENTRY;
set = ptlrpc_prep_set();
if (set == NULL)
{
int rc = 0;
struct obd_device *obd = exp->exp_obd;
- ENTRY;
if (obd == NULL)
RETURN(-EINVAL);
{
struct ptlrpc_request_set *set = NULL;
int rc;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, sync, -EOPNOTSUPP);
EXP_COUNTER_INCREMENT(exp, sync);
struct ptlrpc_request_set *set)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, sync, -EOPNOTSUPP);
EXP_COUNTER_INCREMENT(exp, sync);
{
struct ptlrpc_request_set *set = NULL;
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, punch);
EXP_COUNTER_INCREMENT(exp, punch);
struct ptlrpc_request_set *rqset)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, punch);
EXP_COUNTER_INCREMENT(exp, punch);
struct brw_page *pg, struct obd_trans_info *oti)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, brw);
EXP_COUNTER_INCREMENT(exp, brw);
struct lustre_capa *capa)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, preprw);
EXP_COUNTER_INCREMENT(exp, preprw);
struct niobuf_local *local,
struct obd_trans_info *oti, int rc)
{
- ENTRY;
-
EXP_CHECK_DT_OP(exp, commitrw);
EXP_COUNTER_INCREMENT(exp, commitrw);
struct ost_lvb *lvb, int kms_only)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, merge_lvb);
EXP_COUNTER_INCREMENT(exp, merge_lvb);
int shrink)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, adjust_kms);
EXP_COUNTER_INCREMENT(exp, adjust_kms);
int len, void *karg, void *uarg)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, iocontrol);
EXP_COUNTER_INCREMENT(exp, iocontrol);
{
struct ptlrpc_request_set *set = NULL;
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, enqueue);
EXP_COUNTER_INCREMENT(exp, enqueue);
struct ptlrpc_request_set *set)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, enqueue);
EXP_COUNTER_INCREMENT(exp, enqueue);
ldlm_iterator_t it, void *data)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, change_cbdata);
EXP_COUNTER_INCREMENT(exp, change_cbdata);
ldlm_iterator_t it, void *data)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, find_cbdata);
EXP_COUNTER_INCREMENT(exp, find_cbdata);
struct lustre_handle *lockh)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, cancel);
EXP_COUNTER_INCREMENT(exp, cancel);
void *opaque)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, cancel_unused);
EXP_COUNTER_INCREMENT(exp, cancel_unused);
int flag)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, pin);
EXP_COUNTER_INCREMENT(exp, pin);
struct obd_client_handle *handle, int flag)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, unpin);
EXP_COUNTER_INCREMENT(exp, unpin);
struct obd_import *imp,
enum obd_import_event event)
{
- ENTRY;
if (!obd) {
CERROR("NULL device\n");
EXIT;
struct llogd_conn_body *body)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, llog_connect, 0);
EXP_COUNTER_INCREMENT(exp, llog_connect);
void *data)
{
int rc;
- ENTRY;
+
OBD_CHECK_DEV(obd);
/* the check for async_recov is a complete hack - I'm hereby
struct obd_quotactl *oqctl)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, quotacheck);
EXP_COUNTER_INCREMENT(exp, quotacheck);
struct obd_quotactl *oqctl)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, quotactl);
EXP_COUNTER_INCREMENT(exp, quotactl);
* <0 on error
*/
int rc;
- ENTRY;
/* don't use EXP_CHECK_DT_OP, because NULL method is normal here */
if (obd == NULL || !OBT(obd)) {
static inline int obd_register_observer(struct obd_device *obd,
struct obd_device *observer)
{
- ENTRY;
OBD_CHECK_DEV(obd);
down_write(&obd->obd_observer_link_sem);
if (obd->obd_observer && observer) {
static inline int obd_pin_observer(struct obd_device *obd,
struct obd_device **observer)
{
- ENTRY;
down_read(&obd->obd_observer_link_sem);
if (!obd->obd_observer) {
*observer = NULL;
static inline int obd_unpin_observer(struct obd_device *obd)
{
- ENTRY;
up_read(&obd->obd_observer_link_sem);
RETURN(0);
}
obd_pin_extent_cb pin_cb)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, register_page_removal_cb, 0);
OBD_COUNTER_INCREMENT(exp->exp_obd, register_page_removal_cb);
obd_page_removal_cb_t cb)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, unregister_page_removal_cb, 0);
OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_page_removal_cb);
obd_lock_cancel_cb cb)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, register_lock_cancel_cb, 0);
OBD_COUNTER_INCREMENT(exp->exp_obd, register_lock_cancel_cb);
obd_lock_cancel_cb cb)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, unregister_lock_cancel_cb, 0);
OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_lock_cancel_cb);
struct lu_fid *fid, struct obd_capa **pc)
{
int rc;
- ENTRY;
EXP_CHECK_MD_OP(exp, getstatus);
EXP_MD_COUNTER_INCREMENT(exp, getstatus);
struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, getattr);
EXP_MD_COUNTER_INCREMENT(exp, getattr);
rc = MDP(exp->exp_obd, getattr)(exp, op_data, request);
const struct lu_fid *fid)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, null_inode);
EXP_MD_COUNTER_INCREMENT(exp, null_inode);
rc = MDP(exp->exp_obd, null_inode)(exp, fid);
ldlm_iterator_t it, void *data)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, find_cbdata);
EXP_MD_COUNTER_INCREMENT(exp, find_cbdata);
rc = MDP(exp->exp_obd, find_cbdata)(exp, fid, it, data);
struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, close);
EXP_MD_COUNTER_INCREMENT(exp, close);
rc = MDP(exp->exp_obd, close)(exp, op_data, mod, request);
struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, create);
EXP_MD_COUNTER_INCREMENT(exp, create);
rc = MDP(exp->exp_obd, create)(exp, op_data, data, datalen, mode,
struct md_open_data *mod)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, done_writing);
EXP_MD_COUNTER_INCREMENT(exp, done_writing);
rc = MDP(exp->exp_obd, done_writing)(exp, op_data, mod);
int extra_lock_flags)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, enqueue);
EXP_MD_COUNTER_INCREMENT(exp, enqueue);
rc = MDP(exp->exp_obd, enqueue)(exp, einfo, it, op_data, lockh,
struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, getattr_name);
EXP_MD_COUNTER_INCREMENT(exp, getattr_name);
rc = MDP(exp->exp_obd, getattr_name)(exp, op_data, request);
__u64 extra_lock_flags)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, intent_lock);
EXP_MD_COUNTER_INCREMENT(exp, intent_lock);
rc = MDP(exp->exp_obd, intent_lock)(exp, op_data, lmm, lmmsize,
struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, link);
EXP_MD_COUNTER_INCREMENT(exp, link);
rc = MDP(exp->exp_obd, link)(exp, op_data, request);
int newlen, struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, rename);
EXP_MD_COUNTER_INCREMENT(exp, rename);
rc = MDP(exp->exp_obd, rename)(exp, op_data, old, oldlen, new,
struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, is_subdir);
EXP_MD_COUNTER_INCREMENT(exp, is_subdir);
rc = MDP(exp->exp_obd, is_subdir)(exp, pfid, cfid, request);
struct md_open_data **mod)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, setattr);
EXP_MD_COUNTER_INCREMENT(exp, setattr);
rc = MDP(exp->exp_obd, setattr)(exp, op_data, ea, ealen,
struct obd_capa *oc, struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, sync);
EXP_MD_COUNTER_INCREMENT(exp, sync);
rc = MDP(exp->exp_obd, sync)(exp, fid, oc, request);
struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, readpage);
EXP_MD_COUNTER_INCREMENT(exp, readpage);
rc = MDP(exp->exp_obd, readpage)(exp, opdata, pages, request);
struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, unlink);
EXP_MD_COUNTER_INCREMENT(exp, unlink);
rc = MDP(exp->exp_obd, unlink)(exp, op_data, request);
struct obd_export *md_exp,
struct lustre_md *md)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, get_lustre_md);
EXP_MD_COUNTER_INCREMENT(exp, get_lustre_md);
RETURN(MDP(exp->exp_obd, get_lustre_md)(exp, req, dt_exp, md_exp, md));
static inline int md_free_lustre_md(struct obd_export *exp,
struct lustre_md *md)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, free_lustre_md);
EXP_MD_COUNTER_INCREMENT(exp, free_lustre_md);
RETURN(MDP(exp->exp_obd, free_lustre_md)(exp, md));
int output_size, int flags, __u32 suppgid,
struct ptlrpc_request **request)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, setxattr);
EXP_MD_COUNTER_INCREMENT(exp, setxattr);
RETURN(MDP(exp->exp_obd, setxattr)(exp, fid, oc, valid, name, input,
int output_size, int flags,
struct ptlrpc_request **request)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, getxattr);
EXP_MD_COUNTER_INCREMENT(exp, getxattr);
RETURN(MDP(exp->exp_obd, getxattr)(exp, fid, oc, valid, name, input,
struct obd_client_handle *och,
struct ptlrpc_request *open_req)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, set_open_replay_data);
EXP_MD_COUNTER_INCREMENT(exp, set_open_replay_data);
RETURN(MDP(exp->exp_obd, set_open_replay_data)(exp, och, open_req));
static inline int md_clear_open_replay_data(struct obd_export *exp,
struct obd_client_handle *och)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, clear_open_replay_data);
EXP_MD_COUNTER_INCREMENT(exp, clear_open_replay_data);
RETURN(MDP(exp->exp_obd, clear_open_replay_data)(exp, och));
static inline int md_set_lock_data(struct obd_export *exp,
__u64 *lockh, void *data, __u64 *bits)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, set_lock_data);
EXP_MD_COUNTER_INCREMENT(exp, set_lock_data);
RETURN(MDP(exp->exp_obd, set_lock_data)(exp, lockh, data, bits));
void *opaque)
{
int rc;
- ENTRY;
EXP_CHECK_MD_OP(exp, cancel_unused);
EXP_MD_COUNTER_INCREMENT(exp, cancel_unused);
ldlm_mode_t mode,
struct lustre_handle *lockh)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, lock_match);
EXP_MD_COUNTER_INCREMENT(exp, lock_match);
RETURN(MDP(exp->exp_obd, lock_match)(exp, flags, fid, type,
static inline int md_init_ea_size(struct obd_export *exp, int easize,
int def_asize, int cookiesize)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, init_ea_size);
EXP_MD_COUNTER_INCREMENT(exp, init_ea_size);
RETURN(MDP(exp->exp_obd, init_ea_size)(exp, easize, def_asize,
struct obd_capa *oc, __u32 suppgid,
struct ptlrpc_request **request)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, get_remote_perm);
EXP_MD_COUNTER_INCREMENT(exp, get_remote_perm);
RETURN(MDP(exp->exp_obd, get_remote_perm)(exp, fid, oc, suppgid,
renew_capa_cb_t cb)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, renew_capa);
EXP_MD_COUNTER_INCREMENT(exp, renew_capa);
rc = MDP(exp->exp_obd, renew_capa)(exp, ocapa, cb);
struct obd_capa **oc)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, unpack_capa);
EXP_MD_COUNTER_INCREMENT(exp, unpack_capa);
rc = MDP(exp->exp_obd, unpack_capa)(exp, req, field, oc);
struct ldlm_enqueue_info *einfo)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, intent_getattr_async);
EXP_MD_COUNTER_INCREMENT(exp, intent_getattr_async);
rc = MDP(exp->exp_obd, intent_getattr_async)(exp, minfo, einfo);
struct lu_fid *fid, __u64 *bits)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, revalidate_lock);
EXP_MD_COUNTER_INCREMENT(exp, revalidate_lock);
rc = MDP(exp->exp_obd, revalidate_lock)(exp, it, fid, bits);
struct cl_lock *lock;
int result;
- ENTRY;
result = 0;
if (!(lli->lli_flags & LLIF_MDS_SIZE_LOCK)) {
CDEBUG(D_DLMTRACE, "Glimpsing inode "DFID"\n", PFID(fid));
int result;
int refcheck;
- ENTRY;
-
result = cl_io_get(inode, &env, &io, &refcheck);
if (result > 0) {
again:
int result;
int refcheck;
- ENTRY;
-
if (!cl_i2info(inode)->lli_has_smd)
RETURN(0);
{
struct ccc_device *vdv;
int rc;
- ENTRY;
vdv = lu2ccc_dev(d);
vdv->cdv_next = lu2cl_dev(next);
struct lu_device *lud;
struct cl_site *site;
int rc;
- ENTRY;
OBD_ALLOC_PTR(vdv);
if (vdv == NULL)
{
struct inode *inode = ccc_object_inode(obj);
- ENTRY;
lvb->lvb_mtime = cl_inode_mtime(inode);
lvb->lvb_atime = cl_inode_atime(inode);
lvb->lvb_ctime = cl_inode_ctime(inode);
int result;
- ENTRY;
-
if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
io->ci_type == CIT_FAULT) {
if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
const struct cl_page_slice *slice,
struct cl_io *unused)
{
- ENTRY;
/* transient page should always be sent. */
RETURN(0);
}
const struct ccc_io *cio = ccc_env_io(env);
int result;
- ENTRY;
/*
* Work around a DLM peculiarity: it assumes that a glimpse
* (LDLM_FL_HAS_INTENT) lock is always LCK_PR, and returns a read lock
enum cl_lock_state state)
{
struct cl_lock *lock = slice->cls_lock;
- ENTRY;
/*
* Refresh inode attributes when the lock is moving into CLS_HELD
struct cl_object *obj = io->ci_obj;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- ENTRY;
CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
int result;
int refcheck;
- ENTRY;
-
env = cl_env_get(&refcheck);
if (IS_ERR(env))
RETURN(PTR_ERR(env));
__u32 cl_fid_build_gen(const struct lu_fid *fid)
{
__u32 gen;
- ENTRY;
if (fid_is_igif(fid)) {
gen = lu_igif_gen(fid);
int rc, easize, def_easize, cookiesize;
struct lov_desc desc;
__u16 stripes;
- ENTRY;
rc = obd_get_info(NULL, dt_exp, sizeof(KEY_LOVDESC), KEY_LOVDESC,
&valsize, &desc, NULL);
__u64 flags;
int result;
- ENTRY;
if (!strcmp(watched->obd_type->typ_name, LUSTRE_OSC_NAME)) {
cli = &watched->u.cli;
lco = owner;
static struct interval_node *interval_first(struct interval_node *node)
{
- ENTRY;
-
if (!node)
RETURN(NULL);
while (node->in_left)
static struct interval_node *interval_last(struct interval_node *node)
{
- ENTRY;
-
if (!node)
RETURN(NULL);
while (node->in_right)
static struct interval_node *interval_next(struct interval_node *node)
{
- ENTRY;
-
if (!node)
RETURN(NULL);
if (node->in_right)
static struct interval_node *interval_prev(struct interval_node *node)
{
- ENTRY;
-
if (!node)
RETURN(NULL);
{
struct interval_node *node;
enum interval_iter rc = INTERVAL_ITER_CONT;
- ENTRY;
interval_for_each(node, root) {
rc = func(node, data);
{
struct interval_node *node;
enum interval_iter rc = INTERVAL_ITER_CONT;
- ENTRY;
interval_for_each_reverse(node, root) {
rc = func(node, data);
{
struct interval_node *walk = root;
int rc;
- ENTRY;
while (walk) {
rc = extent_compare(ex, &walk->in_extent);
struct interval_node **root)
{
struct interval_node *parent, *gparent;
- ENTRY;
while ((parent = node->in_parent) && node_is_red(parent)) {
gparent = parent->in_parent;
{
struct interval_node **p, *parent = NULL;
- ENTRY;
LASSERT(!interval_is_intree(node));
p = root;
struct interval_node **root)
{
struct interval_node *tmp;
- ENTRY;
while (node_is_black_or_0(node) && node != *root) {
if (parent->in_left == node) {
__u64 old_maxhigh)
{
__u64 left_max, right_max;
- ENTRY;
while (node) {
left_max = node->in_left ? node->in_left->in_max_high : 0;
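This helper repairs the augmented field of the extent interval tree: every node caches the largest interval end in its subtree (in_max_high), and after an insert or erase that cache is recomputed bottom-up until a node's value stops changing. A simplified sketch of the invariant and the upward walk (the flattened node layout is assumed for illustration):

#include <stddef.h>

struct node {
        unsigned long long low, high;   /* the interval itself */
        unsigned long long max_high;    /* max high over this subtree */
        struct node *left, *right, *parent;
};

static unsigned long long max3(unsigned long long a, unsigned long long b,
                               unsigned long long c)
{
        unsigned long long m = a > b ? a : b;
        return m > c ? m : c;
}

/* walk toward the root refreshing the cached subtree maximum; stop as
 * soon as a value is unchanged, since every ancestor then matches too */
static void update_max_high(struct node *node)
{
        while (node) {
                unsigned long long l = node->left ? node->left->max_high : 0;
                unsigned long long r = node->right ? node->right->max_high : 0;
                unsigned long long m = max3(node->high, l, r);

                if (m == node->max_high)
                        break;
                node->max_high = m;
                node = node->parent;
        }
}

int main(void)
{
        struct node a = { 0, 10, 10, NULL, NULL, NULL };
        struct node b = { 20, 25, 0, NULL, NULL, &a };  /* stale cache */

        a.right = &b;
        update_max_high(&b);    /* fixes b, then propagates up to a */
        return a.max_high == 25 ? 0 : 1;
}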
{
struct interval_node *child, *parent;
int color;
- ENTRY;
LASSERT(interval_is_intree(node));
node->in_intree = 0;
struct list_head *tmp;
struct ldlm_lock *lck;
__u64 kms = 0;
- ENTRY;
/* don't let another thread in ldlm_extent_shift_kms race in
* just after we finish and take our lock into account in its
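The surrounding function recomputes the "known minimum size" (kms) when an extent lock goes away: scan the locks that remain on the resource and keep the largest covered offset plus one. A toy version of that scan, with a flat array standing in for the resource's granted-lock list (an assumption for illustration):

#include <stdio.h>

struct ext { unsigned long long start, end; };

/* recompute kms after dropping the lock at index skip: the highest
 * covered offset + 1 among the locks that remain */
static unsigned long long shift_kms(const struct ext *locks, int n, int skip)
{
        unsigned long long kms = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (i == skip)
                        continue;
                if (locks[i].end + 1 > kms)
                        kms = locks[i].end + 1;
        }
        return kms;
}

int main(void)
{
        struct ext locks[] = { { 0, 4095 }, { 4096, 8191 } };

        printf("kms=%llu\n", shift_kms(locks, 2, 1));   /* prints 4096 */
        return 0;
}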
struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
{
struct ldlm_interval *node;
- ENTRY;
LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO);
static inline void
ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
{
- ENTRY;
-
LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)",
mode, flags);
int splitted = 0;
const struct ldlm_callback_suite null_cbs = { NULL };
int rc;
- ENTRY;
CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
LPU64" end "LPU64"\n", *flags,
ldlm_flock_interrupted_wait(void *data)
{
struct ldlm_lock *lock;
- ENTRY;
lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;
struct l_wait_info lwi;
ldlm_error_t err;
int rc = 0;
- ENTRY;
CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n",
flags, data, getlk);
int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
void *data, int flag)
{
- ENTRY;
-
LASSERT(lock);
LASSERT(flag == LDLM_CB_CANCELING);
void ldlm_destroy_flock_export(struct obd_export *exp)
{
- ENTRY;
if (exp->exp_flock_hash) {
cfs_hash_putref(exp->exp_flock_hash);
exp->exp_flock_hash = NULL;
struct ptlrpc_connection *ptlrpc_conn;
struct obd_import_conn *imp_conn = NULL, *item;
int rc = 0;
- ENTRY;
if (!create && !priority) {
CDEBUG(D_HA, "Nothing to do\n");
struct obd_import_conn *imp_conn;
struct obd_export *dlmexp;
int rc = -ENOENT;
- ENTRY;
spin_lock(&imp->imp_lock);
if (list_empty(&imp->imp_conn_list)) {
{
struct obd_import_conn *conn;
int rc = -ENOENT;
- ENTRY;
spin_lock(&imp->imp_lock);
list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
ldlm_ns_type_t ns_type = LDLM_NS_TYPE_UNKNOWN;
int rc;
char *cli_name = lustre_cfg_buf(lcfg, 0);
- ENTRY;
/* In a more perfect world, we would hang a ptlrpc_client off of
* obd_type and just use the values from there. */
int client_obd_cleanup(struct obd_device *obddev)
{
- ENTRY;
-
ldlm_namespace_free_post(obddev->obd_namespace);
obddev->obd_namespace = NULL;
struct obd_connect_data *ocd;
struct lustre_handle conn = { 0 };
int rc;
- ENTRY;
*exp = NULL;
down_write(&cli->cl_sem);
struct client_obd *cli;
struct obd_import *imp;
int rc = 0, err;
- ENTRY;
if (!obd) {
CERROR("invalid export for disconnect: exp %p cookie "LPX64"\n",
int target_pack_pool_reply(struct ptlrpc_request *req)
{
struct obd_device *obd;
- ENTRY;
/* Check that we still have all structures alive as this may
* be some late RPC at shutdown time. */
int netrc;
struct ptlrpc_reply_state *rs;
struct obd_export *exp;
- ENTRY;
if (req->rq_no_reply) {
EXIT;
*/
void ldlm_lock_put(struct ldlm_lock *lock)
{
- ENTRY;
-
LASSERT(lock->l_resource != LP_POISON);
LASSERT(atomic_read(&lock->l_refc) > 0);
if (atomic_dec_and_test(&lock->l_refc)) {
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
int rc;
- ENTRY;
if (lock->l_flags & LDLM_FL_NS_SRV) {
LASSERT(list_empty(&lock->l_lru));
RETURN(0);
{
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
- ENTRY;
spin_lock(&ns->ns_lock);
ldlm_lock_add_to_lru_nolock(lock);
spin_unlock(&ns->ns_lock);
{
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
- ENTRY;
if (lock->l_flags & LDLM_FL_NS_SRV) {
LASSERT(list_empty(&lock->l_lru));
EXIT;
*/
int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
{
- ENTRY;
-
if (lock->l_readers || lock->l_writers) {
LDLM_ERROR(lock, "lock still has references");
LBUG();
void ldlm_lock_destroy(struct ldlm_lock *lock)
{
int first;
- ENTRY;
+
lock_res_and_lock(lock);
first = ldlm_lock_destroy_internal(lock);
unlock_res_and_lock(lock);
void ldlm_lock_destroy_nolock(struct ldlm_lock *lock)
{
int first;
- ENTRY;
+
first = ldlm_lock_destroy_internal(lock);
/* drop reference from hashtable only for first destroy */
if (first) {
static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
{
struct ldlm_lock *lock;
- ENTRY;
if (resource == NULL)
LBUG();
struct ldlm_resource *oldres = lock->l_resource;
struct ldlm_resource *newres;
int type;
- ENTRY;
LASSERT(ns_is_client(ns));
__u64 flags)
{
struct ldlm_lock *lock;
- ENTRY;
LASSERT(handle);
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
struct list_head *work_list)
{
- ENTRY;
check_res_locked(lock->l_resource);
if (new)
ldlm_add_bl_work_item(lock, new, work_list);
void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
{
struct ldlm_namespace *ns;
- ENTRY;
lock_res_and_lock(lock);
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
- ENTRY;
LASSERT(lock != NULL);
{
struct list_head *tmp;
struct ldlm_lock *lock, *mode_end, *policy_end;
- ENTRY;
list_for_each(tmp, queue) {
lock = list_entry(tmp, struct ldlm_lock, l_res_link);
struct sl_insert_point *prev)
{
struct ldlm_resource *res = lock->l_resource;
- ENTRY;
check_res_locked(res);
static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
{
struct sl_insert_point prev;
- ENTRY;
LASSERT(lock->l_req_mode == lock->l_granted_mode);
void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
{
struct ldlm_resource *res = lock->l_resource;
- ENTRY;
check_res_locked(res);
struct ldlm_resource *res;
struct ldlm_lock *lock, *old_lock = NULL;
int rc = 0;
- ENTRY;
if (ns == NULL) {
old_lock = ldlm_handle2lock(lockh);
{
struct ldlm_lock *lock;
ldlm_mode_t mode = 0;
- ENTRY;
lock = ldlm_handle2lock(lockh);
if (lock != NULL) {
enum req_location loc, void *data, int size)
{
void *lvb;
- ENTRY;
LASSERT(data != NULL);
LASSERT(size >= 0);
{
struct ldlm_lock *lock;
struct ldlm_resource *res;
- ENTRY;
res = ldlm_resource_get(ns, NULL, res_id, type, 1);
if (res == NULL)
int local = ns_is_client(ldlm_res_to_ns(res));
ldlm_error_t rc = ELDLM_OK;
struct ldlm_interval *node = NULL;
- ENTRY;
lock->l_last_activity = cfs_time_current_sec();
/* policies are not executed on the client or during replay */
struct ldlm_lock_desc d;
int rc;
struct ldlm_lock *lock;
- ENTRY;
if (list_empty(arg->list))
RETURN(-ENOENT);
int rc = 0;
struct ldlm_lock *lock;
ldlm_completion_callback completion_callback;
- ENTRY;
if (list_empty(arg->list))
RETURN(-ENOENT);
struct ldlm_lock_desc desc;
int rc;
struct ldlm_lock *lock;
- ENTRY;
if (list_empty(arg->list))
RETURN(-ENOENT);
struct ldlm_glimpse_work *gl_work;
struct ldlm_lock *lock;
int rc = 0;
- ENTRY;
if (list_empty(arg->list))
RETURN(-ENOENT);
*/
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
{
- ENTRY;
-
if (ns != NULL) {
cfs_hash_for_each_nolock(ns->ns_rs_hash,
ldlm_reprocess_res, NULL);
{
LIST_HEAD(rpc_list);
- ENTRY;
if (!ns_is_client(ldlm_res_to_ns(res))) {
CERROR("This is client-side-only module, cannot handle "
"LDLM_NAMESPACE_SERVER resource type lock.\n");
{
struct ldlm_resource *res;
struct ldlm_namespace *ns;
- ENTRY;
lock_res_and_lock(lock);
{
struct ldlm_lock *lock = ldlm_handle2lock(lockh);
int rc = -EINVAL;
- ENTRY;
if (lock) {
if (lock->l_ast_data == NULL)
*/
void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode)
{
- ENTRY;
-
LASSERT(lock->l_granted_mode & (LCK_PW | LCK_EX));
LASSERT(new_mode == LCK_COS);
struct ldlm_namespace *ns;
int granted = 0;
struct ldlm_interval *node;
- ENTRY;
/* Just return if mode is unchanged. */
if (new_mode == lock->l_granted_mode) {
struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
{
int do_ast;
- ENTRY;
LDLM_DEBUG(lock, "client blocking AST callback handler");
int lvb_len;
LIST_HEAD(ast_list);
int rc = 0;
- ENTRY;
LDLM_DEBUG(lock, "client completion callback handler START");
struct ldlm_lock *lock)
{
int rc = -ENOSYS;
- ENTRY;
LDLM_DEBUG(lock, "client glimpse AST callback handler");
ldlm_cancel_flags_t cancel_flags)
{
struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
- ENTRY;
spin_lock(&blp->blp_lock);
if (blwi->blwi_lock &&
struct list_head *cancels, int count,
ldlm_cancel_flags_t cancel_flags)
{
- ENTRY;
-
if (cancels && count == 0)
RETURN(0);
void *val;
int keylen, vallen;
int rc = -ENOSYS;
- ENTRY;
DEBUG_REQ(D_HSM, req, "%s: handle setinfo\n", obd->obd_name);
struct ldlm_request *dlm_req;
struct ldlm_lock *lock;
int rc;
- ENTRY;
/* Requests arrive in sender's byte order. The ptlrpc service
* handler has already checked and, if necessary, byte-swapped the
static int ldlm_bl_thread_main(void *arg)
{
struct ldlm_bl_pool *blp;
- ENTRY;
{
struct ldlm_bl_thread_data *bltd = arg;
int ldlm_get_ref(void)
{
int rc = 0;
- ENTRY;
+
mutex_lock(&ldlm_ref_mutex);
if (++ldlm_refcount == 1) {
rc = ldlm_setup();
void ldlm_put_ref(void)
{
- ENTRY;
mutex_lock(&ldlm_ref_mutex);
if (ldlm_refcount == 1) {
int rc = ldlm_cleanup();
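ldlm_get_ref() and ldlm_put_ref() implement mutex-protected first-use setup and last-use teardown: the subsystem is initialized when the refcount goes from 0 to 1 and cleaned up when the last reference is dropped. A minimal sketch of the pattern, with a pthread mutex standing in for the kernel's ldlm_ref_mutex (assumed for illustration):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ref_mutex = PTHREAD_MUTEX_INITIALIZER;
static int refcount;

static int setup(void)    { puts("setup");   return 0; }
static void cleanup(void) { puts("cleanup"); }

/* first caller pays for setup; on failure the count is rolled back */
static int get_ref(void)
{
        int rc = 0;

        pthread_mutex_lock(&ref_mutex);
        if (++refcount == 1) {
                rc = setup();
                if (rc)
                        refcount--;
        }
        pthread_mutex_unlock(&ref_mutex);
        return rc;
}

/* last caller tears the shared state down again */
static void put_ref(void)
{
        pthread_mutex_lock(&ref_mutex);
        if (refcount == 1)
                cleanup();
        refcount--;
        pthread_mutex_unlock(&ref_mutex);
}

int main(void)
{
        if (get_ref() == 0)
                put_ref();
        return 0;
}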
int ldlm_init_export(struct obd_export *exp)
{
- ENTRY;
-
exp->exp_lock_hash =
cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
HASH_EXP_LOCK_CUR_BITS,
void ldlm_destroy_export(struct obd_export *exp)
{
- ENTRY;
cfs_hash_putref(exp->exp_lock_hash);
exp->exp_lock_hash = NULL;
struct ldlm_bl_pool *blp = NULL;
int rc = 0;
int i;
- ENTRY;
if (ldlm_state != NULL)
RETURN(-EALREADY);
static int ldlm_cleanup(void)
{
- ENTRY;
-
if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
CERROR("ldlm still has namespaces; clean these up first.\n");
static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
{
time_t recalc_interval_sec;
- ENTRY;
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec < pl->pl_recalc_period)
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
{
time_t recalc_interval_sec;
- ENTRY;
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec < pl->pl_recalc_period)
struct lprocfs_vars pool_vars[2];
char *var_name = NULL;
int rc = 0;
- ENTRY;
OBD_ALLOC(var_name, MAX_STRING_SIZE + 1);
if (!var_name)
int idx, ldlm_side_t client)
{
int rc;
- ENTRY;
spin_lock_init(&pl->pl_lock);
atomic_set(&pl->pl_granted, 0);
void ldlm_pool_fini(struct ldlm_pool *pl)
{
- ENTRY;
ldlm_pool_proc_fini(pl);
/*
{
struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
int s_time, c_time;
- ENTRY;
thread_set_flags(thread, SVC_RUNNING);
wake_up(&thread->t_ctl_waitq);
{
struct l_wait_info lwi = { 0 };
task_t *task;
- ENTRY;
if (ldlm_pools_thread != NULL)
RETURN(-EALREADY);
static void ldlm_pools_thread_stop(void)
{
- ENTRY;
-
if (ldlm_pools_thread == NULL) {
EXIT;
return;
int ldlm_pools_init(void)
{
int rc;
- ENTRY;
rc = ldlm_pools_thread_start();
if (rc == 0) {
struct obd_import *imp;
struct obd_device *obd;
- ENTRY;
if (lock->l_conn_export == NULL) {
static cfs_time_t next_dump = 0, last_dump = 0;
*/
int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
{
- ENTRY;
-
if (flags == LDLM_FL_WAIT_NOREPROC) {
LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
RETURN(0);
struct l_wait_info lwi;
__u32 timeout;
int rc = 0;
- ENTRY;
if (flags == LDLM_FL_WAIT_NOREPROC) {
LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock)
{
int do_ast;
- ENTRY;
lock->l_flags |= LDLM_FL_CBPENDING;
do_ast = (!lock->l_readers && !lock->l_writers);
int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
void *data, int flag)
{
- ENTRY;
-
if (flag == LDLM_CB_CANCELING) {
/* Don't need to do anything here. */
RETURN(0);
.lcs_blocking = blocking,
.lcs_glimpse = glimpse,
};
- ENTRY;
LASSERT(!(*flags & LDLM_FL_REPLAY));
if (unlikely(ns_is_client(ns))) {
struct ldlm_reply *reply;
int cleanup_phase = 1;
int size = 0;
- ENTRY;
lock = ldlm_handle2lock(lockh);
/* ldlm_cli_enqueue is holding a reference on this lock. */
int flags, avail, to_free, pack = 0;
LIST_HEAD(head);
int rc;
- ENTRY;
if (cancels == NULL)
cancels = &head;
{
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
if (req == NULL)
int req_passed_in = 1;
int rc, err;
struct ptlrpc_request *req;
- ENTRY;
LASSERT(exp != NULL);
{
struct ldlm_resource *res;
int rc;
- ENTRY;
+
if (ns_is_client(ldlm_lock_to_ns(lock))) {
CERROR("Trying to cancel local lock\n");
LBUG();
struct ldlm_resource *res;
struct ptlrpc_request *req;
int rc;
- ENTRY;
lock = ldlm_handle2lock(lockh);
if (!lock) {
static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
{
__u64 rc = LDLM_FL_LOCAL_ONLY;
- ENTRY;
if (lock->l_conn_export) {
bool local_only;
struct ldlm_request *dlm;
struct ldlm_lock *lock;
int max, packed = 0;
- ENTRY;
dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
LASSERT(dlm != NULL);
struct obd_import *imp;
int free, sent = 0;
int rc = 0;
- ENTRY;
LASSERT(exp != NULL);
LASSERT(count > 0);
struct obd_device *obd;
__u64 new_slv;
__u32 new_limit;
- ENTRY;
+
if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
!imp_connect_lru_resize(req->rq_import)))
{
struct ldlm_namespace *ns;
struct ldlm_lock *lock;
LIST_HEAD(cancels);
- ENTRY;
/* concurrent cancels on the same handle can happen */
lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING);
ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
int added = 0, unused, remained;
- ENTRY;
spin_lock(&ns->ns_lock);
unused = ns->ns_nr_unused;
{
LIST_HEAD(cancels);
int count, rc;
- ENTRY;
/* Just prepare the list of locks, do not actually cancel them yet.
* Locks are cancelled later in a separate thread. */
{
struct ldlm_lock *lock;
int count = 0;
- ENTRY;
lock_res(res);
list_for_each_entry(lock, &res->lr_granted, l_res_link) {
{
struct ldlm_lock *lock;
int res = 0;
- ENTRY;
if (list_empty(cancels) || count == 0)
RETURN(0);
LIST_HEAD(cancels);
int count;
int rc;
- ENTRY;
res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
if (res == NULL) {
.lc_opaque = opaque,
};
- ENTRY;
-
if (ns == NULL)
RETURN(ELDLM_OK);
struct ldlm_lock *lock;
int rc = LDLM_ITER_CONTINUE;
- ENTRY;
-
if (!res)
RETURN(LDLM_ITER_CONTINUE);
{
struct ldlm_resource *res;
int rc;
- ENTRY;
if (ns == NULL) {
CERROR("must pass in namespace\n");
struct ldlm_reply *reply;
struct obd_export *exp;
- ENTRY;
atomic_dec(&req->rq_import->imp_replay_inflight);
if (rc != ELDLM_OK)
GOTO(out, rc);
struct ldlm_async_args *aa;
struct ldlm_request *body;
int flags;
- ENTRY;
-
/* Bug 11974: Do not replay a lock which is actively being canceled */
if (lock->l_flags & LDLM_FL_CANCELING) {
struct ldlm_lock *lock, *next;
int rc = 0;
- ENTRY;
-
LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
/* don't replay locks if import failed recovery */
{ "cancel_unused_locks_before_replay", &ldlm_rw_uint_fops,
&ldlm_cancel_unused_locks_before_replay },
{ NULL }};
- ENTRY;
LASSERT(ldlm_ns_proc_dir == NULL);
ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
cfs_hash_bd_t bd;
int idx;
int rc;
- ENTRY;
LASSERT(obd != NULL);
*/
static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
{
- ENTRY;
-
/* At shutdown time, don't call the cancellation callback */
ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
int force)
{
int rc;
- ENTRY;
+
if (!ns) {
EXIT;
return;
*/
void ldlm_namespace_free_post(struct ldlm_namespace *ns)
{
- ENTRY;
if (!ns) {
EXIT;
return;
{
wait_queue_t wait;
task_t *dumper;
- ENTRY;
/* we're being careful to ensure that the kernel thread is
* able to set our state to running as it exits before we
cfs_hash_t *hs;
int len;
- ENTRY;
-
CLASSERT(CFS_HASH_THETA_BITS < 15);
LASSERT(name != NULL);
struct hlist_node *pos;
cfs_hash_bd_t bd;
int i;
- ENTRY;
LASSERT(hs != NULL);
LASSERT(!cfs_hash_is_exiting(hs) &&
int excl = !!remove_safe;
int loop = 0;
int i;
- ENTRY;
cfs_hash_for_each_enter(hs);
int stop_on_change;
int rc;
int i;
- ENTRY;
stop_on_change = cfs_hash_with_rehash_key(hs) ||
!cfs_hash_with_no_itemref(hs) ||
cfs_hash_for_each_nolock(cfs_hash_t *hs,
cfs_hash_for_each_cb_t func, void *data)
{
- ENTRY;
-
if (cfs_hash_with_no_lock(hs) ||
cfs_hash_with_rehash_key(hs) ||
!cfs_hash_with_no_itemref(hs))
cfs_hash_for_each_cb_t func, void *data)
{
unsigned i = 0;
- ENTRY;
if (cfs_hash_with_no_lock(hs))
return -EOPNOTSUPP;
int libcfs_kkuc_group_rem(int uid, int group)
{
struct kkuc_reg *reg, *next;
- ENTRY;
if (kkuc_groups[group].next == NULL)
RETURN(0);
struct kkuc_reg *reg;
int rc = 0;
int one_success = 0;
- ENTRY;
down_read(&kg_sem);
list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
{
struct kkuc_reg *reg;
int rc = 0;
- ENTRY;
if (group > KUC_GRP_MAX) {
CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group);
const char *debugstr;
char op = 0;
int newmask = minmask, i, len, found = 0;
- ENTRY;
/* <str> must be a list of tokens separated by whitespace
* and optionally an operator ('+' or '-'). If an operator
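The grammar described above turns a string such as "+neterror -warning" into a bitmask, adjusting the current mask whenever tokens carry an operator and replacing it outright for bare tokens. A compact sketch of that parser (the token table and names are illustrative, not the libcfs list):

#include <stdio.h>
#include <string.h>

static const char *names[] = { "trace", "neterror", "warning" };

static int name2bit(const char *tok, size_t len)
{
        int i;

        for (i = 0; i < 3; i++)
                if (strlen(names[i]) == len && !strncmp(names[i], tok, len))
                        return 1 << i;
        return 0;
}

/* apply whitespace-separated "+tok" / "-tok" / "tok" items to a mask */
static int str2mask(const char *str, int mask)
{
        while (*str) {
                char op = 0;
                size_t len;

                while (*str == ' ')
                        str++;
                if (!*str)
                        break;
                if (*str == '+' || *str == '-')
                        op = *str++;

                len = strcspn(str, " ");
                if (op == '-')
                        mask &= ~name2bit(str, len);
                else if (op == '+')
                        mask |= name2bit(str, len);
                else
                        mask = name2bit(str, len); /* bare token replaces */
                str += len;
        }
        return mask;
}

int main(void)
{
        printf("%#x\n", str2mask("+neterror -trace", 0x1));     /* 0x2 */
        return 0;
}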
int key_len = strlen(key);
unsigned long addr;
int rc;
- ENTRY;
buffer = kmalloc(buf_len, GFP_USER);
if (!buffer)
"HOME=/",
"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
NULL};
- ENTRY;
argv[0] = lnet_debug_log_upcall;
"HOME=/",
"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
NULL};
- ENTRY;
argv[0] = lnet_upcall;
argc = 1;
char *argv[6];
char buf[32];
- ENTRY;
snprintf (buf, sizeof buf, "%d", msgdata->msg_line);
argv[1] = "LBUG";
struct libcfs_ioctl_hdr *hdr;
struct libcfs_ioctl_data *data;
int err;
- ENTRY;
hdr = (struct libcfs_ioctl_hdr *)buf;
data = (struct libcfs_ioctl_data *)buf;
static int libcfs_psdev_open(unsigned long flags, void *args)
{
struct libcfs_device_userstate *ldu;
- ENTRY;
try_module_get(THIS_MODULE);
static int libcfs_psdev_release(unsigned long flags, void *args)
{
struct libcfs_device_userstate *ldu;
- ENTRY;
ldu = (struct libcfs_device_userstate *)args;
if (ldu != NULL) {
void *arg, struct libcfs_ioctl_data *data)
{
int err = -EINVAL;
- ENTRY;
switch (cmd) {
case IOC_LIBCFS_CLEAR_DEBUG:
char *buf;
struct libcfs_ioctl_data *data;
int err = 0;
- ENTRY;
LIBCFS_ALLOC_GFP(buf, 1024, GFP_IOFS);
if (buf == NULL)
struct cfs_lstr src;
struct cfs_lstr res;
int rc;
- ENTRY;
src.ls_str = str;
src.ls_len = len;
{
struct nidrange *nr;
struct addrrange *ar;
- ENTRY;
list_for_each_entry(nr, nidlist, nr_link) {
if (nr->nr_netstrfns->nf_type != LNET_NETTYP(LNET_NIDNET(nid)))
struct list_head *head;
wait_queue_t wait;
int rc, found;
- ENTRY;
LASSERT(cache);
void upcall_cache_put_entry(struct upcall_cache *cache,
struct upcall_cache_entry *entry)
{
- ENTRY;
-
if (!entry) {
EXIT;
return;
struct upcall_cache_entry *entry = NULL;
struct list_head *head;
int found = 0, rc = 0;
- ENTRY;
LASSERT(cache);
{
struct upcall_cache_entry *entry, *next;
int i;
- ENTRY;
spin_lock(&cache->uc_lock);
for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
struct list_head *head;
struct upcall_cache_entry *entry;
int found = 0;
- ENTRY;
head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
{
struct upcall_cache *cache;
int i;
- ENTRY;
LIBCFS_ALLOC(cache, sizeof(*cache));
if (!cache)
static void ll_release(struct dentry *de)
{
struct ll_dentry_data *lld;
- ENTRY;
+
LASSERT(de != NULL);
lld = ll_d2d(de);
if (lld == NULL) /* NFS copies the de->d_op methods (bug 4655) */
int ll_dcompare(const struct dentry *parent, const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
- ENTRY;
-
if (len != name->len)
RETURN(1);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct lov_stripe_md *lsm;
int rc = 0;
- ENTRY;
LASSERT(inode);
rc = md_find_cbdata(sbi->ll_md_exp, ll_inode2fid(inode),
*/
static int ll_ddelete(const struct dentry *de)
{
- ENTRY;
LASSERT(de);
CDEBUG(D_DENTRY, "%s dentry %.*s (%p, parent %p, inode %p) %s%s\n",
static int ll_set_dd(struct dentry *de)
{
- ENTRY;
LASSERT(de != NULL);
CDEBUG(D_DENTRY, "ldd on dentry %.*s (%p) parent %p inode %p refc %d\n",
void ll_intent_release(struct lookup_intent *it)
{
- ENTRY;
-
CDEBUG(D_INFO, "intent %p released\n", it);
ll_intent_drop_lock(it);
/* We are still holding extra reference on a request, need to free it */
{
struct dentry *dentry;
struct ll_d_hlist_node *p;
- ENTRY;
LASSERT(inode != NULL);
struct dentry *de)
{
int rc = 0;
- ENTRY;
if (!request)
RETURN(0);
struct inode *parent = de->d_parent->d_inode;
int rc;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%s,intent=%s\n", de->d_name.name,
LL_IT2STR(it));
struct inode *parent = dentry->d_parent->d_inode;
int unplug = 0;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%s,flags=%u\n",
dentry->d_name.name, flags);
int npages;
int i;
int rc;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) hash "LPU64"\n",
inode->i_ino, inode->i_generation, inode, hash);
struct ll_dir_chain chain;
int done = 0;
int rc = 0;
- ENTRY;
ll_dir_chain_init(&chain);
int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
int api32 = ll_need_32bit_api(sbi);
int rc;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu "
" 32bit_api %d\n", inode->i_ino, inode->i_generation,
int mode;
int err;
- ENTRY;
-
mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
strlen(filename), mode, LUSTRE_OPC_MKDIR,
struct lustre_sb_info *lsi = s2lsi(inode->i_sb);
struct obd_device *mgc = lsi->lsi_mgc;
int lum_size;
- ENTRY;
if (lump != NULL) {
/*
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct md_op_data *op_data;
int rc, mdtidx;
- ENTRY;
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0,
0, LUSTRE_OPC_ANY, NULL);
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct hsm_progress_kernel hpk;
int rc;
- ENTRY;
/* Forge a hsm_progress based on data from copy. */
hpk.hpk_fid = copy->hc_hai.hai_fid;
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct hsm_progress_kernel hpk;
int rc;
- ENTRY;
/* If you modify the logic here, also check llapi_hsm_copy_end(). */
/* Take care: copy->hc_hai.hai_action, len, gid and data are not
int id = qctl->qc_id;
int valid = qctl->qc_valid;
int rc = 0;
- ENTRY;
switch (cmd) {
case LUSTRE_Q_INVALIDATE:
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct obd_ioctl_data *data;
int rc = 0;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), cmd=%#x\n",
inode->i_ino, inode->i_generation, inode, cmd);
struct ll_sb_info *sbi = ll_i2sbi(inode);
int api32 = ll_need_32bit_api(sbi);
loff_t ret = -EINVAL;
- ENTRY;
mutex_lock(&inode->i_mutex);
switch (origin) {
int ll_dir_open(struct inode *inode, struct file *file)
{
- ENTRY;
RETURN(ll_file_open(inode, file));
}
int ll_dir_release(struct inode *inode, struct file *file)
{
- ENTRY;
RETURN(ll_file_release(inode, file));
}
static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
struct obd_client_handle *och)
{
- ENTRY;
-
op_data->op_attr.ia_valid = ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
ATTR_MTIME | ATTR_MTIME_SET |
ATTR_CTIME | ATTR_CTIME_SET;
struct obd_device *obd = class_exp2obd(exp);
int epoch_close = 1;
int rc;
- ENTRY;
if (obd == NULL) {
/*
struct obd_client_handle *och;
__u64 *och_usecount;
int rc = 0;
- ENTRY;
if (flags & FMODE_WRITE) {
och_p = &lli->lli_mds_write_och;
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct ll_inode_info *lli = ll_i2info(inode);
int rc = 0;
- ENTRY;
/* clear group lock, if present */
if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ll_inode_info *lli = ll_i2info(inode);
int rc;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
inode->i_generation, inode);
struct ptlrpc_request *req;
__u32 opc = LUSTRE_OPC_ANY;
int rc;
- ENTRY;
if (!parent)
RETURN(-ENOENT);
{
struct inode *inode = file->f_dentry->d_inode;
struct ll_inode_info *lli = ll_i2info(inode);
- ENTRY;
LASSERT(!LUSTRE_FPRIVATE(file));
__u64 *och_usecount = NULL;
struct ll_file_data *fd;
int rc = 0, opendir_set = 0;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), flags %o\n", inode->i_ino,
inode->i_generation, inode, file->f_flags);
struct obd_info oinfo = { { { 0 } } };
int rc;
- ENTRY;
-
LASSERT(lsm != NULL);
oinfo.oi_md = lsm;
struct obd_capa *capa = ll_mdscapa_get(inode);
struct lov_stripe_md *lsm;
int rc;
- ENTRY;
lsm = ccc_inode_lsm_get(inode);
rc = ll_lsm_getattr(lsm, ll_i2dtexp(inode),
struct ost_lvb lvb;
int rc = 0;
- ENTRY;
-
ll_inode_size_lock(inode);
/* merge the timestamps most recently obtained from the MDS with
those obtained from the OSTs */
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct cl_io *io;
ssize_t result;
- ENTRY;
restart:
io = ccc_env_thread_io(env);
size_t count;
ssize_t result;
int refcheck;
- ENTRY;
result = ll_file_get_iov_count(iov, &nr_segs, &count);
if (result)
struct kiocb *kiocb;
ssize_t result;
int refcheck;
- ENTRY;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
size_t count;
ssize_t result;
int refcheck;
- ENTRY;
result = ll_file_get_iov_count(iov, &nr_segs, &count);
if (result)
struct kiocb *kiocb;
ssize_t result;
int refcheck;
- ENTRY;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
struct vvp_io_args *args;
ssize_t result;
int refcheck;
- ENTRY;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
int lsm_size;
int rc = 0;
struct lov_stripe_md *lsm = NULL, *lsm2;
- ENTRY;
OBDO_ALLOC(oa);
if (oa == NULL)
{
struct ll_recreate_obj ucreat;
struct ost_id oi;
- ENTRY;
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
RETURN(-EPERM);
struct lu_fid fid;
struct ost_id oi;
obd_count ost_idx;
- ENTRY;
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
RETURN(-EPERM);
struct lov_stripe_md *lsm = NULL;
struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
int rc = 0;
- ENTRY;
lsm = ccc_inode_lsm_get(inode);
if (lsm != NULL) {
int lum_size = sizeof(struct lov_user_md) +
sizeof(struct lov_user_ost_data);
int rc;
- ENTRY;
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
RETURN(-EPERM);
struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg;
int lum_size, rc;
int flags = FMODE_WRITE;
- ENTRY;
/* first try with v1 which is smaller than v3 */
lum_size = sizeof(struct lov_user_md_v1);
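The idiom here: user space may pass either the small v1 layout or the larger v3 one, so the code copies the v1-sized prefix first, inspects the magic, and only then re-reads at the real size. A user-space sketch of the same two-step probe (struct layouts and magic values are simplified stand-ins, not the lov_user_md wire format):

#include <stdio.h>
#include <string.h>

#define MAGIC_V1 0x0bd10bd0u
#define MAGIC_V3 0x0bd30bd0u

struct md_v1 { unsigned magic; unsigned stripe_count; };
struct md_v3 { unsigned magic; unsigned stripe_count; char pool[16]; };

/* copy the common prefix first, then decide how much is really there */
static size_t probe_size(const void *user, size_t avail)
{
        struct md_v1 v1;

        if (avail < sizeof(v1))
                return 0;
        memcpy(&v1, user, sizeof(v1));  /* copy_from_user stand-in */

        if (v1.magic == MAGIC_V3)
                return avail >= sizeof(struct md_v3) ?
                       sizeof(struct md_v3) : 0;
        return v1.magic == MAGIC_V1 ? sizeof(v1) : 0;
}

int main(void)
{
        struct md_v3 in = { MAGIC_V3, 4, "pool0" };

        printf("need %zu bytes\n", probe_size(&in, sizeof(in)));
        return 0;
}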
{
struct lov_stripe_md *lsm;
int rc = -ENODATA;
- ENTRY;
lsm = ccc_inode_lsm_get(inode);
if (lsm != NULL)
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct ccc_grouplock grouplock;
int rc;
- ENTRY;
if (ll_file_nolock(file))
RETURN(-EOPNOTSUPP);
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct ccc_grouplock grouplock;
- ENTRY;
spin_lock(&lli->lli_lock);
if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
struct inode *inode = dentry->d_inode;
struct obd_client_handle *och;
int rc;
- ENTRY;
LASSERT(inode);
struct ll_fiemap_info_key fm_key = { .name = KEY_FIEMAP, };
int vallen = num_bytes;
int rc;
- ENTRY;
/* Checks for fiemap flags */
if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
struct obd_export *exp = ll_i2mdexp(inode);
struct getinfo_fid2path *gfout, *gfin;
int outsize, rc;
- ENTRY;
if (!cfs_capable(CFS_CAP_DAC_READ_SEARCH) &&
!(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct obdo *obdo = NULL;
int rc;
- ENTRY;
/* If there is no stripe, we consider the version to be 0. */
lsm = ccc_inode_lsm_get(inode);
struct inode *inode = file->f_dentry->d_inode;
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
int flags, rc;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),cmd=%x\n", inode->i_ino,
inode->i_generation, inode, cmd);
struct inode *inode = file->f_dentry->d_inode;
loff_t retval, eof = 0;
- ENTRY;
retval = offset + ((origin == SEEK_END) ? i_size_read(inode) :
(origin == SEEK_CUR) ? file->f_pos : 0);
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), to=%llu=%#llx(%d)\n",
struct obd_capa *capa = NULL;
struct cl_fsync_io *fio;
int result;
- ENTRY;
if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL)
struct ptlrpc_request *req;
struct obd_capa *oc;
int rc, err;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
inode->i_generation, inode);
int flags = 0;
int rc;
int rc2 = 0;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu file_lock=%p\n",
inode->i_ino, file_lock);
int ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
{
- ENTRY;
-
RETURN(-ENOSYS);
}
struct lu_fid *fid;
__u64 flags;
int i;
- ENTRY;
if (!inode)
RETURN(0);
ldlm_policy_data_t policy = { .l_inodebits = {bits}};
struct lu_fid *fid;
ldlm_mode_t rc;
- ENTRY;
fid = &ll_i2info(inode)->lli_fid;
CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
struct ptlrpc_request *req = NULL;
struct obd_export *exp;
int rc = 0;
- ENTRY;
LASSERT(inode != NULL);
{
struct inode *inode = dentry->d_inode;
int rc;
- ENTRY;
rc = __ll_inode_revalidate_it(dentry, it, ibits);
if (rc != 0)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct posix_acl *acl = NULL;
- ENTRY;
spin_lock(&lli->lli_lock);
/* VFS' acl_permission_check->check_acl will release the refcount */
int ll_inode_permission(struct inode *inode, int mask)
{
int rc = 0;
- ENTRY;
#ifdef MAY_NOT_BLOCK
if (mask & MAY_NOT_BLOCK)
{
unsigned int size;
struct llioc_data *in_data = NULL;
- ENTRY;
if (cb == NULL || cmd == NULL ||
count > LLIOC_MAX_CMD || count < 0)
struct cl_env_nest nest;
struct lu_env *env;
int result;
- ENTRY;
if (lli->lli_clob == NULL)
RETURN(0);
void *lmm;
int lmmsize;
int rc;
- ENTRY;
CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
PFID(ll_inode2fid(inode)), !!(lock->l_flags & LDLM_FL_LVB_READY),
int rc = 0;
bool lvb_ready;
bool wait_layout = false;
- ENTRY;
LASSERT(lustre_handle_is_used(lockh));
.ei_cb_cp = ldlm_completion_ast,
};
int rc;
- ENTRY;
*gen = lli->lli_layout_gen;
if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
struct inode *inode = NULL;
struct l_wait_info lwi = { 0 };
int rc;
- ENTRY;
thread_set_flags(&ll_capa_thread, SVC_RUNNING);
wake_up(&ll_capa_thread.t_ctl_waitq);
int ll_capa_thread_start(void)
{
task_t *task;
- ENTRY;
init_waitqueue_head(&ll_capa_thread.t_ctl_waitq);
struct obd_capa *ocapa;
int found = 0;
- ENTRY;
-
if ((ll_i2sbi(inode)->ll_flags & LL_SBI_OSS_CAPA) == 0)
RETURN(NULL);
{
struct ll_inode_info *lli = ll_i2info(inode);
struct obd_capa *ocapa;
- ENTRY;
LASSERT(inode != NULL);
{
struct inode *inode = ocapa->u.cli.inode;
int rc = 0;
- ENTRY;
LASSERT(ocapa);
{
struct ll_inode_info *lli = ll_i2info(club->cob_inode);
- ENTRY;
spin_lock(&lli->lli_lock);
lli->lli_flags |= LLIF_SOM_DIRTY;
if (page != NULL && list_empty(&page->cpg_pending_linkage))
struct ll_inode_info *lli = ll_i2info(club->cob_inode);
int rc = 0;
- ENTRY;
spin_lock(&lli->lli_lock);
if (page != NULL && !list_empty(&page->cpg_pending_linkage)) {
list_del_init(&page->cpg_pending_linkage);
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
- ENTRY;
spin_lock(&lli->lli_lock);
lli->lli_flags |= flags;
void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
{
struct ll_inode_info *lli = ll_i2info(inode);
- ENTRY;
op_data->op_flags |= MF_SOM_CHANGE;
/* Check if Size-on-MDS attributes are valid. */
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
- ENTRY;
spin_lock(&lli->lli_lock);
if (!(list_empty(&club->cob_pending_list))) {
__u32 old_flags;
struct obdo *oa;
int rc;
- ENTRY;
LASSERT(op_data != NULL);
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
struct obd_client_handle *och = NULL;
struct md_op_data *op_data;
int rc;
- ENTRY;
LASSERT(exp_connect_som(ll_i2mdexp(inode)));
static int ll_close_thread(void *arg)
{
struct ll_close_queue *lcq = arg;
- ENTRY;
complete(&lcq->lcq_comp);
struct sysinfo si;
class_uuid_t uuid;
int i;
- ENTRY;
OBD_ALLOC(sbi, sizeof(*sbi));
if (!sbi)
void ll_free_sbi(struct super_block *sb)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
- ENTRY;
if (sbi != NULL) {
spin_lock(&ll_sb_lock);
struct lustre_md lmd;
obd_valid valid;
int size, err, checksum;
- ENTRY;
obd = class_name2obd(md);
if (!obd) {
void client_common_put_super(struct super_block *sb)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
- ENTRY;
#ifdef CONFIG_FS_POSIX_ACL
if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
{
struct ll_sb_info *sbi;
- ENTRY;
-
/* sb not initialized? */
if (!(sb->s_flags & MS_ACTIVE))
return;
{
char *value;
char *retval;
- ENTRY;
CDEBUG(D_SUPER, "option: %s, data %s\n", opt, data);
if (strncmp(opt, data, strlen(opt)))
{
int tmp;
char *s1 = options, *s2;
- ENTRY;
if (!options)
RETURN(0);
/* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
const int instlen = sizeof(cfg->cfg_instance) * 2 + 2;
int err;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
struct ll_sb_info *sbi = ll_s2sbi(sb);
char *profilenm = get_profile_name(sb);
int next, force = 1;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
inode->i_generation, inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ptlrpc_request *request = NULL;
int rc, ia_valid;
- ENTRY;
op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
{
struct ll_inode_info *lli = ll_i2info(inode);
int rc = 0;
- ENTRY;
LASSERT(op_data != NULL);
if (!S_ISREG(inode->i_mode))
struct md_op_data *op_data = NULL;
struct md_open_data *mod = NULL;
int rc = 0, rc1 = 0;
- ENTRY;
CDEBUG(D_VFSTRACE, "%s: setattr inode %p/fid:"DFID" from %llu to %llu, "
"valid %x\n", ll_get_fsname(inode->i_sb, NULL, 0), inode,
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct obd_statfs obd_osfs;
int rc;
- ENTRY;
rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
if (rc) {
{
struct lustre_md *md = opaque;
struct ll_inode_info *lli = ll_i2info(inode);
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
PFID(&lli->lli_fid), inode);
void ll_delete_inode(struct inode *inode)
{
struct cl_inode_info *lli = cl_i2info(inode);
- ENTRY;
if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL)
/* discard all dirty pages before truncating them, required by
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ptlrpc_request *req = NULL;
int rc, flags = 0;
- ENTRY;
switch(cmd) {
case FSFILT_IOC_GETFLAGS: {
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct obd_device *obd;
struct obd_ioctl_data *ioc_data;
- ENTRY;
-
CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
sb->s_count, atomic_read(&sb->s_active));
struct ll_sb_info *sbi = NULL;
struct lustre_md md;
int rc;
- ENTRY;
LASSERT(*inode || sb);
sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
{
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct obd_device *obd;
- ENTRY;
if (cmd == OBD_IOC_GETDTNAME)
obd = class_exp2obd(sbi->ll_dt_exp);
size_t count)
{
struct vm_area_struct *vma, *ret = NULL;
- ENTRY;
/* mmap_sem must have been held by caller. */
LASSERT(!down_write_trylock(&mm->mmap_sem));
struct cl_fault_io *fio;
struct lu_env *env;
int rc;
- ENTRY;
*env_ret = NULL;
if (ll_file_nolock(file))
sigset_t set;
struct inode *inode;
struct ll_inode_info *lli;
- ENTRY;
LASSERT(vmpage != NULL);
struct cl_env_nest nest;
int result;
int fault_ret = 0;
- ENTRY;
io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
if (IS_ERR(io))
struct inode *inode = vma->vm_file->f_dentry->d_inode;
struct ccc_object *vob = cl_inode2ccc(inode);
- ENTRY;
LASSERT(vma->vm_file);
LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
atomic_inc(&vob->cob_mmap_cnt);
struct inode *inode = vma->vm_file->f_dentry->d_inode;
struct ccc_object *vob = cl_inode2ccc(inode);
- ENTRY;
LASSERT(vma->vm_file);
atomic_dec(&vob->cob_mmap_cnt);
LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
int rc = -ENOENT;
- ENTRY;
LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
if (mapping_mapped(mapping)) {
{
struct inode *inode = file->f_dentry->d_inode;
int rc;
- ENTRY;
if (ll_file_nolock(file))
RETURN(-EOPNOTSUPP);
ll_need_32bit_api(sbi));
struct md_op_data *op_data;
int rc;
- ENTRY;
CDEBUG(D_INFO, "searching inode for:(%lu,"DFID")\n", hash, PFID(fid));
{
struct inode *inode;
struct dentry *result;
- ENTRY;
CDEBUG(D_INFO, "Get dentry for fid: "DFID"\n", PFID(fid));
if (!fid_is_sane(fid))
struct inode *parent)
{
struct lustre_nfs_fid *nfs_fid = (void *)fh;
- ENTRY;
CDEBUG(D_INFO, "encoding for (%lu,"DFID") maxlen=%d minlen=%d\n",
inode->i_ino, PFID(ll_inode2fid(inode)), *plen,
.lgd_fid = ll_i2info(child->d_inode)->lli_fid,
.ctx.actor = ll_nfs_get_name_filldir,
};
- ENTRY;
if (!dir || !S_ISDIR(dir->i_mode))
GOTO(out, rc = -ENOTDIR);
struct md_op_data *op_data;
int rc;
int lmmsize;
- ENTRY;
LASSERT(dir && S_ISDIR(dir->i_mode));
int mult, rc, pages_number;
int diff = 0;
int nrpages = 0;
- ENTRY;
mult = 1 << (20 - PAGE_CACHE_SHIFT);
buffer = lprocfs_find_named_value(buffer, "max_cached_mb:", &count);
proc_dir_entry_t *dir;
char name[MAX_STRING_SIZE + 1], *ptr;
int err, id, len, rc;
- ENTRY;
memset(lvars, 0, sizeof(lvars));
int ll_unlock(__u32 mode, struct lustre_handle *lockh)
{
- ENTRY;
-
ldlm_lock_decref(lockh, mode);
RETURN(0);
struct lustre_md *md)
{
struct inode *inode;
- ENTRY;
LASSERT(hash != 0);
inode = iget5_locked(sb, hash, ll_test_inode, ll_set_inode, md);
{
int rc;
struct lustre_handle lockh;
- ENTRY;
switch (flag) {
case LDLM_CB_BLOCKING:
struct inode *inode = NULL;
__u64 bits = 0;
int rc;
- ENTRY;
/* NB 1 request reference will be taken away by ll_intent_lock()
* when I return */
struct it_cb_data icbd;
__u32 opc;
int rc;
- ENTRY;
if (dentry->d_name.len > ll_i2sbi(parent)->ll_namelen)
RETURN(ERR_PTR(-ENAMETOOLONG));
struct dentry *de;
long long lookup_flags = LOOKUP_OPEN;
int rc = 0;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),file %p,"
"open_flags %x,mode %x opened %d\n",
struct ptlrpc_request *request = NULL;
struct ll_sb_info *sbi = ll_i2sbi(dir);
int rc;
- ENTRY;
LASSERT(it && it->d.lustre.it_disposition);
{
struct inode *inode;
int rc = 0;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),intent=%s\n",
dentry->d_name.len, dentry->d_name.name, dir->i_ino,
int tgt_len = 0;
int err;
- ENTRY;
if (unlikely(tgt != NULL))
tgt_len = strlen(tgt) + 1;
unsigned rdev, struct dentry *dchild)
{
int err;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p) mode %o dev %x\n",
name->len, name->name, dir->i_ino, dir->i_generation, dir,
const char *tgt, struct dentry *dchild)
{
int err;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),target=%.*s\n",
name->len, name->name, dir->i_ino, dir->i_generation,
struct md_op_data *op_data;
int err;
- ENTRY;
CDEBUG(D_VFSTRACE,
"VFS Op: inode=%lu/%u(%p), dir=%lu/%u(%p), target=%.*s\n",
src->i_ino, src->i_generation, src, dir->i_ino,
{
int err;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)\n",
name->len, name->name, dir->i_ino, dir->i_generation, dir);
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
int rc;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)\n",
name->len, name->name, dir->i_ino, dir->i_generation, dir);
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
int rc;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)\n",
namelen, name, dir->i_ino, dir->i_generation, dir);
struct obdo *oa;
struct obd_capa *oc = NULL;
int rc;
- ENTRY;
/* req is swabbed so this is safe */
body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
int rc;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)\n",
name->len, name->name, dir->i_ino, dir->i_generation, dir);
struct ll_sb_info *sbi = ll_i2sbi(src);
struct md_op_data *op_data;
int err;
- ENTRY;
+
CDEBUG(D_VFSTRACE,"VFS Op:oldname=%.*s,src_dir=%lu/%u(%p),newname=%.*s,"
"tgt_dir=%lu/%u(%p)\n", src_name->len, src_name->name,
src->i_ino, src->i_generation, src, tgt_name->len,
struct hlist_head *head;
struct ll_remote_perm *lrp;
int found = 0, rc;
- ENTRY;
if (!lli->lli_remote_perms)
RETURN(-ENOENT);
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_remote_perm *lrp = NULL, *tmp = NULL;
struct hlist_head *head, *perm_hash = NULL;
- ENTRY;
LASSERT(ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT);
struct obd_capa *oc;
cfs_time_t save;
int i = 0, rc;
- ENTRY;
do {
save = lli->lli_rmtperm_time;
{
struct ll_cl_context *lcc;
int result;
- ENTRY;
lcc = ll_cl_init(file, vmpage, 1);
if (!IS_ERR(lcc)) {
struct cl_io *io;
struct cl_page *page;
int result = 0;
- ENTRY;
lcc = ll_cl_get();
env = lcc->lcc_env;
{
struct ll_ra_info *ra = &sbi->ll_ra_info;
long ret;
- ENTRY;
/* If read-ahead pages left are less than 1M, do not do read-ahead,
* otherwise it will form small read RPC(< 1M), which hurts server
struct ccc_page *cp;
int rc;
- ENTRY;
-
rc = 0;
cl_page_assume(env, io, page);
lu_ref_add(&page->cp_reference, "ra", current);
int rc = 0;
const char *msg = NULL;
- ENTRY;
-
gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
#ifdef __GFP_NOWARN
gfp_mask |= __GFP_NOWARN;
struct cl_object *clob;
int ret = 0;
__u64 kms;
- ENTRY;
inode = mapping->host;
lli = ll_i2info(inode);
{
struct ll_ra_info *ra = &sbi->ll_ra_info;
int zero = 0, stride_detect = 0, ra_miss = 0;
- ENTRY;
spin_lock(&ras->ras_lock);
bool redirtied = false;
bool unlocked = false;
int result;
- ENTRY;
LASSERT(PageLocked(vmpage));
LASSERT(!PageWriteback(vmpage));
int range_whole = 0;
int result;
int ignore_layout = 0;
- ENTRY;
if (wbc->range_cyclic) {
start = mapping->writeback_index << PAGE_CACHE_SHIFT;
{
struct ll_cl_context *lcc;
int result;
- ENTRY;
lcc = ll_cl_init(file, vmpage, 0);
if (!IS_ERR(lcc)) {
long page_size = cl_page_size(obj);
bool do_io;
int io_pages = 0;
- ENTRY;
queue = &io->ci_queue;
cl_2queue_init(queue);
unsigned long seg = 0;
long size = MAX_DIO_SIZE;
int refcheck;
- ENTRY;
if (!lli->lli_has_smd)
RETURN(-EBADF);
struct page *page;
int rc;
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
- ENTRY;
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
struct ll_sa_entry *entry;
int entry_size;
char *dname;
- ENTRY;
entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4;
OBD_ALLOC(entry, entry_size);
{
struct ll_statahead_info *sai;
int i;
- ENTRY;
OBD_ALLOC_PTR(sai);
if (!sai)
{
struct inode *inode = sai->sai_inode;
struct ll_inode_info *lli = ll_i2info(inode);
- ENTRY;
if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
struct ll_sa_entry *entry, *next;
struct ll_inode_info *lli = ll_i2info(inode);
__u64 index = lli->lli_agl_index;
int rc;
- ENTRY;
LASSERT(list_empty(&lli->lli_agl_list));
struct ptlrpc_request *req;
struct mdt_body *body;
int rc = 0;
- ENTRY;
spin_lock(&lli->lli_sa_lock);
if (unlikely(sa_received_empty(sai))) {
struct ll_statahead_info *sai = NULL;
struct ll_sa_entry *entry;
int wakeup;
- ENTRY;
if (it_disposition(it, DISP_LOOKUP_NEG))
rc = -ENOENT;
struct ldlm_enqueue_info *einfo;
struct obd_capa *capas[2];
int rc;
- ENTRY;
rc = sa_args_init(dir, NULL, entry, &minfo, &einfo, capas);
if (rc)
struct ldlm_enqueue_info *einfo;
struct obd_capa *capas[2];
int rc;
- ENTRY;
if (unlikely(inode == NULL))
RETURN(1);
struct ll_sa_entry *entry;
int rc;
int rc1;
- ENTRY;
entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name,
entry_name_len);
struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
struct ptlrpc_thread *thread = &sai->sai_agl_thread;
struct l_wait_info lwi = { 0 };
- ENTRY;
CDEBUG(D_READA, "agl thread started: [pid %d] [parent %.*s]\n",
current_pid(), parent->d_name.len, parent->d_name.name);
struct l_wait_info lwi = { 0 };
struct ll_inode_info *plli;
task_t *task;
- ENTRY;
CDEBUG(D_READA, "start agl thread: [pid %d] [parent %.*s]\n",
current_pid(), parent->d_name.len, parent->d_name.name);
int rc = 0;
struct ll_dir_chain chain;
struct l_wait_info lwi = { 0 };
- ENTRY;
CDEBUG(D_READA, "statahead thread started: [pid %d] [parent %.*s]\n",
current_pid(), parent->d_name.len, parent->d_name.name);
__u64 pos = 0;
int dot_de;
int rc = LS_NONE_FIRST_DE;
- ENTRY;
ll_dir_chain_init(&chain);
page = ll_get_dir_page(dir, pos, &chain);
struct ptlrpc_thread *thread = &sai->sai_thread;
struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
int hit;
- ENTRY;
if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC)
hit = 1;
struct l_wait_info lwi = { 0 };
int rc = 0;
struct ll_inode_info *plli;
- ENTRY;
LASSERT(lli->lli_opendir_pid == current_pid());
int rc, symlen = i_size_read(inode) + 1;
struct mdt_body *body;
struct md_op_data *op_data;
- ENTRY;
*request = NULL;
struct ptlrpc_request *request;
char *symname;
int rc;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op\n");
struct ptlrpc_request *request = NULL;
int rc;
char *symname;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op\n");
/* Limit the recursive symlink depth to 5 instead of default
int refcheck;
int result;
- ENTRY;
sbi = ll_s2sbi(sb);
env = cl_env_get(&refcheck);
if (!IS_ERR(env)) {
unsigned long seg;
ssize_t count;
int result;
- ENTRY;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
int ast_flags = 0;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- ENTRY;
ccc_io_update_iov(env, cio, io);
struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
int result;
- ENTRY;
/* XXX: Layer violation, we shouldn't see lsm at llite level. */
if (lli->lli_has_smd) /* lsm-less file doesn't need to lock */
result = vvp_io_rw_lock(env, io, CLM_READ,
loff_t pos = io->u.ci_wr.wr.crw_pos;
size_t cnt = io->u.ci_wr.wr.crw_count;
- ENTRY;
-
if (!can_populate_pages(env, io, inode))
return 0;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
LASSERT(slice->cpl_obj == obj);
- ENTRY;
-
if (sbi->ll_ra_info.ra_max_pages_per_file &&
sbi->ll_ra_info.ra_max_pages)
ras_update(sbi, inode, ras, page->cp_index,
int result;
- ENTRY;
-
LINVRNT(cl_page_is_vmlocked(env, pg));
LASSERT(vmpage->mapping->host == ccc_object_inode(obj));
int tallyop;
loff_t size;
- ENTRY;
-
LINVRNT(cl_page_is_vmlocked(env, pg));
LASSERT(vmpage->mapping->host == inode);
int result;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- ENTRY;
CL_IO_SLICE_CLEAN(cio, cui_cl);
cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
{
struct ccc_object *cob = cl2ccc(slice->cls_obj);
- ENTRY;
RETURN(atomic_read(&cob->cob_mmap_cnt) > 0 ? ~0UL >> 2 : 0);
}
const struct cl_page_slice *slice,
struct cl_io *unused)
{
- ENTRY;
/* Skip the page already marked as PG_uptodate. */
RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}
struct page *vmpage = cp->cpg_page;
struct cl_page *page = cl_page_top(slice->cpl_page);
struct inode *inode = ccc_object_inode(page->cp_obj);
- ENTRY;
LASSERT(PageLocked(vmpage));
CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
struct ccc_page *cp = cl2ccc_page(slice);
struct cl_page *pg = slice->cpl_page;
struct page *vmpage = cp->cpg_page;
- ENTRY;
LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
LASSERT(PageWriteback(vmpage));
ext_acl_xattr_header *acl = NULL;
#endif
const char *pv = value;
- ENTRY;
xattr_type = get_xattr_type(name);
rc = xattr_type_filter(sbi, xattr_type);
void *xdata;
struct obd_capa *oc;
struct rmtacl_ctl_entry *rce = NULL;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
inode->i_ino, inode->i_generation, inode);
mdsno_t *mds)
{
int rc;
- ENTRY;
-
/* FIXME: Currently ZFS still uses local seq for ROOT unfortunately, and
* this fid_is_local check should be removed once LU-2240 is fixed */
struct mdt_body *body;
int pmode;
int rc = 0;
- ENTRY;
body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
if (body == NULL)
struct lmv_tgt_desc *tgt;
struct mdt_body *body;
int rc;
- ENTRY;
tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
if (IS_ERR(tgt))
struct lmv_tgt_desc *tgt = NULL;
struct mdt_body *body;
int rc = 0;
- ENTRY;
tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
if (IS_ERR(tgt))
{
struct obd_device *obd = exp->exp_obd;
int rc;
- ENTRY;
LASSERT(it != NULL);
LASSERT(fid_is_sane(&op_data->op_fid1));
struct obd_device *obd;
int i;
int rc = 0;
- ENTRY;
CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n",
lmv, uuid->uuid, activate);
struct lmv_obd *lmv = &obd->u.lmv;
struct obd_uuid *uuid;
int rc = 0;
- ENTRY;
if (strcmp(watched->obd_type->typ_name, LUSTRE_MDC_NAME)) {
CERROR("unexpected notification of %s %s!\n",
struct lmv_obd *lmv = &obd->u.lmv;
struct lustre_handle conn = { 0 };
int rc = 0;
- ENTRY;
/*
* We don't want to actually do the underlying connections more than
int i;
int rc = 0;
int change = 0;
- ENTRY;
if (lmv->max_easize < easize) {
lmv->max_easize = easize;
struct obd_export *mdc_exp;
struct lu_fld_target target;
int rc;
- ENTRY;
mdc_obd = class_find_client_obd(&tgt->ltd_uuid, LUSTRE_MDC_NAME,
&obd->obd_uuid);
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc = 0;
- ENTRY;
CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index);
int i;
int rc;
int easize;
- ENTRY;
if (lmv->connected)
RETURN(0);
struct lmv_obd *lmv = &obd->u.lmv;
struct obd_device *mdc_obd;
int rc;
- ENTRY;
LASSERT(tgt != NULL);
LASSERT(obd != NULL);
struct lmv_obd *lmv = &obd->u.lmv;
int rc;
int i;
- ENTRY;
if (!lmv->tgts)
goto out_local;
struct lustre_kernelcomm *lk, void *uarg)
{
int i, rc = 0;
- ENTRY;
/* unregister request (call from llapi_hsm_copytool_fini) */
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
int i, j, err;
int rc = 0;
bool any_set = false;
- ENTRY;
/* All or nothing: try to register to all MDS.
* In case of failure, unregister from previous MDS,
int rc = 0;
int set = 0;
int count = lmv->desc.ld_tgt_count;
- ENTRY;
if (count == 0)
RETURN(-ENOTTY);
mdsno_t *mds)
{
struct lmv_obd *lmv = &obd->u.lmv;
- ENTRY;
LASSERT(mds != NULL);
{
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
tgt = lmv_get_target(lmv, mds);
if (IS_ERR(tgt))
struct lmv_obd *lmv = &obd->u.lmv;
mdsno_t mds = 0;
int rc;
- ENTRY;
LASSERT(op_data != NULL);
LASSERT(fid != NULL);
struct lprocfs_static_vars lvars;
struct lmv_desc *desc;
int rc;
- ENTRY;
if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
CERROR("LMV setup requires a descriptor\n");
static int lmv_cleanup(struct obd_device *obd)
{
struct lmv_obd *lmv = &obd->u.lmv;
- ENTRY;
fld_client_fini(&lmv->lmv_fld);
if (lmv->tgts != NULL) {
int gen;
__u32 index;
int rc;
- ENTRY;
switch (lcfg->lcfg_command) {
case LCFG_ADD_MDC:
struct obd_statfs *temp;
int rc = 0;
int i;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct lmv_obd *lmv = &obd->u.lmv;
int i;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct lmv_obd *lmv = &obd->u.lmv;
int i;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct mdt_body *body;
int rc = 0;
int pmode;
- ENTRY;
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
LASSERT(body != NULL);
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct lmv_tgt_desc *tgt;
struct mdt_body *body;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct lmv_tgt_desc *tgt;
ldlm_policy_data_t policy = {{0}};
int rc = 0;
- ENTRY;
if (!fid_is_sane(fid))
RETURN(0);
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct lmv_tgt_desc *src_tgt;
struct lmv_tgt_desc *tgt_tgt;
int rc;
- ENTRY;
LASSERT(oldlen != 0);
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc = 0;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
int ncfspgs; /* pages read in PAGE_CACHE_SIZE */
int nlupgs; /* pages read in LU_PAGE_SIZE */
struct lmv_tgt_desc *tgt;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct lmv_tgt_desc *tgt = NULL;
struct mdt_body *body;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct obd_device *obd;
struct lmv_obd *lmv;
int rc = 0;
- ENTRY;
obd = class_exp2obd(exp);
if (obd == NULL) {
struct obd_device *obd;
struct lmv_obd *lmv;
int rc = 0;
- ENTRY;
obd = class_exp2obd(exp);
if (obd == NULL) {
struct lmv_stripe_md *lsmp;
int mea_size;
int i;
- ENTRY;
mea_size = lmv_get_easize(lmv);
if (!lmmp)
int mea_size;
int i;
__u32 magic;
- ENTRY;
mea_size = lmv_get_easize(lmv);
if (lsmp == NULL)
int rc = 0;
int err;
int i;
- ENTRY;
LASSERT(fid != NULL);
{
struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
int rc;
- ENTRY;
rc = md_set_lock_data(lmv->tgts[0]->ltd_exp, lockh, data, bits);
RETURN(rc);
struct lmv_obd *lmv = &obd->u.lmv;
ldlm_mode_t rc;
int i;
- ENTRY;
CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid));
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
- ENTRY;
if (md->mea)
obd_free_memmd(exp, (void *)&md->mea);
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
- ENTRY;
tgt = lmv_find_target(lmv, &och->och_fid);
if (IS_ERR(tgt))
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
- ENTRY;
tgt = lmv_find_target(lmv, &och->och_fid);
if (IS_ERR(tgt))
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt = NULL;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
struct lmv_tgt_desc *tgt = lmv->tgts[0];
int rc = 0, i;
__u64 curspace, curinodes;
- ENTRY;
if (!lmv->desc.ld_tgt_count || !tgt->ltd_active) {
CERROR("master lmv inactive\n");
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int i, rc = 0;
- ENTRY;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
int err;
{
struct lov_req *lr;
- ENTRY;
lr = cl2lov_req(slice);
OBD_SLAB_FREE_PTR(lr, lov_req_kmem);
EXIT;
struct lov_req *lr;
int result;
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lr, lov_req_kmem, __GFP_IO);
if (lr != NULL) {
cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
__u32 index)
{
struct lov_device *ld = lu2lov_dev(dev);
- ENTRY;
if (ld->ld_target[index] != NULL) {
cl_stack_fini(env, lovsub2cl_dev(ld->ld_target[index]));
__u32 tgt_size;
__u32 sub_size;
- ENTRY;
result = 0;
tgt_size = dev->ld_lov->lov_tgt_size;
sub_size = dev->ld_target_nr;
struct lovsub_device *lsd;
struct cl_device *cl;
int rc;
- ENTRY;
obd_getref(obd);
static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio,
struct lov_io_sub *sub)
{
- ENTRY;
if (sub->sub_io != NULL) {
if (sub->sub_io_initialized) {
lov_sub_enter(sub);
LASSERT(sub->sub_io == NULL);
LASSERT(sub->sub_env == NULL);
LASSERT(sub->sub_stripe < lio->lis_stripe_count);
- ENTRY;
result = 0;
sub->sub_io_initialized = 0;
struct lov_io_sub *sub = &lio->lis_subs[stripe];
LASSERT(stripe < lio->lis_stripe_count);
- ENTRY;
if (!sub->sub_io_initialized) {
sub->sub_stripe = stripe;
{
struct lovsub_object *subobj;
- ENTRY;
subobj = lu2lovsub(
lu_object_locate(page->cp_child->cp_obj->co_lu.lo_header,
&lovsub_device_type));
LASSERT(cl2lov(slice->cpl_obj) == lio->lis_object);
LASSERT(lsm != NULL);
LASSERT(lio->lis_nr_subios > 0);
- ENTRY;
stripe = lov_page_stripe(page);
RETURN(lov_sub_get(env, lio, stripe));
int result;
LASSERT(lio->lis_object != NULL);
- ENTRY;
/*
* Needs to be optimized: we can't afford to allocate a piece of memory
static void lov_io_slice_init(struct lov_io *lio,
struct lov_object *obj, struct cl_io *io)
{
- ENTRY;
-
io->ci_result = 0;
lio->lis_object = obj;
struct lov_object *lov = cl2lov(ios->cis_obj);
int i;
- ENTRY;
if (lio->lis_subs != NULL) {
for (i = 0; i < lio->lis_nr_subios; i++)
lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
int stripe;
int rc = 0;
- ENTRY;
endpos = lov_offset_mod(lio->lis_endpos, -1);
for (stripe = 0; stripe < lio->lis_stripe_count; stripe++) {
if (!lov_stripe_intersects(lsm, stripe, lio->lis_pos,
unsigned long ssize = lsm->lsm_stripe_size;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- ENTRY;
/* fast path for common case. */
if (lio->lis_nr_subios != 1 && !cl_io_is_append(io)) {
struct lov_io_sub *sub;
int rc = 0;
- ENTRY;
list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
lov_sub_enter(sub);
rc = iofunc(sub->sub_env, sub->sub_io);
static int lov_io_lock(const struct lu_env *env, const struct cl_io_slice *ios)
{
- ENTRY;
RETURN(lov_io_call(env, cl2lov_io(env, ios), cl_io_lock));
}
static int lov_io_start(const struct lu_env *env, const struct cl_io_slice *ios)
{
- ENTRY;
RETURN(lov_io_call(env, cl2lov_io(env, ios), cl_io_start));
}
static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io)
{
- ENTRY;
/*
* It's possible that lov_io_start() wasn't called against this
* sub-io, either because previous sub-io failed, or upper layer
struct lov_io *lio = cl2lov_io(env, ios);
int rc;
- ENTRY;
rc = lov_io_call(env, lio, lov_io_iter_fini_wrapper);
LASSERT(rc == 0);
while (!list_empty(&lio->lis_active))
{
int rc;
- ENTRY;
rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_unlock_wrapper);
LASSERT(rc == 0);
EXIT;
int rc = 0;
int alloc =
!(current->flags & PF_MEMALLOC);
- ENTRY;
+
if (lio->lis_active_subios == 1) {
int idx = lio->lis_single_subio_index;
struct lov_io_sub *sub;
struct lov_io_sub *sub;
int result;
- ENTRY;
sub = lov_page_subio(env, lio, slice);
if (!IS_ERR(sub)) {
result = cl_io_prepare_write(sub->sub_env, sub->sub_io,
struct lov_io_sub *sub;
int result;
- ENTRY;
sub = lov_page_subio(env, lio, slice);
if (!IS_ERR(sub)) {
result = cl_io_commit_write(sub->sub_env, sub->sub_io,
struct lov_io *lio;
struct lov_io_sub *sub;
- ENTRY;
fio = &ios->cis_io->u.ci_fault;
lio = cl2lov_io(env, ios);
sub = lov_sub_get(env, lio, lov_page_stripe(fio->ft_page));
struct lov_io *lio = cl2lov_io(env, ios);
struct lov_io_sub *sub;
unsigned int *written = &ios->cis_io->u.ci_fsync.fi_nr_written;
- ENTRY;
*written = 0;
list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
const struct cl_io_slice *ios)
{
struct lov_object *lov = cl2lov(ios->cis_obj);
- ENTRY;
if (atomic_dec_and_test(&lov->lo_active_ios))
wake_up_all(&lov->lo_waitq);
struct lov_io *lio = lov_env_io(env);
struct lov_object *lov = cl2lov(obj);
- ENTRY;
INIT_LIST_HEAD(&lio->lis_active);
lov_io_slice_init(lio, lov, io);
if (io->ci_result == 0) {
struct lov_object *lov = cl2lov(obj);
struct lov_io *lio = lov_env_io(env);
int result;
- ENTRY;
lio->lis_object = lov;
switch (io->ci_type) {
struct lov_object *lov = cl2lov(obj);
struct lov_io *lio = lov_env_io(env);
int result;
- ENTRY;
LASSERT(lov->lo_lsm != NULL);
lio->lis_object = lov;
LASSERT(cl_lock_is_mutexed(parent));
LASSERT(cl_lock_is_mutexed(sublock));
- ENTRY;
lsl = cl2sub_lock(sublock);
/*
struct lov_lock_link *link;
LASSERT(idx < lck->lls_nr);
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, __GFP_IO);
if (link != NULL) {
struct cl_lock_closure *closure,
struct lov_sublock_env *subenv)
{
- ENTRY;
lov_sublock_env_put(subenv);
lsl->lss_active = NULL;
cl_lock_disclosure(env, closure);
struct lovsub_lock *sublock;
struct cl_lock *child;
int result = 0;
- ENTRY;
LASSERT(list_empty(&closure->clc_list));
int result_rank;
int rc_rank;
- ENTRY;
-
LASSERTF(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT,
"result = %d", result);
LASSERTF(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT,
struct lov_layout_raid0 *r0 = lov_r0(loo);
struct cl_lock *parent = lck->lls_cl.cls_lock;
- ENTRY;
-
lck->lls_orig = parent->cll_descr;
file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
file_end = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;
struct cl_lock *parent = lck->lls_cl.cls_lock;
LASSERT(cl_lock_is_mutexed(parent));
- ENTRY;
if (lck->lls_sub[i].sub_flags & LSF_HELD) {
struct cl_lock *sublock;
struct cl_lock *parent = lck->lls_cl.cls_lock;
LASSERT(cl_lock_is_mutexed(parent));
- ENTRY;
if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
struct cl_lock *sublock;
struct lov_lock *lck;
int i;
- ENTRY;
lck = cl2lov_lock(slice);
LASSERT(lck->lls_nr_filled == 0);
if (lck->lls_sub != NULL) {
{
struct cl_lock *lock = lck->lls_cl.cls_lock;
int result;
- ENTRY;
LASSERT(cl_lock_is_mutexed(lock));
struct cl_io *io, __u32 enqflags, int last)
{
int result;
- ENTRY;
/* first, try to enqueue a sub-lock ... */
result = cl_enqueue_try(env, sublock, io, enqflags);
int result;
enum cl_lock_state minstate;
- ENTRY;
-
for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
int rc;
struct lovsub_lock *sub;
int i;
int result;
- ENTRY;
-
for (result = 0, i = 0; i < lck->lls_nr; ++i) {
int rc;
struct lovsub_lock *sub;
int i;
int result;
- ENTRY;
-
for (result = 0, i = 0; i < lck->lls_nr; ++i) {
int rc;
struct lovsub_lock *sub;
int result;
int i;
- ENTRY;
-
again:
for (result = 0, minstate = CLS_FREEING, i = 0, reenqueued = 0;
i < lck->lls_nr; ++i) {
int i;
LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
- ENTRY;
for (result = 0, i = 0; i < lck->lls_nr; ++i) {
int rc;
LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
LASSERT(lov->lls_nr > 0);
- ENTRY;
-
/* for top lock, it's necessary to match enq flags, otherwise it will
* run into problems if a sublock is missing and is re-enqueued. */
if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags)
LASSERT(cl_lock_is_mutexed(parent));
LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
- ENTRY;
list_del_init(&link->lll_list);
LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
struct lov_lock_link *scan;
LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
- ENTRY;
list_for_each_entry(scan, &sub->lss_parents, lll_list) {
if (scan->lll_super == lck)
int i;
LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
- ENTRY;
for (i = 0; i < lck->lls_nr; ++i) {
struct lov_lock_sub *lls = &lck->lls_sub[i];
struct lov_lock *lck;
int result;
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, __GFP_IO);
if (lck != NULL) {
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
struct lov_lock *lck;
int result = -ENOMEM;
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, __GFP_IO);
if (lck != NULL) {
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
struct obd_device *obd = ctxt->loc_obd;
struct lov_obd *lov = &obd->u.lov;
int i, rc = 0, cookies = 0;
- ENTRY;
LASSERTF(logcookies && numcookies >= lsm->lsm_stripe_count,
"logcookies %p, numcookies %d lsm->lsm_stripe_count %d \n",
struct obd_device *obd = ctxt->loc_obd;
struct lov_obd *lov = &obd->u.lov;
int i, rc = 0, err = 0;
- ENTRY;
obd_getref(obd);
for (i = 0; i < lov->desc.ld_tgt_count; i++) {
struct lov_obd *lov;
struct obd_device *obd = ctxt->loc_obd;
int rc = 0, i;
- ENTRY;
LASSERT(lsm != NULL);
LASSERT(count == lsm->lsm_stripe_count);
struct lov_obd *lov = &obd->u.lov;
struct obd_device *child;
int i, rc = 0;
- ENTRY;
LASSERT(olg == &obd->obd_olg);
rc = llog_setup(NULL, obd, olg, LLOG_MDS_OST_ORIG_CTXT, disk_obd,
{
struct llog_ctxt *ctxt;
- ENTRY;
-
/* clean up our llogs only if the ctxts have been set up
* (client lov doesn't set up, mds lov does). */
ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
int rc;
__u64 kms;
- ENTRY;
lov_stripe_lock(lsm);
rc = lov_merge_lvb_kms(lsm, lvb, &kms);
lov_stripe_unlock(lsm);
struct lov_oinfo *loi;
int stripe = 0;
__u64 kms;
- ENTRY;
LASSERT(spin_is_locked(&lsm->lsm_lock));
LASSERT(lsm->lsm_lock_owner == current_pid());
struct obd_import *imp;
proc_dir_entry_t *lov_proc_dir;
int rc;
- ENTRY;
if (!lov->lov_tgts[index])
RETURN(-EINVAL);
struct lov_tgt_desc *tgt;
struct lustre_handle conn;
int i, rc;
- ENTRY;
CDEBUG(D_CONFIG, "connect #%d\n", lov->lov_connects);
struct lov_obd *lov = &obd->u.lov;
struct obd_device *osc_obd;
int rc;
- ENTRY;
osc_obd = class_exp2obd(tgt->ltd_exp);
CDEBUG(D_CONFIG, "%s: disconnecting target %s\n",
struct obd_device *obd = class_exp2obd(exp);
struct lov_obd *lov = &obd->u.lov;
int i, rc;
- ENTRY;
if (!lov->lov_tgts)
goto out;
struct lov_obd *lov = &obd->u.lov;
struct lov_tgt_desc *tgt;
int index, activate, active;
- ENTRY;
CDEBUG(D_INFO, "Searching in lov %p for uuid %s event(%d)\n",
lov, uuid->uuid, ev);
{
int rc = 0;
struct lov_obd *lov = &obd->u.lov;
- ENTRY;
down_read(&lov->lov_notify_lock);
if (!lov->lov_connects) {
struct lov_tgt_desc *tgt;
struct obd_device *tgt_obd;
int rc;
- ENTRY;
CDEBUG(D_CONFIG, "uuid:%s idx:%d gen:%d active:%d\n",
uuidp->uuid, index, gen, active);
struct lov_obd *lov = &obd->u.lov;
int count = lov->desc.ld_tgt_count;
int rc = 0;
- ENTRY;
if (index >= count) {
CERROR("LOV target index %d >= number of LOV OBDs %d.\n",
struct lov_desc *desc;
struct lov_obd *lov = &obd->u.lov;
int rc;
- ENTRY;
if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
CERROR("LOV setup requires a descriptor\n");
int rc = 0;
struct lov_obd *lov = &obd->u.lov;
- ENTRY;
-
switch (stage) {
case OBD_CLEANUP_EARLY: {
int i;
struct lov_obd *lov = &obd->u.lov;
struct list_head *pos, *tmp;
struct pool_desc *pool;
- ENTRY;
list_for_each_safe(pos, tmp, &lov->lov_pool_list) {
pool = list_entry(pos, struct pool_desc, pool_list);
struct obd_uuid obd_uuid;
int cmd;
int rc = 0;
- ENTRY;
switch(cmd = lcfg->lcfg_command) {
case LCFG_LOV_ADD_OBD:
struct lov_obd *lov = &exp->exp_obd->u.lov;
unsigned ost_idx;
int rc, i;
- ENTRY;
LASSERT(src_oa->o_valid & OBD_MD_FLFLAGS &&
src_oa->o_flags & OBD_FL_RECREATE_OBJS);
{
struct lov_obd *lov;
int rc = 0;
- ENTRY;
LASSERT(ea != NULL);
if (exp == NULL)
struct list_head *pos;
struct lov_obd *lov;
int rc = 0, err = 0;
- ENTRY;
ASSERT_LSM_MAGIC(lsm);
struct list_head *pos;
struct lov_obd *lov;
int err = 0, rc = 0;
- ENTRY;
LASSERT(oinfo);
ASSERT_LSM_MAGIC(oinfo->oi_md);
{
struct lov_request_set *lovset = (struct lov_request_set *)data;
int err;
- ENTRY;
/* don't do attribute merge if this async op failed */
if (rc)
struct list_head *pos;
struct lov_request *req;
int rc = 0, err;
- ENTRY;
LASSERT(oinfo);
ASSERT_LSM_MAGIC(oinfo->oi_md);
struct list_head *pos;
struct lov_request *req;
int err = 0, rc = 0;
- ENTRY;
LASSERT(oinfo);
ASSERT_LSM_MAGIC(oinfo->oi_md);
{
struct lov_request_set *lovset = (struct lov_request_set *)data;
int err;
- ENTRY;
if (rc)
atomic_set(&lovset->set_completes, 0);
struct list_head *pos;
struct lov_obd *lov;
int rc = 0;
- ENTRY;
LASSERT(oinfo);
ASSERT_LSM_MAGIC(oinfo->oi_md);
{
struct lov_request_set *lovset = (struct lov_request_set *)data;
int err;
- ENTRY;
if (rc)
atomic_set(&lovset->set_completes, 0);
struct list_head *pos;
struct lov_request *req;
int rc = 0;
- ENTRY;
LASSERT(oinfo);
ASSERT_LSM_MAGIC(oinfo->oi_md);
{
struct lov_request_set *lovset = data;
int err;
- ENTRY;
if (rc)
atomic_set(&lovset->set_completes, 0);
struct list_head *pos;
struct lov_request *req;
int rc = 0;
- ENTRY;
ASSERT_LSM_MAGIC(oinfo->oi_md);
LASSERT(rqset != NULL);
struct list_head *pos;
struct lov_obd *lov = &exp->exp_obd->u.lov;
int err, rc = 0;
- ENTRY;
ASSERT_LSM_MAGIC(oinfo->oi_md);
void *data, int rc)
{
struct lov_request_set *lovset = (struct lov_request_set *)data;
- ENTRY;
+
rc = lov_fini_enqueue_set(lovset, lovset->set_ei->ei_mode, rc, rqset);
RETURN(rc);
}
struct list_head *pos;
struct lov_obd *lov;
ldlm_error_t rc;
- ENTRY;
LASSERT(oinfo);
ASSERT_LSM_MAGIC(oinfo->oi_md);
{
struct lov_obd *lov;
int rc = 0, i;
- ENTRY;
ASSERT_LSM_MAGIC(lsm);
{
struct lov_obd *lov;
int rc = 0, i;
- ENTRY;
ASSERT_LSM_MAGIC(lsm);
struct lov_obd *lov;
struct lustre_handle *lov_lockhp;
int err = 0, rc = 0;
- ENTRY;
ASSERT_LSM_MAGIC(lsm);
{
struct lov_obd *lov;
int rc = 0, i;
- ENTRY;
if (!exp || !exp->exp_obd)
RETURN(-ENODEV);
{
struct lov_request_set *lovset = (struct lov_request_set *)data;
int err;
- ENTRY;
if (rc)
atomic_set(&lovset->set_completes, 0);
struct list_head *pos;
struct lov_obd *lov;
int rc = 0;
- ENTRY;
LASSERT(oinfo != NULL);
LASSERT(oinfo->oi_osfs != NULL);
struct ptlrpc_request_set *set = NULL;
struct obd_info oinfo = { { { 0 } } };
int rc = 0;
- ENTRY;
-
/* for obdclass we forbid using obd_statfs_rqset, but prefer using async
* statfs requests */
struct lov_obd *lov = &obddev->u.lov;
int i = 0, rc = 0, count = lov->desc.ld_tgt_count;
struct obd_uuid *uuidp;
- ENTRY;
switch (cmd) {
case IOC_OBD_STATFS: {
struct obd_device *obddev = class_exp2obd(exp);
struct lov_obd *lov = &obddev->u.lov;
int i, rc;
- ENTRY;
if (!vallen || !val)
RETURN(-EFAULT);
unsigned incr, check_uuid,
do_inactive, no_set;
unsigned next_id = 0, mds_con = 0, capa = 0;
- ENTRY;
incr = check_uuid = do_inactive = no_set = 0;
if (set == NULL) {
__u64 curspace = 0;
__u64 bhardlimit = 0;
int i, rc = 0;
- ENTRY;
if (oqctl->qc_cmd != LUSTRE_Q_QUOTAON &&
oqctl->qc_cmd != LUSTRE_Q_QUOTAOFF &&
{
struct lov_obd *lov = &obd->u.lov;
int i, rc = 0;
- ENTRY;
obd_getref(obd);
{
struct lprocfs_static_vars lvars = { 0 };
int rc;
- ENTRY;
/* print an address of _any_ initialized kernel symbol from this
* module, to allow debugging with gdb that doesn't support data
{
struct lu_object *o;
- ENTRY;
o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
RETURN(lu2cl(o));
struct lu_fid *ofid = <i->lti_fid;
struct lov_layout_raid0 *r0 = &state->raid0;
- ENTRY;
-
if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
dump_lsm(D_ERROR, lsm);
LASSERTF(0, "magic mismatch, expected %d/%d, actual %d.\n",
struct lov_stripe_md *lsm = lov->lo_lsm;
int i;
- ENTRY;
-
dump_lsm(D_INODE, lsm);
lov_layout_wait(env, lov);
union lov_layout_state *state)
{
struct lov_layout_raid0 *r0 = &state->raid0;
- ENTRY;
if (r0->lo_sub != NULL) {
OBD_FREE_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
union lov_layout_state *state)
{
- ENTRY;
dump_lsm(D_INODE, lov->lo_lsm);
lov_free_memmd(&lov->lo_lsm);
EXIT;
struct cl_attr *lov_attr = &r0->lo_attr;
int result = 0;
- ENTRY;
-
/* this is called w/o holding type guard mutex, so it must be inside
* an ongoing IO, otherwise lsm may be replaced.
* LU-2117: it turns out there exists one exception. For mmaped files,
static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
{
struct l_wait_info lwi = { 0 };
- ENTRY;
while (atomic_read(&lov->lo_active_ios) > 0) {
CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
void *cookie;
struct lu_env *env;
int refcheck;
- ENTRY;
LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));
const struct lov_layout_operations *ops;
int result;
- ENTRY;
init_rwsem(&lov->lo_type_guard);
atomic_set(&lov->lo_active_ios, 0);
init_waitqueue_head(&lov->lo_waitq);
struct lov_stripe_md *lsm = NULL;
struct lov_object *lov = cl2lov(obj);
int result = 0;
- ENTRY;
lov_conf_lock(lov);
if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
{
struct lov_object *lov = lu2lov(obj);
- ENTRY;
LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
EXIT;
}
{
struct lov_object *lov = lu2lov(obj);
- ENTRY;
LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
lu_object_fini(obj);
OBD_SLAB_FREE_PTR(lov, lov_object_kmem);
struct lov_object *lov;
struct lu_object *obj;
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, __GFP_IO);
if (lov != NULL) {
obj = lov2lu(lov);
{
struct lu_object *luobj;
int rc = 0;
- ENTRY;
luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
&lov_device_type);
obd_off swidth;
obd_size lov_size;
int magic = lsm->lsm_magic;
- ENTRY;
if (ost_size == 0)
RETURN(0);
int lmm_size, lmm_magic;
int i;
int cplen = 0;
- ENTRY;
if (lsm) {
lmm_magic = lsm->lsm_magic;
int pattern, int magic)
{
int i, lsm_size;
- ENTRY;
CDEBUG(D_INFO, "alloc lsm, stripe_count %d\n", stripe_count);
__u16 stripe_count;
__u32 magic;
__u32 pattern;
- ENTRY;
/* If passed an MDS struct use values from there, otherwise defaults */
if (lmm) {
__u16 stripe_count;
int rc;
int cplen = 0;
- ENTRY;
rc = lov_lum_swab_if_needed(lumv3, &lmm_magic, lump);
if (rc)
obd_id last_id = 0;
struct lov_user_ost_data_v1 *lmm_objects;
- ENTRY;
-
if (lump->lmm_magic == LOV_USER_MAGIC_V3)
lmm_objects = ((struct lov_user_md_v3 *)lump)->lmm_objects;
else
int rc, lmm_size;
int lum_size;
mm_segment_t seg;
- ENTRY;
if (!lsm)
RETURN(-ENODATA);
struct cl_page *sub = lov_sub_page(slice);
LINVRNT(lov_page_invariant(slice));
- ENTRY;
if (sub != NULL) {
LASSERT(sub->cp_state == CPS_FREEING);
LINVRNT(lov_page_invariant(slice));
LINVRNT(!cl2lov_page(slice)->lps_invalid);
- ENTRY;
sub = lov_page_subio(env, lio, slice);
if (!IS_ERR(sub)) {
LINVRNT(lov_page_invariant(slice));
LINVRNT(!cl2lov_page(slice)->lps_invalid);
- ENTRY;
sub = lov_page_subio(env, lio, slice);
if (!IS_ERR(sub)) {
obd_off suboff;
int stripe;
int rc;
- ENTRY;
offset = cl_offset(obj, page->cp_index);
stripe = lov_stripe_number(loo->lo_lsm, offset);
{
struct lov_page *lpg = cl_object_page_slice(obj, page);
void *addr;
- ENTRY;
cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops);
addr = kmap(vmpage);
#define LOV_POOL_INIT_COUNT 2
int lov_ost_pool_init(struct ost_pool *op, unsigned int count)
{
- ENTRY;
-
if (count == 0)
count = LOV_POOL_INIT_COUNT;
op->op_array = NULL;
int lov_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count)
{
int rc = 0, i;
- ENTRY;
down_write(&op->op_rw_sem);
int lov_ost_pool_remove(struct ost_pool *op, __u32 idx)
{
int i;
- ENTRY;
down_write(&op->op_rw_sem);
int lov_ost_pool_free(struct ost_pool *op)
{
- ENTRY;
-
if (op->op_size == 0)
RETURN(0);
struct lov_obd *lov;
struct pool_desc *new_pool;
int rc;
- ENTRY;
lov = &(obd->u.lov);
{
struct lov_obd *lov;
struct pool_desc *pool;
- ENTRY;
lov = &(obd->u.lov);
struct pool_desc *pool;
unsigned int lov_idx;
int rc;
- ENTRY;
lov = &(obd->u.lov);
struct pool_desc *pool;
unsigned int lov_idx;
int rc = 0;
- ENTRY;
lov = &(obd->u.lov);
int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool)
{
int i, rc;
- ENTRY;
/* caller may not have a ref on pool if it got the pool
* without calling lov_find_pool() (e.g. go through the lov pool
void lov_finish_set(struct lov_request_set *set)
{
struct list_head *pos, *n;
- ENTRY;
LASSERT(set);
list_for_each_safe(pos, n, &set->set_list) {
struct lov_request *req, int rc)
{
struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
- ENTRY;
lov_update_set(set, req, rc);
struct lustre_handle *lov_lockhp;
struct obd_info *oi = set->set_oi;
struct lov_oinfo *loi;
- ENTRY;
LASSERT(oi != NULL);
struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
int completes = atomic_read(&set->set_completes);
int rc = 0;
- ENTRY;
/* enqueue/match success, just return */
if (completes && completes == atomic_read(&set->set_success))
struct ptlrpc_request_set *rqset)
{
int ret = 0;
- ENTRY;
if (set == NULL)
RETURN(0);
struct lov_obd *lov = &exp->exp_obd->u.lov;
struct lov_request_set *set;
int i, rc = 0;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
int lov_fini_match_set(struct lov_request_set *set, __u32 mode, int flags)
{
int rc = 0;
- ENTRY;
if (set == NULL)
RETURN(0);
struct lov_obd *lov = &exp->exp_obd->u.lov;
struct lov_request_set *set;
int i, rc = 0;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
int lov_fini_cancel_set(struct lov_request_set *set)
{
int rc = 0;
- ENTRY;
if (set == NULL)
RETURN(0);
{
struct lov_request_set *set;
int i, rc = 0;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
struct lov_request *req;
struct obdo *tmp_oa;
int rc = 0, attrset = 0;
- ENTRY;
LASSERT(set->set_oi != NULL);
struct lov_oinfo *loi = NULL;
struct list_head *pos;
struct lov_request *req;
- ENTRY;
list_for_each (pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
int lov_fini_brw_set(struct lov_request_set *set)
{
int rc = 0;
- ENTRY;
if (set == NULL)
RETURN(0);
struct lov_request_set *set;
struct lov_obd *lov = &exp->exp_obd->u.lov;
int rc = 0, i, shift;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
int lov_fini_getattr_set(struct lov_request_set *set)
{
int rc = 0;
- ENTRY;
if (set == NULL)
RETURN(0);
struct lov_request_set *set;
struct lov_obd *lov = &exp->exp_obd->u.lov;
int rc = 0, i;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
int lov_fini_destroy_set(struct lov_request_set *set)
{
- ENTRY;
-
if (set == NULL)
RETURN(0);
LASSERT(set->set_exp);
struct lov_request_set *set;
struct lov_obd *lov = &exp->exp_obd->u.lov;
int rc = 0, i;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
int lov_fini_setattr_set(struct lov_request_set *set)
{
int rc = 0;
- ENTRY;
if (set == NULL)
RETURN(0);
{
struct lov_obd *lov = &req->rq_rqset->set_exp->exp_obd->u.lov;
struct lov_stripe_md *lsm = req->rq_rqset->set_oi->oi_md;
- ENTRY;
lov_update_set(set, req, rc);
struct lov_request_set *set;
struct lov_obd *lov = &exp->exp_obd->u.lov;
int rc = 0, i;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
int lov_fini_punch_set(struct lov_request_set *set)
{
int rc = 0;
- ENTRY;
if (set == NULL)
RETURN(0);
{
struct lov_obd *lov = &req->rq_rqset->set_exp->exp_obd->u.lov;
struct lov_stripe_md *lsm = req->rq_rqset->set_oi->oi_md;
- ENTRY;
lov_update_set(set, req, rc);
struct lov_request_set *set;
struct lov_obd *lov = &exp->exp_obd->u.lov;
int rc = 0, i;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
int lov_fini_sync_set(struct lov_request_set *set)
{
int rc = 0;
- ENTRY;
if (set == NULL)
RETURN(0);
struct lov_request_set *set;
struct lov_obd *lov = &exp->exp_obd->u.lov;
int rc = 0, i;
- ENTRY;
OBD_ALLOC_PTR(set);
if (set == NULL)
int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs,int success)
{
- ENTRY;
-
if (success) {
__u32 expected_stripes = lov_get_stripecnt(&obd->u.lov,
LOV_MAGIC, 0);
int lov_fini_statfs_set(struct lov_request_set *set)
{
int rc = 0;
- ENTRY;
if (set == NULL)
RETURN(0);
struct lov_tgt_desc *tgt;
struct obd_device *lovobd, *tgtobd;
int success;
- ENTRY;
lovreq = container_of(oinfo, struct lov_request, rq_oi);
set = lovreq->rq_rqset;
struct lov_request_set *set;
struct lov_obd *lov = &obd->u.lov;
int rc = 0, i;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
{
struct lovsub_req *lsr;
- ENTRY;
lsr = cl2lovsub_req(slice);
OBD_SLAB_FREE_PTR(lsr, lovsub_req_kmem);
EXIT;
{
struct lovsub_object *subobj;
- ENTRY;
subobj = cl2lovsub(obj);
/*
* There is no OBD_MD_* flag for obdo::o_stripe_idx, so set it
struct lu_device_type *ldt;
int rc;
- ENTRY;
next->ld_site = d->ld_site;
ldt = next->ld_type;
LASSERT(ldt != NULL);
struct lu_device *next;
struct lovsub_device *lsd;
- ENTRY;
lsd = lu2lovsub_dev(d);
next = cl2lu_dev(lsd->acid_next);
lsd->acid_super = NULL;
{
struct lovsub_lock *lsl;
- ENTRY;
lsl = cl2lovsub_lock(slice);
LASSERT(list_empty(&lsl->lss_parents));
OBD_SLAB_FREE_PTR(lsl, lovsub_lock_kmem);
{
struct cl_lock *parent;
- ENTRY;
parent = lov->lls_cl.cls_lock;
cl_lock_get(parent);
lu_ref_add(&parent->cll_reference, "lovsub-parent", current);
{
struct cl_lock *parent;
- ENTRY;
parent = lov->lls_cl.cls_lock;
cl_lock_mutex_put(env, lov->lls_cl.cls_lock);
lu_ref_del(&parent->cll_reference, "lovsub-parent", current);
struct lov_lock_link *scan;
LASSERT(cl_lock_is_mutexed(slice->cls_lock));
- ENTRY;
list_for_each_entry(scan, &sub->lss_parents, lll_list) {
struct lov_lock *lov = scan->lll_super;
struct lov_lock *lov;
unsigned long dumbbell;
- ENTRY;
-
LASSERT(cl_lock_is_mutexed(slice->cls_lock));
if (!list_empty(&lock->lss_parents)) {
pgoff_t start;
pgoff_t end;
- ENTRY;
start = in->cld_start;
end = in->cld_end;
struct lov_lock *lov;
int result = 0;
- ENTRY;
-
LASSERT(cl_lock_mode_match(d->cld_mode,
s->cls_lock->cll_descr.cld_mode));
list_for_each_entry(scan, &lock->lss_parents, lll_list) {
int result;
LASSERT(cl_lock_is_mutexed(slice->cls_lock));
- ENTRY;
sub = cl2lovsub_lock(slice);
result = 0;
{
struct cl_lock *parent;
int result;
- ENTRY;
parent = lov->lls_cl.cls_lock;
if (parent->cll_error)
LASSERT(cl_lock_is_mutexed(child));
- ENTRY;
/*
* Destruction of a sub-lock might take multiple iterations, because
* when the last sub-lock of a given top-lock is deleted, top-lock is
struct lovsub_lock *lsk;
int result;
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lsk, lovsub_lock_kmem, __GFP_IO);
if (lsk != NULL) {
INIT_LIST_HEAD(&lsk->lss_parents);
int result;
- ENTRY;
under = &dev->acid_next->cd_lu_dev;
below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
if (below != NULL) {
{
struct lovsub_object *los = lu2lovsub(obj);
struct lov_object *lov = los->lso_super;
- ENTRY;
/* We can't assume lov was assigned here, because of the shadow
* object handling in lu_object_find.
{
struct lov_object *lov = cl2lovsub(obj)->lso_super;
- ENTRY;
lov_r0(lov)->lo_attr_valid = 0;
RETURN(0);
}
{
struct lovsub_object *los = cl2lovsub(obj);
- ENTRY;
RETURN(cl_object_glimpse(env, &los->lso_super->lo_cl, lvb));
}
struct lovsub_object *los;
struct lu_object *obj;
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(los, lovsub_object_kmem, __GFP_IO);
if (los != NULL) {
struct cl_object_header *hdr;
struct cl_page *page, struct page *unused)
{
struct lovsub_page *lsb = cl_object_page_slice(obj, page);
- ENTRY;
cl_page_slice_add(page, &lsb->lsb_cl, obj, &lovsub_page_ops);
RETURN(0);
{
struct dentry *dchild_old, *dchild_new;
int err = 0;
- ENTRY;
ASSERT_KERNEL_CTXT("kernel doing rename outside kernel context\n");
CDEBUG(D_INODE, "renaming file %.*s to %.*s\n",
struct hsm_action_item *hai;
int len;
int fd, rc;
- ENTRY;
rc = lprocfs_write_helper(buffer, count, &fd);
if (rc)
static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
{
int rc;
- ENTRY;
+
client_obd_list_lock(&cli->cl_loi_list_lock);
rc = list_empty(&mcw->mcw_entry);
client_obd_list_unlock(&cli->cl_loi_list_lock);
{
struct ldlm_lock *lock;
struct inode *new_inode = data;
- ENTRY;
if(bits)
*bits = 0;
{
struct ldlm_res_id res_id;
ldlm_mode_t rc;
- ENTRY;
fid_build_reg_res_name(fid, &res_id);
rc = ldlm_lock_match(class_exp2obd(exp)->obd_namespace, flags,
struct obd_device *obd = class_exp2obd(exp);
int rc;
- ENTRY;
-
fid_build_reg_res_name(fid, &res_id);
rc = ldlm_cli_cancel_unused_resource(obd->obd_namespace, &res_id,
policy, mode, flags, opaque);
struct ldlm_res_id res_id;
struct ldlm_resource *res;
struct ldlm_namespace *ns = class_exp2obd(exp)->obd_namespace;
- ENTRY;
LASSERTF(ns != NULL, "no namespace passed\n");
{
struct ldlm_res_id res_id;
int rc = 0;
- ENTRY;
fid_build_reg_res_name((struct lu_fid*)fid, &res_id);
rc = ldlm_resource_iterate(class_exp2obd(exp)->obd_namespace, &res_id,
int count = 0;
int mode;
int rc;
- ENTRY;
it->it_create_mode = (it->it_create_mode & ~S_IFMT) | S_IFREG;
struct obd_device *obddev = class_exp2obd(exp);
struct ldlm_intent *lit;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_LDLM_INTENT_UNLINK);
OBD_MD_FLRMTPERM : OBD_MD_FLACL);
struct ldlm_intent *lit;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_LDLM_INTENT_GETATTR);
struct ldlm_intent *lit;
struct layout_intent *layout;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_LDLM_INTENT_LAYOUT);
{
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
if (req == NULL)
struct ldlm_lock *lock;
void *lvb_data = NULL;
int lvb_len = 0;
- ENTRY;
LASSERT(rc >= 0);
/* Similarly, if we're going to replay this request, we don't want to
int generation, resends = 0;
struct ldlm_reply *lockrep;
enum lvb_type lvb_type = 0;
- ENTRY;
LASSERTF(!it || einfo->ei_type == LDLM_IBITS, "lock type %d\n",
einfo->ei_type);
struct mdt_body *mdt_body;
struct ldlm_lock *lock;
int rc;
- ENTRY;
LASSERT(request != NULL);
LASSERT(request != LP_POISON);
struct lustre_handle lockh;
ldlm_policy_data_t policy;
ldlm_mode_t mode;
- ENTRY;
if (it->d.lustre.it_lock_handle) {
lockh.cookie = it->d.lustre.it_lock_handle;
{
struct lustre_handle lockh;
int rc = 0;
- ENTRY;
+
LASSERT(it);
CDEBUG(D_DLMTRACE, "(name: %.*s,"DFID") in obj "DFID
struct obd_device *obddev;
struct ldlm_reply *lockrep;
__u64 flags = LDLM_FL_HAS_INTENT;
- ENTRY;
it = &minfo->mi_it;
lockh = &minfo->mi_lockh;
};
int rc = 0;
__u64 flags = LDLM_FL_HAS_INTENT;
- ENTRY;
CDEBUG(D_DLMTRACE,"name: %.*s in inode "DFID", intent: %s flags %#o\n",
op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
struct ldlm_res_id res_id;
struct ldlm_resource *res;
int count;
- ENTRY;
/* Return, i.e. cancel nothing, only if ELC is supported (flag in
* export) but disabled through procfs (flag in NS).
struct obd_device *obd = exp->exp_obd;
int count = 0, rc;
__u64 bits;
- ENTRY;
LASSERT(op_data != NULL);
struct obd_import *import = exp->exp_obd->u.cli.cl_import;
int generation = import->imp_generation;
LIST_HEAD(cancels);
- ENTRY;
/* In case the upper layer did not alloc the fid, do it now. */
if (!fid_is_sane(&op_data->op_fid2)) {
struct obd_device *obd = class_exp2obd(exp);
struct ptlrpc_request *req = *request;
int count = 0, rc;
- ENTRY;
LASSERT(req == NULL);
struct obd_device *obd = exp->exp_obd;
struct ptlrpc_request *req;
int count = 0, rc;
- ENTRY;
if ((op_data->op_flags & MF_MDC_CANCEL_FID2) &&
(fid_is_sane(&op_data->op_fid2)))
struct obd_device *obd = exp->exp_obd;
struct ptlrpc_request *req;
int count = 0, rc;
- ENTRY;
if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
(fid_is_sane(&op_data->op_fid1)))
{
struct lustre_capa *capa;
struct obd_capa *c;
- ENTRY;
/* swabbed already in mdc_enqueue */
capa = req_capsule_server_get(&req->rq_pill, field);
struct ptlrpc_request *req;
struct mdt_body *body;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_GETSTATUS,
LUSTRE_MDS_VERSION, MDS_GETSTATUS);
struct mdt_body *body;
void *eadata;
int rc;
- ENTRY;
/* Request message already built. */
rc = ptlrpc_queue_wait(req);
{
struct ptlrpc_request *req;
int rc;
- ENTRY;
/* Single MDS without an LMV case */
if (op_data->op_flags & MF_GET_MDT_IDX) {
{
struct ptlrpc_request *req;
int rc;
- ENTRY;
*request = NULL;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
struct ptlrpc_request *req;
int rc;
- ENTRY;
-
*request = NULL;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_MDS_IS_SUBDIR, LUSTRE_MDS_VERSION,
int xattr_namelen = 0;
char *tmp;
int rc;
- ENTRY;
*request = NULL;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), fmt);
struct posix_acl *acl;
void *buf;
int rc;
- ENTRY;
if (!body->aclsize)
RETURN(0);
{
struct req_capsule *pill = &req->rq_pill;
int rc;
- ENTRY;
LASSERT(md);
memset(md, 0, sizeof(*md));
int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
{
- ENTRY;
RETURN(0);
}
struct obd_client_handle *och;
struct lustre_handle old;
struct mdt_body *body;
- ENTRY;
if (mod == NULL) {
DEBUG_REQ(D_ERROR, req,
struct mdt_rec_create *rec;
struct mdt_body *body;
struct obd_import *imp = open_req->rq_import;
- ENTRY;
if (!open_req->rq_replay)
RETURN(0);
struct obd_client_handle *och)
{
struct md_open_data *mod = och->och_mod;
- ENTRY;
/**
* It is possible to not have \var mod in a case of eviction between
struct obd_device *obd = class_exp2obd(exp);
struct ptlrpc_request *req;
int rc;
- ENTRY;
*request = NULL;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_CLOSE);
struct obd_device *obd = class_exp2obd(exp);
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_DONE_WRITING);
int resends = 0;
struct l_wait_info lwi;
int rc;
- ENTRY;
*request = NULL;
init_waitqueue_head(&waitq);
struct obd_statfs *msfs;
struct obd_import *imp = NULL;
int rc;
- ENTRY;
/*
* Since the request might also come from lprocfs, so we need
struct hsm_progress_kernel *req_hpk;
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_PROGRESS,
LUSTRE_MDS_VERSION, MDS_HSM_PROGRESS);
__u32 *archive_mask;
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_REGISTER,
LUSTRE_MDS_VERSION,
struct hsm_current_action *req_hca;
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_HSM_ACTION);
{
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_UNREGISTER,
LUSTRE_MDS_VERSION,
struct hsm_user_state *req_hus;
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_HSM_STATE_GET);
struct hsm_state_set *req_hss;
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_HSM_STATE_SET);
struct hsm_user_item *req_hui;
char *req_opaque;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_REQUEST);
if (req == NULL)
struct llog_changelog_rec *rec = (struct llog_changelog_rec *)hdr;
struct kuc_hdr *lh;
int len, rc;
- ENTRY;
if (rec->cr_hdr.lrh_type != CHANGELOG_REC) {
rc = -EINVAL;
struct ptlrpc_request *req;
struct obd_quotactl *body;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_MDS_QUOTACHECK, LUSTRE_MDS_VERSION,
{
struct client_obd *cli = &exp->exp_obd->u.cli;
int rc;
- ENTRY;
qchk->obd_uuid = cli->cl_target_uuid;
memcpy(qchk->obd_type, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME));
struct ptlrpc_request *req;
struct obd_quotactl *oqc;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_MDS_QUOTACTL, LUSTRE_MDS_VERSION,
struct ptlrpc_request *req;
int rc, count;
struct mdc_swap_layouts *msl, *payload;
- ENTRY;
msl = op_data->op_data;
struct obd_import *imp = obd->u.cli.cl_import;
struct llog_ctxt *ctxt;
int rc;
- ENTRY;
if (!try_module_get(THIS_MODULE)) {
CERROR("Can't get module. Is it alive?");
struct ptlrpc_request *req;
char *tmp;
int rc = -EINVAL;
- ENTRY;
req = ptlrpc_request_alloc(imp, &RQF_MDS_GET_INFO);
if (req == NULL)
struct kuc_hdr *lh = (struct kuc_hdr *)val;
struct hsm_action_list *hal = (struct hsm_action_list *)(lh + 1);
int rc;
- ENTRY;
if (len < sizeof(*lh) + sizeof(*hal)) {
CERROR("Short HSM message %d < %d\n", len,
{
struct obd_import *imp = class_exp2cliimp(exp);
int rc;
- ENTRY;
if (KEY_IS(KEY_READ_ONLY)) {
if (vallen != sizeof(int))
struct ptlrpc_request *req;
struct mdt_body *body;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_PIN);
if (req == NULL)
struct ptlrpc_request *req;
struct mdt_body *body;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_UNPIN,
LUSTRE_MDS_VERSION, MDS_UNPIN);
{
struct ptlrpc_request *req;
int rc;
- ENTRY;
*request = NULL;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SYNC);
{
struct client_obd *cli = &exp->exp_obd->u.cli;
struct lu_client_seq *seq = cli->cl_seq;
- ENTRY;
+
RETURN(seq_client_alloc_fid(NULL, seq, fid));
}
struct client_obd *cli = &obd->u.cli;
struct lprocfs_static_vars lvars = { 0 };
int rc;
- ENTRY;
OBD_ALLOC(cli->cl_rpc_lock, sizeof (*cli->cl_rpc_lock));
if (!cli->cl_rpc_lock)
{
struct obd_device *obd = exp->exp_obd;
struct client_obd *cli = &obd->u.cli;
- ENTRY;
if (cli->cl_max_mds_easize < easize)
cli->cl_max_mds_easize = easize;
static int mdc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
int rc = 0;
- ENTRY;
switch (stage) {
case OBD_CLEANUP_EARLY:
struct llog_ctxt *ctxt;
int rc;
- ENTRY;
-
LASSERT(olg == &obd->obd_olg);
rc = llog_setup(NULL, obd, olg, LLOG_CHANGELOG_REPL_CTXT, tgt,
{
struct llog_ctxt *ctxt;
- ENTRY;
-
ctxt = llog_get_context(obd, LLOG_CHANGELOG_REPL_CTXT);
if (ctxt)
llog_cleanup(NULL, ctxt);
{
struct ptlrpc_request *req;
int rc;
- ENTRY;
LASSERT(client_is_remote(exp));
struct mdc_renew_capa_args *ra = args;
struct mdt_body *body = NULL;
struct lustre_capa *capa;
- ENTRY;
if (status)
GOTO(out, capa = ERR_PTR(status));
{
struct ptlrpc_request *req;
struct mdc_renew_capa_args *ra;
- ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_GETATTR,
LUSTRE_MDS_VERSION, MDS_GETATTR);
static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
{
int rc;
- ENTRY;
ptlrpcd_addref();
static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
int rc = 0;
- ENTRY;
switch (stage) {
case OBD_CLEANUP_EARLY:
{
struct client_obd *cli = &obd->u.cli;
int rc;
- ENTRY;
LASSERT(cli->cl_mgc_vfsmnt == NULL);
{
struct llog_ctxt *ctxt;
int rc;
- ENTRY;
LASSERT(olg == &obd->obd_olg);
rc = llog_setup(NULL, obd, olg, LLOG_CONFIG_REPL_CTXT, tgt,
{
struct llog_ctxt *ctxt;
- ENTRY;
ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
if (ctxt)
/* Take a reference to a config log */
static int config_log_get(struct config_llog_data *cld)
{
- ENTRY;
atomic_inc(&cld->cld_refcount);
CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
atomic_read(&cld->cld_refcount));
we can free the config log data */
static void config_log_put(struct config_llog_data *cld)
{
- ENTRY;
-
CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
atomic_read(&cld->cld_refcount));
LASSERT(atomic_read(&cld->cld_refcount) > 0);
struct config_llog_data *cld;
struct config_llog_data *found = NULL;
void * instance;
- ENTRY;
LASSERT(logname != NULL);
{
struct config_llog_data *cld;
int rc;
- ENTRY;
CDEBUG(D_MGC, "do adding config log %s:%p\n", logname,
cfg ? cfg->cfg_instance : 0);
struct config_llog_data *sptlrpc_cld;
char seclogname[32];
char *ptr;
- ENTRY;
CDEBUG(D_MGC, "adding config log %s:%p\n", logname, cfg->cfg_instance);
struct config_llog_data *cld_sptlrpc = NULL;
struct config_llog_data *cld_recover = NULL;
int rc = 0;
- ENTRY;
cld = config_log_find(logname, cfg);
if (cld == NULL)
struct obd_import *imp = obd->u.cli.cl_import;
struct obd_connect_data *ocd = &imp->imp_connect_data;
struct config_llog_data *cld;
- ENTRY;
seq_printf(m, "imperative_recovery: %s\n",
OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED");
static void do_requeue(struct config_llog_data *cld)
{
- ENTRY;
LASSERT(atomic_read(&cld->cld_refcount) > 0);
/* Do not run mgc_process_log on a disconnected export or an
static int mgc_requeue_thread(void *data)
{
int rc = 0;
- ENTRY;
CDEBUG(D_MGC, "Starting requeue thread\n");
We are responsible for dropping the config log reference from here on out. */
static void mgc_requeue_add(struct config_llog_data *cld)
{
- ENTRY;
-
CDEBUG(D_INFO, "log %s: requeue (r=%d sp=%d st=%x)\n",
cld->cld_logname, atomic_read(&cld->cld_refcount),
cld->cld_stopping, rq_state);
struct dentry *dentry;
char *label;
int err = 0;
- ENTRY;
LASSERT(lsi);
LASSERT(lsi->lsi_srv_mnt == mnt);
{
struct client_obd *cli = &obd->u.cli;
int rc = 0;
- ENTRY;
LASSERT(cli->cl_mgc_vfsmnt != NULL);
static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
int rc = 0;
- ENTRY;
switch (stage) {
case OBD_CLEANUP_EARLY:
{
struct client_obd *cli = &obd->u.cli;
int rc;
- ENTRY;
LASSERT(cli->cl_mgc_vfsmnt == NULL);
{
struct lprocfs_static_vars lvars;
int rc;
- ENTRY;
ptlrpcd_addref();
struct lustre_handle lockh;
struct config_llog_data *cld = (struct config_llog_data *)data;
int rc = 0;
- ENTRY;
switch (flag) {
case LDLM_CB_BLOCKING:
struct ptlrpc_request *req;
struct mgs_send_param *req_msp, *rep_msp;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_MGS_SET_INFO, LUSTRE_MGS_VERSION,
struct ptlrpc_request *req;
int short_limit = cld_is_sptlrpc(cld);
int rc;
- ENTRY;
CDEBUG(D_MGC, "Enqueue for %s (res "LPX64")\n", cld->cld_logname,
cld->cld_resid.name[0]);
static int mgc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
__u32 mode, struct lustre_handle *lockh)
{
- ENTRY;
-
ldlm_lock_decref(lockh, mode);
RETURN(0);
struct ptlrpc_request *req;
struct mgs_target_info *req_mti, *rep_mti;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_MGS_TARGET_REG, LUSTRE_MGS_VERSION,
void *val, struct ptlrpc_request_set *set)
{
int rc = -EINVAL;
- ENTRY;
/* Turn off initial_recov after we try all backup servers once */
if (KEY_IS(KEY_INIT_RECOV_BACKUP)) {
{
struct llog_ctxt *ctxt;
int rc;
- ENTRY;
LASSERT(olg == &obd->obd_olg);
{
struct llog_ctxt *ctxt;
- ENTRY;
-
ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
if (ctxt)
llog_cleanup(NULL, ctxt);
int pos;
int rc = 0;
int off = 0;
- ENTRY;
LASSERT(cfg->cfg_instance != NULL);
LASSERT(cfg->cfg_sb == cfg->cfg_instance);
int i;
int ealen;
int rc;
- ENTRY;
/* allocate buffer for bulk transfer.
* if this is the first time for this mgs to read logs,
int rc = 0, must_pop = 0;
bool sptlrpc_started = false;
- ENTRY;
-
LASSERT(cld);
LASSERT(mutex_is_locked(&cld->cld_lock));
struct lustre_handle lockh = { 0 };
__u64 flags = LDLM_FL_NO_LRU;
int rc = 0, rcl;
- ENTRY;
LASSERT(cld);
struct config_llog_instance *cfg = NULL;
char *logname;
int rc = 0;
- ENTRY;
switch(lcfg->lcfg_command) {
case LCFG_LOV_ADD_OBD: {
{
int count, i, esize;
ext_acl_xattr_header *new;
- ENTRY;
if (unlikely(size < 0))
RETURN(ERR_PTR(-EINVAL));
int count, i, j, rc = 0;
__u32 id;
posix_acl_xattr_header *new;
- ENTRY;
if (unlikely(size < 0))
RETURN(-EINVAL);
posix_acl_xattr_entry pe = {ACL_MASK, 0, ACL_UNDEFINED_ID};
posix_acl_xattr_header *new;
ext_acl_xattr_entry *ee, ae;
- ENTRY;
lustre_posix_acl_cpu_to_le(&pe, &pe);
ee = lustre_ext_acl_xattr_search(ext_header, &pe, &pos);
posix_acl_xattr_entry pae;
ext_acl_xattr_header *new;
ext_acl_xattr_entry *ee, eae;
- ENTRY;
if (unlikely(size < 0))
RETURN(ERR_PTR(-EINVAL));
unsigned int min;
int rc;
char alg[CRYPTO_MAX_ALG_NAME+1] = "aes";
- ENTRY;
/* passing "aes" in a variable instead of a constant string keeps gcc
* 4.3.2 happy */
unsigned int min;
int rc;
char alg[CRYPTO_MAX_ALG_NAME+1] = "aes";
- ENTRY;
/* passing "aes" in a variable instead of a constant string keeps gcc
* 4.3.2 happy */
LINVRNT(cl_io_type_is_valid(io->ci_type));
LINVRNT(cl_io_invariant(io));
- ENTRY;
while (!list_empty(&io->ci_layers)) {
slice = container_of(io->ci_layers.prev, struct cl_io_slice,
LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
LINVRNT(cl_io_type_is_valid(iot));
LINVRNT(cl_io_invariant(io));
- ENTRY;
io->ci_type = iot;
INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
{
LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
LINVRNT(io->ci_obj != NULL);
- ENTRY;
LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
"io range: %u ["LPU64", "LPU64") %u %u\n",
{
int done = 0;
- ENTRY;
/* hidden treasure: bubble sort for now. */
do {
struct cl_io_lock_link *curr;
{
struct cl_io_lock_link *scan;
- ENTRY;
list_for_each_entry(scan, queue, cill_linkage) {
if (cl_lock_descr_match(&scan->cill_descr, need))
RETURN(+1);
{
struct cl_io_lock_link *scan;
- ENTRY;
list_for_each_entry(scan, queue, cill_linkage) {
if (cl_lock_descr_cmp(&scan->cill_descr, need))
continue;
struct cl_lock *lock;
int result;
- ENTRY;
-
lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
if (!IS_ERR(lock)) {
{
struct cl_lock *lock = link->cill_lock;
- ENTRY;
list_del_init(&link->cill_linkage);
if (lock != NULL) {
cl_lock_release(env, lock, "io", io);
struct cl_lock *lock;
int result;
- ENTRY;
result = 0;
list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
if (!cl_lockset_match(set, &link->cill_descr)) {
LINVRNT(io->ci_state == CIS_IT_STARTED);
LINVRNT(cl_io_invariant(io));
- ENTRY;
cl_io_for_each(scan, io) {
if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
continue;
LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
LINVRNT(cl_io_invariant(io));
- ENTRY;
set = &io->ci_lockset;
list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
LINVRNT(cl_io_invariant(io));
- ENTRY;
result = 0;
cl_io_for_each(scan, io) {
if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
LINVRNT(io->ci_state == CIS_UNLOCKED);
LINVRNT(cl_io_invariant(io));
- ENTRY;
cl_io_for_each_reverse(scan, io) {
if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
LINVRNT(cl_io_is_loopable(io));
LINVRNT(cl_io_invariant(io));
- ENTRY;
-
io->u.ci_rw.crw_pos += nob;
io->u.ci_rw.crw_count -= nob;
{
int result;
- ENTRY;
if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
result = +1;
else {
struct cl_io_lock_link *link;
int result;
- ENTRY;
OBD_ALLOC_PTR(link);
if (link != NULL) {
link->cill_descr = *descr;
LINVRNT(cl_io_is_loopable(io));
LINVRNT(io->ci_state == CIS_LOCKED);
LINVRNT(cl_io_invariant(io));
- ENTRY;
io->ci_state = CIS_IO_GOING;
cl_io_for_each(scan, io) {
LINVRNT(cl_io_is_loopable(io));
LINVRNT(io->ci_state == CIS_IO_GOING);
LINVRNT(cl_io_invariant(io));
- ENTRY;
cl_io_for_each_reverse(scan, io) {
if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
LINVRNT(cl_page_in_io(page, io));
LINVRNT(cl_io_invariant(io));
- ENTRY;
queue = &io->ci_queue;
LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
LINVRNT(cl_io_invariant(io));
LASSERT(cl_page_in_io(page, io));
- ENTRY;
cl_io_for_each_reverse(scan, io) {
if (scan->cis_iop->cio_prepare_write != NULL) {
*/
LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL);
LASSERT(cl_page_in_io(page, io));
- ENTRY;
cl_io_for_each(scan, io) {
if (scan->cis_iop->cio_commit_write != NULL) {
int result = 0;
LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));
- ENTRY;
cl_io_for_each(scan, io) {
if (scan->cis_iop->req_op[crt].cio_submit == NULL)
int result = 0;
LINVRNT(cl_io_is_loopable(io));
- ENTRY;
do {
size_t nob;
LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
list_empty(linkage));
- ENTRY;
list_add_tail(linkage, &io->ci_layers);
slice->cis_io = io;
*/
void cl_page_list_init(struct cl_page_list *plist)
{
- ENTRY;
plist->pl_nr = 0;
INIT_LIST_HEAD(&plist->pl_pages);
plist->pl_owner = current;
*/
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
{
- ENTRY;
/* it would be better to check that page is owned by "current" io, but
* it is not passed here. */
LASSERT(page->cp_owner != NULL);
LASSERT(plist->pl_nr > 0);
LINVRNT(plist->pl_owner == current);
- ENTRY;
list_del_init(&page->cp_batch);
lockdep_off();
mutex_unlock(&page->cp_mutex);
LINVRNT(dst->pl_owner == current);
LINVRNT(src->pl_owner == current);
- ENTRY;
list_move_tail(&page->cp_batch, &dst->pl_pages);
--src->pl_nr;
++dst->pl_nr;
LINVRNT(list->pl_owner == current);
LINVRNT(head->pl_owner == current);
- ENTRY;
cl_page_list_for_each_safe(page, tmp, list)
cl_page_list_move(head, list, page);
EXIT;
LINVRNT(plist->pl_owner == current);
- ENTRY;
cl_page_list_for_each_safe(page, temp, plist) {
LASSERT(plist->pl_nr > 0);
LINVRNT(plist->pl_owner == current);
- ENTRY;
cl_page_list_for_each_safe(page, temp, plist)
cl_page_list_del(env, plist, page);
LASSERT(plist->pl_nr == 0);
LINVRNT(plist->pl_owner == current);
- ENTRY;
result = 0;
cl_page_list_for_each_safe(page, temp, plist) {
LASSERT(index <= page->cp_index);
struct cl_page *page;
LINVRNT(plist->pl_owner == current);
- ENTRY;
cl_page_list_for_each(page, plist)
cl_page_discard(env, io, page);
EXIT;
int result;
LINVRNT(plist->pl_owner == current);
- ENTRY;
result = 0;
cl_page_list_for_each(page, plist) {
result = cl_page_unmap(env, io, page);
*/
void cl_2queue_init(struct cl_2queue *queue)
{
- ENTRY;
cl_page_list_init(&queue->c2_qin);
cl_page_list_init(&queue->c2_qout);
EXIT;
*/
void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page)
{
- ENTRY;
cl_page_list_add(&queue->c2_qin, page);
EXIT;
}
void cl_2queue_disown(const struct lu_env *env,
struct cl_io *io, struct cl_2queue *queue)
{
- ENTRY;
cl_page_list_disown(env, io, &queue->c2_qin);
cl_page_list_disown(env, io, &queue->c2_qout);
EXIT;
void cl_2queue_discard(const struct lu_env *env,
struct cl_io *io, struct cl_2queue *queue)
{
- ENTRY;
cl_page_list_discard(env, io, &queue->c2_qin);
cl_page_list_discard(env, io, &queue->c2_qout);
EXIT;
*/
void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
{
- ENTRY;
cl_page_list_fini(env, &queue->c2_qout);
cl_page_list_fini(env, &queue->c2_qin);
EXIT;
*/
void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
{
- ENTRY;
cl_2queue_init(queue);
cl_2queue_add(queue, page);
EXIT;
*/
struct cl_io *cl_io_top(struct cl_io *io)
{
- ENTRY;
while (io->ci_parent != NULL)
io = io->ci_parent;
RETURN(io);
struct cl_device *dev,
const struct cl_req_operations *ops)
{
- ENTRY;
list_add_tail(&slice->crs_linkage, &req->crq_layers);
slice->crs_dev = dev;
slice->crs_ops = ops;
LASSERT(req->crq_nrpages == 0);
LINVRNT(list_empty(&req->crq_layers));
LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
- ENTRY;
if (req->crq_o != NULL) {
for (i = 0; i < req->crq_nrobjs; ++i) {
struct cl_page_slice *slice;
int result;
- ENTRY;
result = 0;
page = cl_page_top(page);
do {
{
struct cl_req_slice *slice;
- ENTRY;
/*
* for the lack of list_for_each_entry_reverse_safe()...
*/
struct cl_req *req;
LINVRNT(nr_objects > 0);
- ENTRY;
OBD_ALLOC_PTR(req);
if (req != NULL) {
struct cl_req_obj *rqo;
int i;
- ENTRY;
page = cl_page_top(page);
LASSERT(list_empty(&page->cp_flight));
{
struct cl_req *req = page->cp_req;
- ENTRY;
page = cl_page_top(page);
LASSERT(!list_empty(&page->cp_flight));
int result;
const struct cl_req_slice *slice;
- ENTRY;
/*
* Check that the caller of cl_req_alloc() didn't lie about the number
* of objects.
int i;
LASSERT(!list_empty(&req->crq_pages));
- ENTRY;
/* Take any page to use as a model. */
page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);
*/
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
{
- ENTRY;
init_waitqueue_head(&anchor->csi_waitq);
atomic_set(&anchor->csi_sync_nr, nrpages);
atomic_set(&anchor->csi_barrier, nrpages > 0);
struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
NULL, NULL, NULL);
int rc;
- ENTRY;
LASSERT(timeout >= 0);
*/
void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
{
- ENTRY;
if (anchor->csi_sync_rc == 0 && ioret < 0)
anchor->csi_sync_rc = ioret;
/*
struct cl_object *obj,
const struct cl_lock_operations *ops)
{
- ENTRY;
slice->cls_lock = lock;
list_add_tail(&slice->cls_linkage, &lock->cll_layers);
slice->cls_obj = obj;
LINVRNT(!cl_lock_is_mutexed(lock));
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
might_sleep();
while (!list_empty(&lock->cll_layers)) {
struct cl_object *obj;
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
obj = lock->cll_descr.cld_obj;
LINVRNT(obj != NULL);
struct cl_lock *lock;
struct lu_object_header *head;
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, __GFP_IO);
if (lock != NULL) {
atomic_set(&lock->cll_ref, 1);
const struct cl_lock_slice *slice;
LINVRNT(cl_lock_invariant_trusted(env, lock));
- ENTRY;
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_fits_into != NULL &&
!slice->cls_ops->clo_fits_into(env, slice, need, io))
struct cl_lock *lock;
struct cl_object_header *head;
- ENTRY;
-
head = cl_object_header(obj);
LINVRNT(spin_is_locked(&head->coh_lock_guard));
CS_LOCK_INC(obj, lookup);
struct cl_object *obj;
struct cl_lock *lock;
- ENTRY;
-
obj = need->cld_obj;
head = cl_object_header(obj);
const struct cl_lock_slice *slice;
LINVRNT(cl_lock_invariant_trusted(NULL, lock));
- ENTRY;
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
int result;
LINVRNT(cl_lock_invariant_trusted(env, lock));
- ENTRY;
result = 0;
if (lock->cll_guarder == current) {
{
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
if (!(lock->cll_flags & CLF_CANCELLED)) {
const struct cl_lock_slice *slice;
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
if (lock->cll_state < CLS_FREEING) {
LASSERT(lock->cll_state != CLS_INTRANSIT);
cl_lock_state_set(env, lock, CLS_FREEING);
LINVRNT(cl_lock_invariant(env, lock));
LASSERT(lock->cll_holds > 0);
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
lu_ref_del(&lock->cll_holders, scope, source);
cl_lock_hold_mod(env, lock, -1);
sigset_t blocked;
int result;
- ENTRY;
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
LASSERT(lock->cll_depth == 1);
{
const struct cl_lock_slice *slice;
- ENTRY;
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
*/
void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
{
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
cl_lock_state_signal(env, lock, lock->cll_state);
EXIT;
void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
enum cl_lock_state state)
{
- ENTRY;
LASSERT(lock->cll_state <= state ||
(lock->cll_state == CLS_CACHED &&
(state == CLS_HELD || /* lock found in cache */
int result;
enum cl_lock_state state;
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
LASSERT(lock->cll_state == CLS_CACHED);
int result;
const struct cl_lock_slice *slice;
- ENTRY;
result = -ENOSYS;
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_enqueue != NULL) {
{
int result;
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
do {
LINVRNT(cl_lock_is_mutexed(lock));
{
struct cl_lock *conflict;
int rc = 0;
- ENTRY;
LASSERT(cl_lock_is_mutexed(lock));
LASSERT(lock->cll_state == CLS_QUEUING);
{
int result;
- ENTRY;
-
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
LASSERT(lock->cll_holds > 0);
{
int result;
- ENTRY;
-
cl_lock_lockdep_acquire(env, lock, enqflags);
cl_lock_mutex_get(env, lock);
result = cl_enqueue_locked(env, lock, io, enqflags);
int result;
enum cl_lock_state state = CLS_NEW;
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
if (lock->cll_users > 1) {
static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
{
int result;
- ENTRY;
result = cl_unuse_try(env, lock);
if (result)
*/
void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
{
- ENTRY;
cl_lock_mutex_get(env, lock);
cl_unuse_locked(env, lock);
cl_lock_mutex_put(env, lock);
const struct cl_lock_slice *slice;
int result;
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
do {
LINVRNT(cl_lock_is_mutexed(lock));
{
int result;
- ENTRY;
cl_lock_mutex_get(env, lock);
LINVRNT(cl_lock_invariant(env, lock));
unsigned long pound;
unsigned long ounce;
- ENTRY;
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
struct cl_object_header *hdr = cl_object_header(obj);
int result;
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
/* don't allow object to change */
LASSERT(obj == desc->cld_obj);
const struct cl_lock_slice *slice;
int result;
- ENTRY;
LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
LINVRNT(cl_lock_invariant(env, closure->clc_origin));
struct cl_lock_closure *closure)
{
int result = 0;
- ENTRY;
+
cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
if (!cl_lock_mutex_try(env, lock)) {
/*
LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
cl_lock_nr_mutexed(env) == 1));
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
if (lock->cll_holds == 0)
cl_lock_delete0(env, lock);
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
if (lock->cll_error == 0 && error != 0) {
cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
lock->cll_error = error;
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
if (lock->cll_holds == 0)
cl_lock_cancel0(env, lock);
struct cl_lock *lock;
struct cl_lock_descr *need;
- ENTRY;
-
head = cl_object_header(obj);
need = &cl_env_info(env)->clt_descr;
lock = NULL;
int result;
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
io->ci_obj = cl_object_top(descr->cld_obj);
io->ci_ignore_layout = 1;
struct cl_object_header *head;
struct cl_lock *lock;
- ENTRY;
head = cl_object_header(obj);
/*
* If locks are destroyed without cancellation, all pages must be
{
struct cl_lock *lock;
- ENTRY;
-
while (1) {
lock = cl_lock_find(env, io, need);
if (IS_ERR(lock))
{
struct cl_lock *lock;
- ENTRY;
-
lock = cl_lock_hold_mutex(env, io, need, scope, source);
if (!IS_ERR(lock))
cl_lock_mutex_put(env, lock);
int rc;
__u32 enqflags = need->cld_enq_flags;
- ENTRY;
do {
lock = cl_lock_hold_mutex(env, io, need, scope, source);
if (IS_ERR(lock))
LINVRNT(cl_lock_invariant(env, lock));
LASSERT(lock->cll_state != CLS_FREEING);
- ENTRY;
cl_lock_hold_mod(env, lock, +1);
cl_lock_get(lock);
lu_ref_add(&lock->cll_holders, scope, source);
const char *scope, const void *source)
{
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
cl_lock_hold_release(env, lock, scope, source);
lu_ref_del(&lock->cll_reference, scope, source);
cl_lock_put(env, lock);
const char *scope, const void *source)
{
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
cl_lock_mutex_get(env, lock);
cl_lock_hold_release(env, lock, scope, source);
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
cl_lock_used_mod(env, lock, +1);
EXIT;
}
LINVRNT(cl_lock_invariant(env, lock));
LASSERT(lock->cll_users > 0);
- ENTRY;
cl_lock_used_mod(env, lock, -1);
if (lock->cll_users == 0)
wake_up_all(&lock->cll_wq);
{
int result;
- ENTRY;
result = lu_object_header_init(&h->coh_lu);
if (result == 0) {
spin_lock_init(&h->coh_page_guard);
int result;
LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
- ENTRY;
top = obj->co_lu.lo_header;
result = 0;
int result;
LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
- ENTRY;
top = obj->co_lu.lo_header;
result = 0;
struct lu_object_header *top;
int result;
- ENTRY;
top = obj->co_lu.lo_header;
result = 0;
list_for_each_entry_reverse(obj, &top->loh_layers,
struct lu_object_header *top;
int result;
- ENTRY;
top = obj->co_lu.lo_header;
result = 0;
list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
*/
void cl_object_prune(const struct lu_env *env, struct cl_object *obj)
{
- ENTRY;
cl_pages_prune(env, obj);
cl_locks_prune(env, obj, 1);
EXIT;
*/
void cl_attr2lvb(struct ost_lvb *lvb, const struct cl_attr *attr)
{
- ENTRY;
lvb->lvb_size = attr->cat_size;
lvb->lvb_mtime = attr->cat_mtime;
lvb->lvb_atime = attr->cat_atime;
*/
void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
{
- ENTRY;
attr->cat_size = lvb->lvb_size;
attr->cat_mtime = lvb->lvb_mtime;
attr->cat_atime = lvb->lvb_atime;
const struct lu_device_type *dtype)
{
const struct cl_page_slice *slice;
- ENTRY;
page = cl_page_top_trusted((struct cl_page *)page);
do {
unsigned int j;
int res = CLP_GANG_OKAY;
int tree_lock = 1;
- ENTRY;
idx = start;
hdr = cl_object_header(obj);
PASSERT(env, page, page->cp_parent == NULL);
PASSERT(env, page, page->cp_state == CPS_FREEING);
- ENTRY;
might_sleep();
while (!list_empty(&page->cp_layers)) {
struct cl_page_slice *slice;
struct cl_page *page;
struct lu_object_header *head;
- ENTRY;
OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
__GFP_IO);
if (page != NULL) {
LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
might_sleep();
- ENTRY;
-
hdr = cl_object_header(o);
CS_PAGE_INC(o, lookup);
}
};
- ENTRY;
old = page->cp_state;
PASSERT(env, page, allowed_transitions[old][state]);
CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
*/
void cl_page_get(struct cl_page *page)
{
- ENTRY;
cl_page_get_trust(page);
EXIT;
}
{
PASSERT(env, page, atomic_read(&page->cp_ref) > !!page->cp_parent);
- ENTRY;
CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
atomic_read(&page->cp_ref));
struct cl_page *top;
struct cl_page *page;
- ENTRY;
KLASSERT(PageLocked(vmpage));
/*
{
PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
- ENTRY;
RETURN(CL_PAGE_INVOKE(env, page, op,
(const struct lu_env *,
const struct cl_page_slice *, struct cl_io *),
{
PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
- ENTRY;
CL_PAGE_INVOID(env, page, op,
(const struct lu_env *,
const struct cl_page_slice *, struct cl_io *), io);
static void cl_page_owner_clear(struct cl_page *page)
{
- ENTRY;
for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
if (page->cp_owner != NULL) {
LASSERT(page->cp_owner->ci_owned_nr > 0);
static void cl_page_owner_set(struct cl_page *page)
{
- ENTRY;
for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
LASSERT(page->cp_owner != NULL);
page->cp_owner->ci_owned_nr++;
{
enum cl_page_state state;
- ENTRY;
state = pg->cp_state;
PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
PINVRNT(env, pg, cl_page_invariant(pg));
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
- ENTRY;
RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == io);
}
EXPORT_SYMBOL(cl_page_is_owned);
PINVRNT(env, pg, !cl_page_is_owned(pg, io));
- ENTRY;
pg = cl_page_top(pg);
io = cl_io_top(io);
{
PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
- ENTRY;
pg = cl_page_top(pg);
io = cl_io_top(io);
PINVRNT(env, pg, cl_page_is_owned(pg, io));
PINVRNT(env, pg, cl_page_invariant(pg));
- ENTRY;
pg = cl_page_top(pg);
io = cl_io_top(io);
cl_page_owner_clear(pg);
{
PINVRNT(env, pg, cl_page_is_owned(pg, io));
- ENTRY;
pg = cl_page_top(pg);
io = cl_io_top(io);
cl_page_disown0(env, io, pg);
int radix)
{
struct cl_page *tmp = pg;
- ENTRY;
PASSERT(env, pg, pg == cl_page_top(pg));
PASSERT(env, pg, pg->cp_state != CPS_FREEING);
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
PINVRNT(env, pg, cl_page_invariant(pg));
- ENTRY;
cl_page_delete0(env, pg, 1);
EXIT;
}
int result;
const struct cl_page_slice *slice;
- ENTRY;
pg = cl_page_top_trusted((struct cl_page *)pg);
slice = container_of(pg->cp_layers.next,
const struct cl_page_slice, cpl_linkage);
static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
{
- ENTRY;
RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
}
/*
* Page is queued for IO, change its state.
*/
- ENTRY;
cl_page_owner_clear(pg);
cl_page_state_set(env, pg, cl_req_type_state(crt));
EXIT;
PASSERT(env, pg, pg->cp_req == NULL);
PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
- ENTRY;
CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
if (crt == CRT_READ && ioret == 0) {
PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
PINVRNT(env, pg, crt < CRT_NR);
- ENTRY;
if (crt >= CRT_NR)
RETURN(-EINVAL);
result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
PINVRNT(env, pg, cl_page_is_owned(pg, io));
PINVRNT(env, pg, cl_page_invariant(pg));
- ENTRY;
-
if (crt >= CRT_NR)
RETURN(-EINVAL);
PINVRNT(env, pg, cl_page_is_owned(pg, io));
PINVRNT(env, pg, cl_page_invariant(pg));
- ENTRY;
-
result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));
CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
PINVRNT(env, page, cl_page_invariant(page));
- ENTRY;
rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock),
(const struct lu_env *,
const struct cl_page_slice *, struct cl_io *),
struct cl_io *io;
int result;
- ENTRY;
info = cl_env_info(env);
io = &info->clt_io;
struct cl_object *obj,
const struct cl_page_operations *ops)
{
- ENTRY;
list_add_tail(&slice->cpl_linkage, &page->cp_layers);
slice->cpl_obj = obj;
slice->cpl_ops = ops;
{
int jobid_len = JOBSTATS_JOBID_SIZE;
int rc = 0;
- ENTRY;
memset(jobid, 0, JOBSTATS_JOBID_SIZE);
/* Jobstats isn't enabled */
int rc;
int dev;
- ENTRY;
if (!len || !name) {
CERROR("No name passed,!\n");
GOTO(out, rc = -EINVAL);
struct libcfs_debug_ioctl_data *debug_data;
struct obd_device *obd = NULL;
int err = 0, len = 0;
- ENTRY;
/* only for debugging */
if (cmd == LIBCFS_IOC_DEBUG_MASK) {
int lustre_unregister_fs(void);
__u64 memory_leaked, pages_leaked;
__u64 memory_max, pages_max;
- ENTRY;
lustre_unregister_fs();
struct lu_device *top_dev)
{
struct lu_object *lo, *n;
- ENTRY;
lo = lu_object_find_at(env, top_dev, fid, NULL);
if (IS_ERR(lo))
struct thandle *th;
int rc;
- ENTRY;
-
dto = dt_locate(env, dt, fid);
if (IS_ERR(dto))
RETURN(dto);
struct lu_idxpage *lip = &lp->lp_idx;
char *entry;
int rc, size;
- ENTRY;
/* no support for variable key & record size for now */
LASSERT((ii->ii_flags & II_FL_VARKEY) == 0);
const struct dt_it_ops *iops;
unsigned int pageidx, nob, nlupgs = 0;
int rc;
- ENTRY;
LASSERT(rdpg->rp_pages != NULL);
LASSERT(obj->do_index_ops != NULL);
const struct dt_index_features *feat;
struct dt_object *obj;
int rc;
- ENTRY;
/* rp_count shouldn't be null and should be a multiple of the container
* size */
{
struct obd_type *type;
int rc = 0;
- ENTRY;
/* sanity check */
LASSERT(strnlen(name, CLASS_MAX_NAME) < CLASS_MAX_NAME);
int class_unregister_type(const char *name)
{
struct obd_type *type = class_search_type(name);
- ENTRY;
if (!type) {
CERROR("unknown obd type\n");
struct obd_type *type = NULL;
int i;
int new_obd_minor = 0;
- ENTRY;
if (strlen(name) >= MAX_OBD_NAME) {
CERROR("name/uuid must be < %u bytes long\n", MAX_OBD_NAME);
void obd_cleanup_caches(void)
{
- ENTRY;
if (obd_device_cachep) {
kmem_cache_destroy(obd_device_cachep);
obd_device_cachep = NULL;
int obd_init_caches(void)
{
- ENTRY;
-
LASSERT(obd_device_cachep == NULL);
obd_device_cachep = kmem_cache_create("ll_obd_dev_cache",
sizeof(struct obd_device),
struct obd_export *class_conn2export(struct lustre_handle *conn)
{
struct obd_export *export;
- ENTRY;
if (!conn) {
CDEBUG(D_CACHE, "looking for null handle\n");
static void class_export_destroy(struct obd_export *exp)
{
struct obd_device *obd = exp->exp_obd;
- ENTRY;
LASSERT_ATOMIC_ZERO(&exp->exp_refcount);
LASSERT(obd != NULL);
struct obd_export *export;
cfs_hash_t *hash = NULL;
int rc = 0;
- ENTRY;
OBD_ALLOC_PTR(export);
if (!export)
/* Import management functions */
void class_import_destroy(struct obd_import *imp)
{
- ENTRY;
-
CDEBUG(D_IOCTL, "destroying import %p for %s\n", imp,
imp->imp_obd->obd_name);
void class_import_put(struct obd_import *imp)
{
- ENTRY;
-
LASSERT(list_empty(&imp->imp_zombie_chain));
LASSERT_ATOMIC_GT_LT(&imp->imp_refcount, 0, LI_POISON);
LASSERT(conn != NULL);
LASSERT(obd != NULL);
LASSERT(cluuid != NULL);
- ENTRY;
export = class_new_export(obd, cluuid);
if (IS_ERR(export))
int class_disconnect(struct obd_export *export)
{
int already_disconnected;
- ENTRY;
if (export == NULL) {
CWARN("attempting to free NULL export %p\n", export);
{
int rc;
struct obd_export *exp;
- ENTRY;
/* It's possible that an export may disconnect itself, but
* nothing else will be added to this list. */
void class_disconnect_exports(struct obd_device *obd)
{
struct list_head work_list;
- ENTRY;
/* Move all of the exports from obd_exports to a work list, en masse. */
INIT_LIST_HEAD(&work_list);
struct list_head work_list;
struct obd_export *exp, *n;
int evicted = 0;
- ENTRY;
INIT_LIST_HEAD(&work_list);
spin_lock(&obd->obd_dev_lock);
{
struct obd_import *import;
struct obd_export *export;
- ENTRY;
do {
spin_lock(&obd_zombie_impexp_lock);
struct obd_ioctl_data *data;
int err;
int offset = 0;
- ENTRY;
err = copy_from_user(&hdr, (void *)arg, sizeof(hdr));
if ( err )
/* opening /dev/obd */
static int obd_class_open(struct inode * inode, struct file * file)
{
- ENTRY;
-
try_module_get(THIS_MODULE);
RETURN(0);
}
/* closing /dev/obd */
static int obd_class_release(struct inode * inode, struct file * file)
{
- ENTRY;
-
module_put(THIS_MODULE);
RETURN(0);
}
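
Note the two removal patterns visible in these hunks: where ENTRY; was followed by a blank line, both lines go (the paired bare '-' markers above); where it was not, a few hunks elsewhere substitute a blank line in its place (the bare '+' markers), so each function keeps exactly one separator after its declarations. Reconstructed from the hunk above as a sketch, obd_class_release() ends up as:

/* closing /dev/obd */
static int obd_class_release(struct inode * inode, struct file * file)
{
	module_put(THIS_MODULE);
	RETURN(0);
}
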
unsigned long arg)
{
int err = 0;
- ENTRY;
/* Allow non-root access for OBD_IOC_PING_TARGET - used by lfs check */
if (!cfs_capable(CFS_CAP_SYS_ADMIN) && (cmd != OBD_IOC_PING_TARGET))
int class_procfs_init(void)
{
int rc = 0;
- ENTRY;
obd_sysctl_init();
proc_lustre_root = lprocfs_register("fs/lustre", NULL,
int class_procfs_clean(void)
{
- ENTRY;
if (proc_lustre_root) {
lprocfs_remove(&proc_lustre_root);
}
{
struct llog_log_hdr *llh = loghandle->lgh_hdr;
int rc = 0;
- ENTRY;
CDEBUG(D_RPCTRACE, "Canceling %d in log "DOSTID"\n",
index, POSTID(&loghandle->lgh_id.lgl_oi));
struct llog_log_hdr *llh;
int rc;
- ENTRY;
LASSERT(handle->lgh_hdr == NULL);
OBD_ALLOC_PTR(llh);
char *cfg_buf = (char*) (rec + 1);
struct lustre_cfg *lcfg;
int rc = 0;
- ENTRY;
/* Append all records */
local_rec.lrh_len -= sizeof(*rec) + sizeof(struct llog_rec_tail);
int saved_index = 0;
int last_called_index = 0;
- ENTRY;
-
LASSERT(llh);
OBD_ALLOC(buf, LLOG_CHUNK_SIZE);
struct llog_process_info *lpi;
int rc;
- ENTRY;
-
OBD_ALLOC_PTR(lpi);
if (lpi == NULL) {
CERROR("cannot alloc pointer\n");
struct llog_process_cat_data *cd = catdata;
void *buf;
int rc = 0, first_index = 1, index, idx;
- ENTRY;
OBD_ALLOC(buf, LLOG_CHUNK_SIZE);
if (!buf)
struct llog_operations *lop;
int rc;
- ENTRY;
-
rc = llog_handle2ops(loghandle, &lop);
if (rc)
RETURN(rc);
struct llog_operations *lop;
int raised, rc;
- ENTRY;
-
rc = llog_handle2ops(loghandle, &lop);
if (rc)
RETURN(rc);
struct llog_operations *lop;
int raised, rc;
- ENTRY;
-
rc = llog_handle2ops(handle, &lop);
if (rc)
RETURN(rc);
struct llog_operations *lop;
int raised, rc;
- ENTRY;
-
rc = llog_handle2ops(handle, &lop);
if (rc)
RETURN(rc);
struct llog_operations *lop;
int raised, rc, buflen;
- ENTRY;
-
rc = llog_handle2ops(handle, &lop);
if (rc)
RETURN(rc);
{
int raised, rc;
- ENTRY;
-
if (lgh->lgh_logops->lop_add == NULL)
RETURN(-EOPNOTSUPP);
{
int raised, rc;
- ENTRY;
-
if (lgh->lgh_logops->lop_declare_add == NULL)
RETURN(-EOPNOTSUPP);
struct thandle *th;
int rc;
- ENTRY;
-
rc = llog_open(env, ctxt, res, logid, name, LLOG_OPEN_NEW);
if (rc)
RETURN(rc);
struct llog_handle *handle;
int rc = 0, rc2;
- ENTRY;
-
/* nothing to erase */
if (name == NULL && logid == NULL)
RETURN(0);
{
int rc;
- ENTRY;
-
LASSERT(loghandle);
LASSERT(loghandle->lgh_ctxt);
int raised;
int rc;
- ENTRY;
-
LASSERT(ctxt);
LASSERT(ctxt->loc_logops);
struct llog_operations *lop;
int rc;
- ENTRY;
-
rc = llog_handle2ops(loghandle, &lop);
if (rc)
GOTO(out, rc);
struct llog_log_hdr *llh;
struct llog_logid_rec rec = { { 0 }, };
int rc, index, bitmap_size;
- ENTRY;
llh = cathandle->lgh_hdr;
bitmap_size = LLOG_BITMAP_SIZE(llh);
struct llog_handle *loghandle;
int rc = 0;
- ENTRY;
-
if (cathandle == NULL)
RETURN(-EBADF);
struct llog_handle *loghandle, *n;
int rc;
- ENTRY;
-
list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
u.phd.phd_entry) {
struct llog_log_hdr *llh = loghandle->lgh_hdr;
struct thandle *th)
{
struct llog_handle *loghandle = NULL;
- ENTRY;
down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
loghandle = cathandle->u.chd.chd_current_log;
{
struct llog_handle *loghandle;
int rc;
- ENTRY;
LASSERT(rec->lrh_len <= LLOG_CHUNK_SIZE);
loghandle = llog_cat_current_log(cathandle, th);
struct llog_handle *loghandle, *next;
int rc = 0;
- ENTRY;
-
if (cathandle->u.chd.chd_current_log == NULL) {
/* declare new plain llog */
down_write(&cathandle->lgh_lock);
{
int i, index, rc = 0, failed = 0;
- ENTRY;
-
for (i = 0; i < count; i++, cookies++) {
struct llog_handle *loghandle;
struct llog_logid *lgl = &cookies->lgc_lgl;
struct llog_handle *llh;
int rc;
- ENTRY;
if (rec->lrh_type != LLOG_LOGID_MAGIC) {
CERROR("invalid record in catalog\n");
RETURN(-EINVAL);
struct llog_process_data d;
struct llog_log_hdr *llh = cat_llh->lgh_hdr;
int rc;
- ENTRY;
LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
d.lpd_data = data;
struct llog_process_cat_data cd;
struct llog_log_hdr *llh = cat_llh->lgh_hdr;
int rc;
- ENTRY;
LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
d.lpd_data = data;
{
struct llog_log_hdr *llh = cathandle->lgh_hdr;
int i, bitmap_size, idx;
- ENTRY;
bitmap_size = LLOG_BITMAP_SIZE(llh);
if (llh->llh_cat_idx == (index - 1)) {
struct llog_log_hdr *llh;
int rc;
- ENTRY;
-
if (rec->lrh_type != LLOG_LOGID_MAGIC) {
CERROR("invalid record in catalog\n");
RETURN(-EINVAL);
char *start, *end, *endp;
__u64 id, seq;
- ENTRY;
start = str;
if (*start != '#')
RETURN(-EINVAL);
char *endp;
int cur_index, rc = 0;
- ENTRY;
-
if (ioc_data && ioc_data->ioc_inllen1 > 0) {
l = 0;
remains = ioc_data->ioc_inllen4 +
char *endp;
int cur_index;
- ENTRY;
if (ioc_data != NULL && ioc_data->ioc_inllen1 > 0) {
l = 0;
remains = ioc_data->ioc_inllen4 +
struct llog_handle *log;
int rc;
- ENTRY;
-
rc = llog_cat_id2handle(env, cat, &log, logid);
if (rc) {
CDEBUG(D_IOCTL, "cannot find log #"DOSTID"#%08x\n",
struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
int rc;
- ENTRY;
if (rec->lrh_type != LLOG_LOGID_MAGIC)
RETURN(-EINVAL);
rc = llog_remove_log(env, handle, &lir->lid_id);
int rc = 0;
struct llog_handle *handle = NULL;
- ENTRY;
-
if (*data->ioc_inlbuf1 == '#') {
rc = str2logid(&logid, data->ioc_inlbuf1, data->ioc_inllen1);
if (rc)
struct llog_rec_hdr rec = { 0 };
struct llog_rec_tail tail;
int rc;
- ENTRY;
LASSERT(len >= LLOG_MIN_REC_SIZE && (len & 0x7) == 0);
loff_t saved_off = file->f_pos;
int buflen = rec->lrh_len;
- ENTRY;
-
file->f_pos = off;
if (buflen == 0)
{
loff_t offset = off;
int rc;
- ENTRY;
rc = fsfilt_read_record(obd, file, buf, size, &offset);
if (rc) {
{
struct obd_device *obd;
int rc;
- ENTRY;
LASSERT(sizeof(*handle->lgh_hdr) == LLOG_CHUNK_SIZE);
struct obd_device *obd;
struct file *file;
size_t left;
- ENTRY;
llh = loghandle->lgh_hdr;
file = loghandle->lgh_file;
int len)
{
int rc;
- ENTRY;
if (len == 0 || len & (LLOG_CHUNK_SIZE - 1))
RETURN(-EINVAL);
{
__u64 cur_offset;
int rc;
- ENTRY;
if (len == 0 || len & (LLOG_CHUNK_SIZE - 1))
RETURN(-EINVAL);
struct obd_device *obd;
int rc = 0;
- ENTRY;
-
LASSERT(ctxt);
LASSERT(ctxt->loc_exp);
LASSERT(ctxt->loc_exp->exp_obd);
int rc = 0;
int open_flags = O_RDWR | O_CREAT | O_LARGEFILE;
- ENTRY;
-
LASSERT(ctxt);
LASSERT(ctxt->loc_exp);
obd = ctxt->loc_exp->exp_obd;
{
int rc;
- ENTRY;
-
if (handle->lgh_file == NULL)
RETURN(0);
rc = filp_close(handle->lgh_file, 0);
void *th;
struct inode *inode;
int rc, rc1;
- ENTRY;
dir = MOUNT_CONFIGS_DIR;
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
struct obd_llog_group *olg;
int rc, idx;
- ENTRY;
LASSERT(ctxt != NULL);
LASSERT(ctxt != LP_POISON);
{
struct llog_ctxt *ctxt;
int rc = 0;
- ENTRY;
if (index < 0 || index >= LLOG_MAX_CTXTS)
RETURN(-EINVAL);
int llog_sync(struct llog_ctxt *ctxt, struct obd_export *exp, int flags)
{
int rc = 0;
- ENTRY;
if (!ctxt)
RETURN(0);
struct llog_cookie *logcookies, int numcookies)
{
int raised, rc;
- ENTRY;
if (!ctxt) {
CERROR("No ctxt\n");
struct llog_cookie *cookies, int flags)
{
int rc;
- ENTRY;
if (!ctxt) {
CERROR("No ctxt\n");
struct obd_device *disk_obd, int *index)
{
int rc;
- ENTRY;
+
OBD_CHECK_DT_OP(obd, llog_init, 0);
OBD_COUNTER_INCREMENT(obd, llog_init);
int obd_llog_finish(struct obd_device *obd, int count)
{
int rc;
- ENTRY;
+
OBD_CHECK_DT_OP(obd, llog_finish, 0);
OBD_COUNTER_INCREMENT(obd, llog_finish);
struct llog_thread_info *lgi = llog_info(env);
int rc;
- ENTRY;
-
LASSERT(th);
LASSERT(off);
LASSERT(len >= LLOG_MIN_REC_SIZE && (len & 0x7) == 0);
int buflen = rec->lrh_len;
int rc;
- ENTRY;
-
LASSERT(env);
LASSERT(o);
struct llog_thread_info *lgi;
int rc;
- ENTRY;
-
LASSERT(sizeof(*handle->lgh_hdr) == LLOG_CHUNK_SIZE);
o = handle->lgh_obj;
struct dt_object *o;
int rc;
- ENTRY;
-
LASSERT(env);
LASSERT(th);
LASSERT(loghandle);
struct dt_object *o;
size_t left;
- ENTRY;
-
LASSERT(env);
llh = loghandle->lgh_hdr;
LASSERT(llh);
struct dt_device *dt;
int rc;
- ENTRY;
-
LASSERT(env);
LASSERT(lgi);
loff_t cur_offset;
int rc;
- ENTRY;
-
if (len == 0 || len & (LLOG_CHUNK_SIZE - 1))
RETURN(-EINVAL);
struct local_oid_storage *los;
int rc = 0;
- ENTRY;
-
LASSERT(env);
LASSERT(ctxt);
LASSERT(ctxt->loc_exp);
struct dt_object *o;
int rc;
- ENTRY;
-
LASSERT(res->lgh_obj);
LASSERT(th);
struct dt_object *o;
int rc = 0;
- ENTRY;
-
LASSERT(env);
o = res->lgh_obj;
LASSERT(o);
struct local_oid_storage *los;
int rc = 0;
- ENTRY;
-
LASSERT(handle->lgh_obj);
lu_object_put(env, &handle->lgh_obj->do_lu);
char *name = NULL;
int rc;
- ENTRY;
-
ctxt = loghandle->lgh_ctxt;
LASSERT(ctxt);
struct llog_ctxt *ctxt;
int rc = 0;
- ENTRY;
-
LASSERT(obd);
LASSERT(olg->olg_ctxts[ctxt_idx]);
struct thandle *th;
int rc, size;
- ENTRY;
-
LASSERT(d);
size = sizeof(*idarray) * count;
void lustre_swab_llogd_body (struct llogd_body *d)
{
- ENTRY;
print_llogd_body(d);
lustre_swab_llog_id(&d->lgd_logid);
__swab32s (&d->lgd_ctxt_idx);
void lustre_swab_llog_hdr (struct llog_log_hdr *h)
{
- ENTRY;
print_llog_hdr(h);
lustre_swab_llog_rec(&h->llh_hdr);
static void print_lustre_cfg(struct lustre_cfg *lcfg)
{
int i;
- ENTRY;
if (!(libcfs_debug & D_OTHER)) /* don't loop on nothing */
return;
void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg)
{
int i;
- ENTRY;
__swab32s(&lcfg->lcfg_version);
void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size)
{
struct cfg_marker32 *cm32 = (struct cfg_marker32*)marker;
- ENTRY;
if (swab) {
__swab32s(&marker->cm_step);
int rc;
int rc2;
- ENTRY;
-
CWARN("1a: create a log with name: %s\n", name);
ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
LASSERT(ctxt);
struct llog_logid logid;
int rc;
- ENTRY;
-
CWARN("2a: re-open a log with name: %s\n", name);
ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
LASSERT(ctxt);
int rc, i;
int num_recs = 1; /* 1 for the header */
- ENTRY;
-
lgr.lgr_hdr.lrh_len = lgr.lgr_tail.lrt_len = sizeof(lgr);
lgr.lgr_hdr.lrh_type = LLOG_GEN_REC;
char *buf;
struct llog_rec_hdr rec;
- ENTRY;
-
ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
LASSERT(ctxt);
struct llog_mini_rec lmr;
struct llog_ctxt *ctxt;
- ENTRY;
-
ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
LASSERT(ctxt);
int rc = 0, i, process_count;
int num_recs = 0;
- ENTRY;
-
rc = llog_open_create(env, ctxt, &llh, NULL, NULL);
if (rc) {
CERROR("7_sub: create log failed\n");
struct llog_ctxt *ctxt;
int rc;
- ENTRY;
-
ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
CWARN("7a: test llog_logid_rec\n");
int rc, err;
char name[10];
- ENTRY;
ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
LASSERT(ctxt);
struct lu_env env;
int rc;
- ENTRY;
-
rc = lu_env_init(&env, LCT_LOCAL | LCT_MG_THREAD);
if (rc)
RETURN(rc);
struct lu_context test_session;
int rc;
- ENTRY;
-
if (lcfg->lcfg_bufcount < 2) {
CERROR("requires a TARGET OBD name\n");
RETURN(-EINVAL);
struct lu_object *below;
struct lu_device *under;
- ENTRY;
-
ls = container_of0(o->lo_dev, struct ls_device, ls_top_dev.dd_lu_dev);
under = &ls->ls_osd->dd_lu_dev;
below = under->ld_ops->ldo_object_alloc(env, o->lo_header, under);
{
struct ls_device *ls;
- ENTRY;
-
mutex_lock(&ls_list_mutex);
ls = __ls_find_dev(dev);
if (ls)
struct dt_thread_info *dti = dt_info(env);
int rc;
- ENTRY;
-
/* update fid generation file */
if (los != NULL) {
LASSERT(dt_object_exists(los->los_obj));
obd_id lastid;
int rc;
- ENTRY;
-
rc = dt_create(env, o, attr, NULL, dof, th);
if (rc)
RETURN(rc);
struct thandle *th;
int rc;
- ENTRY;
-
rc = dt_lookup_dir(env, parent, name, &dti->dti_fid);
if (rc == -ENOENT)
RETURN(0);
__u32 first_oid = fid_oid(first_fid);
int rc = 0;
- ENTRY;
-
ls = ls_device_get(dev);
if (IS_ERR(ls))
RETURN(PTR_ERR(ls));
{
cfs_hash_t *hash = obd->obd_nid_stats_hash;
struct nid_stat *stat;
- ENTRY;
/* we need an extra list because hash_exit is called too early */
/* no locking needed because all clients have died */
static int lprocfs_nid_stats_clear_write_cb(void *obj, void *data)
{
struct nid_stat *stat = obj;
- ENTRY;
CDEBUG(D_INFO,"refcnt %d\n", atomic_read(&stat->nid_exp_ref_count));
if (atomic_read(&stat->nid_exp_ref_count) == 1) {
proc_dir_entry_t *entry;
char *buffer = NULL;
int rc = 0;
- ENTRY;
*newnid = 0;
void *data)
{
struct proc_dir_entry *entry;
- ENTRY;
/* Disallow secretly (un)writable entries. */
LASSERT((seq_fops->write == NULL) == ((mode & 0222) == 0));
struct list_head *layers;
int clean;
int result;
- ENTRY;
/*
* Create top-level object slice. This will also create
char name[16];
int bits;
int i;
- ENTRY;
memset(s, 0, sizeof *s);
bits = lu_htable_order();
struct portals_handle_ops *ops)
{
struct handle_bucket *bucket;
- ENTRY;
LASSERT(h != NULL);
LASSERT(list_empty(&h->h_link));
void class_handle_hash_back(struct portals_handle *h)
{
struct handle_bucket *bucket;
- ENTRY;
bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
struct handle_bucket *bucket;
struct portals_handle *h;
void *retval = NULL;
- ENTRY;
LASSERT(handle_hash != NULL);
{
struct uuid_nid_data *entry;
int found = 0;
- ENTRY;
CDEBUG(D_INFO, "check if uuid %s has %s.\n",
obd_uuid2str(uuid), libcfs_nid2str(nid));
int lustre_buf2som(void *buf, int rc, struct md_som_data *msd)
{
struct som_attrs *attrs = (struct som_attrs *)buf;
- ENTRY;
if (rc == 0 || rc == -ENODATA)
/* no SOM attributes */
int lustre_buf2hsm(void *buf, int rc, struct md_hsm *mh)
{
struct hsm_attrs *attrs = (struct hsm_attrs *)buf;
- ENTRY;
if (rc == 0 || rc == -ENODATA)
/* no HSM attributes */
void lustre_hsm2buf(void *buf, struct md_hsm *mh)
{
struct hsm_attrs *attrs = (struct hsm_attrs *)buf;
- ENTRY;
/* copy HSM attributes */
attrs->hsm_compat = mh->mh_compat;
struct obd_device *obd = NULL;
char *typename, *name, *uuid;
int rc, len;
- ENTRY;
if (!LUSTRE_CFG_BUFLEN(lcfg, 1)) {
CERROR("No type passed!\n");
{
int err = 0;
struct obd_export *exp;
- ENTRY;
LASSERT(obd != NULL);
LASSERTF(obd == class_num2obd(obd->obd_minor),
*/
int class_detach(struct obd_device *obd, struct lustre_cfg *lcfg)
{
- ENTRY;
-
if (obd->obd_set_up) {
CERROR("OBD device %d still set up\n", obd->obd_minor);
RETURN(-EBUSY);
{
int err = 0;
char *flag;
- ENTRY;
OBD_RACE(OBD_FAIL_LDLM_RECOV_CLIENTS);
struct obd_import *imp;
struct obd_uuid uuid;
int rc;
- ENTRY;
if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 ||
LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(struct obd_uuid)) {
struct obd_import *imp;
struct obd_uuid uuid;
int rc;
- ENTRY;
if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 ||
LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(struct obd_uuid)) {
{
struct lustre_profile *lprof;
- ENTRY;
list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
if (!strcmp(lprof->lp_profile, prof)) {
RETURN(lprof);
{
struct lustre_profile *lprof;
int err = 0;
- ENTRY;
CDEBUG(D_CONFIG, "Add profile %s\n", prof);
void class_del_profile(const char *prof)
{
struct lustre_profile *lprof;
- ENTRY;
CDEBUG(D_CONFIG, "Del profile %s\n", prof);
void class_del_profiles(void)
{
struct lustre_profile *lprof, *n;
- ENTRY;
list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
list_del(&lprof->lp_list);
static int class_set_global(char *ptr, int val, struct lustre_cfg *lcfg)
{
- ENTRY;
if (class_match_param(ptr, PARAM_AT_MIN, NULL) == 0)
at_min = val;
else if (class_match_param(ptr, PARAM_AT_MAX, NULL) == 0)
char *value = NULL;
int name_len = 0;
int new_len = 0;
- ENTRY;
if (cfg == NULL || new_name == NULL)
RETURN(ERR_PTR(-EINVAL));
int matched = 0, j = 0;
int rc = 0;
int skip = 0;
- ENTRY;
if (lcfg->lcfg_command != LCFG_PARAM) {
CERROR("Unknown command: %d\n", lcfg->lcfg_command);
int cfg_len = rec->lrh_len;
char *cfg_buf = (char*) (rec + 1);
int rc = 0;
- ENTRY;
//class_config_dump_handler(handle, rec, data);
struct llog_handle *llh;
llog_cb_t callback;
int rc;
- ENTRY;
CDEBUG(D_INFO, "looking up llog %s\n", name);
rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
char *end = buf + size;
int rc = 0;
- ENTRY;
-
LASSERT(rec->lrh_type == OBD_CFG_REC);
rc = lustre_cfg_sanity_check(lcfg, rec->lrh_len);
if (rc < 0)
char *outstr;
int rc = 0;
- ENTRY;
-
OBD_ALLOC(outstr, 256);
if (outstr == NULL)
RETURN(-ENOMEM);
struct llog_handle *llh;
int rc;
- ENTRY;
-
LCONSOLE_INFO("Dumping config log %s\n", name);
rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
struct lustre_cfg *lcfg;
struct lustre_cfg_bufs bufs;
int rc;
- ENTRY;
if (!obd) {
CERROR("empty cleanup\n");
struct lustre_sb_info *lsi = s2lsi(sb);
struct obd_device *mgc = lsi->lsi_mgc;
int rc;
- ENTRY;
LASSERT(mgc);
LASSERT(cfg);
struct lustre_sb_info *lsi = s2lsi(sb);
struct obd_device *mgc = lsi->lsi_mgc;
int rc;
- ENTRY;
if (!mgc)
RETURN(-ENOENT);
char *ptr;
int recov_bk;
int rc = 0, i = 0, j, len;
- ENTRY;
LASSERT(lsi->lsi_lmd);
struct obd_device *obd;
char *niduuid = 0, *ptr = 0;
int i, rc = 0, len = 0;
- ENTRY;
if (!lsi)
RETURN(-ENOENT);
struct lustre_sb_info *lustre_init_lsi(struct super_block *sb)
{
struct lustre_sb_info *lsi;
- ENTRY;
OBD_ALLOC_PTR(lsi);
if (!lsi)
static int lustre_free_lsi(struct super_block *sb)
{
struct lustre_sb_info *lsi = s2lsi(sb);
- ENTRY;
LASSERT(lsi != NULL);
CDEBUG(D_MOUNT, "Freeing lsi %p\n", lsi);
int lustre_put_lsi(struct super_block *sb)
{
struct lustre_sb_info *lsi = s2lsi(sb);
- ENTRY;
LASSERT(lsi != NULL);
int lustre_common_put_super(struct super_block *sb)
{
int rc;
- ENTRY;
CDEBUG(D_MOUNT, "dropping sb %p\n", sb);
struct lustre_mount_data *lmd = lsi->lsi_lmd;
__u32 index;
int i, rc;
- ENTRY;
rc = server_name2index(svname, &index, NULL);
if (rc != LDD_F_SV_TYPE_OST)
const char *s1 = ptr, *s2;
__u32 index, *exclude_list;
int rc = 0, devmax;
- ENTRY;
/* The shortest an ost name can be is 8 chars: -OST0000.
We don't actually know the fsname at this time, so in fact
char *s1, *s2, *devname = NULL;
struct lustre_mount_data *raw = (struct lustre_mount_data *)options;
int rc = 0;
- ENTRY;
LASSERT(lmd);
if (!options) {
struct lustre_mount_data2 *lmd2 = data;
struct lustre_sb_info *lsi;
int rc;
- ENTRY;
CDEBUG(D_MOUNT|D_VFSTRACE, "VFS Op: sb %p\n", sb);
static int echo_destroy_export(struct obd_export *exp)
{
- ENTRY;
-
target_destroy_export(exp);
ldlm_destroy_export(exp);
{
struct obd_device *obd = class_exp2obd(exp);
- ENTRY;
if (!obd) {
CERROR("invalid client cookie "LPX64"\n",
exp->exp_handle.h_cookie);
struct obd_device *obd = class_exp2obd(exp);
obd_id id = ostid_id(&oinfo->oi_oa->o_oi);
- ENTRY;
if (!obd) {
CERROR("invalid client cookie "LPX64"\n",
exp->exp_handle.h_cookie);
{
struct obd_device *obd = class_exp2obd(exp);
- ENTRY;
if (!obd) {
CERROR("invalid client cookie "LPX64"\n",
exp->exp_handle.h_cookie);
int tot_bytes = 0;
int rc = 0;
int i, left;
- ENTRY;
obd = export->exp_obd;
if (obd == NULL)
struct obd_device *obd;
int pgs = 0;
int i;
- ENTRY;
obd = export->exp_obd;
if (obd == NULL)
__u64 lock_flags = 0;
struct ldlm_res_id res_id = {.name = {1}};
char ns_name[48];
- ENTRY;
obd->u.echo.eo_obt.obt_magic = OBT_MAGIC;
spin_lock_init(&obd->u.echo.eo_lock);
static int echo_cleanup(struct obd_device *obd)
{
int leaked;
- ENTRY;
lprocfs_obd_cleanup(obd);
lprocfs_free_obd_stats(obd);
struct echo_page *ep = cl2echo_page(slice);
struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
struct page *vmpage = ep->ep_vmpage;
- ENTRY;
atomic_dec(&eco->eo_npages);
page_cache_release(vmpage);
{
struct echo_page *ep = cl_object_page_slice(obj, page);
struct echo_object *eco = cl2echo_obj(obj);
- ENTRY;
ep->ep_vmpage = vmpage;
page_cache_get(vmpage);
const struct cl_io *unused)
{
struct echo_lock *el;
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(el, echo_lock_kmem, __GFP_IO);
if (el != NULL) {
struct echo_device *ed = cl2echo_dev(lu2cl_dev(obj->lo_dev));
struct echo_client_obd *ec = ed->ed_ec;
struct echo_object *eco = cl2echo_obj(lu2cl(obj));
- ENTRY;
if (ed->ed_next) {
struct lu_object *below;
{
int lsm_size;
- ENTRY;
-
/* If export is lov/osc then use their obd method */
if (ed->ed_next != NULL)
return obd_alloc_memmd(ed->ed_ec->ec_exp, lsmp);
{
int lsm_size;
- ENTRY;
-
/* If export is lov/osc then use their obd method */
if (ed->ed_next != NULL)
return obd_free_memmd(ed->ed_ec->ec_exp, lsmp);
{
struct echo_object *eco = cl2echo_obj(lu2cl(obj));
struct echo_client_obd *ec = eco->eo_dev->ed_ec;
- ENTRY;
LASSERT(atomic_read(&eco->eo_npages) == 0);
{
struct echo_object *eco;
struct lu_object *obj = NULL;
- ENTRY;
/* we're the top dev. */
LASSERT(hdr == NULL);
{
char *prefix;
int rc;
- ENTRY;
OBD_ALLOC_PTR(ed->ed_cl_seq);
if (ed->ed_cl_seq == NULL)
static int echo_fid_fini(struct obd_device *obddev)
{
struct echo_device *ed = obd2echo_dev(obddev);
- ENTRY;
if (ed->ed_cl_seq != NULL) {
seq_client_fini(ed->ed_cl_seq);
const char *tgt_type_name;
int rc;
int cleanup = 0;
- ENTRY;
OBD_ALLOC_PTR(ed);
if (ed == NULL)
struct lu_fid *fid;
int refcheck;
int rc;
- ENTRY;
LASSERT(lsmp);
lsm = *lsmp;
struct lu_env *env;
struct cl_object *obj = echo_obj2cl(eco);
int refcheck;
- ENTRY;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
struct cl_lock_descr *descr;
struct echo_thread_info *info;
int rc = -ENOMEM;
- ENTRY;
info = echo_env_info(env);
io = &info->eti_io;
struct cl_io *io;
int refcheck;
int result;
- ENTRY;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
struct echo_lock *ecl = NULL;
struct list_head *el;
int found = 0, still_used = 0;
- ENTRY;
LASSERT(ec != NULL);
spin_lock(&ec->ec_lock);
struct lu_env *env;
int refcheck;
int rc;
- ENTRY;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
struct cl_page *clp;
struct cl_page *temp;
int result = 0;
- ENTRY;
cl_page_list_for_each_safe(clp, temp, &queue->c2_qin) {
int rc;
int refcheck;
int rc;
int i;
- ENTRY;
LASSERT((offset & ~CFS_PAGE_MASK) == 0);
LASSERT(ed->ed_next != NULL);
struct echo_thread_info *info = echo_env_info(env);
int rc;
- ENTRY;
-
LASSERT(ma->ma_lmm_size > 0);
rc = mo_xattr_get(env, o, &LU_BUF_NULL, XATTR_NAME_LOV);
int need = ma->ma_need;
int rc = 0, rc2;
- ENTRY;
-
ma->ma_valid = 0;
if (need & MA_INODE) {
struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
int rc;
- ENTRY;
-
rc = mdo_lookup(env, parent, lname, fid2, spec);
if (rc == 0)
return -EEXIST;
int rc = 0;
int i;
- ENTRY;
-
if (ec_parent == NULL)
return -1;
parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
struct lu_fid *fid = &info->eti_fid;
struct lu_object *child;
int rc;
- ENTRY;
CDEBUG(D_INFO, "lookup %s in parent "DFID" %p\n", lname->ln_name,
PFID(fid), parent);
int rc = 0;
int i;
- ENTRY;
-
if (ec_parent == NULL)
return -1;
parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
int rc = 0;
int i;
- ENTRY;
-
if (ec_parent == NULL)
return -1;
parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
struct lu_object *child;
int rc;
- ENTRY;
-
ec_child = echo_md_lookup(env, ed, parent, lname);
if (IS_ERR(ec_child)) {
CERROR("Can't find child %s: rc = %ld\n", lname->ln_name,
struct lu_object *parent;
int rc = 0;
int i;
- ENTRY;
parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
if (parent == NULL)
struct lu_object *parent = NULL;
struct lu_object *child = NULL;
int rc = 0;
- ENTRY;
/* Only support the MDD layer right now */
rc = md->md_ops->mdo_root_get(env, md, fid);
char *name = NULL;
int namelen = data->ioc_plen2;
int rc = 0;
- ENTRY;
if (ld == NULL) {
CERROR("MD echo client is not being initialized properly\n");
struct lov_stripe_md *lsm = NULL;
int rc;
int created = 0;
- ENTRY;
if ((oa->o_valid & OBD_MD_FLID) == 0 && /* no obj id */
(on_target || /* set_stripe */
struct lov_stripe_md *lsm = NULL;
struct echo_object *eco;
int rc;
- ENTRY;
if ((oa->o_valid & OBD_MD_FLID) == 0 || ostid_id(&oa->o_oi) == 0) {
/* disallow use of object id 0 */
int verify;
int gfp_mask;
int brw_flags = 0;
- ENTRY;
verify = (ostid_id(&oa->o_oi) != ECHO_PERSISTENT_OBJID &&
(oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
obd_size npages, tot_pages;
int i, ret = 0, brw_flags = 0;
- ENTRY;
-
if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0 ||
(lsm != NULL && ostid_id(&lsm->lsm_oi) != ostid_id(&oa->o_oi)))
RETURN(-EINVAL);
int rc;
int async = 1;
long test_mode;
- ENTRY;
LASSERT(oa->o_valid & OBD_MD_FLGROUP);
struct echo_object *eco;
obd_off end;
int rc;
- ENTRY;
if (ed->ed_next == NULL)
RETURN(-EOPNOTSUPP);
int rw = OBD_BRW_READ;
int rc = 0;
int i;
- ENTRY;
memset(&dummy_oti, 0, sizeof(dummy_oti));
struct obd_uuid echo_uuid = { "ECHO_UUID" };
struct obd_connect_data *ocd = NULL;
int rc;
- ENTRY;
if (lcfg->lcfg_bufcount < 2 || LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
CERROR("requires a TARGET OBD name\n");
struct echo_device *ed = obd2echo_dev(obddev);
struct echo_client_obd *ec = &obddev->u.echo_client;
int rc;
- ENTRY;
/* Do nothing for the metadata echo client */
if (ed == NULL)
int rc;
struct lustre_handle conn = { 0 };
- ENTRY;
rc = class_connect(&conn, src, cluuid);
if (rc == 0) {
*exp = class_conn2export(&conn);
struct ec_lock *ecl;
#endif
int rc;
- ENTRY;
if (exp == NULL)
GOTO(out, rc = -EINVAL);
struct lprocfs_static_vars lvars;
int rc;
- ENTRY;
LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
{
struct osc_object *obj = ext->oe_obj;
int rc = 0;
- ENTRY;
LASSERT(atomic_read(&ext->oe_users) > 0);
LASSERT(sanity_check(ext) == 0);
int ppc_bits; /* pages per chunk bits */
int chunk_mask;
int rc;
- ENTRY;
cur = osc_extent_alloc(obj);
if (cur == NULL)
int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
__u64 last_off = 0;
int last_count = -1;
- ENTRY;
OSC_EXTENT_DUMP(D_CACHE, ext, "extent finished.\n");
struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
LWI_ON_SIGNAL_NOOP, NULL);
int rc = 0;
- ENTRY;
osc_object_lock(obj);
LASSERT(sanity_check_nolock(ext) == 0);
int grants = 0;
int nr_pages = 0;
int rc = 0;
- ENTRY;
LASSERT(sanity_check(ext) == 0);
LASSERT(ext->oe_state == OES_TRUNC);
struct osc_object *obj = ext->oe_obj;
int page_count = 0;
int rc;
- ENTRY;
/* we're going to grab page lock, so object lock must not be taken. */
LASSERT(sanity_check(ext) == 0);
pgoff_t end_index;
int chunksize = 1 << cli->cl_chunkbits;
int rc = 0;
- ENTRY;
LASSERT(ext->oe_max_end >= index && ext->oe_start <= index);
osc_object_lock(obj);
LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
- ENTRY;
result = cl_page_make_ready(env, page, CRT_WRITE);
if (result == 0)
opg->ops_submit_time = cfs_time_current();
enum cl_req_type crt;
int srvlock;
- ENTRY;
-
cmd &= ~OBD_BRW_NOQUOTA;
LASSERT(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ));
LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
static void osc_release_write_grant(struct client_obd *cli,
struct brw_page *pga)
{
- ENTRY;
-
LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
EXIT;
struct osc_cache_waiter ocw;
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
int rc = -EDQUOT;
- ENTRY;
OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);
struct list_head *l, *tmp;
struct osc_cache_waiter *ocw;
- ENTRY;
list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
list_del_init(&ocw->ocw_entry);
int cmd)
{
int invalid_import = 0;
- ENTRY;
/* if we have an invalid import we want to drain the queued pages
* by forcing them through rpcs that immediately fail and complete
struct lov_oinfo *loi = osc->oo_oinfo;
__u64 xid = 0;
- ENTRY;
if (oap->oap_request != NULL) {
xid = ptlrpc_req_xid(oap->oap_request);
ptlrpc_req_finished(oap->oap_request);
int *pc, unsigned int *max_pages)
{
struct osc_extent *tmp;
- ENTRY;
EASSERT((ext->oe_state == OES_CACHE || ext->oe_state == OES_LOCK_DONE),
ext);
obd_count page_count = 0;
int srvlock = 0;
int rc = 0;
- ENTRY;
LASSERT(osc_object_is_locked(osc));
int page_count = 0;
unsigned int max_pages = cli->cl_max_pages_per_rpc;
int rc = 0;
- ENTRY;
LASSERT(osc_object_is_locked(osc));
list_for_each_entry_safe(ext, next,
* we could be sending. These lists are maintained by osc_makes_rpc(). */
static struct osc_object *osc_next_obj(struct client_obd *cli)
{
- ENTRY;
-
/* First return objects that have blocked locks so that they
* will be flushed quickly and other clients can get the lock,
* then objects which have pages ready to be stuffed into RPCs */
{
struct osc_object *osc;
int rc = 0;
- ENTRY;
while ((osc = osc_next_obj(cli)) != NULL) {
struct cl_object *obj = osc2cl(osc);
{
struct obd_export *exp = osc_export(osc);
struct osc_async_page *oap = &ops->ops_oap;
- ENTRY;
if (!page)
return cfs_size_round(sizeof(*oap));
int cmd = OBD_BRW_WRITE;
int need_release = 0;
int rc = 0;
- ENTRY;
if (oap->oap_magic != OAP_MAGIC)
RETURN(-EINVAL);
struct osc_async_page *oap = &ops->ops_oap;
struct osc_extent *ext = NULL;
int rc = 0;
- ENTRY;
LASSERT(oap->oap_magic == OAP_MAGIC);
struct osc_async_page *oap = &ops->ops_oap;
bool unplug = false;
int rc = 0;
- ENTRY;
osc_object_lock(obj);
ext = osc_extent_lookup(obj, index);
pgoff_t index = oap2cl_page(oap)->cp_index;
int rc = -EBUSY;
int cmd;
- ENTRY;
LASSERT(!oap->oap_interrupted);
oap->oap_interrupted = 1;
int mppr = cli->cl_max_pages_per_rpc;
pgoff_t start = CL_PAGE_EOF;
pgoff_t end = 0;
- ENTRY;
list_for_each_entry(oap, list, oap_pending_item) {
struct cl_page *cp = oap2cl_page(oap);
LIST_HEAD(list);
int result = 0;
bool partial;
- ENTRY;
/* pages whose index is greater than or equal to 'index' will be truncated. */
index = cl_index(osc2cl(obj), size);
struct osc_extent *ext;
pgoff_t index = start;
int result = 0;
- ENTRY;
again:
osc_object_lock(obj);
LIST_HEAD(discard_list);
bool unplug = false;
int result = 0;
- ENTRY;
osc_object_lock(obj);
ext = osc_extent_search(obj, start);
static int osc_cl_process_config(const struct lu_env *env,
struct lu_device *d, struct lustre_cfg *cfg)
{
- ENTRY;
RETURN(osc_process_config_base(d->ld_obd, cfg));
}
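/*
 * Illustrative aside, not part of this patch: hunks like osc_cl_process_config()
 * above are the common shape of this cleanup.  ENTRY only emitted a D_TRACE
 * entry line, and the exit path is untouched because RETURN() carries its own
 * trace.  A minimal sketch of that macro, assuming the usual libcfs definition
 * (the in-tree format string and temporaries differ):
 */
#define RETURN_SKETCH(rc)						\
do {									\
	typeof(rc) __rc = (rc);						\
	CDEBUG(D_TRACE, "Process leaving (rc=%lu)\n", (long)__rc);	\
	return __rc;							\
} while (0)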
struct obd_import *imp = class_exp2cliimp(dev->od_exp);
struct osc_io *oio = cl2osc_io(env, ios);
int result = 0;
- ENTRY;
/*
* This implements OBD_BRW_CHECK logic from old client.
struct osc_page *opg = cl2osc_page(slice);
struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
struct osc_async_page *oap = &opg->ops_oap;
- ENTRY;
LASSERT(to > 0);
/*
struct cl_io *io;
struct cl_fault_io *fio;
- ENTRY;
-
io = ios->cis_io;
fio = &io->u.ci_fault;
CDEBUG(D_INFO, "%lu %d %d\n",
struct cl_object *obj = slice->cis_obj;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
int result = 0;
- ENTRY;
if (oio->oi_lockless == 0) {
cl_object_attr_lock(obj);
struct cl_object *obj = slice->cis_obj;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
int result = 0;
- ENTRY;
if (oio->oi_lockless == 0) {
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
struct lov_oinfo *loi = obj->oo_oinfo;
struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
int rc = 0;
- ENTRY;
memset(oa, 0, sizeof(*oa));
oa->o_oi = loi->loi_oi;
pgoff_t start = cl_index(obj, fio->fi_start);
pgoff_t end = cl_index(obj, fio->fi_end);
int result = 0;
- ENTRY;
if (fio->fi_end == OBD_OBJECT_EOF)
end = CL_PAGE_EOF;
struct cl_attr *attr;
unsigned valid;
- ENTRY;
-
if (!(olck->ols_flags & LDLM_FL_LVB_READY))
RETURN_EXIT;
LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
- ENTRY;
if (olck->ols_state < OLS_GRANTED) {
lock = olck->ols_cl.cls_lock;
ext = &dlmlock->l_policy_data.l_extent;
{
struct ldlm_lock *dlmlock;
- ENTRY;
-
dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
LASSERT(dlmlock != NULL);
struct lu_env *env;
struct cl_env_nest nest;
- ENTRY;
env = cl_env_nested_get(&nest);
if (!IS_ERR(env)) {
int rc;
struct cl_lock *conflict = NULL;
int lockless = osc_lock_is_lockless(olck);
int rc = 0;
- ENTRY;
LASSERT(cl_lock_is_mutexed(lock));
struct osc_lock *ols = cl2osc_lock(slice);
struct cl_lock *lock = ols->ols_cl.cls_lock;
int result;
- ENTRY;
LASSERT(cl_lock_is_mutexed(lock));
LASSERTF(ols->ols_state == OLS_NEW,
struct cl_env_nest nest;
struct lu_env *env;
int result = 0;
- ENTRY;
env = cl_env_nested_get(&nest);
if (!IS_ERR(env)) {
{
struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;
- ENTRY;
lvb->lvb_size = oinfo->loi_kms;
lvb->lvb_blocks = oinfo->loi_lvb.lvb_blocks;
RETURN(0);
struct osc_io *oio = osc_env_io(env);
struct osc_page *opg = cl2osc_page(slice);
int result;
- ENTRY;
LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
struct cl_lock *lock;
int result = -ENODATA;
- ENTRY;
lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
NULL, 1, 0);
if (lock != NULL) {
LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));
- ENTRY;
CDEBUG(D_TRACE, "%p\n", opg);
osc_page_transfer_put(env, opg);
rc = osc_teardown_async_page(env, obj, opg);
{
struct osc_page *opg = cl2osc_page(slice);
int rc = 0;
- ENTRY;
+
rc = osc_flush_async_page(env, io, opg);
RETURN(rc);
}
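/*
 * Note on the lone '+' line above (illustrative comment, not in the
 * original): where ENTRY was the only separator between the declarations
 * and the first statement, the patch adds back an empty line so the
 * customary blank gap survives:
 *
 *	int rc = 0;
 *
 *	rc = osc_flush_async_page(env, io, opg);
 */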
int count = 0;
int index = 0;
int rc = 0;
- ENTRY;
LASSERT(atomic_read(&cli->cl_lru_in_list) >= 0);
if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
struct client_obd *cli = osc_cli(obj);
int rc = 0;
- ENTRY;
if (cli->cl_cache == NULL) /* shall not be in LRU */
RETURN(0);
int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[])
{
int type;
- ENTRY;
for (type = 0; type < MAXQUOTAS; type++) {
struct osc_quota_info *oqi;
{
int type;
int rc = 0;
- ENTRY;
if ((valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) == 0)
RETURN(0);
{
struct client_obd *cli = &obd->u.cli;
int i, type;
- ENTRY;
for (type = 0; type < MAXQUOTAS; type++) {
cli->cl_quota_hash[type] = cfs_hash_create("QUOTA_HASH",
{
struct client_obd *cli = &obd->u.cli;
int type;
- ENTRY;
for (type = 0; type < MAXQUOTAS; type++)
cfs_hash_putref(cli->cl_quota_hash[type]);
struct ptlrpc_request *req;
struct obd_quotactl *oqc;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_OST_QUOTACTL, LUSTRE_OST_VERSION,
struct ptlrpc_request *req;
struct obd_quotactl *body;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION,
{
struct client_obd *cli = &exp->exp_obd->u.cli;
int rc;
- ENTRY;
qchk->obd_uuid = cli->cl_target_uuid;
memcpy(qchk->obd_type, LUSTRE_OST_NAME, strlen(LUSTRE_OST_NAME));
struct lov_stripe_md *lsm)
{
int lmm_size;
- ENTRY;
lmm_size = sizeof(**lmmp);
if (lmmp == NULL)
{
int lsm_size;
struct obd_import *imp = class_exp2cliimp(exp);
- ENTRY;
if (lmm != NULL) {
if (lmm_bytes < sizeof(*lmm)) {
struct osc_async_args *aa, int rc)
{
struct ost_body *body;
- ENTRY;
if (rc != 0)
GOTO(out, rc);
struct ptlrpc_request *req;
struct osc_async_args *aa;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
if (req == NULL)
struct ptlrpc_request *req;
struct ost_body *body;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
if (req == NULL)
struct ptlrpc_request *req;
struct ost_body *body;
int rc;
- ENTRY;
LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);
struct osc_setattr_args *sa, int rc)
{
struct ost_body *body;
- ENTRY;
if (rc != 0)
GOTO(out, rc);
struct ptlrpc_request *req;
struct osc_setattr_args *sa;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
if (req == NULL)
struct ost_body *body;
struct lov_stripe_md *lsm;
int rc;
- ENTRY;
LASSERT(oa);
LASSERT(ea);
struct osc_setattr_args *sa;
struct ost_body *body;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
if (req == NULL)
{
struct osc_fsync_args *fa = arg;
struct ost_body *body;
- ENTRY;
if (rc)
GOTO(out, rc);
struct ost_body *body;
struct osc_fsync_args *fa;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
if (req == NULL)
struct obd_info *oinfo, obd_size start, obd_size end,
struct ptlrpc_request_set *set)
{
- ENTRY;
-
if (!oinfo->oi_oa) {
CDEBUG(D_INFO, "oa NULL\n");
RETURN(-EINVAL);
struct ldlm_res_id res_id;
struct ldlm_resource *res;
int count;
- ENTRY;
/* Return, i.e. cancel nothing, only if ELC is supported (flag in
* export) but disabled through procfs (flag in NS).
struct obd_trans_info *oti)
{
int rc = 0;
- ENTRY;
LASSERT(oa);
LASSERT(ea);
struct ost_body *body;
LIST_HEAD(cancels);
int rc, count;
- ENTRY;
if (!oa) {
CDEBUG(D_INFO, "oa NULL\n");
{
int rc = 0;
struct ost_body *body;
- ENTRY;
client_obd_list_lock(&cli->cl_loi_list_lock);
/* Don't shrink if we are already above or below the desired limit
struct req_capsule *pill;
struct brw_page *pg_prev;
- ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
RETURN(-ENOMEM); /* Recoverable */
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
struct client_obd *cli = aa->aa_cli;
struct ost_body *body;
__u32 client_cksum = 0;
- ENTRY;
if (rc < 0 && rc != -EDQUOT) {
DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc);
int generation, resends = 0;
struct l_wait_info lwi;
- ENTRY;
-
init_waitqueue_head(&waitq);
generation = exp->exp_obd->u.cli.cl_import->imp_generation;
struct ptlrpc_request *new_req;
struct osc_brw_async_args *new_aa;
struct osc_async_page *oap;
- ENTRY;
DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
"redo for recoverable error %d", rc);
struct obd_import *imp = class_exp2cliimp(exp);
struct client_obd *cli;
int rc, page_count_orig;
- ENTRY;
LASSERT((imp != NULL) && (imp->imp_obd != NULL));
cli = &imp->imp_obd->u.cli;
struct osc_extent *tmp;
struct cl_object *obj = NULL;
struct client_obd *cli = aa->aa_cli;
- ENTRY;
rc = osc_brw_fini_request(req, rc);
CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
int rc;
LIST_HEAD(rpc_list);
- ENTRY;
LASSERT(!list_empty(ext_list));
/* add pages into rpc_list to build BRW rpc */
__u64 *flags, int agl, int rc)
{
int intent = *flags & LDLM_FL_HAS_INTENT;
- ENTRY;
if (intent) {
/* The request was created before ldlm_cli_enqueue call. */
int match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY);
ldlm_mode_t mode;
int rc;
- ENTRY;
/* Filesystem lock extents are extended to page boundaries so that
* dealing with the page cache is a little smoother. */
{
struct ldlm_res_id res_id;
int rc;
- ENTRY;
ostid_build_res_name(&oinfo->oi_md->lsm_oi, &res_id);
rc = osc_enqueue_base(exp, &res_id, &oinfo->oi_flags, &oinfo->oi_policy,
struct obd_device *obd = exp->exp_obd;
int lflags = *flags;
ldlm_mode_t rc;
- ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
RETURN(-EIO);
int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
{
- ENTRY;
-
if (unlikely(mode == LCK_GROUP))
ldlm_lock_decref_and_cancel(lockh, mode);
else
static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
__u32 mode, struct lustre_handle *lockh)
{
- ENTRY;
RETURN(osc_cancel_base(lockh, mode));
}
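/*
 * For reference (a sketch, not part of the patch; the _sketch suffix
 * marks the name as illustrative): once ENTRY is dropped, a trivial
 * wrapper like osc_cancel() above reduces to a tail call whose exit is
 * still traced by RETURN():
 */
static int osc_cancel_sketch(struct obd_export *exp, struct lov_stripe_md *md,
			     __u32 mode, struct lustre_handle *lockh)
{
	RETURN(osc_cancel_base(lockh, mode));
}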
struct osc_async_args *aa, int rc)
{
struct obd_statfs *msfs;
- ENTRY;
if (rc == -EBADR)
/* The request has in fact never been sent
struct ptlrpc_request *req;
struct osc_async_args *aa;
int rc;
- ENTRY;
/* We could possibly pass max_age in the request (as an absolute
* timestamp or a "seconds.usec ago") so the target can avoid doing
struct ptlrpc_request *req;
struct obd_import *imp = NULL;
int rc;
- ENTRY;
/* Since the request might also come from lprocfs, we need to
* sync this with client_disconnect_export() (bug 15684) */
struct lov_user_md_v3 lum, *lumk;
struct lov_user_ost_data_v1 *lmm_objects;
int rc = 0, lum_size;
- ENTRY;
if (!lsm)
RETURN(-ENODATA);
struct obd_device *obd = exp->exp_obd;
struct obd_ioctl_data *data = karg;
int err = 0;
- ENTRY;
if (!try_module_get(THIS_MODULE)) {
CERROR("Can't get module. Is it alive?");
obd_count keylen, void *key, __u32 *vallen, void *val,
struct lov_stripe_md *lsm)
{
- ENTRY;
if (!vallen || !val)
RETURN(-EFAULT);
struct obd_import *imp = class_exp2cliimp(exp);
char *tmp;
int rc;
- ENTRY;
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
{
struct llog_ctxt *ctxt;
- ENTRY;
-
ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
if (ctxt) {
llog_cat_close(NULL, ctxt->loc_handle);
struct client_obd *cli;
int rc = 0;
- ENTRY;
LASSERT(imp->imp_obd == obd);
switch (event) {
struct client_obd *cli = &obd->u.cli;
void *handler;
int rc;
- ENTRY;
rc = ptlrpcd_addref();
if (rc)
static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
int rc = 0;
- ENTRY;
switch (stage) {
case OBD_CLEANUP_EARLY: {
struct client_obd *cli = &obd->u.cli;
int rc;
- ENTRY;
-
/* lru cleanup */
if (cli->cl_cache != NULL) {
LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
{
struct lprocfs_static_vars lvars = { 0 };
int rc;
- ENTRY;
/* print an address of _any_ initialized kernel symbol from this
* module, to allow debugging with gdb that doesn't support data
struct obd_import *imp = req->rq_import;
struct ptlrpc_bulk_desc *desc;
- ENTRY;
LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
if (desc == NULL)
void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
{
int i;
- ENTRY;
LASSERT(desc != NULL);
LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
struct ptlrpc_request *early_req;
time_t olddl;
int rc;
- ENTRY;
req->rq_early = 0;
spin_unlock(&req->rq_lock);
{
struct obd_import *imp = request->rq_import;
int rc;
- ENTRY;
if (unlikely(ctx))
request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
{
struct ptlrpc_request_set *set;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (!set)
RETURN(NULL);
struct list_head *next;
int expected_phase;
int n = 0;
- ENTRY;
/* Requests on the set should either all be completed, or all be new */
expected_phase = (atomic_read(&set->set_remaining) == 0) ?
struct ptlrpc_request *req, int *status)
{
int delay = 0;
- ENTRY;
LASSERT (status != NULL);
*status = 0;
static int ptlrpc_check_status(struct ptlrpc_request *req)
{
int err;
- ENTRY;
err = lustre_msg_get_status(req->rq_repmsg);
if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
struct lustre_msg *repmsg = req->rq_repmsg;
struct lustre_msg *reqmsg = req->rq_reqmsg;
__u64 *versions = lustre_msg_get_versions(repmsg);
- ENTRY;
if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
return;
int rc;
struct timeval work_start;
long timediff;
- ENTRY;
LASSERT(obd != NULL);
/* repbuf must be unlinked */
{
struct obd_import *imp = req->rq_import;
int rc;
- ENTRY;
LASSERT(req->rq_phase == RQ_PHASE_NEW);
if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
{
int remaining, rc;
- ENTRY;
LASSERT(set->set_producer != NULL);
{
struct list_head *tmp, *next;
int force_timer_recalc = 0;
- ENTRY;
if (atomic_read(&set->set_remaining) == 0)
RETURN(1);
{
struct obd_import *imp = req->rq_import;
int rc = 0;
- ENTRY;
spin_lock(&req->rq_lock);
req->rq_timedout = 1;
struct ptlrpc_request_set *set = data;
struct list_head *tmp;
time_t now = cfs_time_current_sec();
- ENTRY;
LASSERT(set != NULL);
int timeout = 0;
struct ptlrpc_request *req;
int deadline;
- ENTRY;
SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
struct ptlrpc_request *req;
struct l_wait_info lwi;
int rc, timeout;
- ENTRY;
if (set->set_producer)
(void)ptlrpc_set_producer(set);
*/
static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
- ENTRY;
if (request == NULL) {
EXIT;
return;
*/
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
{
- ENTRY;
if (request == NULL)
RETURN(1);
struct list_head *tmp, *saved;
struct ptlrpc_request *req;
struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
- ENTRY;
LASSERT(imp != NULL);
void ptlrpc_cleanup_client(struct obd_import *imp)
{
- ENTRY;
EXIT;
return;
}
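/*
 * Resulting shape (a sketch, not part of the patch; the _sketch suffix
 * marks the name as illustrative): ptlrpc_cleanup_client() above keeps
 * only its exit trace and is effectively a no-op retained for API
 * symmetry:
 */
void ptlrpc_cleanup_client_sketch(struct obd_import *imp)
{
	EXIT;
	return;
}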
*/
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
{
- ENTRY;
atomic_inc(&req->rq_refcount);
RETURN(req);
}
{
struct ptlrpc_request_set *set;
int rc;
- ENTRY;
LASSERT(req->rq_set == NULL);
LASSERT(!req->rq_receiving_reply);
struct ptlrpc_replay_async_args *aa = data;
struct obd_import *imp = req->rq_import;
- ENTRY;
atomic_dec(&imp->imp_replay_inflight);
if (!ptlrpc_client_replied(req)) {
int ptlrpc_replay_req(struct ptlrpc_request *req)
{
struct ptlrpc_replay_async_args *aa;
- ENTRY;
LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
void ptlrpc_abort_inflight(struct obd_import *imp)
{
struct list_head *tmp, *n;
- ENTRY;
/* Make sure that no new requests get processed for this import.
* ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
{
struct ptlrpc_request *req = NULL;
struct ptlrpc_work_async_args *args;
- ENTRY;
might_sleep();
struct obd_uuid *uuid)
{
struct ptlrpc_connection *conn, *conn2;
- ENTRY;
conn = cfs_hash_lookup(conn_hash, &peer);
if (conn)
int ptlrpc_connection_put(struct ptlrpc_connection *conn)
{
int rc = 0;
- ENTRY;
if (!conn)
RETURN(rc);
struct ptlrpc_connection *
ptlrpc_connection_addref(struct ptlrpc_connection *conn)
{
- ENTRY;
-
atomic_inc(&conn->c_refcount);
CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
conn, atomic_read(&conn->c_refcount),
int ptlrpc_connection_init(void)
{
- ENTRY;
-
conn_hash = cfs_hash_create("CONN_HASH",
HASH_CONN_CUR_BITS,
HASH_CONN_MAX_BITS,
EXPORT_SYMBOL(ptlrpc_connection_init);
void ptlrpc_connection_fini(void)
{
- ENTRY;
cfs_hash_putref(conn_hash);
EXIT;
}
{
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_request *req = cbid->cbid_arg;
- ENTRY;
LASSERT (ev->type == LNET_EVENT_SEND ||
ev->type == LNET_EVENT_UNLINK);
{
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_request *req = cbid->cbid_arg;
- ENTRY;
DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
struct ptlrpc_request *req;
- ENTRY;
LASSERT ((desc->bd_type == BULK_PUT_SINK &&
ev->type == LNET_EVENT_PUT) ||
struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
struct ptlrpc_service *service = svcpt->scp_service;
struct ptlrpc_request *req;
- ENTRY;
LASSERT (ev->type == LNET_EVENT_PUT ||
ev->type == LNET_EVENT_UNLINK);
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_reply_state *rs = cbid->cbid_arg;
struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
- ENTRY;
LASSERT (ev->type == LNET_EVENT_SEND ||
ev->type == LNET_EVENT_ACK ||
__u32 maj;
int offset;
int rc;
- ENTRY;
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_bulk_read || req->rq_bulk_write);
rawobj_t token;
__u32 maj;
int roff, voff;
- ENTRY;
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_bulk_read || req->rq_bulk_write);
struct ptlrpc_bulk_desc *desc)
{
int rc;
- ENTRY;
LASSERT(req->rq_cli_ctx);
LASSERT(req->rq_pack_bulk);
struct gss_svc_reqctx *grctx;
struct ptlrpc_bulk_sec_desc *bsd;
int rc;
- ENTRY;
LASSERT(req->rq_svc_ctx);
LASSERT(req->rq_pack_bulk);
struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
rawobj_t token;
__u32 maj;
- ENTRY;
LASSERT(req->rq_svc_ctx);
LASSERT(req->rq_pack_bulk);
rawobj_t token;
__u32 maj;
int rc;
- ENTRY;
LASSERT(req->rq_svc_ctx);
LASSERT(req->rq_pack_bulk);
struct ptlrpc_request *req;
struct ptlrpc_user_desc *pud;
int rc;
- ENTRY;
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
struct ptlrpc_cli_ctx *ctx;
cfs_time_t now;
- ENTRY;
LASSERT(sec_is_reverse(sec));
struct sptlrpc_flavor *sf)
{
struct gss_sec_keyring *gsec_kr;
- ENTRY;
OBD_ALLOC(gsec_kr, sizeof(*gsec_kr));
if (gsec_kr == NULL)
char *coinfo;
int coinfo_size;
char *co_flags = "";
- ENTRY;
LASSERT(imp != NULL);
struct hlist_head freelist = HLIST_HEAD_INIT;
struct hlist_node *next;
struct ptlrpc_cli_ctx *ctx;
- ENTRY;
gsec_kr = sec2gsec_keyring(sec);
int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec,
uid_t uid, int grace, int force)
{
- ENTRY;
-
CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n",
sec, atomic_read(&sec->ps_refcount),
atomic_read(&sec->ps_nctx),
struct hlist_head freelist = HLIST_HEAD_INIT;
struct hlist_node *next;
struct ptlrpc_cli_ctx *ctx;
- ENTRY;
CWARN("running gc\n");
struct ptlrpc_cli_ctx *ctx;
struct gss_cli_ctx *gctx;
time_t now = cfs_time_current_sec();
- ENTRY;
spin_lock(&sec->ps_lock);
hlist_for_each_entry_safe(ctx, next,
int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
{
int rc;
- ENTRY;
if (data != NULL || datalen != 0) {
CERROR("invalid: data %p, len %lu\n", data, (long)datalen);
rawobj_t tmpobj = RAWOBJ_EMPTY;
__u32 datalen32 = (__u32) datalen;
int rc;
- ENTRY;
if (data == NULL || datalen == 0) {
CWARN("invalid: data %p, len %lu\n", data, (long)datalen);
static
void gss_kt_destroy(struct key *key)
{
- ENTRY;
LASSERT(key->payload.data == NULL);
CDEBUG(D_SEC, "destroy key %p\n", key);
EXIT;
__u8 local_iv[16] = {0}, *buf;
__u32 datalen = 0;
int i, rc;
- ENTRY;
buf = outobj->data;
desc.tfm = tfm;
struct hlist_node *next;
HLIST_HEAD(freelist);
unsigned int hash;
- ENTRY;
gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
struct vfs_cred vcred;
struct ptlrpc_cli_ctx *cli_ctx;
int rc;
- ENTRY;
vcred.vc_uid = 0;
vcred.vc_gid = 0;
struct ptlrpc_cli_ctx *ctx;
struct hlist_node *next;
int i;
- ENTRY;
sec = &gsec_pf->gsp_base.gs_base;
{
struct gss_sec_pipefs *gsec_pf;
int alloc_size, hash_size, i;
- ENTRY;
#define GSS_SEC_PIPEFS_CTX_HASH_SIZE (32)
struct hlist_node *next;
HLIST_HEAD(freelist);
unsigned int hash, gc = 0, found = 0;
- ENTRY;
might_sleep();
struct hlist_node *next;
HLIST_HEAD(freelist);
int i, busy = 0;
- ENTRY;
might_sleep_if(grace);
static
void gss_release_msg(struct gss_upcall_msg *gmsg)
{
- ENTRY;
LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
if (!atomic_dec_and_test(&gmsg->gum_refcount)) {
char *data = (char *)msg->data + msg->copied;
ssize_t mlen = msg->len;
ssize_t left;
- ENTRY;
if (mlen > buflen)
mlen = buflen;
int datalen;
int timeout, rc;
__u32 mechidx, seq, gss_err;
- ENTRY;
mechidx = (__u32) (long) rpci->private;
LASSERT(mechidx < MECH_MAX);
struct gss_upcall_msg *gmsg;
struct gss_upcall_msg_data *gumd;
static cfs_time_t ratelimit = 0;
- ENTRY;
LASSERT(list_empty(&msg->list));
{
struct rpc_inode *rpci = RPC_I(inode);
__u32 idx;
- ENTRY;
idx = (__u32) (long) rpci->private;
LASSERT(idx < MECH_MAX);
struct gss_sec *gsec;
struct gss_upcall_msg *gmsg;
int rc = 0;
- ENTRY;
might_sleep();
struct rsi rsii, *rsip = NULL;
time_t expiry;
int status = -EINVAL;
- ENTRY;
-
memset(&rsii, 0, sizeof(rsii));
struct cache_head **ch;
struct rsc *rscp;
int n;
- ENTRY;
write_lock(&rsc_cache.hash_lock);
for (n = 0; n < RSC_HASHMAX; n++) {
unsigned long ctx_expiry;
__u32 major;
int rc;
- ENTRY;
memset(&rsci, 0, sizeof(rsci));
struct gss_rep_header *rephdr;
int first_check = 1;
int rc = SECSVC_DROP;
- ENTRY;
memset(&rsikey, 0, sizeof(rsikey));
rsikey.lustre_svc = lustre_svc;
__u8 *clear_buf;
int clear_buflen;
__u32 major;
- ENTRY;
if (msgbuf->lm_bufcount != 2) {
CERROR("invalid bufcount %d\n", msgbuf->lm_bufcount);
struct gss_cli_ctx *gctx = ctx2gctx(ctx);
__u32 flags = 0, seq, svc;
int rc;
- ENTRY;
LASSERT(req->rq_reqbuf);
LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
struct lustre_msg *msg = req->rq_repdata;
__u32 major;
int pack_bulk, swabbed, rc = 0;
- ENTRY;
LASSERT(req->rq_cli_ctx == ctx);
LASSERT(msg);
struct gss_header *ghdr;
__u32 buflens[2], major;
int wiresize, rc;
- ENTRY;
LASSERT(req->rq_clrbuf);
LASSERT(req->rq_cli_ctx == ctx);
struct lustre_msg *msg = req->rq_repdata;
int msglen, pack_bulk, swabbed, rc;
__u32 major;
- ENTRY;
LASSERT(req->rq_cli_ctx == ctx);
LASSERT(req->rq_ctx_init == 0);
void gss_sec_destroy_common(struct gss_sec *gsec)
{
struct ptlrpc_sec *sec = &gsec->gs_base;
- ENTRY;
LASSERT(sec->ps_import);
LASSERT(atomic_read(&sec->ps_refcount) == 0);
int bufsize, txtsize;
int bufcnt = 2;
__u32 buflens[5];
- ENTRY;
/*
* on-wire data layout:
__u32 ibuflens[3], wbuflens[2];
int ibufcnt;
int clearsize, wiresize;
- ENTRY;
LASSERT(req->rq_clrbuf == NULL);
LASSERT(req->rq_clrbuf_len == 0);
struct ptlrpc_request *req)
{
int privacy;
- ENTRY;
LASSERT(!req->rq_pool || req->rq_reqbuf);
privacy = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) == SPTLRPC_SVC_PRIV;
int msgsize)
{
int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
- ENTRY;
LASSERT(!req->rq_pack_bulk ||
(req->rq_bulk_read || req->rq_bulk_write));
{
__u32 flags = 0;
int rc;
- ENTRY;
LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));
struct gss_err_header *ghdr;
int replen = sizeof(struct ptlrpc_body);
int rc;
- ENTRY;
//if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_SVCGSS_ERR_NOTIFY, OBD_FAIL_ONCE))
// RETURN(-EINVAL);
__u32 lustre_svc;
__u32 *secdata, seclen;
int swabbed, rc;
- ENTRY;
CDEBUG(D_SEC, "processing gss init(%d) request from %s\n", gw->gw_proc,
libcfs_nid2str(req->rq_peer.nid));
struct lustre_msg *msg = req->rq_reqbuf;
int offset = 2;
int swabbed;
- ENTRY;
*major = GSS_S_COMPLETE;
struct gss_svc_ctx *gctx = grctx->src_ctx;
struct lustre_msg *msg = req->rq_reqbuf;
int swabbed, msglen, offset = 1;
- ENTRY;
if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
__u32 major = 0;
int rc = 0;
- ENTRY;
grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
if (!grctx->src_ctx) {
{
struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
__u32 major;
- ENTRY;
req->rq_ctx_fini = 1;
req->rq_no_reply = 1;
struct gss_svc_reqctx *grctx;
struct gss_wire_ctx *gw;
int swabbed, rc;
- ENTRY;
LASSERT(req->rq_reqbuf);
LASSERT(req->rq_svc_ctx == NULL);
void gss_svc_invalidate_ctx(struct ptlrpc_svc_ctx *svc_ctx)
{
struct gss_svc_reqctx *grctx;
- ENTRY;
if (svc_ctx == NULL) {
EXIT;
__u32 ibuflens[2], buflens[4];
int ibufcnt = 0, bufcnt;
int txtsize, wmsg_size, rs_size;
- ENTRY;
LASSERT(msglen % 8 == 0);
int token_buflen;
__u32 buflens[2], major;
int msglen, rc;
- ENTRY;
/* get the clear data length; note the embedded lustre_msg might
* have been shrunk */
struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
struct gss_wire_ctx *gw = &grctx->src_wirectx;
int early, rc;
- ENTRY;
early = (req->rq_packed_final == 0);
/* Must be called with imp_lock held! */
static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
{
- ENTRY;
LASSERT(spin_is_locked(&imp->imp_lock));
CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
{
- ENTRY;
-
LASSERT(!imp->imp_dlm_fake);
if (ptlrpc_set_import_discon(imp, conn_cnt)) {
struct obd_export *dlmexp;
char *target_start;
int target_len, tried_all = 1;
- ENTRY;
spin_lock(&imp->imp_lock);
(char *)&imp->imp_connect_data };
struct ptlrpc_connect_async_args *aa;
int rc;
- ENTRY;
spin_lock(&imp->imp_lock);
if (imp->imp_state == LUSTRE_IMP_CLOSED) {
struct obd_connect_data *ocd;
struct obd_export *exp;
int ret;
- ENTRY;
spin_lock(&imp->imp_lock);
if (imp->imp_state == LUSTRE_IMP_CLOSED) {
struct ptlrpc_request *req,
void * data, int rc)
{
- ENTRY;
atomic_dec(&req->rq_import->imp_replay_inflight);
if (req->rq_status == 0 &&
!req->rq_import->imp_vbr_failed) {
static int signal_completed_replay(struct obd_import *imp)
{
struct ptlrpc_request *req;
- ENTRY;
if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_FINISH_REPLAY)))
RETURN(0);
{
struct obd_import *imp = data;
- ENTRY;
-
unshare_fs_struct();
CDEBUG(D_HA, "thread invalidate import %s to %s@%s\n",
char *target_start;
int target_len;
- ENTRY;
if (imp->imp_state == LUSTRE_IMP_EVICTED) {
deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
&target_start, &target_len);
struct ptlrpc_request *req;
int rq_opc, rc = 0;
int nowait = imp->imp_obd->obd_force;
- ENTRY;
if (nowait)
GOTO(set_state, rc);
void ptlrpc_cleanup_imp(struct obd_import *imp)
{
- ENTRY;
-
spin_lock(&imp->imp_lock);
IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
imp->imp_generation++;
struct llog_ctxt *ctxt = lgh->lgh_ctxt;
struct ptlrpc_request *req = NULL;
int rc;
- ENTRY;
LLOG_CLIENT_ENTRY(ctxt, imp);
struct ptlrpc_request *req = NULL;
struct llogd_body *body;
int rc;
- ENTRY;
LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp);
req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_DESTROY,
struct llogd_body *body;
void *ptr;
int rc;
- ENTRY;
LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp);
req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK,
struct llogd_body *body;
void *ptr;
int rc;
- ENTRY;
LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp);
req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK,
struct llog_log_hdr *hdr;
struct llog_rec_hdr *llh_hdr;
int rc;
- ENTRY;
LLOG_CLIENT_ENTRY(handle->lgh_ctxt, imp);
req = ptlrpc_request_alloc_pack(imp,&RQF_LLOG_ORIGIN_HANDLE_READ_HEADER,
int llog_initiator_connect(struct llog_ctxt *ctxt)
{
struct obd_import *new_imp;
- ENTRY;
LASSERT(ctxt);
new_imp = ctxt->loc_obd->u.cli.cl_import;
char *name = NULL;
int rc;
- ENTRY;
-
body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
if (body == NULL)
RETURN(-EFAULT);
struct llog_ctxt *ctxt;
int rc;
- ENTRY;
-
body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
if (body == NULL)
RETURN(-EFAULT);
void *ptr;
int rc;
- ENTRY;
-
body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
if (body == NULL)
RETURN(-EFAULT);
void *ptr;
int rc;
- ENTRY;
-
body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
if (body == NULL)
RETURN(-EFAULT);
__u32 flags;
int rc;
- ENTRY;
-
body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
if (body == NULL)
RETURN(-EFAULT);
int llog_origin_handle_close(struct ptlrpc_request *req)
{
- ENTRY;
/* Nothing to do */
RETURN(0);
}
struct llog_handle *cathandle;
struct inode *inode;
void *handle;
- ENTRY;
logcookies = req_capsule_client_get(&req->rq_pill, &RMF_LOGCOOKIES);
num_cookies = req_capsule_get_size(&req->rq_pill, &RMF_LOGCOOKIES,
bool hp = false;
int i;
int rc = 0;
- ENTRY;
/**
* Serialize NRS core lprocfs operations with policy registration/
char *cmd_copy = NULL;
char *token;
int rc = 0;
- ENTRY;
if (count >= LPROCFS_NRS_WR_MAX_CMD)
GOTO(out, rc = -EINVAL);
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
struct ptlrpc_request *req;
int rc;
- ENTRY;
LPROCFS_CLIMP_CHECK(obd);
req = ptlrpc_prep_ping(obd->u.cli.cl_import);
{
int rc;
lnet_md_t md;
- ENTRY;
LASSERT (portal != 0);
LASSERT (conn != NULL);
__u64 xid;
lnet_handle_me_t me_h;
lnet_md_t md;
- ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
RETURN(0);
wait_queue_head_t *wq;
struct l_wait_info lwi;
int rc;
- ENTRY;
LASSERT(!in_interrupt()); /* might sleep */
int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
{
int rc;
- ENTRY;
if (req->rq_no_reply)
RETURN(0);
lnet_handle_me_t reply_me_h;
lnet_md_t reply_md;
struct obd_device *obd = request->rq_import->imp_obd;
- ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
RETURN(0);
static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy)
{
struct ptlrpc_nrs *nrs = policy->pol_nrs;
- ENTRY;
if (policy->pol_desc->pd_ops->op_policy_stop != NULL) {
spin_unlock(&nrs->nrs_lock);
static int nrs_policy_stop_locked(struct ptlrpc_nrs_policy *policy)
{
struct ptlrpc_nrs *nrs = policy->pol_nrs;
- ENTRY;
if (nrs->nrs_policy_fallback == policy && !nrs->nrs_stopping)
RETURN(-EPERM);
static void nrs_policy_stop_primary(struct ptlrpc_nrs *nrs)
{
struct ptlrpc_nrs_policy *tmp = nrs->nrs_policy_primary;
- ENTRY;
if (tmp == NULL) {
/**
{
struct ptlrpc_nrs *nrs = policy->pol_nrs;
int rc = 0;
- ENTRY;
/**
* Don't allow a policy to be started more than once; that is too complex and has no real
{
struct ptlrpc_nrs_policy *policy;
int rc = 0;
- ENTRY;
spin_lock(&nrs->nrs_lock);
static int nrs_policy_unregister(struct ptlrpc_nrs *nrs, char *name)
{
struct ptlrpc_nrs_policy *policy = NULL;
- ENTRY;
spin_lock(&nrs->nrs_lock);
struct ptlrpc_nrs_policy *tmp;
struct ptlrpc_service_part *svcpt = nrs->nrs_svcpt;
int rc;
- ENTRY;
LASSERT(svcpt != NULL);
LASSERT(desc->pd_ops != NULL);
static void ptlrpc_nrs_hpreq_add_nolock(struct ptlrpc_request *req)
{
int opc = lustre_msg_get_opc(req->rq_reqmsg);
- ENTRY;
spin_lock(&req->rq_lock);
req->rq_hp = 1;
struct ptlrpc_service_part *svcpt = nrs->nrs_svcpt;
struct ptlrpc_service *svc = svcpt->scp_service;
int rc = -EINVAL;
- ENTRY;
LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
{
struct ptlrpc_nrs *nrs;
int rc;
- ENTRY;
LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
struct ptlrpc_nrs_policy *tmp;
int rc;
bool hp = false;
- ENTRY;
LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
static struct ptlrpc_nrs_pol_desc *nrs_policy_find_desc_locked(const char *name)
{
struct ptlrpc_nrs_pol_desc *tmp;
- ENTRY;
list_for_each_entry(tmp, &nrs_core.nrs_policies, pd_list) {
if (strncmp(tmp->pd_name, name, NRS_POL_NAME_MAX) == 0)
struct ptlrpc_service_part *svcpt;
int i;
int rc = 0;
- ENTRY;
LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
LASSERT(mutex_is_locked(&ptlrpc_all_services_mutex));
struct ptlrpc_service *svc;
struct ptlrpc_nrs_pol_desc *desc;
int rc = 0;
- ENTRY;
LASSERT(conf != NULL);
LASSERT(conf->nc_ops != NULL);
{
struct ptlrpc_nrs_pol_desc *desc;
int rc;
- ENTRY;
LASSERT(conf != NULL);
struct ptlrpc_nrs_request *nrq = &req->rq_nrq;
struct ptlrpc_nrs_resource *res1[NRS_RES_MAX];
struct ptlrpc_nrs_resource *res2[NRS_RES_MAX];
- ENTRY;
/**
* Obtain the high-priority NRS head resources.
struct ptlrpc_service_part *svcpt;
int i;
int rc = 0;
- ENTRY;
LASSERT(opc != PTLRPC_NRS_CTL_INVALID);
int ptlrpc_nrs_init(void)
{
int rc;
- ENTRY;
mutex_init(&nrs_core.nrs_mutex);
INIT_LIST_HEAD(&nrs_core.nrs_policies);
{
struct ptlrpc_reply_state *rs;
int msg_len, rc;
- ENTRY;
LASSERT(req->rq_reply_state == NULL);
int __lustre_unpack_msg(struct lustre_msg *m, int len)
{
int required_len, rc;
- ENTRY;
/* We can provide a slightly better error log, if we check the
* message magic and version first. In the future, struct
struct ptlrpc_request *req;
char *tmp;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
if (req == NULL)
static void lustre_swab_lov_user_md_common(struct lov_user_md_v1 *lum)
{
- ENTRY;
__swab32s(&lum->lmm_magic);
__swab32s(&lum->lmm_pattern);
lustre_swab_lmm_oi(&lum->lmm_oi);
void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum)
{
- ENTRY;
CDEBUG(D_IOCTL, "swabbing lov_user_md v1\n");
lustre_swab_lov_user_md_common(lum);
EXIT;
void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum)
{
- ENTRY;
CDEBUG(D_IOCTL, "swabbing lov_user_md v3\n");
lustre_swab_lov_user_md_common((struct lov_user_md_v1 *)lum);
/* lmm_pool_name is a char array; no swabbing to do */
void lustre_swab_lov_mds_md(struct lov_mds_md *lmm)
{
- ENTRY;
CDEBUG(D_IOCTL, "swabbing lov_mds_md\n");
__swab32s(&lmm->lmm_magic);
__swab32s(&lmm->lmm_pattern);
int stripe_count)
{
int i;
- ENTRY;
+
for (i = 0; i < stripe_count; i++) {
lustre_swab_ost_id(&(lod[i].l_ost_oi));
__swab32s(&(lod[i].l_ost_gen));
{
int rc;
struct ptlrpc_request *req;
- ENTRY;
req = ptlrpc_prep_ping(obd->u.cli.cl_import);
if (req == NULL)
int ptlrpc_ping(struct obd_import *imp)
{
struct ptlrpc_request *req;
- ENTRY;
req = ptlrpc_prep_ping(imp);
if (req == NULL) {
static int ptlrpc_pinger_main(void *arg)
{
struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
- ENTRY;
/* Record that the thread is running */
thread_set_flags(thread, SVC_RUNNING);
{
struct l_wait_info lwi = { 0 };
int rc;
- ENTRY;
if (!thread_is_init(&pinger_thread) &&
!thread_is_stopped(&pinger_thread))
{
struct l_wait_info lwi = { 0 };
int rc = 0;
- ENTRY;
if (!thread_is_init(&pinger_thread) &&
!thread_is_stopped(&pinger_thread))
int ptlrpc_pinger_add_import(struct obd_import *imp)
{
- ENTRY;
if (!list_empty(&imp->imp_pinger_chain))
RETURN(-EALREADY);
int ptlrpc_pinger_del_import(struct obd_import *imp)
{
- ENTRY;
if (list_empty(&imp->imp_pinger_chain))
RETURN(-ENOENT);
struct obd_export *exp;
struct l_wait_info lwi = { 0 };
time_t expire_time;
- ENTRY;
unshare_fs_struct();
__init int ptlrpc_init(void)
{
int rc, cleanup_phase = 0;
- ENTRY;
lustre_assert_wire_constants();
#if RS_DEBUG
struct ptlrpc_request_set *set = pc->pc_set;
int rc = 0;
int rc2;
- ENTRY;
if (atomic_read(&set->set_new_count)) {
spin_lock(&set->set_new_req_lock);
struct ptlrpc_request_set *set = pc->pc_set;
struct lu_env env = { .le_ses = NULL };
int rc, exit = 0;
- ENTRY;
unshare_fs_struct();
#if defined(CONFIG_SMP)
#if defined(CONFIG_NUMA)
cpumask_t mask;
#endif
- ENTRY;
LASSERT(index <= max - 1);
pc = &ptlrpcds->pd_threads[index];
{
int rc;
int env = 0;
- ENTRY;
/*
* Do not allow starting a second thread for one pc.
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
- ENTRY;
-
if (!test_bit(LIOD_START, &pc->pc_flags)) {
CWARN("Thread for pc %p was not started\n", pc);
goto out;
void ptlrpcd_free(struct ptlrpcd_ctl *pc)
{
struct ptlrpc_request_set *set = pc->pc_set;
- ENTRY;
if (!test_bit(LIOD_START, &pc->pc_flags)) {
CWARN("Thread for pc %p was not started\n", pc);
static void ptlrpcd_fini(void)
{
int i;
- ENTRY;
if (ptlrpcds != NULL) {
for (i = 0; i < ptlrpcds->pd_nthreads; i++)
int nthreads = num_online_cpus();
char name[16];
int size, i = -1, j, rc = 0;
- ENTRY;
if (max_ptlrpcds > 0 && max_ptlrpcds < nthreads)
nthreads = max_ptlrpcds;
int ptlrpcd_addref(void)
{
int rc = 0;
- ENTRY;
mutex_lock(&ptlrpcd_mutex);
if (++ptlrpcd_users == 1)
*/
void ptlrpc_initiate_recovery(struct obd_import *imp)
{
- ENTRY;
-
CDEBUG(D_HA, "%s: starting recovery\n", obd2cli_tgt(imp->imp_obd));
ptlrpc_connect_import(imp);
struct list_head *tmp, *pos;
struct ptlrpc_request *req = NULL;
__u64 last_transno;
- ENTRY;
*inflight = 0;
{
struct ptlrpc_request *req, *next;
- ENTRY;
-
/* As long as we're in recovery, nothing should be added to the sending
* list, so we don't need to hold the lock during this iteration and
* resend process.
void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
{
struct obd_import *imp = failed_req->rq_import;
- ENTRY;
CDEBUG(D_HA, "import %s of %s@%s abruptly disconnected: reconnecting\n",
imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
struct obd_device *obd = imp->imp_obd;
int rc = 0;
- ENTRY;
LASSERT(obd);
/* When deactivating, mark import invalid, and abort in-flight
int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
{
int rc = 0;
- ENTRY;
spin_lock(&imp->imp_lock);
if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
struct obd_import *imp = req->rq_import;
struct ptlrpc_sec *sec;
int rc;
- ENTRY;
LASSERT(!req->rq_cli_ctx);
LASSERT(imp);
*/
void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
{
- ENTRY;
-
LASSERT(req);
LASSERT(req->rq_cli_ctx);
struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
struct ptlrpc_cli_ctx *newctx;
int rc;
- ENTRY;
LASSERT(oldctx);
struct ptlrpc_sec *sec;
struct l_wait_info lwi;
int rc;
- ENTRY;
LASSERT(ctx);
struct ptlrpc_cli_ctx *ctx;
struct ptlrpc_request *req = NULL;
int rc;
- ENTRY;
might_sleep();
{
struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
int rc = 0;
- ENTRY;
LASSERT(ctx);
LASSERT(ctx->cc_sec);
{
struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
int rc;
- ENTRY;
LASSERT(ctx);
LASSERT(ctx->cc_sec);
char *early_buf;
int early_bufsz, early_size;
int rc;
- ENTRY;
OBD_ALLOC_PTR(early_req);
if (early_req == NULL)
struct ptlrpc_sec_policy *policy;
struct ptlrpc_sec *sec;
char str[32];
- ENTRY;
if (svc_ctx) {
LASSERT(imp->imp_dlm_fake == 1);
enum lustre_sec_part sp;
char str[24];
int rc = 0;
- ENTRY;
might_sleep();
{
struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
struct ptlrpc_sec_policy *policy;
- ENTRY;
LASSERT(ctx);
LASSERT(ctx->cc_sec);
{
struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
struct ptlrpc_sec_policy *policy;
- ENTRY;
LASSERT(ctx);
LASSERT(ctx->cc_sec);
struct ptlrpc_sec_policy *policy;
struct lustre_msg *msg = req->rq_reqbuf;
int rc;
- ENTRY;
LASSERT(msg);
LASSERT(req->rq_reqmsg == NULL);
struct ptlrpc_sec_policy *policy;
struct ptlrpc_reply_state *rs;
int rc;
- ENTRY;
LASSERT(req->rq_svc_ctx);
LASSERT(req->rq_svc_ctx->sc_policy);
{
struct ptlrpc_sec_policy *policy;
int rc;
- ENTRY;
LASSERT(req->rq_svc_ctx);
LASSERT(req->rq_svc_ctx->sc_policy);
{
struct ptlrpc_sec_policy *policy;
unsigned int prealloc;
- ENTRY;
LASSERT(rs->rs_svc_ctx);
LASSERT(rs->rs_svc_ctx->sc_policy);
char fsname[MTI_NAME_MAXLEN];
struct sptlrpc_rule rule;
int rc;
- ENTRY;
target = lustre_cfg_string(lcfg, 1);
if (target == NULL) {
void sptlrpc_conf_client_adapt(struct obd_device *obd)
{
struct obd_import *imp;
- ENTRY;
LASSERT(strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0 ||
strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) == 0);
struct lvfs_run_ctxt saved;
struct dentry *dentry;
int rc;
- ENTRY;
ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
if (ctxt == NULL)
struct sptlrpc_conf *conf = (struct sptlrpc_conf *) data;
struct lustre_cfg *lcfg = (struct lustre_cfg *)(rec + 1);
int cfg_len, rc;
- ENTRY;
if (rec->lrh_type != OBD_CFG_REC) {
CERROR("unhandled lrh_type: %#x\n", rec->lrh_type);
struct llog_ctxt *ctxt;
struct lvfs_run_ctxt saved;
int rc;
- ENTRY;
LASSERT(conf->sc_updated == 0 && conf->sc_local == 0);
enum lustre_sec_part sp_dst;
char fsname[MTI_NAME_MAXLEN];
int rc = 0;
- ENTRY;
if (strcmp(obd->obd_type->typ_name, LUSTRE_MDT_NAME) == 0) {
sp_dst = LUSTRE_SP_MDT;
{
struct lustre_msg *msg = req->rq_reqbuf;
struct plain_header *phdr;
- ENTRY;
msg->lm_secflvr = req->rq_flvr.sf_rpc;
struct plain_header *phdr;
__u32 cksum;
int swabbed;
- ENTRY;
if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
struct plain_sec *plsec = sec2plsec(sec);
- ENTRY;
LASSERT(sec->ps_policy == &plain_policy);
LASSERT(sec->ps_import);
struct plain_sec *plsec;
struct ptlrpc_sec *sec;
struct ptlrpc_cli_ctx *ctx;
- ENTRY;
LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);
{
struct plain_sec *plsec = sec2plsec(sec);
struct ptlrpc_cli_ctx *ctx;
- ENTRY;
read_lock(&plsec->pls_lock);
ctx = plsec->pls_ctx;
{
struct plain_sec *plsec = sec2plsec(sec);
struct ptlrpc_cli_ctx *ctx;
- ENTRY;
/* do nothing unless the caller wants to flush for 'all' */
if (uid != -1)
{
__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
int alloc_len;
- ENTRY;
buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
buflens[PLAIN_PACK_MSG_OFF] = msgsize;
void plain_free_reqbuf(struct ptlrpc_sec *sec,
struct ptlrpc_request *req)
{
- ENTRY;
if (!req->rq_pool) {
OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
req->rq_reqbuf = NULL;
{
__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
int alloc_len;
- ENTRY;
buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
buflens[PLAIN_PACK_MSG_OFF] = msgsize;
void plain_free_repbuf(struct ptlrpc_sec *sec,
struct ptlrpc_request *req)
{
- ENTRY;
OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
req->rq_repbuf = NULL;
req->rq_repbuf_len = 0;
struct lustre_msg *newbuf;
int oldsize;
int newmsg_size, newbuf_size;
- ENTRY;
LASSERT(req->rq_reqbuf);
LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
struct lustre_msg *msg = req->rq_reqbuf;
struct plain_header *phdr;
int swabbed;
- ENTRY;
LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
SPTLRPC_POLICY_PLAIN);
struct ptlrpc_reply_state *rs;
__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
int rs_size = sizeof(*rs);
- ENTRY;
LASSERT(msgsize % 8 == 0);
static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
- ENTRY;
-
LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
atomic_dec(&rs->rs_svc_ctx->sc_refcount);
struct lustre_msg_v2 *msg = rs->rs_repbuf;
struct plain_header *phdr;
int len;
- ENTRY;
LASSERT(rs);
LASSERT(msg);
void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
{
struct ptlrpc_hr_thread *hrt;
- ENTRY;
LASSERT(list_empty(&rs->rs_list));
void
ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
{
- ENTRY;
-
LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
LASSERT(spin_is_locked(&rs->rs_lock));
LASSERT (rs->rs_difficult);
{
struct ptlrpc_reply_state *rs, *nxt;
DECLARE_RS_BATCH(batch);
- ENTRY;
rs_batch_init(&batch);
/* Find any replies that have been committed and get their service
int cpt;
int rc;
int i;
- ENTRY;
LASSERT(conf->psc_buf.bc_nbufs > 0);
LASSERT(conf->psc_buf.bc_buf_size >=
struct obd_export *oldest_exp;
time_t oldest_time, new_time;
- ENTRY;
-
LASSERT(exp);
/* Compensate for slow machines, etc, by faking our request time
cfs_duration_t olddl = req->rq_deadline - cfs_time_current_sec();
time_t newdl;
int rc;
- ENTRY;
/* The deadline is when the client expects us to reply; the margin is the
difference between clients' and servers' expectations */
time_t now = cfs_time_current_sec();
cfs_duration_t delay;
int first, counter = 0;
- ENTRY;
spin_lock(&svcpt->scp_at_lock);
if (svcpt->scp_at_check == 0) {
struct ptlrpc_request *req)
{
int rc = 0;
- ENTRY;
if (svcpt->scp_service->srv_ops.so_hpreq_handler) {
rc = svcpt->scp_service->srv_ops.so_hpreq_handler(req);
/** Remove the request from the export list. */
static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req)
{
- ENTRY;
if (req->rq_export && req->rq_ops) {
/* refresh the lock timeout again so that the client has more
* room to send a lock cancel RPC. */
struct ptlrpc_request *req)
{
int rc;
- ENTRY;
rc = ptlrpc_server_hpreq_init(svcpt, req);
if (rc < 0)
ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force)
{
struct ptlrpc_request *req = NULL;
- ENTRY;
spin_lock(&svcpt->scp_req_lock);
struct ptlrpc_request *req;
__u32 deadline;
int rc;
- ENTRY;
spin_lock(&svcpt->scp_lock);
if (list_empty(&svcpt->scp_req_incoming)) {
long timediff;
int rc;
int fail_opc = 0;
- ENTRY;
request = ptlrpc_server_request_get(svcpt, false);
if (request == NULL)
struct obd_export *exp;
int nlocks;
int been_handled;
- ENTRY;
exp = rs->rs_export;
#endif
struct lu_env *env;
int counter = 0, rc = 0;
- ENTRY;
thread->t_pid = current_pid();
unshare_fs_struct();
struct ptlrpc_hr_partition *hrp;
int i;
int j;
- ENTRY;
cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
int rc = 0;
struct ptlrpc_thread *thread;
LIST_HEAD (zombie);
- ENTRY;
-
CDEBUG(D_INFO, "Stopping threads for service %s\n",
svcpt->scp_service->srv_name);
{
struct ptlrpc_service_part *svcpt;
int i;
- ENTRY;
ptlrpc_service_for_each_part(svcpt, i, svc) {
if (svcpt->scp_service != NULL)
int rc = 0;
int i;
int j;
- ENTRY;
/* We require at least 2 threads; see the note in ptlrpc_server_handle_request */
LASSERT(svc->srv_nthrs_cpt_init >= PTLRPC_NTHRS_INIT);
struct ptlrpc_thread *thread;
struct ptlrpc_service *svc;
int rc;
- ENTRY;
LASSERT(svcpt != NULL);
int i;
int j;
int weight;
- ENTRY;
memset(&ptlrpc_hr, 0, sizeof(ptlrpc_hr));
ptlrpc_hr.hr_cpt_table = cfs_cpt_table;
int ptlrpc_unregister_service(struct ptlrpc_service *service)
{
- ENTRY;
-
CDEBUG(D_NET, "%s: tearing down\n", service->srv_name);
service->srv_is_stopping = 1;