#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
#endif
-
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
#include "linux-time.h"
#include "linux-mem.h"
-
#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5)
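/*
 * Worked example (the THREAD_SIZE value is an illustrative assumption, not
 * taken from this header): with a 16KB THREAD_SIZE, LUSTRE_TRACE_SIZE works
 * out to 16384 >> 5 = 512 bytes.
 */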
#if !defined(__x86_64__)
#define WITH_WATCHDOG
#endif
-
#endif /* _LINUX_LIBCFS_H */
offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]);
}
-
static inline __u64
kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
-
struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
kib_rdma_desc_t *rd);
struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev,
return -EINVAL;
}
-
static int
kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
unsigned int niov, struct kvec *iov, int offset, int nob)
module_param(dev_failover, int, 0444);
MODULE_PARM_DESC(dev_failover, "HCA failover for bonding (0 off, 1 on, other values reserved)");
-
static int require_privileged_port;
module_param(require_privileged_port, int, 0644);
MODULE_PARM_DESC(require_privileged_port, "require privileged port when accepting connection");
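/*
 * Illustrative sketch (hypothetical shell commands, assuming the socklnd
 * module is named ksocklnd): a 0644 module parameter can be set at load
 * time or changed later through sysfs, e.g.
 *
 *	modprobe ksocklnd require_privileged_port=1
 *	echo 0 > /sys/module/ksocklnd/parameters/require_privileged_port
 */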
ksocknal_data.ksnd_connd_starting++;
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
-
snprintf(name, sizeof(name), "socknal_cd%02d", i);
rc = ksocknal_thread_start(ksocknal_connd,
(void *)((ulong_ptr_t)i), name);
return -ENETDOWN;
}
-
static void __exit
ksocknal_module_fini(void)
{
return tx;
}
-
void
ksocknal_free_tx (ksock_tx_t *tx)
{
spin_unlock_bh(&sched->kss_lock);
}
-
ksock_route_t *
ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
{
return -EIO;
}
-
static void
ksocknal_check_peer_timeouts (int idx)
{
module_param(zc_recv_min_nfrags, int, 0644);
MODULE_PARM_DESC(zc_recv_min_nfrags, "minimum # of fragments to enable ZC recv");
-
#if SOCKNAL_VERSION_DEBUG
static int protocol = 3;
module_param(protocol, int, 0644);
}
EXPORT_SYMBOL(lnet_connect);
-
/* Below is the code common to both the kernel and MT user-space */

static int
if (rc != 0)
return rc;
-
init_completion(&lnet_acceptor_state.pta_signal);
rc = accept2secure(accept_type, &secure);
if (rc <= 0)
lnet_t the_lnet; /* THE state of the network */
EXPORT_SYMBOL(the_lnet);
-
static char *ip2nets = "";
module_param(ip2nets, charp, 0444);
MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
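/*
 * Illustrative ip2nets sketch (interface names and addresses are
 * hypothetical): the string maps local IP addresses to LNet networks at
 * module load time, e.g.
 *
 *	options lnet ip2nets="tcp0(eth0) 192.168.0.*; o2ib0 10.10.0.*"
 *
 * Nodes whose address matches 192.168.0.* join tcp0 on eth0, while nodes
 * matching 10.10.0.* join o2ib0.
 */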
list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
}
-
int lnet_unprepare(void);
static int
}
EXPORT_SYMBOL(LNetEQWait);
-
static int
lnet_eq_wait_locked(int *timeout_ms)
__must_hold(&the_lnet.ln_eq_wait_lock)
return wait;
}
-
-
/**
* Block the calling process until there's an event from a set of EQs or
* a timeout occurs.
}
EXPORT_SYMBOL(lnet_extract_iov);
-
unsigned int
lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
{
return alive;
}
-
/* NB: returns 1 when alive, 0 when dead, negative when error;
* may drop the lnet_net_lock */
static int
return 0;
}
-
static lnet_rtrbufpool_t *
lnet_msg2bufpool(lnet_msg_t *msg)
{
lp->lp_notifying = 0;
}
-
static void
lnet_rtr_addref_locked(lnet_peer_t *lp)
{
* Author: Liang Zhen <liang@whamcloud.com>
*/
-
#include "../../include/linux/libcfs/libcfs.h"
#include "../../include/linux/lnet/lib-lnet.h"
#include "timer.h"
int lstcon_rpc_module_init(void);
void lstcon_rpc_module_fini(void);
-
#endif
* Author: Liang Zhen <liangzhen@clusterfs.com>
*/
-
#include "../../include/linux/libcfs/libcfs.h"
#include "../../include/linux/lnet/lib-lnet.h"
#include "console.h"
for (i = 0; i < LST_GLOBAL_HASHSIZE; i++)
INIT_LIST_HEAD(&console_session.ses_ndl_hash[i]);
-
/* initialize acceptor service table */
lstcon_init_acceptor_service();
#ifndef __LST_CONSOLE_H__
#define __LST_CONSOLE_H__
-
#include "../../include/linux/libcfs/libcfs.h"
#include "../../include/linux/lnet/lnet.h"
#include "../../include/linux/lnet/lib-types.h"
spin_lock(&sfw_data.fw_lock);
}
-
static void
sfw_session_expired(void *data)
{
extern void brw_init_test_client(void);
extern void brw_init_test_service(void);
-
int
sfw_startup(void)
{
srpc_service_t *sv;
sfw_test_case_t *tsc;
-
if (session_timeout < 0) {
CERROR("Session timeout must be non-negative: %d\n",
session_timeout);
return rc;
}
-
MODULE_DESCRIPTION("LNet Selftest");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.9.0");
return;
}
-
int
srpc_send_reply(struct srpc_server_rpc *rpc)
{
}
}
-
int
srpc_startup(void)
{
SRPC_MSG_JOIN_REPLY = 17,
} srpc_msg_type_t;
-
/* CAVEAT EMPTOR:
* The 1st field of every srpc_*_reqst_t must be the matchbits of the reply
* buffer, and the 2nd field the matchbits of the bulk buffer, if any.
#define MADE_WITHOUT_COMPROMISE
#endif
-
#define SWI_STATE_NEWBORN 0
#define SWI_STATE_REPLY_SUBMITTED 1
#define SWI_STATE_REPLY_SENT 2
return cfs_wi_deschedule(swi->swi_sched, &swi->swi_workitem);
}
-
int sfw_startup(void);
int srpc_startup(void);
void sfw_shutdown(void);
schedule_timeout(cfs_time_seconds(1) / 10); \
} while (0)
-
#define lst_wait_until(cond, lock, fmt, ...) \
do { \
int __I = 2; \
#include "selftest.h"
-
/*
* Timers are implemented as a sorted queue of expiry times. The queue
* is slotted, with each slot holding timers which expire in a
return expired;
}
-
static int
stt_timer_main(void *arg)
{
return 0;
}
-
int
stt_startup(void)
{
struct list_head cis_linkage;
};
-
/**
* Per-layer io operations.
* \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
int crw_nonblock;
};
-
/**
* State for io.
*
/** @} transfer */
-
/**
* \name helper routines
* Functions to discard, delete and export a cl_page.
SETATTR_MATCH_LOCK
};
-
/**
* IO state private to vvp or slp layers.
*/
#define module_init(a) late_initcall(a)
#endif
-
#define LTIME_S(time) (time.tv_sec)
#ifndef QUOTA_OK
#error Do not #include this file directly. #include <lustre_lite.h> instead
#endif
-
#include <linux/statfs.h>
#include <linux/fs.h>
LPROC_LL_FILE_OPCODES
};
-
#endif
#include <linux/mm.h>
#include <linux/hash.h>
-
#define ll_delete_from_page_cache(page) delete_from_page_cache(page)
static inline void
spin_unlock(&lock->lock);
}
-
static inline void client_obd_list_lock_init(client_obd_lock_t *lock)
{
spin_lock_init(&lock->lock);
}
}
-
#define LUSTRE_MAX_OPCODES (OPC_RANGE(OST) + \
OPC_RANGE(MDS) + \
OPC_RANGE(LDLM) + \
#define LPROCFS_CLIMP_EXIT(obd) \
up_read(&(obd)->u.cli.cl_sem)
-
/* Write the name##_seq_show function and call LPROC_SEQ_FOPS_RO for a
read-only proc entry; otherwise, also define a name##_seq_write function for
a read-write proc entry and call LPROC_SEQ_FOPS instead. Finally,
*/
int lu_object_invariant(const struct lu_object *o);
-
/**
* Check whether the object exists, whether it is on local or remote storage.
* Note: LOHA_EXISTS will be set once someone has created the object,
void lu_context_key_quiesce (struct lu_context_key *key);
void lu_context_key_revive (struct lu_context_key *key);
-
/*
* LU_KEY_INIT_GENERIC() has to be a macro to correctly determine an
* owning module.
} \
struct __##mod##_dummy_type_stop {; }
-
-
#define LU_TYPE_INIT_FINI(mod, ...) \
LU_TYPE_INIT(mod, __VA_ARGS__); \
LU_TYPE_FINI(mod, __VA_ARGS__); \
* @{
*/
-
/*
* Dummy data structures/functions so that the code compiles for now.
* They need to be reimplemented with kref.
#ifndef _LUSTRE_FIEMAP_H
#define _LUSTRE_FIEMAP_H
-
-
struct ll_fiemap_extent {
__u64 fe_logical; /* logical offset in bytes for the start of
* the extent from the beginning of the file */
* support extents. Result
* merged for efficiency. */
-
static inline size_t fiemap_count_to_size(size_t extent_count)
{
return (sizeof(struct ll_user_fiemap) + extent_count *
(range)->lsr_index, \
fld_range_is_mdt(range) ? "mdt" : "ost"
-
/** \defgroup lu_fid lu_fid
* @{ */
#define DTTOIF(dirtype) ((dirtype) << IFSHIFT)
#endif
-
struct lu_dirpage {
__u64 ldp_hash_start;
__u64 ldp_hash_end;
#define OCD_HAS_FLAG(ocd, flg) \
(!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))
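/*
 * Usage sketch: OCD_HAS_FLAG() pastes the short flag name onto the
 * OBD_CONNECT_ prefix, so, for example,
 *
 *	OCD_HAS_FLAG(ocd, RDONLY)
 *
 * expands to !!((ocd)->ocd_connect_flags & OBD_CONNECT_RDONLY) and yields
 * 0 or 1.
 */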
-
#define LRU_RESIZE_CONNECT_FLAG OBD_CONNECT_LRU_RESIZE
#define MDT_CONNECT_SUPPORTED (OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \
* the matching OBD_CONNECT flag, so that it can be approved and landed easily to
* reserve the flag for future use. */
-
void lustre_swab_connect(struct obd_connect_data *ocd);
/*
#define MDS_FIRST_OPC MDS_GETATTR
-
/* opcodes for object update */
typedef enum {
UPDATE_OBJ = 1000,
/* This FULL lock is useful to take on unlink sort of operations */
#define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
-
/* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
* but was moved into name[1] along with the OID to avoid consuming the
* name[2,3] fields that need to be used for the quota id (also a FID). */
#define LL_IOC_OBD_STATFS IOC_OBD_STATFS
#define IOC_MDC_GETSTRIPE IOC_MDC_GETFILESTRIPE
-
#define MAX_OBD_NAME 128 /* If this changes, a NEW ioctl must be added */
/* Define O_LOV_DELAY_CREATE to be a mask that is not useful for regular
&((fid)->f_oid), \
&((fid)->f_ver)
-
/********* Quotas **********/
/* these must be explicitly translated into linux Q_* in ll_dir_ioctl */
__u64 sl_dv2;
};
-
/********* Changelogs **********/
/** Changelog record types */
enum changelog_rec_type {
#define dot_lustre_name ".lustre"
-
/********* HSM **********/
/** HSM per-file state
return cfs_size_round(len);
}
-
#include "obd_support.h"
static inline struct lustre_cfg *lustre_cfg_new(int cmd,
#define lmd_is_client(x) ((x)->lmd_flags & LMD_FLG_CLIENT)
-
/****************** last_rcvd file *********************/
/** version recovery epoch */
#define get_mount_flags(sb) (s2lsi(sb)->lsi_lmd->lmd_flags)
#define get_mntdev_name(sb) (s2lsi(sb)->lsi_lmd->lmd_dev)
-
/****************** mount lookup info *********************/
struct lustre_mount_info {
void lustre_register_kill_super_cb(void (*cfs)(struct super_block *sb));
int lustre_common_put_super(struct super_block *sb);
-
int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type);
/** @} disk */
int intent_disposition(struct ldlm_reply *rep, int flag);
void intent_set_disposition(struct ldlm_reply *rep, int flag);
-
/* ioctls for trying requests */
#define IOC_LDLM_TYPE 'f'
#define IOC_LDLM_MIN_NR 40
#define CFS_ACL_XATTR_COUNT(size, prefix) \
(((size) - sizeof(prefix ## _header)) / sizeof(prefix ## _entry))
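/*
 * Expansion sketch: instantiated with the posix_acl_xattr prefix this
 * becomes
 *
 *	((size) - sizeof(posix_acl_xattr_header)) / sizeof(posix_acl_xattr_entry)
 *
 * i.e. the number of ACL entries that fit in an xattr body of @size bytes.
 */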
-
extern ext_acl_xattr_header *
lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size);
extern int
LUSTRE_CLI_FLD_HASH_RRB
};
-
struct lu_fld_target {
struct list_head ft_chain;
struct obd_export *ft_exp;
struct obd_device;
struct ptlrpc_request;
-
int ptlrpc_replay(struct obd_import *imp);
int ptlrpc_resend(struct obd_import *imp);
void ptlrpc_free_committed(struct obd_import *imp);
#include "../../include/linux/libcfs/libcfs.h"
-
struct portals_handle_ops {
void (*hop_addref)(void *object);
void (*hop_free)(void *object, int size);
#include "lustre_handles.h"
#include "lustre/lustre_idl.h"
-
/**
* Adaptive Timeout stuff
*
struct adaptive_timeout iat_service_estimate[IMP_AT_MAX_PORTALS];
};
-
/** @} */
/** Possible import states */
return len;
}
-
static inline int obd_ioctl_is_invalid(struct obd_ioctl_data *data)
{
if (data->ioc_len > OBD_MAX_IOCTL_BUFFER) {
return 0;
}
-
#include "obd_support.h"
/* function defined in lustre/obdclass/<platform>/<platform>-module.c */
#define OBD_IOC_READ _IOWR('f', 109, OBD_IOC_DATA_TYPE)
#define OBD_IOC_WRITE _IOWR('f', 110, OBD_IOC_DATA_TYPE)
-
#define OBD_IOC_STATFS _IOWR('f', 113, OBD_IOC_DATA_TYPE)
#define OBD_IOC_SYNC _IOW ('f', 114, OBD_IOC_DATA_TYPE)
#define OBD_IOC_READ2 _IOWR('f', 115, OBD_IOC_DATA_TYPE)
sigmask(SIGTERM) | sigmask(SIGQUIT) | \
sigmask(SIGALRM))
-
/*
* Wait for @condition to become true, but no longer than the timeout
* specified by @info (see the usage sketch below).
remove_wait_queue(&wq, &__wait); \
} while (0)
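/*
 * Usage sketch for the wait helpers described above (the wait queue, the
 * condition and the 10-second timeout are illustrative assumptions only):
 *
 *	struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
 *	int rc;
 *
 *	rc = l_wait_event(waitq, atomic_read(&obj->ref) == 0, &lwi);
 */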
-
-
#define l_wait_event(wq, condition, info) \
({ \
int __ret; \
#include "lustre/lustre_user.h"
-
struct lustre_rw_params {
int lrp_lock_mode;
ldlm_policy_data_t lrp_policy;
}
}
-
struct mdc_cache_waiter {
struct list_head mcw_entry;
wait_queue_head_t mcw_waitq;
* @{
*/
-
#ifndef _LUSTRE_NET_H
#define _LUSTRE_NET_H
void client_destroy_import(struct obd_import *imp);
/** @} */
-
/* ptlrpc/pinger.c */
/**
* Pinger API (client side only)
int class_parse_nid(char *buf, lnet_nid_t *nid, char **endh);
int class_parse_nid_quiet(char *buf, lnet_nid_t *nid, char **endh);
-
-
/****************** User-settable parameter keys *********************/
/* e.g.
tunefs.lustre --param="failover.node=192.168.0.13@tcp0" /dev/sda
*/
#define SPTLRPC_MAX_PAYLOAD (1024)
-
struct vfs_cred {
uint32_t vc_uid;
uint32_t vc_gid;
return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_ROOTONLY);
}
-
struct ptlrpc_svc_ctx {
atomic_t sc_refcount;
struct ptlrpc_sec_policy *sc_policy;
__u8 bsd_data[0]; /* policy-specific token */
};
-
/*
* round size up to next power of 2, for slab allocation.
* @size must be sane (can't overflow after round up)
int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout);
void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode);
-
/* gc */
void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec);
void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec);
int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset);
int sptlrpc_unpack_user_desc(struct lustre_msg *req, int offset, int swabbed);
-
#define CFS_CAP_CHOWN_MASK (1 << CFS_CAP_CHOWN)
#define CFS_CAP_SYS_RESOURCE_MASK (1 << CFS_CAP_SYS_RESOURCE)
#ifndef _OBD_CACHE_H__
#define _OBD_CACHE_H__
-
#endif
return ret;
}
-
/* Select the best checksum algorithm among those supplied in the cksum_types
* input.
*
#ifndef __CLASS_OBD_H
#define __CLASS_OBD_H
-
#include "obd_support.h"
#include "lustre_import.h"
#include "lustre_net.h"
(export)->exp_md_stats, coffset); \
}
-
#define OBD_CHECK_MD_OP(obd, op, err) \
do { \
if (!OBT(obd) || !MDP((obd), op)) { \
} \
} while (0)
-
#define OBD_CHECK_DT_OP(obd, op, err) \
do { \
if (!OBT(obd) || !OBP((obd), op)) { \
return rc;
}
-
/* OBD Metadata Support */
int obd_init_caches(void);
OBD_SLAB_FREE_PTR((ptr), obdo_cachep); \
} while (0)
-
static inline void obdo2fid(struct obdo *oa, struct lu_fid *fid)
{
/* something here */
#define OBD_FAIL_UPDATE_OBJ_NET 0x1700
#define OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
-
/* Assign references to moved code to reduce code changes */
#define OBD_FAIL_PRECHECK(id) CFS_FAIL_PRECHECK(id)
#define OBD_FAIL_CHECK(id) CFS_FAIL_CHECK(id)
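/*
 * Usage sketch (hypothetical call site): fail locs are used to inject
 * faults for testing, e.g.
 *
 *	if (OBD_FAIL_CHECK(OBD_FAIL_UPDATE_OBJ_NET_REP))
 *		return -EIO;
 *
 * The check fires only when the corresponding fail_loc value has been set
 * at run time.
 */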
.lct_fini = ccc_session_key_fini
};
-
/* type constructor/destructor: ccc_type_{init,fini,start,stop}(). */
/* LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key); */
#include "../include/lustre_lite.h"
-
/* Initialize the default and maximum LOV EA and cookie sizes. This allows
* us to make MDS RPCs with large enough reply buffers to hold the
* maximum-sized (= maximum striped) EA and cookie without having to
#include "../include/lustre_lib.h"
#include "ldlm_internal.h"
-
/* When a lock is cancelled by a client, the KMS may undergo change if this
* is the "highest lock". This function returns the new KMS value.
* Caller must hold lr_lock already.
lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
}
-
void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
ldlm_policy_data_t *lpolicy)
{
#include "../include/lustre_lib.h"
#include "ldlm_internal.h"
-
void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
ldlm_policy_data_t *lpolicy)
{
}
EXPORT_SYMBOL(client_disconnect_export);
-
/**
* Packs current SLV and Limit into \a req.
*/
* REFCOUNTED LOCK OBJECTS
*/
-
/**
* Get a reference on a lock.
*
return rc;
}
-
/**
* Process a call to blocking AST callback for a lock in ast_work list
*/
}
}
-
int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
struct ldlm_lock *lock)
{
return 0;
}
-
static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
{
struct ldlm_bl_work_item *blwi = NULL;
return 0;
}
-
static int ldlm_setup(void);
static int ldlm_cleanup(void);
goto out;
}
-
blp = kzalloc(sizeof(*blp), GFP_NOFS);
if (!blp) {
rc = -ENOMEM;
#include "ldlm_internal.h"
-
void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
ldlm_policy_data_t *lpolicy)
{
#include "../include/obd_support.h"
#include "ldlm_internal.h"
-
/*
* 50 ldlm locks for 1MB of RAM.
*/
if (rc != ELDLM_OK)
goto out;
-
reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
if (reply == NULL) {
rc = -EPROTO;
#include "../../include/linux/libcfs/libcfs.h"
-
/** destroy cpu-partition lock, see libcfs_private.h for more detail */
void
cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
}
EXPORT_SYMBOL(cfs_percpt_unlock);
-
/** free cpu-partition refcount */
void
cfs_percpt_atomic_free(atomic_t **refs)
}
};
-
int cfs_crypto_adler32_register(void)
{
return crypto_register_shash(&alg);
*/
static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX];
-
-
static int cfs_crypto_hash_alloc(unsigned char alg_id,
const struct cfs_crypto_hash_type **type,
struct hash_desc *desc, unsigned char *key,
return -EINVAL;
}
-
if (hdr->ioc_len < sizeof(struct libcfs_ioctl_data)) {
CERROR("PORTALS: user buffer too small for ioctl\n");
return -EINVAL;
return err;
}
-
struct cfs_psdev_ops libcfs_psdev_ops = {
libcfs_psdev_open,
libcfs_psdev_release,
* Author: Phil Schwan <phil@clusterfs.com>
*/
-
#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"
int cfs_tracefile_init(int max_pages);
void cfs_tracefile_exit(void);
-
-
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
const char __user *usr_buffer, int usr_buffer_nob);
int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
struct list_head *stock);
-
int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
struct cfs_trace_page *tage);
return 1;
}
-
/* XXX:
* 0. it only works when called from wi->wi_action.
* 1. when it returns no one shall try to schedule the workitem.
}
EXPORT_SYMBOL(cfs_wi_schedule);
-
static int
cfs_wi_scheduler (void *arg)
{
wi->wi_running = 1;
wi->wi_scheduled = 0;
-
cfs_wi_sched_unlock(sched);
nloops++;
return 0;
}
-
void
cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
{
return ll_revalidate_dentry(dentry, flags);
}
-
static void ll_d_iput(struct dentry *de, struct inode *inode)
{
LASSERT(inode);
hpk.hpk_errval = 0;
hpk.hpk_data_version = 0;
-
/* For an archive request, we need to read the current file version. */
if (copy->hc_hai.hai_action == HSMA_ARCHIVE) {
struct inode *inode;
return rc;
}
-
static int copy_and_ioctl(int cmd, struct obd_export *exp,
const void __user *data, size_t size)
{
__u64 data_version = 0;
int rc;
-
CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
ll_get_fsname(inode->i_sb, NULL, 0),
PFID(&ll_i2info(inode)->lli_fid));
&data_version);
och = NULL;
-
out:
if (och != NULL && !IS_ERR(och)) /* close the file */
ll_lease_close(och, inode, NULL);
struct iattr *attr = NULL;
int rc;
-
if (!S_ISREG(inode->i_mode))
return -EINVAL;
}
}
-
static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
{
struct inode *inode = file_inode(file);
return acl;
}
-
int ll_inode_permission(struct inode *inode, int mask)
{
int rc = 0;
LIST_HEAD_INIT(llioc.ioc_head)
};
-
struct llioc_data {
struct list_head iocd_list;
unsigned int iocd_size;
struct ccc_object *cl_inode2ccc(struct inode *inode);
-
void vvp_write_pending (struct ccc_object *club, struct ccc_page *page);
void vvp_write_complete(struct ccc_object *club, struct ccc_page *page);
void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd);
void ll_iocontrol_unregister(void *magic);
-
/* lclient compat stuff */
#define cl_inode_info ll_inode_info
#define cl_i2info(info) ll_i2info(info)
return result;
}
-
-
static inline int to_fault_error(int result)
{
switch (result) {
spin_unlock(&rct->rct_lock);
}
-
static struct eacl_entry *ee_alloc(pid_t key, struct lu_fid *fid, int type,
ext_acl_xattr_header *header)
{
if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
break;
-
page_count += (*bio)->bi_vcnt;
count++;
bio = &(*bio)->bi_next;
bio_io_error(old_bio);
}
-
static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
{
int ret;
char name[MAX_STRING_SIZE + 1], *ptr;
int err, id, len, rc;
-
name[MAX_STRING_SIZE] = '\0';
LASSERT(sbi != NULL);
if (err)
goto out;
-
err = ldebugfs_add_vars(sbi->ll_debugfs_entry,
lprocfs_llite_obd_vars, sb);
if (err)
return 0;
}
-
/*
* Get an inode by inode number (already instantiated by the intent lookup).
* Returns inode or NULL
return rc;
}
-
/* We depend on "mode" being set with the proper file type/umask by now */
static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it)
{
#define DEBUG_SUBSYSTEM S_LLITE
-
#include "../include/obd.h"
#include "../include/lustre_lite.h"
#include "llite_internal.h"
OBD_SLAB_FREE_PTR(session, vvp_session_kmem);
}
-
struct lu_context_key vvp_key = {
.lct_tags = LCT_CL_THREAD,
.lct_init = vvp_key_init,
lu_kmem_fini(vvp_caches);
}
-
/*****************************************************************************
*
* mirror obd-devices into cl devices.
#ifndef VVP_INTERNAL_H
#define VVP_INTERNAL_H
-
#include "../include/cl_object.h"
#include "llite_internal.h"
#define DEBUG_SUBSYSTEM S_LLITE
-
#include "../include/obd.h"
#include "../include/lustre_lite.h"
return -EINVAL;
}
-
static int vvp_io_fault_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
goto out;
}
-
if (fio->ft_mkwrite) {
pgoff_t last_index;
/*
#define DEBUG_SUBSYSTEM S_LLITE
-
#include "../include/obd.h"
#include "../include/lustre_lite.h"
#define DEBUG_SUBSYSTEM S_LLITE
-
#include "../../include/linux/libcfs/libcfs.h"
#include "../include/obd.h"
#define DEBUG_SUBSYSTEM S_LLITE
-
#include "../include/obd.h"
#include "../include/lustre_lite.h"
static void ll_xattr_cache_init(struct ll_inode_info *lli)
{
-
LASSERT(lli != NULL);
INIT_LIST_HEAD(&lli->lli_xattrs);
{
struct ll_xattr_entry *entry;
-
-
list_for_each_entry(entry, cache, xe_list) {
/* xattr_name == NULL means look for any entry */
if (xattr_name == NULL ||
{
struct ll_xattr_entry *xattr;
-
-
if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
CDEBUG(D_CACHE, "duplicate xattr: [%s]\n", xattr_name);
return -EPROTO;
{
struct ll_xattr_entry *xattr;
-
-
CDEBUG(D_CACHE, "del xattr: %s\n", xattr_name);
if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
struct ll_xattr_entry *xattr, *tmp;
int xld_tail = 0;
-
-
list_for_each_entry_safe(xattr, tmp, cache, xe_list) {
CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n",
xld_buffer, xld_tail, xattr->xe_name);
static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
{
-
if (!ll_xattr_cache_valid(lli))
return 0;
struct ll_inode_info *lli = ll_i2info(inode);
int rc;
-
-
down_write(&lli->lli_xattrs_list_rwsem);
rc = ll_xattr_cache_destroy_locked(lli);
up_write(&lli->lli_xattrs_list_rwsem);
struct obd_export *exp = sbi->ll_md_exp;
int rc;
-
-
mutex_lock(&lli->lli_xattrs_enq_lock);
/* inode may have been shrunk and recreated, so data is gone, match lock
* only when data exists. */
__u32 *xsizes;
int rc, i;
-
-
rc = ll_xattr_find_get_lock(inode, oit, &req);
if (rc)
goto out_no_unlock;
struct ll_inode_info *lli = ll_i2info(inode);
int rc = 0;
-
-
LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRLS));
down_read(&lli->lli_xattrs_list_rwsem);
return rc;
}
-
-
-
static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
int len, void *karg, void *uarg)
{
return rc;
}
-
static int lmv_close(struct obd_export *exp, struct md_op_data *op_data,
struct md_open_data *mod, struct ptlrpc_request **request)
{
struct cl_page_slice lsb_cl;
};
-
struct lov_thread_info {
struct cl_object_conf lti_stripe_conf;
struct lu_fid lti_fid;
struct lovsub_lock *sublock,
const struct cl_lock_descr *d, int idx);
-
int lov_page_init(const struct lu_env *env, struct cl_object *ob,
struct cl_page *page, struct page *vmpage);
int lovsub_page_init(const struct lu_env *env, struct cl_object *ob,
#include "lov_cl_internal.h"
#include "lov_internal.h"
-
struct kmem_cache *lov_lock_kmem;
struct kmem_cache *lov_object_kmem;
struct kmem_cache *lov_thread_kmem;
return lov_sub_get(env, lio, stripe);
}
-
static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
struct cl_io *io)
{
LASSERT(rc == 0);
}
-
static struct cl_page_list *lov_io_submit_qin(struct lov_device *ld,
struct cl_page_list *qin,
int idx, int alloc)
return result;
}
-
static void lov_lock_cancel(const struct lu_env *env,
const struct cl_lock_slice *slice)
{
return closure;
}
-
/** @} lov */
static int lov_notify(struct obd_device *obd, struct obd_device *watched,
enum obd_notify_event ev, void *data);
-
#define MAX_STRING_SIZE 128
int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
struct obd_connect_data *data)
return rc;
}
-
if (imp->imp_invalid) {
CDEBUG(D_CONFIG, "not connecting OSC %s; administratively disabled\n",
obd_uuid2str(tgt_uuid));
return stripe_count;
}
-
static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count)
{
int rc;
return refc;
}
-
/* Unpack LOV object metadata from disk storage. It is packed in LE byte
* order and is opaque to the networking layer.
*/
return rc;
}
-
static const struct cl_page_operations lov_empty_page_ops = {
.cpo_fini = lov_empty_page_fini,
.cpo_print = lov_page_print
return 0;
}
-
/** @} lov */
return 0;
}
-
int lov_pool_new(struct obd_device *obd, char *poolname)
{
struct lov_obd *lov;
return 0;
}
-
int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname)
{
struct obd_uuid ost_uuid;
obd_str2uuid(&ost_uuid, ostname);
-
/* search ost in lov array */
obd_getref(obd);
for (lov_idx = 0; lov_idx < lov->desc.ld_tgt_count; lov_idx++) {
.ldt_ctx_tags = LCT_CL_THREAD
};
-
/** @} lov */
return cl_object_glimpse(env, &los->lso_super->lo_cl, lvb);
}
-
-
static const struct cl_object_operations lovsub_ops = {
.coo_page_init = lovsub_page_init,
.coo_lock_init = lovsub_lock_init,
#include "../include/lustre/lustre_idl.h"
#include "mdc_internal.h"
-
static void __mdc_pack_body(struct mdt_body *b, __u32 suppgid)
{
LASSERT(b != NULL);
CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_create));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
-
rec->cr_opcode = REINT_CREATE;
rec->cr_fsuid = uid;
rec->cr_fsgid = gid;
int rc, count = 0, maxdata;
LIST_HEAD(cancels);
-
-
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_LDLM_INTENT_GETXATTR);
if (req == NULL)
int rc;
int saved_rc = 0;
-
req_fmt = &RQF_MDS_CLOSE;
if (op_data->op_bias & MDS_HSM_RELEASE) {
req_fmt = &RQF_MDS_RELEASE_CLOSE;
return rc;
}
-
static int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data,
struct page **pages, struct ptlrpc_request **request)
{
return rc;
}
-
/* get remote permission for current user on fid */
static int mdc_get_remote_perm(struct obd_export *exp, const struct lu_fid *fid,
__u32 suppgid, struct ptlrpc_request **request)
config_log_get(cld);
}
-
if (cld_is_recover(cld)) {
rc = 0; /* this is not a fatal error for recover log */
if (rcl == 0)
return rc;
}
-
/** Called from lustre_process_log.
* LCFG_LOG_START gets the config log from the MGS, processes it to start
* any services, and adds it to the list of logs to watch (follow).
d->e_id = cpu_to_le32(s->e_id);
}
-
/* if "new_count == 0", then "new = {a_version, NULL}", NOT NULL. */
static int lustre_posix_acl_xattr_reduce_space(posix_acl_xattr_header **header,
int old_count, int new_count)
}
EXPORT_SYMBOL(cl_io_slice_add);
-
/**
* Initializes page list.
*/
CL_ENV_INC(busy);
}
-
/*
* The implementation below uses a hash table to connect a cl_env to its thread.
*/
cfs_hash_putref(cl_env_hash);
}
-
static inline struct cl_env *cl_env_detach(struct cl_env *cle)
{
if (cle == NULL)
}
EXPORT_SYMBOL(cl_page_find);
-
struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
pgoff_t idx, struct page *vmpage,
struct cl_page *parent)
}
EXPORT_SYMBOL(cl_page_own_try);
-
/**
* Assume page ownership.
*
#include "../include/cl_object.h"
#include "llog_internal.h"
-
struct obd_device *obd_devs[MAX_OBD_DEVICES];
EXPORT_SYMBOL(obd_devs);
struct list_head obd_types;
if (err != 0)
return err;
-
err = llog_info_init();
if (err)
return err;
EXPORT_SYMBOL(class_export_dump_hook);
#endif
-
/* Total number of zombies to be destroyed */
static int zombies_count;
}
EXPORT_SYMBOL(obd_zombie_barrier);
-
/**
* destroy zombie export/import thread.
*/
return 0;
}
-
/**
* start the zombie import/export destroy thread
*/
.fops = &obd_psdev_fops,
};
-
static ssize_t version_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
#define DEBUG_SUBSYSTEM S_LOG
-
#include "../include/obd_class.h"
#include "../include/lustre_log.h"
#include "llog_internal.h"
#define DEBUG_SUBSYSTEM S_LOG
-
#include "../include/obd_class.h"
#include "llog_internal.h"
#define DEBUG_SUBSYSTEM S_LOG
-
#include "../include/obd_class.h"
#include "../include/lustre_log.h"
#include "llog_internal.h"
#define DEBUG_SUBSYSTEM S_LOG
-
#include "../include/lustre_log.h"
static void print_llogd_body(struct llogd_body *d)
#define DEBUG_SUBSYSTEM S_CLASS
-
#include "../include/obd_class.h"
#include "../include/lprocfs_status.h"
#include "../include/lustre/lustre_idl.h"
}
EXPORT_SYMBOL(lu_object_locate);
-
-
/**
* Finalize and free devices in the device stack.
*
}
}
-
/*
* lu_cache_shrink_count returns the number of cached objects that are
* candidates to be freed by shrink_slab(). A counter, which tracks
#include "../include/lustre_handles.h"
#include "../include/lustre_lib.h"
-
static __u64 handle_base;
#define HANDLE_INCR 7
static spinlock_t handle_base_lock;
return 0;
}
-
/* We can't call ll_process_config or lquota_process_config directly because
* they live in modules that must be loaded after this one. */
static int (*client_process_config)(struct lustre_cfg *lcfg);
ktime_t end;
int rc;
-
/* Add upcall processing here. Currently only lctl is supported */
if (strcmp(upcall, LCTL_UPCALL) != 0) {
CERROR("Unsupported upcall %s\n", upcall);
}
}
-
if (clli->cfg_flags & CFG_F_EXCLUDE) {
CDEBUG(D_CONFIG, "cmd: %x marked EXCLUDED\n",
lcfg->lcfg_command);
* Author: Nathan Rutman <nathan@clusterfs.com>
*/
-
#define DEBUG_SUBSYSTEM S_CLASS
#define D_MOUNT (D_SUPER|D_CONFIG/*|D_WARNING */)
#define PRINT_CMD CDEBUG
return rc;
}
-
/* We can't call ll_fill_super by name because it lives in a module that
must be loaded after this one. */
void lustre_register_client_fill_super(int (*cfs)(struct super_block *sb,
struct lustre_cfg *lcfg);
static int echo_client_cleanup(struct obd_device *obddev);
-
/** \defgroup echo_helpers Helper functions
* @{
*/
goto out;
LASSERT(rc == 0);
-
rc = cl_echo_enqueue0(env, eco, offset,
offset + npages * PAGE_CACHE_SIZE - 1,
rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
}
/** @} echo_exports */
-
static u64 last_object_id;
static int
((__u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count > ~0UL))
return -EINVAL;
-
for (i = 0; i < lsm->lsm_stripe_count; i++) {
if (copy_from_user(lsm->lsm_oinfo[i],
((struct lov_stripe_md *)ulsm)-> \
/* block size to use for data verification */
#define OBD_ECHO_BLOCK_SIZE (4<<10)
-
#endif
__res; \
})
-
/**
* Sanity check: make sure there are no overlapping extents in the tree.
*/
int chunks = (ext->oe_end >> ppc_bits) - trunc_chunk;
pgoff_t last_index;
-
/* if there are no pages in this chunk, we can also free grants
* for the last chunk */
if (pages_in_chunk == 0) {
ar->ar_force_sync = 0;
}
-
/* this must be called while holding the loi list lock, which provides coverage
* for exit_cache, async_flag maintenance, and oap_request */
static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
struct osc_io *ols_owner;
};
-
/**
* Page state private for osc layer.
*/
return cl2osc_page(slice);
}
-
/*****************************************************************************
*
* io operations.
.cro_completion = osc_req_completion
};
-
int osc_io_init(const struct lu_env *env,
struct cl_object *obj, struct cl_io *io)
{
return 0;
}
-
static int osc_attr_get(const struct lu_env *env, struct cl_object *obj,
struct cl_attr *attr)
{
return 0;
}
-
void osc_object_set_contended(struct osc_object *obj)
{
obj->oo_contention_time = cfs_time_current();
return 0;
}
-
static const char *osc_list(struct list_head *head)
{
return list_empty(head) ? "-" : "+";
#include "../../include/linux/libcfs/libcfs.h"
-
#include "../include/lustre_dlm.h"
#include "../include/lustre_net.h"
#include "../include/lustre/lustre_user.h"
return rc;
}
-
static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
void *karg, void *uarg)
{
}
}
-
static void ptlrpc_master_callback(lnet_event_t *ev)
{
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
return -ENOMEM;
}
-
int ptlrpc_init_portals(void)
{
int rc = ptlrpc_ni_init();
spin_unlock(&imp->imp_lock); \
} while (0)
-
static int ptlrpc_connect_interpret(const struct lu_env *env,
struct ptlrpc_request *request,
void *data, int rc);
request->rq_repmsg)->cookie);
}
-
imp->imp_remote_handle =
*lustre_msg_get_handle(request->rq_repmsg);
&RMF_CAPA1
};
-
static const struct req_msg_field *ost_brw_client[] = {
&RMF_PTLRPC_BODY,
&RMF_OST_BODY,
*/
#define DEBUG_SUBSYSTEM S_CLASS
-
#include "../include/obd_support.h"
#include "../include/obd.h"
#include "../include/lprocfs_status.h"
#include "../include/obd_class.h"
#include "ptlrpc_internal.h"
-
static struct ll_rpc_opcode {
__u32 opcode;
const char *opname;
}
LPROC_SEQ_FOPS(ptlrpc_lprocfs_req_history_max);
-
static ssize_t threads_min_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
LNetMDUnlink(bd_mds[i]);
}
-
/**
* Register bulk at the sender for later transfer.
* Returns 0 on success or error code.
return rc;
}
-
/* ptlrpc/nrs_fifo.c */
extern struct ptlrpc_nrs_pol_conf nrs_conf_fifo;
if (rc != 0)
goto fail;
-
return rc;
fail:
/**
}
EXPORT_SYMBOL(lustre_msg_get_slv);
-
void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv)
{
switch (msg->lm_magic) {
}
EXPORT_SYMBOL(lustre_msg_get_limit);
-
void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit)
{
switch (msg->lm_magic) {
}
}
-
void ptlrpc_request_set_replen(struct ptlrpc_request *req)
{
int count = req_capsule_filled_sizes(&req->rq_pill, RCL_SERVER);
#include "ptlrpc_internal.h"
-
void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
int mdidx)
{
#define DEBUG_SUBSYSTEM S_RPC
-
#include "../include/obd_support.h"
#include "../include/obd_class.h"
#include "../include/lustre_net.h"
}
EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
-
/****************************************
* user descriptor helpers *
****************************************/
* bulk encryption page pools *
****************************************/
-
#define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *))
#define PAGES_PER_POOL (POINTERS_PER_PAGE)
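/*
 * Worked example (assuming a 4KB PAGE_CACHE_SIZE and 8-byte pointers):
 * POINTERS_PER_PAGE = 4096 / 8 = 512, so each pool tracks 512 pages,
 * i.e. 2MB of bulk encryption pages per pool.
 */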
}
}
-
static int cfs_hash_alg_id[] = {
[BULK_HASH_ALG_NULL] = CFS_HASH_ALG_NULL,
[BULK_HASH_ALG_ADLER32] = CFS_HASH_ALG_ADLER32,
#define SEC_GC_INTERVAL (30 * 60)
-
static struct mutex sec_gc_mutex;
static LIST_HEAD(sec_gc_list);
static spinlock_t sec_gc_list_lock;
static struct ptlrpc_thread sec_gc_thread;
static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
-
void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
{
LASSERT(sec->ps_policy->sp_cops->gc_ctx);
#define DEBUG_SUBSYSTEM S_SEC
-
#include "../include/obd_support.h"
#include "../include/obd_cksum.h"
#include "../include/obd_class.h"
#define DEBUG_SUBSYSTEM S_SEC
-
#include "../include/obd_support.h"
#include "../include/obd_cksum.h"
#include "../include/obd_class.h"
module_param(at_extra, int, 0644);
MODULE_PARM_DESC(at_extra, "How much extra time to give with each early reply");
-
/* forward ref */
static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt);
static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req);
svcpt->scp_rqbd_allocating++;
spin_unlock(&svcpt->scp_lock);
-
for (i = 0; i < svc->srv_nbuf_per_group; i++) {
/* NB: another thread might have recycled enough rqbds, we
* need to make sure it wouldn't over-allocate, see LU-1212. */
return 1;
}
-
static void
ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt)
{
ptlrpc_hr.hr_partitions = NULL;
}
-
/**
* Wait until all already scheduled replies are processed.
*/