{
const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str :
libcfs_debug_dbg2str;
- int         len = 0;
- const char *token;
- int         i;
+ int len = 0;
+ const char *token;
+ int i;
if (!mask) { /* "0" */
if (size > 0)
{
const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str :
libcfs_debug_dbg2str;
- int   m = 0;
- int   matched;
- int   n;
- int   t;
+ int m = 0;
+ int matched;
+ int n;
+ int t;
/* Allow a number for backwards compatibility */
int libcfs_debug_init(unsigned long bufsize)
{
- int          rc = 0;
unsigned int max = libcfs_debug_mb;
+ int rc = 0;
init_waitqueue_head(&debug_ctlwq);
static struct hlist_head *
cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
- struct cfs_hash_head_dep  *head;
+ struct cfs_hash_head_dep *head;
head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
return &head[bd->bd_offset].hd_head;
return;
spin_lock(&hs->hs_dep_lock);
- hs->hs_dep_max  = dep_cur;
- hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
- hs->hs_dep_off  = bd->bd_offset;
+ hs->hs_dep_max = dep_cur;
+ hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
+ hs->hs_dep_off = bd->bd_offset;
hs->hs_dep_bits = hs->hs_cur_bits;
spin_unlock(&hs->hs_dep_lock);
return NULL;
}
- new_bkts[i]->hsb_index   = i;
- new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
- new_bkts[i]->hsb_depmax  = -1; /* unknown */
+ new_bkts[i]->hsb_index = i;
+ new_bkts[i]->hsb_version = 1; /* shouldn't be zero */
+ new_bkts[i]->hsb_depmax = -1; /* unknown */
bd.bd_bucket = new_bkts[i];
cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
INIT_HLIST_HEAD(hhead);
int bits;
spin_lock(&hs->hs_dep_lock);
- dep  = hs->hs_dep_max;
- bkt  = hs->hs_dep_bkt;
- off  = hs->hs_dep_off;
+ dep = hs->hs_dep_max;
+ bkt = hs->hs_dep_bkt;
+ off = hs->hs_dep_off;
bits = hs->hs_dep_bits;
spin_unlock(&hs->hs_dep_lock);
hs->hs_max_bits = (__u8)max_bits;
hs->hs_bkt_bits = (__u8)bkt_bits;
- hs->hs_ops         = ops;
+ hs->hs_ops = ops;
hs->hs_extra_bytes = extra_bytes;
hs->hs_rehash_bits = 0;
cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
}
if (hnode) {
- obj  = cfs_hash_object(hs, hnode);
+ obj = cfs_hash_object(hs, hnode);
bits = cfs_hash_rehash_bits(hs);
}
int
cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
{
- int    rc;
+ int rc;
rc = snprintf(buf, len, "%d\t: %d\n", 0, 0);
len -= rc;
cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
struct lock_class_key *keys)
{
- struct cfs_percpt_lock *pcl;
- spinlock_t             *lock;
- int                     i;
+ struct cfs_percpt_lock *pcl;
+ spinlock_t *lock;
+ int i;
/* NB: cptab can be NULL, pcl will be for HW CPUs on that case */
LIBCFS_ALLOC(pcl, sizeof(*pcl));
cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
__acquires(pcl->pcl_locks)
{
- int   ncpt = cfs_cpt_number(pcl->pcl_cptab);
- int   i;
+ int ncpt = cfs_cpt_number(pcl->pcl_cptab);
+ int i;
LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt);
cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
__releases(pcl->pcl_locks)
{
- int   ncpt = cfs_cpt_number(pcl->pcl_cptab);
- int   i;
+ int ncpt = cfs_cpt_number(pcl->pcl_cptab);
+ int i;
index = ncpt == 1 ? 0 : index;
void
cfs_percpt_free(void *vars)
{
- struct cfs_var_array *arr;
- int                   i;
+ struct cfs_var_array *arr;
+ int i;
arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
void *
cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size)
{
- struct cfs_var_array *arr;
- int                   count;
- int                   i;
+ struct cfs_var_array *arr;
+ int count;
+ int i;
count = cfs_cpt_number(cptab);
void
cfs_array_free(void *vars)
{
- struct cfs_var_array *arr;
- int                   i;
+ struct cfs_var_array *arr;
+ int i;
arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
void *
cfs_array_alloc(int count, unsigned int size)
{
- struct cfs_var_array *arr;
- int                   i;
+ struct cfs_var_array *arr;
+ int i;
LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count]));
if (!arr)
return NULL;
- arr->va_count  = count;
- arr->va_size   = size;
+ arr->va_count = count;
+ arr->va_size = size;
for (i = 0; i < count; i++) {
LIBCFS_ALLOC(arr->va_ptrs[i], size);
char *cfs_firststr(char *str, size_t size)
{
size_t i = 0;
- char   *end;
+ char *end;
/* trim leading spaces */
while (i < size && *str && isspace(*str)) {
cfs_range_expr_parse(struct cfs_lstr *src, unsigned int min, unsigned int max,
int bracketed, struct cfs_range_expr **expr)
{
- struct cfs_range_expr *re;
- struct cfs_lstr        tok;
+ struct cfs_range_expr *re;
+ struct cfs_lstr tok;
LIBCFS_ALLOC(re, sizeof(*re));
if (!re)
int
cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list)
{
- struct cfs_range_expr  *expr;
+ struct cfs_range_expr *expr;
list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
if (value >= expr->re_lo && value <= expr->re_hi &&
int
cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp)
{
- struct cfs_range_expr *expr;
- __u32                 *val;
- int                    count = 0;
- int                    i;
+ struct cfs_range_expr *expr;
+ __u32 *val;
+ int count = 0;
+ int i;
list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
for (i = expr->re_lo; i <= expr->re_hi; i++) {
cfs_expr_list_parse(char *str, int len, unsigned int min, unsigned int max,
struct cfs_expr_list **elpp)
{
- struct cfs_expr_list  *expr_list;
- struct cfs_range_expr *expr;
- struct cfs_lstr        src;
- int                    rc;
+ struct cfs_expr_list *expr_list;
+ struct cfs_range_expr *expr;
+ struct cfs_lstr src;
+ int rc;
LIBCFS_ALLOC(expr_list, sizeof(*expr_list));
if (!expr_list)
void
cfs_cpt_table_free(struct cfs_cpt_table *cptab)
{
- int    i;
+ int i;
if (cptab->ctb_cpu2cpt) {
LIBCFS_FREE(cptab->ctb_cpu2cpt,
cfs_cpt_table_alloc(unsigned int ncpt)
{
struct cfs_cpt_table *cptab;
- int                   i;
+ int i;
LIBCFS_ALLOC(cptab, sizeof(*cptab));
if (!cptab)
int
cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
{
- char  *tmp = buf;
- int    rc = 0;
- int    i;
- int    j;
+ char *tmp = buf;
+ int rc = 0;
+ int i;
+ int j;
for (i = 0; i < cptab->ctb_nparts; i++) {
if (len > 0) {
int
cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
- int    node;
+ int node;
LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);
void
cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
- int    node;
- int    i;
+ int node;
+ int i;
LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
int
cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
{
- int    i;
+ int i;
if (!cpumask_weight(mask) ||
cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) {
void
cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
{
- int    i;
+ int i;
for_each_cpu(i, mask)
cfs_cpt_unset_cpu(cptab, cpt, i);
int
cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
- cpumask_t  *mask;
- int         rc;
+ cpumask_t *mask;
+ int rc;
if (node < 0 || node >= MAX_NUMNODES) {
CDEBUG(D_INFO,
int
cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
{
- int    i;
+ int i;
for_each_node_mask(i, *mask) {
if (!cfs_cpt_set_node(cptab, cpt, i))
void
cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
{
- int    i;
+ int i;
for_each_node_mask(i, *mask)
cfs_cpt_unset_node(cptab, cpt, i);
void
cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
{
- int    last;
- int    i;
+ int last;
+ int i;
if (cpt == CFS_CPT_ANY) {
last = cptab->ctb_nparts - 1;
int
cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
{
- nodemask_t  *mask;
- int          weight;
- int          rotor;
- int          node;
+ nodemask_t *mask;
+ int weight;
+ int rotor;
+ int node;
/* convert CPU partition ID to HW node id */
int
cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
{
- int    cpu = smp_processor_id();
- int    cpt = cptab->ctb_cpu2cpt[cpu];
+ int cpu = smp_processor_id();
+ int cpt = cptab->ctb_cpu2cpt[cpu];
if (cpt < 0) {
if (!remap)
int
cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
{
- cpumask_t   *cpumask;
- nodemask_t  *nodemask;
- int          rc;
- int          i;
+ cpumask_t *cpumask;
+ nodemask_t *nodemask;
+ int rc;
+ int i;
LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
cpumask_t *node, int number)
{
- cpumask_t  *socket = NULL;
- cpumask_t  *core = NULL;
- int         rc = 0;
- int         cpu;
+ cpumask_t *socket = NULL;
+ cpumask_t *core = NULL;
+ int rc = 0;
+ int cpu;
LASSERT(number > 0);
LASSERT(!cpumask_empty(socket));
while (!cpumask_empty(socket)) {
- int    i;
+ int i;
/* get cpumask for hts in the same core */
cpumask_copy(core, topology_sibling_cpumask(cpu));
}
}
- out:
+out:
if (socket)
LIBCFS_FREE(socket, cpumask_size());
if (core)
ncpt = nnode;
- out:
+out:
#if (BITS_PER_LONG == 32)
/* config many CPU partitions on 32-bit system could consume
* too much memory
cfs_cpt_table_create(int ncpt)
{
struct cfs_cpt_table *cptab = NULL;
- cpumask_t            *mask = NULL;
- int                   cpt = 0;
- int                   num;
- int                   rc;
- int                   i;
+ cpumask_t *mask = NULL;
+ int cpt = 0;
+ int num;
+ int rc;
+ int i;
rc = cfs_cpt_num_estimate();
if (ncpt <= 0)
while (!cpumask_empty(mask)) {
struct cfs_cpu_partition *part;
- int                       n;
+ int n;
/*
* Each emulated NUMA node has all allowed CPUs in
static struct cfs_cpt_table *
cfs_cpt_table_create_pattern(char *pattern)
{
- struct cfs_cpt_table  *cptab;
+ struct cfs_cpt_table *cptab;
char *str;
- int   node = 0;
- int   high;
+ int node = 0;
+ int high;
int ncpt = 0;
int cpt;
int rc;
- int   c;
+ int c;
int i;
str = cfs_trimwhite(pattern);
high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
for (str = cfs_trimwhite(pattern), c = 0;; c++) {
- struct cfs_range_expr *range;
- struct cfs_expr_list  *el;
- char                  *bracket = strchr(str, '[');
- int                    n;
+ struct cfs_range_expr *range;
+ struct cfs_expr_list *el;
+ char *bracket = strchr(str, '[');
+ int n;
if (!bracket) {
if (*str) {
static int
cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
- unsigned int  cpu = (unsigned long)hcpu;
- bool          warn;
+ unsigned int cpu = (unsigned long)hcpu;
+ bool warn;
switch (action) {
case CPU_DEAD:
unsigned int key_len)
{
struct crypto_ahash *tfm;
- int                 err = 0;
+ int err = 0;
*type = cfs_crypto_hash_type(hash_alg);
unsigned char *key, unsigned int key_len,
unsigned char *hash, unsigned int *hash_len)
{
- struct scatterlist  sl;
+ struct scatterlist sl;
struct ahash_request *req;
- int                                err;
- const struct cfs_crypto_hash_type *type;
+ int err;
+ const struct cfs_crypto_hash_type *type;
if (!buf || !buf_len || !hash_len)
return -EINVAL;
unsigned char *key, unsigned int key_len)
{
struct ahash_request *req;
- int                                err;
- const struct cfs_crypto_hash_type *type;
+ int err;
+ const struct cfs_crypto_hash_type *type;
err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
unsigned char *hash, unsigned int *hash_len)
{
- int    err;
+ int err;
struct ahash_request *req = (void *)hdesc;
int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
{
int buf_len = max(PAGE_SIZE, 1048576UL);
void *buf;
- unsigned long  start, end;
- int            bcount, err = 0;
+ unsigned long start, end;
+ int bcount, err = 0;
struct page *page;
unsigned char hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
unsigned int hash_len = sizeof(hash);
CDEBUG(D_INFO, "Crypto hash algorithm %s test error: rc = %d\n",
cfs_crypto_hash_name(hash_alg), err);
} else {
- unsigned long   tmp;
+ unsigned long tmp;
tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) *
1000) / (1024 * 1024);
void libcfs_run_debug_log_upcall(char *file)
{
char *argv[3];
- int    rc;
+ int rc;
char *envp[] = {
"HOME=/",
"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
void libcfs_run_upcall(char **argv)
{
- int    rc;
- int    argc;
+ int rc;
+ int argc;
char *envp[] = {
"HOME=/",
"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
sigset_t
cfs_block_allsigs(void)
{
- unsigned long  flags;
- sigset_t       old;
+ unsigned long flags;
+ sigset_t old;
spin_lock_irqsave(&current->sighand->siglock, flags);
old = current->blocked;
sigset_t cfs_block_sigs(unsigned long sigs)
{
- unsigned long  flags;
- sigset_t       old;
+ unsigned long flags;
+ sigset_t old;
spin_lock_irqsave(&current->sighand->siglock, flags);
old = current->blocked;
void
cfs_restore_sigs(sigset_t old)
{
- unsigned long  flags;
+ unsigned long flags;
spin_lock_irqsave(&current->sighand->siglock, flags);
current->blocked = old;
int cfs_tracefile_init_arch(void)
{
- int    i;
- int    j;
+ int i;
+ int j;
struct cfs_trace_cpu_data *tcd;
/* initialize trace_data */
void cfs_tracefile_fini_arch(void)
{
- int    i;
- int    j;
+ int i;
+ int j;
for (i = 0; i < num_possible_cpus(); i++)
for (j = 0; j < 3; j++) {
static int __proc_dobitmasks(void *data, int write,
loff_t pos, void __user *buffer, int nob)
{
- const int  tmpstrlen = 512;
- char      *tmpstr;
- int        rc;
+ const int tmpstrlen = 512;
+ char *tmpstr;
+ int rc;
unsigned int *mask = data;
- int           is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0;
- int           is_printk = (mask == &libcfs_printk) ? 1 : 0;
+ int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0;
+ int is_printk = (mask == &libcfs_printk) ? 1 : 0;
rc = cfs_trace_allocate_string_buffer(&tmpstr, tmpstrlen);
if (rc < 0)
loff_t pos, void __user *buffer, int nob)
{
char *buf = NULL;
- int   len = 4096;
- int   rc = 0;
+ int len = 4096;
+ int rc = 0;
if (write)
return -EPERM;
* ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
* only ->tcd_pages are spilled.
*/
- int                pc_want_daemon_pages;
+ int pc_want_daemon_pages;
};
struct tracefiled_ctl {
struct completion tctl_start;
struct completion tctl_stop;
- wait_queue_head_t  tctl_waitq;
+ wait_queue_head_t tctl_waitq;
pid_t tctl_pid;
atomic_t tctl_shutdown;
};
/*
* page itself
*/
- struct page       *page;
+ struct page *page;
/*
* linkage into one of the lists in trace_data_union or
* page_collection
*/
- struct list_head   linkage;
+ struct list_head linkage;
/*
* number of bytes used within this page
*/
- unsigned int       used;
+ unsigned int used;
/*
* cpu that owns this page
*/
- unsigned short     cpu;
+ unsigned short cpu;
/*
* type(context) of this page
*/
- unsigned short     type;
+ unsigned short type;
};
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
- struct page           *page;
+ struct page *page;
struct cfs_trace_page *tage;
/* My caller is trying to free memory */
const char *format, ...)
{
va_list args;
- int      rc;
+ int rc;
va_start(args, format);
rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
const char *format2, ...)
{
struct cfs_trace_cpu_data *tcd = NULL;
- struct ptldebug_header     header = {0};
- struct cfs_trace_page     *tage;
+ struct ptldebug_header header = { 0 };
+ struct cfs_trace_page *tage;
/* string_buf is used only if tcd != NULL, and is always set then */
- char                      *string_buf = NULL;
- char                      *debug_buf;
- int                        known_size;
- int                        needed = 85; /* average message length */
- int                        max_nob;
- va_list                    ap;
- int                        depth;
- int                        i;
- int                        remain;
- int                        mask = msgdata->msg_mask;
- const char                *file = kbasename(msgdata->msg_file);
- struct cfs_debug_limit_state  *cdls = msgdata->msg_cdls;
+ char *string_buf = NULL;
+ char *debug_buf;
+ int known_size;
+ int needed = 85; /* average message length */
+ int max_nob;
+ va_list ap;
+ int depth;
+ int i;
+ int remain;
+ int mask = msgdata->msg_mask;
+ const char *file = kbasename(msgdata->msg_file);
+ struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;
tcd = cfs_trace_get_tcd();
* CPUs have been stopped during a panic. If this isn't true for some
* arch, this will have to be implemented separately in each arch.
*/
- int                        i;
- int                        j;
struct cfs_trace_cpu_data *tcd;
+ int i;
+ int j;
INIT_LIST_HEAD(&pc->pc_pages);
int cfs_tracefile_dump_all_pages(char *filename)
{
- struct page_collection  pc;
- struct file            *filp;
- struct cfs_trace_page  *tage;
- struct cfs_trace_page  *tmp;
- char                   *buf;
+ struct page_collection pc;
+ struct file *filp;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
+ char *buf;
mm_segment_t __oldfs;
int rc;
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
const char __user *usr_buffer, int usr_buffer_nob)
{
- int    nob;
+ int nob;
if (usr_buffer_nob > knl_buffer_nob)
return -EOVERFLOW;
* NB if 'append' != NULL, it's a single character to append to the
* copied out string - usually "\n" or "" (i.e. a terminating zero byte)
*/
- int   nob = strlen(knl_buffer);
+ int nob = strlen(knl_buffer);
if (nob > usr_buffer_nob)
nob = usr_buffer_nob;
int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
{
- char  *str;
- int    rc;
+ char *str;
+ int rc;
rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
if (rc)
int cfs_trace_daemon_command(char *str)
{
- int    rc = 0;
+ int rc = 0;
cfs_tracefile_write_lock();
int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
{
char *str;
- int   rc;
+ int rc;
rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
if (rc)
int cfs_tracefile_init(int max_pages)
{
struct cfs_trace_cpu_data *tcd;
- int                        i;
- int                        j;
- int                        rc;
- int                        factor;
+ int i;
+ int j;
+ int rc;
+ int factor;
rc = cfs_tracefile_init_arch();
if (rc)
/* trace file lock routines */
#define TRACEFILE_NAME_SIZE 1024
-extern char      cfs_tracefile[TRACEFILE_NAME_SIZE];
+extern char cfs_tracefile[TRACEFILE_NAME_SIZE];
extern long long cfs_tracefile_size;
void libcfs_run_debug_log_upcall(char *file);
void libcfs_debug_dumplog_internal(void *arg);
void libcfs_register_panic_notifier(void);
void libcfs_unregister_panic_notifier(void);
-extern int       libcfs_panic_in_progress;
+extern int libcfs_panic_in_progress;
int cfs_trace_max_debug_mb(void);
#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
* tcd_for_each_type_lock
*/
spinlock_t tcd_lock;
- unsigned long      tcd_lock_flags;
+ unsigned long tcd_lock_flags;
/*
* pages with trace records not yet processed by tracefiled.
*/
- struct list_head   tcd_pages;
+ struct list_head tcd_pages;
/* number of pages on ->tcd_pages */
- unsigned long      tcd_cur_pages;
+ unsigned long tcd_cur_pages;
/*
* pages with trace records already processed by
* (put_pages_on_daemon_list()). LRU pages from this list are
* discarded when list grows too large.
*/
- struct list_head   tcd_daemon_pages;
+ struct list_head tcd_daemon_pages;
/* number of pages on ->tcd_daemon_pages */
- unsigned long      tcd_cur_daemon_pages;
+ unsigned long tcd_cur_daemon_pages;
/*
* Maximal number of pages allowed on ->tcd_pages and
* Always TCD_MAX_PAGES * tcd_pages_factor / 100 in current
* implementation.
*/
- unsigned long      tcd_max_pages;
+ unsigned long tcd_max_pages;
/*
* preallocated pages to write trace records into. Pages from
* TCD_STOCK_PAGES pagesful are consumed by trace records all
* emitted in non-blocking contexts. Which is quite unlikely.
*/
- struct list_head   tcd_stock_pages;
+ struct list_head tcd_stock_pages;
/* number of pages on ->tcd_stock_pages */
- unsigned long      tcd_cur_stock_pages;
+ unsigned long tcd_cur_stock_pages;
- unsigned short     tcd_shutting_down;
- unsigned short     tcd_cpu;
- unsigned short     tcd_type;
+ unsigned short tcd_shutting_down;
+ unsigned short tcd_cpu;
+ unsigned short tcd_type;
/* The factors to share debug memory. */
- unsigned short     tcd_pages_factor;
+ unsigned short tcd_pages_factor;
} tcd;
char __pad[L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))];
};
/* chain on global list */
struct list_head ws_list;
/** serialised workitems */
- spinlock_t        ws_lock;
+ spinlock_t ws_lock;
/** where schedulers sleep */
wait_queue_head_t ws_waitq;
/** concurrent workitems */
*/
struct list_head ws_rerunq;
/** CPT-table for this scheduler */
- struct cfs_cpt_table  *ws_cptab;
+ struct cfs_cpt_table *ws_cptab;
/** CPT id for affinity */
- int                   ws_cpt;
+ int ws_cpt;
/** number of scheduled workitems */
- int                   ws_nscheduled;
+ int ws_nscheduled;
/** started scheduler thread, protected by cfs_wi_data::wi_glock */
- unsigned int          ws_nthreads:30;
+ unsigned int ws_nthreads:30;
/** shutting down, protected by cfs_wi_data::wi_glock */
- unsigned int          ws_stopping:1;
+ unsigned int ws_stopping:1;
/** serialize starting thread, protected by cfs_wi_data::wi_glock */
- unsigned int          ws_starting:1;
+ unsigned int ws_starting:1;
/** scheduler name */
- char                  ws_name[CFS_WS_NAME_LEN];
+ char ws_name[CFS_WS_NAME_LEN];
};
static struct cfs_workitem_data {
/** serialize */
spinlock_t wi_glock;
/** list of all schedulers */
- struct list_head  wi_scheds;
+ struct list_head wi_scheds;
/** WI module is initialized */
int wi_init;
/** shutting down the whole WI module */
int
cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
{
- int    rc;
+ int rc;
LASSERT(!in_interrupt()); /* because we use plain spinlock */
LASSERT(!sched->ws_stopping);
static int cfs_wi_scheduler(void *arg)
{
- struct cfs_wi_sched  *sched = (struct cfs_wi_sched *)arg;
+ struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg;
cfs_block_allsigs();
spin_lock(&sched->ws_lock);
while (!sched->ws_stopping) {
- int                 nloops = 0;
- int                 rc;
+ int nloops = 0;
+ int rc;
struct cfs_workitem *wi;
while (!list_empty(&sched->ws_runq) &&
LASSERT(sched->ws_nscheduled > 0);
sched->ws_nscheduled--;
- wi->wi_running   = 1;
+ wi->wi_running = 1;
wi->wi_scheduled = 0;
spin_unlock(&sched->ws_lock);
nloops++;
- rc = (*wi->wi_action) (wi);
+ rc = (*wi->wi_action)(wi);
spin_lock(&sched->ws_lock);
if (rc) /* WI should be dead, even be freed! */
void
cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
{
- int    i;
+ int i;
LASSERT(cfs_wi_data.wi_init);
LASSERT(!cfs_wi_data.wi_stopping);
cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
int cpt, int nthrs, struct cfs_wi_sched **sched_pp)
{
- struct cfs_wi_sched *sched;
- int                  rc;
+ struct cfs_wi_sched *sched;
+ int rc;
LASSERT(cfs_wi_data.wi_init);
LASSERT(!cfs_wi_data.wi_stopping);
rc = 0;
while (nthrs > 0) {
- char                name[16];
+ char name[16];
struct task_struct *task;
spin_lock(&cfs_wi_data.wi_glock);
void
cfs_wi_shutdown(void)
{
- struct cfs_wi_sched  *sched;
+ struct cfs_wi_sched *sched;
struct cfs_wi_sched *temp;
spin_lock(&cfs_wi_data.wi_glock);