#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define INIT_REGEX_LOCK(opsname) \
+ .regex_lock = __MUTEX_INITIALIZER(opsname.regex_lock),
+#else
+#define INIT_REGEX_LOCK(opsname)
+#endif
+
static struct ftrace_ops ftrace_list_end __read_mostly = {
.func = ftrace_stub,
.flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
while (likely(op = rcu_dereference_raw((op)->next)) && \
unlikely((op) != &ftrace_list_end))
+static inline void ftrace_ops_init(struct ftrace_ops *ops)
+{
+#ifdef CONFIG_DYNAMIC_FTRACE
+ if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
+ mutex_init(&ops->regex_lock);
+ ops->flags |= FTRACE_OPS_FL_INITIALIZED;
+ }
+#endif
+}
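
The lazy initializer above pairs with the INIT_REGEX_LOCK() static initializer: the ftrace_ops defined inside ftrace.c get FTRACE_OPS_FL_INITIALIZED and a compile-time mutex initializer, while ops supplied by other code have their regex_lock set up on first use by ftrace_ops_init(). A minimal sketch of such a caller-defined ops follows; it is not part of the patch, the names my_trace_func and my_ops are illustrative, and the callback signature assumes the ftrace_func_t prototype of this kernel era.

/*
 * Hypothetical sketch: an ops that never uses INIT_REGEX_LOCK().
 * FTRACE_OPS_FL_INITIALIZED stays clear, so the first entry point
 * that calls ftrace_ops_init() runs mutex_init() on ->regex_lock
 * exactly once.
 */
static void my_trace_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* tracing callback body */
}

static struct ftrace_ops my_ops = {
	.func = my_trace_func,
	/* no .flags and no .regex_lock initializer here */
};
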
+
/**
* ftrace_nr_registered_ops - return number of ops registered
*
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
.func = function_profile_call,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+ INIT_REGEX_LOCK(ftrace_profile_ops)
};
static int register_ftrace_profiler(void)
.func = ftrace_stub,
.notrace_hash = EMPTY_HASH,
.filter_hash = EMPTY_HASH,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+ INIT_REGEX_LOCK(global_ops)
};
-static DEFINE_MUTEX(ftrace_regex_lock);
-
struct ftrace_page {
struct ftrace_page *next;
struct dyn_ftrace *records;
void ftrace_free_filter(struct ftrace_ops *ops)
{
+ ftrace_ops_init(ops);
free_ftrace_hash(ops->filter_hash);
free_ftrace_hash(ops->notrace_hash);
}
struct ftrace_hash *hash;
int ret = 0;
+ ftrace_ops_init(ops);
+
if (unlikely(ftrace_disabled))
return -ENODEV;
}
}
- mutex_lock(&ftrace_regex_lock);
+ mutex_lock(&ops->regex_lock);
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
}
} else
file->private_data = iter;
- mutex_unlock(&ftrace_regex_lock);
+ mutex_unlock(&ops->regex_lock);
return ret;
}
static struct ftrace_ops trace_probe_ops __read_mostly =
{
.func = function_trace_probe_call,
+ .flags = FTRACE_OPS_FL_INITIALIZED,
+ INIT_REGEX_LOCK(trace_probe_ops)
};
static int ftrace_probe_registered;
if (!cnt)
return 0;
- mutex_lock(&ftrace_regex_lock);
-
- ret = -ENODEV;
- if (unlikely(ftrace_disabled))
- goto out_unlock;
-
if (file->f_mode & FMODE_READ) {
struct seq_file *m = file->private_data;
iter = m->private;
} else
iter = file->private_data;
+ mutex_lock(&iter->ops->regex_lock);
+
+ ret = -ENODEV;
+ if (unlikely(ftrace_disabled))
+ goto out_unlock;
+
parser = &iter->parser;
read = trace_get_user(parser, ubuf, cnt, ppos);
ret = read;
out_unlock:
- mutex_unlock(&ftrace_regex_lock);
+ mutex_unlock(&iter->ops->regex_lock);
return ret;
}
if (!hash)
return -ENOMEM;
- mutex_lock(&ftrace_regex_lock);
+ mutex_lock(&ops->regex_lock);
if (reset)
ftrace_filter_reset(hash);
if (buf && !ftrace_match_records(hash, buf, len)) {
mutex_unlock(&ftrace_lock);
out_regex_unlock:
- mutex_unlock(&ftrace_regex_lock);
+ mutex_unlock(&ops->regex_lock);
free_ftrace_hash(hash);
return ret;
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
int remove, int reset)
{
+ ftrace_ops_init(ops);
return ftrace_set_addr(ops, ip, remove, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset)
{
+ ftrace_ops_init(ops);
return ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset)
{
+ ftrace_ops_init(ops);
return ftrace_set_regex(ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);
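
With the per-ops lock and lazy init in place, these exported setters can be called on a caller-defined ops before it is registered. A hedged usage sketch, reusing the illustrative my_ops from the earlier sketch (my_tracer_init and the "schedule*" pattern are made up for the example):

static unsigned char my_filter[] = "schedule*";

static int __init my_tracer_init(void)
{
	int ret;

	/*
	 * The first call reaches ftrace_ops_init(), which initializes
	 * my_ops.regex_lock before ftrace_set_hash() takes it.
	 */
	ret = ftrace_set_filter(&my_ops, my_filter, strlen(my_filter), 1);
	if (ret)
		return ret;

	return register_ftrace_function(&my_ops);
}
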
{
char *func;
+ ftrace_ops_init(ops);
+
while (buf) {
func = strsep(&buf, ",");
ftrace_set_regex(ops, func, strlen(func), 0, enable);
int filter_hash;
int ret;
- mutex_lock(&ftrace_regex_lock);
if (file->f_mode & FMODE_READ) {
iter = m->private;
-
seq_release(inode, file);
} else
iter = file->private_data;
+ mutex_lock(&iter->ops->regex_lock);
+
parser = &iter->parser;
if (trace_parser_loaded(parser)) {
parser->buffer[parser->idx] = 0;
- free_ftrace_hash(iter->hash);
- kfree(iter);
- mutex_unlock(&ftrace_regex_lock);
+ mutex_unlock(&iter->ops->regex_lock);
+ free_ftrace_hash(iter->hash);
+ kfree(iter);
return 0;
}
static struct ftrace_ops global_ops = {
.func = ftrace_stub,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+ INIT_REGEX_LOCK(global_ops)
};
static int __init ftrace_nodyn_init(void)
}
static struct ftrace_ops control_ops = {
- .func = ftrace_ops_control_func,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+ .func = ftrace_ops_control_func,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+ INIT_REGEX_LOCK(control_ops)
};
static inline void
{
int ret = -1;
+ ftrace_ops_init(ops);
+
mutex_lock(&ftrace_lock);
ret = __register_ftrace_function(ops);