Move the printk from each ftrace_raw_reg_event_foo() to
its caller, ftrace_event_enable_disable(). This saves each
regfunc trace event callback from handling the same error
report, which can be carried by the caller instead.
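As an illustration of the pattern (a simplified sketch, not the exact kernel
code; the example_* and register_trace_example names are made up):

	/* sketch: the callback just propagates the registration result */
	static int example_raw_reg_event(struct ftrace_event_call *call)
	{
		/* no printk here */
		return register_trace_example(ftrace_raw_event_example);
	}

	/* sketch: the single caller reports the failure once */
	static int example_enable(struct ftrace_event_call *call)
	{
		int ret = call->regfunc(call);

		if (ret)
			pr_info("event trace: Could not enable event %s\n",
				call->name);
		return ret;
	}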
See how much space this saves:
   text	   data	    bss	     dec	    hex	filename
5345151	1961864	7103260	14410275	 dbe223	vmlinux.o.old
5331487	1961864	7103260	14396611	 dbacc3	vmlinux.o
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Jason Baron <jbaron@redhat.com>
LKML-Reference: <4B1DC4AC.802@cn.fujitsu.com>
[start cmdline record before calling regfunc to avoid a window
where pid to comm resolution is lost]
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
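One user-visible consequence (assuming the usual debugfs mount point) is that
a failed tracepoint registration now comes back as an error from the write to
an event's enable file instead of only a printk. A rough userspace sketch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* path assumes debugfs mounted at /sys/kernel/debug */
		int fd = open("/sys/kernel/debug/tracing/events/sched/sched_switch/enable",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		/* if regfunc fails, the error from ftrace_event_enable_disable()
		 * is propagated and write() returns -1 with errno set */
		if (write(fd, "1", 1) < 0)
			perror("enable event");
		close(fd);
		return 0;
	}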
*
* static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
* {
- * int ret;
- *
- * ret = register_trace_<call>(ftrace_event_<call>);
- * if (!ret)
- * pr_info("event trace: Could not activate trace point "
- * "probe to <call>");
- * return ret;
+ * return register_trace_<call>(ftrace_event_<call>);
* }
*
* static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
\
static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
{ \
- int ret; \
- \
- ret = register_trace_##call(ftrace_raw_event_##call); \
- if (ret) \
- pr_info("event trace: Could not activate trace point " \
- "probe to %s\n", #call); \
- return ret; \
+ return register_trace_##call(ftrace_raw_event_##call); \
} \
\
static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);
-static void ftrace_event_enable_disable(struct ftrace_event_call *call,
+static int ftrace_event_enable_disable(struct ftrace_event_call *call,
int enable)
{
+ int ret = 0;
+
switch (enable) {
case 0:
if (call->enabled) {
break;
case 1:
if (!call->enabled) {
- call->enabled = 1;
tracing_start_cmdline_record();
- call->regfunc(call);
+ ret = call->regfunc(call);
+ if (ret) {
+ tracing_stop_cmdline_record();
+ pr_info("event trace: Could not enable event "
+ "%s\n", call->name);
+ break;
+ }
+ call->enabled = 1;
}
break;
}
+
+ return ret;
}
static void ftrace_clear_events(void)
case 0:
case 1:
mutex_lock(&event_mutex);
- ftrace_event_enable_disable(call, val);
+ ret = ftrace_event_enable_disable(call, val);
mutex_unlock(&event_mutex);
break;
*ppos += cnt;
- return cnt;
+ return ret ? ret : cnt;
}
static ssize_t
mutex_lock(&syscall_trace_lock);
if (!sys_refcount_enter)
ret = register_trace_sys_enter(ftrace_syscall_enter);
- if (ret) {
- pr_info("event trace: Could not activate"
- "syscall entry trace point");
- } else {
+ if (!ret) {
set_bit(num, enabled_enter_syscalls);
sys_refcount_enter++;
}
mutex_lock(&syscall_trace_lock);
if (!sys_refcount_exit)
ret = register_trace_sys_exit(ftrace_syscall_exit);
- if (ret) {
- pr_info("event trace: Could not activate"
- "syscall exit trace point");
- } else {
+ if (!ret) {
set_bit(num, enabled_exit_syscalls);
sys_refcount_exit++;
}