preempt_enable_notrace();
}
+ /* Our two options */
+ enum {
+ TRACE_FUNC_OPT_STACK = 0x1,
+ TRACE_FUNC_OPT_PSTORE = 0x2,
+ };
+
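+ /*
+  * Forward declaration so the trace callbacks below can check
+  * func_flags.val for the options selected above.
+  */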
+ static struct tracer_flags func_flags;
+
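+ /*
+  * ftrace callbacks now receive the ftrace_ops they were registered
+  * with and, if requested at registration time, the saved pt_regs.
+  */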
static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
+function_trace_call(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *pt_regs)
{
struct trace_array *tr = func_trace;
struct trace_array_cpu *data;
static struct ftrace_ops trace_stack_ops __read_mostly =
{
.func = function_stack_trace_call,
- .flags = FTRACE_OPS_FL_GLOBAL,
+ .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
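+	/* RECURSION_SAFE: this callback handles its own recursion protection */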
};
- /* Our two options */
- enum {
- TRACE_FUNC_OPT_STACK = 0x1,
- };
-
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },