void trace_graph_entry(struct ftrace_graph_ent *trace)
int cpu;
int pc;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
	pc = preempt_count();
	__trace_graph_entry(tr, data, trace, flags, pc);
}
atomic_dec(&data->disabled);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
}
void trace_graph_return(struct ftrace_graph_ret *trace)
int cpu;
int pc;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
	pc = preempt_count();
	__trace_graph_return(tr, data, trace, flags, pc);
}
atomic_dec(&data->disabled);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
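
Both graph hooks above share one guard pattern: interrupts are disabled across the whole record step, and a per-cpu "disabled" counter stops the hook from re-entering itself if recording an event itself hits another traceable event. A minimal sketch of that pattern, with hypothetical names (my_trace_hook, my_record_event) standing in for the tracer internals:

	/* Sketch only: the per-cpu recursion guard used by both hooks above. */
	/* my_record_event() stands in for __trace_graph_entry()/__trace_graph_return(). */
	static void my_record_event(struct trace_array *tr, struct trace_array_cpu *data,
				    unsigned long flags, int pc);

	static void my_trace_hook(struct trace_array *tr)
	{
		struct trace_array_cpu *data;
		unsigned long flags;
		long disabled;
		int cpu;

		local_irq_save(flags);		/* no irq handler can nest into us */
		cpu = raw_smp_processor_id();	/* stable: irqs are off, no migration */
		data = tr->data[cpu];
		disabled = atomic_inc_return(&data->disabled);
		if (likely(disabled == 1))	/* record only as first entrant on this cpu */
			my_record_event(tr, data, flags, preempt_count());
		atomic_dec(&data->disabled);
		local_irq_restore(flags);
	}
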
static ssize_t tracing_cpumask_write(struct file *filp, const char __user *ubuf, size_t count, loff_t *ppos)
if (err)
goto err_unlock;
- raw_local_irq_disable();
+ local_irq_disable();
__raw_spin_lock(&ftrace_max_lock);
for_each_tracing_cpu(cpu) {
/*
 * Increase/decrease the disabled counter if we are
 * about to flip a bit in the cpumask:
 */
if (cpu_isset(cpu, tracing_cpumask) &&
		!cpu_isset(cpu, tracing_cpumask_new)) {
	atomic_inc(&global_trace.data[cpu]->disabled);
}
if (!cpu_isset(cpu, tracing_cpumask) &&
		cpu_isset(cpu, tracing_cpumask_new)) {
	atomic_dec(&global_trace.data[cpu]->disabled);
}
}
__raw_spin_unlock(&ftrace_max_lock);
- raw_local_irq_enable();
+ local_irq_enable();
tracing_cpumask = tracing_cpumask_new;
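
Note the split primitives in this hunk: the irq flip uses the annotated local_irq_disable()/local_irq_enable(), while the lock stays a __raw_spin_lock(). The raw lock keeps lockdep from recursing into the tracer's own locking, but the annotated irq helpers keep lockdep's view of the interrupt state accurate, which is what the switch away from the raw_ irq helpers buys. The combination is effectively an open-coded spin_lock_irq(). Shape only, with the loop body elided:

	local_irq_disable();			/* annotated: lockdep sees irqs go off */
	__raw_spin_lock(&ftrace_max_lock);	/* raw lock: invisible to lockdep */
	/* ... adjust each cpu's disabled counter for flipped mask bits ... */
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_enable();			/* annotated: lockdep sees irqs back on */
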
static void probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
if (unlikely(!tr))
return;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
cpu = raw_smp_processor_id();
if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
goto out;
out:
atomic_dec(&tr->data[cpu]->disabled);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
}
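
The branch probe above uses the same per-cpu counter as an early-out rather than a wrapper: any entrant that is not the first on this cpu jumps straight to the unwind path. Guard shape only, with the ring-buffer record step elided:

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;		/* this cpu is already inside the probe */
	/* ... reserve and fill the trace entry ... */
 out:
	atomic_dec(&tr->data[cpu]->disabled);
	local_irq_restore(flags);
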
static inline void check_stack(void)
if (!object_is_on_stack(&this_size))
return;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
__raw_spin_lock(&max_stack_lock);
/* a race could have already updated it */
if (this_size <= max_stack_size)
	goto out;
out:
__raw_spin_unlock(&max_stack_lock);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
}
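
check_stack() runs from the function tracer itself, so it keeps the locked region as small as possible: a cheap unlocked peek first, then a re-check under max_stack_lock with irqs off, because the unlocked peek can lose a race (the comment in the hunk says as much). Sketch of that check/lock/re-check shape, using the names from the hunk:

	if (this_size <= max_stack_size)	/* unlocked peek: cheap reject */
		return;

	local_irq_save(flags);
	__raw_spin_lock(&max_stack_lock);
	if (this_size > max_stack_size)		/* re-check: a race may have won */
		max_stack_size = this_size;
	__raw_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
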
static ssize_t stack_max_size_write(struct file *filp, const char __user *ubuf, size_t count, loff_t *ppos)
if (ret < 0)
return ret;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
__raw_spin_lock(&max_stack_lock);
*ptr = val;
__raw_spin_unlock(&max_stack_lock);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
return count;
}
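
Everywhere above, only the interrupt-flag helpers change; the __raw_spin_lock() calls stay. The practical difference between the two flavors, paraphrasing the era's include/linux/irqflags.h and assuming CONFIG_TRACE_IRQFLAGS is enabled:

	/*
	 * Paraphrased sketch: the plain helper is the raw one plus lockdep's
	 * irq-state annotation; local_irq_restore() similarly calls
	 * trace_hardirqs_on()/trace_hardirqs_off() around raw_local_irq_restore().
	 */
	#define local_irq_save(flags)			\
		do {					\
			raw_local_irq_save(flags);	\
			trace_hardirqs_off();		\
		} while (0)

Using the raw flavor in the tracer hid these transitions from lockdep, so its idea of the irq state could drift out of sync with reality; switching to the plain flavor keeps the annotations intact, which is all this patch changes.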