}
}
-static void save_context_stack(struct stack_trace *trace,
- struct task_struct *task, struct pt_regs *regs)
+static void save_context_stack(struct stack_trace *trace, struct pt_regs *regs)
{
unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
if (raw_show_trace || !__kernel_text_address(pc)) {
unsigned long stack_page =
- (unsigned long)task_stack_page(task);
+ (unsigned long)task_stack_page(current);
if (stack_page && sp >= stack_page &&
sp <= stack_page + THREAD_SIZE - 32)
save_raw_context_stack(trace, sp);
trace->entries[trace->nr_entries++] = pc;
if (trace->nr_entries >= trace->max_entries)
break;
- pc = unwind_stack(task, &sp, pc, &ra);
+ pc = unwind_stack(current, &sp, pc, &ra);
} while (pc);
#else
save_raw_context_stack(trace, sp);
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
-void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
+void save_stack_trace(struct stack_trace *trace)
{
struct pt_regs dummyregs;
struct pt_regs *regs = &dummyregs;
WARN_ON(trace->nr_entries || !trace->max_entries);
- if (task && task != current) {
- regs->regs[29] = task->thread.reg29;
- regs->regs[31] = 0;
- regs->cp0_epc = task->thread.reg31;
- } else {
- if (!task)
- task = current;
- prepare_frametrace(regs);
- }
-
- save_context_stack(trace, task, regs);
+ prepare_frametrace(regs);
+ save_context_stack(trace, regs);
}
}
}
-void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
+void save_stack_trace(struct stack_trace *trace)
{
register unsigned long sp asm ("15");
unsigned long orig_sp, new_sp;
new_sp = save_context_stack(trace, &trace->skip, orig_sp,
S390_lowcore.panic_stack - PAGE_SIZE,
S390_lowcore.panic_stack);
- if ((new_sp != orig_sp) && !trace->all_contexts)
+ if (new_sp != orig_sp)
return;
new_sp = save_context_stack(trace, &trace->skip, new_sp,
S390_lowcore.async_stack - ASYNC_SIZE,
S390_lowcore.async_stack);
- if ((new_sp != orig_sp) && !trace->all_contexts)
+ if (new_sp != orig_sp)
return;
- if (task)
- save_context_stack(trace, &trace->skip, new_sp,
- (unsigned long) task_stack_page(task),
- (unsigned long) task_stack_page(task) + THREAD_SIZE);
- else
- save_context_stack(trace, &trace->skip, new_sp,
- S390_lowcore.thread_info,
- S390_lowcore.thread_info + THREAD_SIZE);
+
+ save_context_stack(trace, &trace->skip, new_sp,
+ S390_lowcore.thread_info,
+ S390_lowcore.thread_info + THREAD_SIZE);
return;
}
*/
-void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
+void save_stack_trace(struct stack_trace *trace)
{
- unsigned long *sp;
-
- if (!task)
- task = current;
- if (task == current)
- sp = (unsigned long *)current_stack_pointer;
- else
- sp = (unsigned long *)task->thread.sp;
+ unsigned long *sp = (unsigned long *)current_stack_pointer;
while (!kstack_end(sp)) {
unsigned long addr = *sp++;
#include <linux/thread_info.h>
#include <asm/ptrace.h>
-void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
+void save_stack_trace(struct stack_trace *trace)
{
unsigned long ksp, fp, thread_base;
- struct thread_info *tp;
+ struct thread_info *tp = task_thread_info(current);
- if (!task)
- task = current;
- tp = task_thread_info(task);
- if (task == current) {
- flushw_all();
- __asm__ __volatile__(
- "mov %%fp, %0"
- : "=r" (ksp)
- );
- } else
- ksp = tp->ksp;
+ flushw_all();
+ __asm__ __volatile__(
+ "mov %%fp, %0"
+ : "=r" (ksp)
+ );
fp = ksp + STACK_BIAS;
thread_base = (unsigned long) tp;
static int save_stack_stack(void *data, char *name)
{
- struct stack_trace *trace = (struct stack_trace *)data;
- return trace->all_contexts ? 0 : -1;
+ return -1;
}
static void save_stack_address(void *data, unsigned long addr)
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
-void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
+void save_stack_trace(struct stack_trace *trace)
{
- dump_trace(task, NULL, NULL, &save_stack_ops, trace);
+ dump_trace(current, NULL, NULL, &save_stack_ops, trace);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL(save_stack_trace);
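
The x86_64 version leans on dump_trace() and its stacktrace_ops callbacks: save_stack_stack() now returns -1 unconditionally, so the walk stops after the first stack (the old all_contexts behaviour is gone), and each text address is handed to save_stack_address(), whose body is not part of this hunk. A minimal sketch of that callback, assuming the usual skip-then-record pattern (the body below is reconstructed for illustration, not taken from this patch):

static void save_stack_address(void *data, unsigned long addr)
{
	/* sketch: body not shown in the hunk above, reconstructed here */
	struct stack_trace *trace = data;

	if (trace->skip > 0) {		/* honour the caller's skip count */
		trace->skip--;
		return;
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}
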
-
unsigned int nr_entries, max_entries;
unsigned long *entries;
int skip; /* input argument: How many entries to skip */
- int all_contexts; /* input argument: if true do than one stack */
};
-extern void save_stack_trace(struct stack_trace *trace,
- struct task_struct *task);
+extern void save_stack_trace(struct stack_trace *trace);
extern void print_stack_trace(struct stack_trace *trace, int spaces);
#else
-# define save_stack_trace(trace, task) do { } while (0)
+# define save_stack_trace(trace) do { } while (0)
# define print_stack_trace(trace) do { } while (0)
#endif
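
Taken together, the header hunks leave a current-task-only interface. Resolving them gives roughly the following (the opening #ifdef CONFIG_STACKTRACE guard is assumed, since only the #else/#endif appear above):

struct stack_trace {
	unsigned int nr_entries, max_entries;
	unsigned long *entries;
	int skip;	/* input argument: How many entries to skip */
};

#ifdef CONFIG_STACKTRACE	/* assumed guard; not shown in the hunk above */
extern void save_stack_trace(struct stack_trace *trace);
extern void print_stack_trace(struct stack_trace *trace, int spaces);
#else
# define save_stack_trace(trace)	do { } while (0)
# define print_stack_trace(trace)	do { } while (0)
#endif

Callers such as lockdep and the fault-injection filter below then only fill in entries, max_entries and skip, and call save_stack_trace(trace) for the current context:
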
trace->entries = stack_trace + nr_stack_trace_entries;
trace->skip = 3;
- trace->all_contexts = 0;
- save_stack_trace(trace, NULL);
+ save_stack_trace(trace);
trace->max_entries = trace->nr_entries;
trace.entries = entries;
trace.max_entries = depth;
trace.skip = 1;
- trace.all_contexts = 0;
- save_stack_trace(&trace, NULL);
+ save_stack_trace(&trace);
for (n = 0; n < trace.nr_entries; n++) {
if (attr->reject_start <= entries[n] &&
entries[n] < attr->reject_end)