diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 433fde85b14ea902308cdf5bf83583c7305473b1..4aefbe32265d8f31b64308350ea3b2b9d8a4d96c 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -2,6 +2,7 @@
 #ifndef _ASM_S390_STACKTRACE_H
 #define _ASM_S390_STACKTRACE_H
 
+#include <linux/stacktrace.h>
 #include <linux/uaccess.h>
 #include <linux/ptrace.h>
 
@@ -12,6 +13,12 @@ struct stack_frame_user {
 	unsigned long empty2[4];
 };
 
+struct perf_callchain_entry_ctx;
+
+void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
+				 struct perf_callchain_entry_ctx *entry,
+				 const struct pt_regs *regs, bool perf);
+
 enum stack_type {
 	STACK_TYPE_UNKNOWN,
 	STACK_TYPE_TASK,
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index dfa77da2fd2ec5413b6358c853f5a8532d250419..5fff629b1a89806649fcf53483bb9be211bace1c 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -218,39 +218,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 			 struct pt_regs *regs)
 {
-	struct stack_frame_user __user *sf;
-	unsigned long ip, sp;
-	bool first = true;
-
-	if (is_compat_task())
-		return;
-	perf_callchain_store(entry, instruction_pointer(regs));
-	sf = (void __user *)user_stack_pointer(regs);
-	pagefault_disable();
-	while (entry->nr < entry->max_stack) {
-		if (__get_user(sp, &sf->back_chain))
-			break;
-		if (__get_user(ip, &sf->gprs[8]))
-			break;
-		if (ip & 0x1) {
-			/*
-			 * If the instruction address is invalid, and this
-			 * is the first stack frame, assume r14 has not
-			 * been written to the stack yet. Otherwise exit.
-			 */
-			if (first && !(regs->gprs[14] & 0x1))
-				ip = regs->gprs[14];
-			else
-				break;
-		}
-		perf_callchain_store(entry, ip);
-		/* Sanity check: ABI requires SP to be aligned 8 bytes. */
-		if (!sp || sp & 0x7)
-			break;
-		sf = (void __user *)sp;
-		first = false;
-	}
-	pagefault_enable();
+	arch_stack_walk_user_common(NULL, NULL, entry, regs, true);
 }
 
 /* Perf definitions for PMU event attributes in sysfs */
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 7c294da45bf524e9891d8c3e83ab71f9b0115e50..e580d4cd2729afa0f53c8cace5fc844f17d5e2d6 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -5,6 +5,7 @@
  * Copyright IBM Corp. 2006
  */
 
+#include <linux/perf_event.h>
 #include <linux/stacktrace.h>
 #include <linux/uaccess.h>
 #include <linux/compat.h>
@@ -62,8 +63,23 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
 	return 0;
 }
 
-void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
-			  const struct pt_regs *regs)
+static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
+			    struct perf_callchain_entry_ctx *entry, bool perf,
+			    unsigned long ip)
+{
+#ifdef CONFIG_PERF_EVENTS
+	if (perf) {
+		if (perf_callchain_store(entry, ip))
+			return false;
+		return true;
+	}
+#endif
+	return consume_entry(cookie, ip);
+}
+
+void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
+				 struct perf_callchain_entry_ctx *entry,
+				 const struct pt_regs *regs, bool perf)
 {
 	struct stack_frame_user __user *sf;
 	unsigned long ip, sp;
@@ -71,7 +87,8 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
 
 	if (is_compat_task())
 		return;
-	if (!consume_entry(cookie, instruction_pointer(regs)))
+	ip = instruction_pointer(regs);
+	if (!store_ip(consume_entry, cookie, entry, perf, ip))
 		return;
 	sf = (void __user *)user_stack_pointer(regs);
 	pagefault_disable();
@@ -91,8 +108,8 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
 			else
 				break;
 		}
-		if (!consume_entry(cookie, ip))
-			break;
+		if (!store_ip(consume_entry, cookie, entry, perf, ip))
+			return;
 		/* Sanity check: ABI requires SP to be aligned 8 bytes. */
 		if (!sp || sp & 0x7)
 			break;
@@ -102,6 +119,12 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
 	pagefault_enable();
 }
 
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+			  const struct pt_regs *regs)
+{
+	arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
+}
+
 unsigned long return_address(unsigned int n)
 {
 	struct unwind_state state;
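
The patch above routes both consumers of the user stack walk through one function: the stacktrace API keeps its consume_entry/cookie callback, while the perf callchain path stores addresses via perf_callchain_store(), selected by the perf flag in store_ip(). The following standalone userspace sketch models only that dispatch idea; it is not kernel code, and every name in it (walk_user_common, fake_entry_ctx, perf_store, print_ip) is hypothetical and introduced purely for illustration.

/*
 * Illustrative sketch only (not kernel code): one walker serving two
 * consumers, mirroring the store_ip()/arch_stack_walk_user_common()
 * split in the patch above.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef bool (*consume_fn)(void *cookie, unsigned long ip);

struct fake_entry_ctx {
	unsigned long ips[8];
	unsigned int nr, max;
};

/* perf-style store: returns false once the entry buffer is full */
static bool perf_store(struct fake_entry_ctx *e, unsigned long ip)
{
	if (e->nr >= e->max)
		return false;
	e->ips[e->nr++] = ip;
	return true;
}

/* dispatch between the callback and the perf buffer, like store_ip() */
static bool store_ip(consume_fn consume, void *cookie,
		     struct fake_entry_ctx *entry, bool perf, unsigned long ip)
{
	if (perf)
		return perf_store(entry, ip);
	return consume(cookie, ip);
}

/* walk a toy array of return addresses through the single code path */
static void walk_user_common(consume_fn consume, void *cookie,
			     struct fake_entry_ctx *entry, bool perf,
			     const unsigned long *frames, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (!store_ip(consume, cookie, entry, perf, frames[i]))
			return;
}

/* stacktrace-style consumer callback */
static bool print_ip(void *cookie, unsigned long ip)
{
	(void)cookie;
	printf("ip: %#lx\n", ip);
	return true;
}

int main(void)
{
	const unsigned long frames[] = { 0x1000, 0x2000, 0x3000 };
	struct fake_entry_ctx entry = { .max = 2 };

	walk_user_common(print_ip, NULL, NULL, false, frames, 3);
	walk_user_common(NULL, NULL, &entry, true, frames, 3);
	printf("perf collected %u entries\n", entry.nr);
	return 0;
}

The design point the sketch tries to show: the walking logic (back-chain following, sanity checks) lives in one place, and only the per-address "store" step differs between the two users.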