aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/events/core.c
diff options
context:
space:
mode:
authorJiri Olsa <jolsa@redhat.com>2012-08-07 09:20:40 -0400
committerArnaldo Carvalho de Melo <acme@redhat.com>2012-08-10 11:17:58 -0400
commitc5ebcedb566ef17bda7b02686e0d658a7bb42ee7 (patch)
tree46619f5d62f0fbe9a6389ce20fd5ed92c25755e3 /kernel/events/core.c
parent5685e0ff45f5df67e79e9b052b6ffd501ff38c11 (diff)
perf: Add ability to attach user stack dump to sample
Introducing PERF_SAMPLE_STACK_USER sample type bit to trigger the dump of the user level stack on sample. The size of the dump is specified by sample_stack_user value. Being able to dump parts of the user stack, starting from the stack pointer, will be useful to make a post mortem dwarf CFI based stack unwinding. Added HAVE_PERF_USER_STACK_DUMP config option to determine if the architecture provides user stack dump on perf event samples. This needs access to the user stack pointer which is not unified across architectures. Enabling this for x86 architecture. Signed-off-by: Jiri Olsa <jolsa@redhat.com> Original-patch-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: "Frank Ch. Eigler" <fche@redhat.com> Cc: Arun Sharma <asharma@fb.com> Cc: Benjamin Redelings <benjamin.redelings@nescent.org> Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com> Cc: Cyrill Gorcunov <gorcunov@openvz.org> Cc: Frank Ch. Eigler <fche@redhat.com> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> Cc: Paul Mackerras <paulus@samba.org> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Robert Richter <robert.richter@amd.com> Cc: Stephane Eranian <eranian@google.com> Cc: Tom Zanussi <tzanussi@gmail.com> Cc: Ulrich Drepper <drepper@gmail.com> Link: http://lkml.kernel.org/r/1344345647-11536-6-git-send-email-jolsa@redhat.com Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'kernel/events/core.c')
-rw-r--r--kernel/events/core.c150
1 files changed, 149 insertions, 1 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d3ce97525b9f..2ba890450d15 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -36,6 +36,7 @@
36#include <linux/perf_event.h> 36#include <linux/perf_event.h>
37#include <linux/ftrace_event.h> 37#include <linux/ftrace_event.h>
38#include <linux/hw_breakpoint.h> 38#include <linux/hw_breakpoint.h>
39#include <linux/mm_types.h>
39 40
40#include "internal.h" 41#include "internal.h"
41 42
@@ -3787,6 +3788,101 @@ static void perf_sample_regs_user(struct perf_regs_user *regs_user,
3787 } 3788 }
3788} 3789}
3789 3790
3791/*
3792 * Get remaining task size from user stack pointer.
3793 *
3794 * It'd be better to take stack vma map and limit this more
3795 * precisly, but there's no way to get it safely under interrupt,
3796 * so using TASK_SIZE as limit.
3797 */
3798static u64 perf_ustack_task_size(struct pt_regs *regs)
3799{
3800 unsigned long addr = perf_user_stack_pointer(regs);
3801
3802 if (!addr || addr >= TASK_SIZE)
3803 return 0;
3804
3805 return TASK_SIZE - addr;
3806}
3807
/*
 * Trim the requested user stack dump size (@stack_size) so the dump
 * fits both into the user address space above the stack pointer and
 * into the sample, whose total size lives in a u16 header field.
 * @header_size is the sample size consumed so far.
 *
 * Returns the dump size to use, 0 when nothing can be dumped.
 */
static u16
perf_sample_ustack_size(u16 stack_size, u16 header_size,
			struct pt_regs *regs)
{
	u64 task_size;

	/* No regs, no stack pointer, no dump. */
	if (!regs)
		return 0;

	/*
	 * Clamp the requested stack size twice:
	 *
	 * - to the user address space remaining above the stack
	 *   pointer (TASK_SIZE based, see perf_ustack_task_size());
	 *
	 * - to the remaining sample size, i.e. what still fits under
	 *   the USHRT_MAX limit of the u16 sample-size field.
	 */

	task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
	stack_size = min(stack_size, (u16) task_size);

	/* Current header size plus static size and dynamic size. */
	header_size += 2 * sizeof(u64);

	/*
	 * Do we fit in with the current stack dump size?
	 * header_size + stack_size is computed in int; the (u16) cast
	 * truncates it, so the comparison detects a wrap past USHRT_MAX.
	 */
	if ((u16) (header_size + stack_size) < header_size) {
		/*
		 * If we overflow the maximum size for the sample,
		 * we customize the stack dump size to fit in.  The
		 * extra sizeof(u64) subtracted leaves slack so the
		 * round_up() below cannot push us back over USHRT_MAX.
		 */
		stack_size = USHRT_MAX - header_size - sizeof(u64);
		stack_size = round_up(stack_size, sizeof(u64));
	}

	return stack_size;
}
3846
3847static void
3848perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
3849 struct pt_regs *regs)
3850{
3851 /* Case of a kernel thread, nothing to dump */
3852 if (!regs) {
3853 u64 size = 0;
3854 perf_output_put(handle, size);
3855 } else {
3856 unsigned long sp;
3857 unsigned int rem;
3858 u64 dyn_size;
3859
3860 /*
3861 * We dump:
3862 * static size
3863 * - the size requested by user or the best one we can fit
3864 * in to the sample max size
3865 * data
3866 * - user stack dump data
3867 * dynamic size
3868 * - the actual dumped size
3869 */
3870
3871 /* Static size. */
3872 perf_output_put(handle, dump_size);
3873
3874 /* Data. */
3875 sp = perf_user_stack_pointer(regs);
3876 rem = __output_copy_user(handle, (void *) sp, dump_size);
3877 dyn_size = dump_size - rem;
3878
3879 perf_output_skip(handle, rem);
3880
3881 /* Dynamic size. */
3882 perf_output_put(handle, dyn_size);
3883 }
3884}
3885
3790static void __perf_event_header__init_id(struct perf_event_header *header, 3886static void __perf_event_header__init_id(struct perf_event_header *header,
3791 struct perf_sample_data *data, 3887 struct perf_sample_data *data,
3792 struct perf_event *event) 3888 struct perf_event *event)
@@ -4064,6 +4160,11 @@ void perf_output_sample(struct perf_output_handle *handle,
4064 mask); 4160 mask);
4065 } 4161 }
4066 } 4162 }
4163
4164 if (sample_type & PERF_SAMPLE_STACK_USER)
4165 perf_output_sample_ustack(handle,
4166 data->stack_user_size,
4167 data->regs_user.regs);
4067} 4168}
4068 4169
4069void perf_prepare_sample(struct perf_event_header *header, 4170void perf_prepare_sample(struct perf_event_header *header,
@@ -4129,6 +4230,35 @@ void perf_prepare_sample(struct perf_event_header *header,
4129 4230
4130 header->size += size; 4231 header->size += size;
4131 } 4232 }
4233
4234 if (sample_type & PERF_SAMPLE_STACK_USER) {
4235 /*
4236 * Either we need PERF_SAMPLE_STACK_USER bit to be always
4237 * processed as the last one or have additional check added
4238 * in case new sample type is added, because we could eat
4239 * up the rest of the sample size.
4240 */
4241 struct perf_regs_user *uregs = &data->regs_user;
4242 u16 stack_size = event->attr.sample_stack_user;
4243 u16 size = sizeof(u64);
4244
4245 if (!uregs->abi)
4246 perf_sample_regs_user(uregs, regs);
4247
4248 stack_size = perf_sample_ustack_size(stack_size, header->size,
4249 uregs->regs);
4250
4251 /*
4252 * If there is something to dump, add space for the dump
4253 * itself and for the field that tells the dynamic size,
4254 * which is how many have been actually dumped.
4255 */
4256 if (stack_size)
4257 size += sizeof(u64) + stack_size;
4258
4259 data->stack_user_size = stack_size;
4260 header->size += size;
4261 }
4132} 4262}
4133 4263
4134static void perf_event_output(struct perf_event *event, 4264static void perf_event_output(struct perf_event *event,
@@ -6205,8 +6335,26 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
6205 } 6335 }
6206 } 6336 }
6207 6337
6208 if (attr->sample_type & PERF_SAMPLE_REGS_USER) 6338 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
6209 ret = perf_reg_validate(attr->sample_regs_user); 6339 ret = perf_reg_validate(attr->sample_regs_user);
6340 if (ret)
6341 return ret;
6342 }
6343
6344 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
6345 if (!arch_perf_have_user_stack_dump())
6346 return -ENOSYS;
6347
6348 /*
6349 * We have __u32 type for the size, but so far
6350 * we can only use __u16 as maximum due to the
6351 * __u16 sample size limit.
6352 */
6353 if (attr->sample_stack_user >= USHRT_MAX)
6354 ret = -EINVAL;
6355 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
6356 ret = -EINVAL;
6357 }
6210 6358
6211out: 6359out:
6212 return ret; 6360 return ret;