author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-08-10 14:48:51 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-08-10 14:48:51 -0400
commit | d00aa6695b67a31be2ce5f7464da32c20cb50699 (patch)
tree | 4e4a2bbd1ab710ddca3bd1a611a6c3e9a00f52f9 /include/trace/ftrace.h
parent | cec36911b5fa4ac342f6de856b12a9f71f84e6e5 (diff)
parent | 1853db0e02ae4088f102b0d8e59e83dc98f93f03 (diff)
Merge branch 'perfcounters-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perfcounters-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (27 commits)
perf_counter: Zero dead bytes from ftrace raw samples size alignment
perf_counter: Subtract the buffer size field from the event record size
perf_counter: Require CAP_SYS_ADMIN for raw tracepoint data
perf_counter: Correct PERF_SAMPLE_RAW output
perf tools: callchain: Fix bad rounding of minimum rate
perf_counter tools: Fix libbfd detection for systems with libz dependency
perf: "Longum est iter per praecepta, breve et efficax per exempla"
perf_counter: Fix a race on perf_counter_ctx
perf_counter: Fix tracepoint sampling to be part of generic sampling
perf_counter: Work around gcc warning by initializing tracepoint record unconditionally
perf tools: callchain: Fix sum of percentages to be 100% by displaying amount of ignored chains in fractal mode
perf tools: callchain: Fix 'perf report' display to be callchain by default
perf tools: callchain: Fix spurious 'perf report' warnings: ignore empty callchains
perf record: Fix the -A UI for empty or non-existent perf.data
perf util: Fix do_read() to fail on EOF instead of busy-looping
perf list: Fix the output to not include tracepoints without an id
perf_counter/powerpc: Fix oops on cpus without perf_counter hardware support
perf stat: Fix tool option consistency: rename -S/--scale to -c/--scale
perf report: Add debug help for the finding of symbol bugs - show the symtab origin (DSO, build-id, kernel, etc)
perf report: Fix per task mult-counter stat reporting
...
Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r-- | include/trace/ftrace.h | 15
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 7fb16d90e7b..f64fbaae781 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -637,12 +637,20 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  * pc = preempt_count();
  *
  * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
- * __entry_size = __data_size + sizeof(*entry);
+ *
+ * // Below we want to get the aligned size by taking into account
+ * // the u32 field that will later store the buffer size
+ * __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
+ *		sizeof(u64));
+ * __entry_size -= sizeof(u32);
  *
  * do {
  *	char raw_data[__entry_size]; <- allocate our sample in the stack
  *	struct trace_entry *ent;
  *
+ *	zero dead bytes from alignment to avoid stack leak to userspace:
+ *
+ *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
  *	entry = (struct ftrace_raw_<call> *)raw_data;
  *	ent = &entry->ent;
  *	tracing_generic_entry_update(ent, irq_flags, pc);
@@ -685,12 +693,15 @@ static void ftrace_profile_##call(proto) \
 	pc = preempt_count();					\
 								\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
-	__entry_size = ALIGN(__data_size + sizeof(*entry), sizeof(u64));\
+	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
+			sizeof(u64));				\
+	__entry_size -= sizeof(u32);				\
 								\
 	do {							\
 		char raw_data[__entry_size];			\
 		struct trace_entry *ent;			\
 								\
+		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
 		entry = (struct ftrace_raw_##call *)raw_data;	\
 		ent = &entry->ent;				\
 		tracing_generic_entry_update(ent, irq_flags, pc); \
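Both hunks apply the same trick, first in the explanatory comment and then in the ftrace_profile_##call() macro body: a raw perf sample is preceded by a u32 size field, so the record size is rounded up so that u32 plus payload end on a u64 boundary, and the padding ("dead bytes") is zeroed so uninitialized stack contents never reach user space. The sketch below is only a stand-alone, user-space illustration of that arithmetic, not kernel code; `sample_header`, the 13-byte payload, and the local `ALIGN()` macro are made-up placeholders standing in for struct ftrace_raw_<call>, ftrace_get_offsets_<call>() and the kernel's own ALIGN().

```c
#include <stdint.h>
#include <stdio.h>

/* User-space stand-in for the kernel's ALIGN() macro (power-of-two alignment). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Hypothetical fixed-size header, playing the role of struct ftrace_raw_<call>. */
struct sample_header {
	uint16_t type;
	uint8_t  flags;
	uint8_t  preempt_count;
};

int main(void)
{
	/* Pretend the dynamic payload size (ftrace_get_offsets_<call>()) came out to 13 bytes. */
	size_t data_size = 13;
	size_t entry_size;

	/*
	 * Reserve room for the u32 size field that precedes the raw sample,
	 * round the total up to a u64 boundary, then drop the u32 again:
	 * the u32 plus entry_size together stay u64-aligned.
	 */
	entry_size = ALIGN(data_size + sizeof(struct sample_header) + sizeof(uint32_t),
			   sizeof(uint64_t));
	entry_size -= sizeof(uint32_t);

	{
		char raw_data[entry_size];

		/*
		 * Zero the last u64 so the alignment padding ("dead bytes")
		 * cannot carry leftover stack contents to whoever consumes
		 * the sample; this mirrors the line added by the patch.
		 */
		*(uint64_t *)(&raw_data[entry_size - sizeof(uint64_t)]) = 0ULL;

		printf("payload=%zu header=%zu -> entry_size=%zu (u32 + entry_size = %zu bytes)\n",
		       data_size, sizeof(struct sample_header), entry_size,
		       entry_size + sizeof(uint32_t));
	}
	return 0;
}
```

With these made-up sizes the program reports a 20-byte entry for the 13-byte payload: 13 + 4 (header) + 4 (u32) rounds up to 24, and subtracting the u32 leaves 20, so the size field plus record still occupy a multiple of 8 bytes.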