diff options
Diffstat (limited to 'include')
 include/linux/perf_counter.h | 12 +++++++-----
 include/trace/ftrace.h       | 15 +++++++++++--
 2 files changed, 20 insertions(+), 7 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index a67dd5c5b6d3..a9d823a93fe8 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -121,7 +121,7 @@ enum perf_counter_sample_format {
121 | PERF_SAMPLE_CPU = 1U << 7, | 121 | PERF_SAMPLE_CPU = 1U << 7, |
122 | PERF_SAMPLE_PERIOD = 1U << 8, | 122 | PERF_SAMPLE_PERIOD = 1U << 8, |
123 | PERF_SAMPLE_STREAM_ID = 1U << 9, | 123 | PERF_SAMPLE_STREAM_ID = 1U << 9, |
124 | PERF_SAMPLE_TP_RECORD = 1U << 10, | 124 | PERF_SAMPLE_RAW = 1U << 10, |
125 | 125 | ||
126 | PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ | 126 | PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ |
127 | }; | 127 | }; |
@@ -369,6 +369,8 @@ enum perf_event_type {
369 | * | 369 | * |
370 | * { u64 nr, | 370 | * { u64 nr, |
371 | * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN | 371 | * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN |
372 | * { u32 size; | ||
373 | * char data[size];}&& PERF_SAMPLE_RAW | ||
372 | * }; | 374 | * }; |
373 | */ | 375 | */ |
374 | PERF_EVENT_SAMPLE = 9, | 376 | PERF_EVENT_SAMPLE = 9, |
@@ -414,9 +416,9 @@ struct perf_callchain_entry {
414 | __u64 ip[PERF_MAX_STACK_DEPTH]; | 416 | __u64 ip[PERF_MAX_STACK_DEPTH]; |
415 | }; | 417 | }; |
416 | 418 | ||
417 | struct perf_tracepoint_record { | 419 | struct perf_raw_record { |
418 | int size; | 420 | u32 size; |
419 | char *record; | 421 | void *data; |
420 | }; | 422 | }; |
421 | 423 | ||
422 | struct task_struct; | 424 | struct task_struct; |
@@ -687,7 +689,7 @@ struct perf_sample_data {
687 | struct pt_regs *regs; | 689 | struct pt_regs *regs; |
688 | u64 addr; | 690 | u64 addr; |
689 | u64 period; | 691 | u64 period; |
690 | void *private; | 692 | struct perf_raw_record *raw; |
691 | }; | 693 | }; |
692 | 694 | ||
693 | extern int perf_counter_overflow(struct perf_counter *counter, int nmi, | 695 | extern int perf_counter_overflow(struct perf_counter *counter, int nmi, |
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 7fb16d90e7b1..f64fbaae781a 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -637,12 +637,20 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
637 | * pc = preempt_count(); | 637 | * pc = preempt_count(); |
638 | * | 638 | * |
639 | * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args); | 639 | * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args); |
640 | * __entry_size = __data_size + sizeof(*entry); | 640 | * |
641 | * // Below we want to get the aligned size by taking into account | ||
642 | * // the u32 field that will later store the buffer size | ||
643 | * __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32), | ||
644 | * sizeof(u64)); | ||
645 | * __entry_size -= sizeof(u32); | ||
641 | * | 646 | * |
642 | * do { | 647 | * do { |
643 | * char raw_data[__entry_size]; <- allocate our sample in the stack | 648 | * char raw_data[__entry_size]; <- allocate our sample in the stack |
644 | * struct trace_entry *ent; | 649 | * struct trace_entry *ent; |
645 | * | 650 | * |
651 | * zero dead bytes from alignment to avoid stack leak to userspace: | ||
652 | * | ||
653 | * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; | ||
646 | * entry = (struct ftrace_raw_<call> *)raw_data; | 654 | * entry = (struct ftrace_raw_<call> *)raw_data; |
647 | * ent = &entry->ent; | 655 | * ent = &entry->ent; |
648 | * tracing_generic_entry_update(ent, irq_flags, pc); | 656 | * tracing_generic_entry_update(ent, irq_flags, pc); |
@@ -685,12 +693,15 @@ static void ftrace_profile_##call(proto) \
685 | pc = preempt_count(); \ | 693 | pc = preempt_count(); \ |
686 | \ | 694 | \ |
687 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ | 695 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ |
688 | __entry_size = ALIGN(__data_size + sizeof(*entry), sizeof(u64));\ | 696 | __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\ |
697 | sizeof(u64)); \ | ||
698 | __entry_size -= sizeof(u32); \ | ||
689 | \ | 699 | \ |
690 | do { \ | 700 | do { \ |
691 | char raw_data[__entry_size]; \ | 701 | char raw_data[__entry_size]; \ |
692 | struct trace_entry *ent; \ | 702 | struct trace_entry *ent; \ |
693 | \ | 703 | \ |
704 | *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \ | ||
694 | entry = (struct ftrace_raw_##call *)raw_data; \ | 705 | entry = (struct ftrace_raw_##call *)raw_data; \ |
695 | ent = &entry->ent; \ | 706 | ent = &entry->ent; \ |
696 | tracing_generic_entry_update(ent, irq_flags, pc); \ | 707 | tracing_generic_entry_update(ent, irq_flags, pc); \ |