-rw-r--r--	include/linux/perf_counter.h	12
-rw-r--r--	kernel/perf_counter.c	72
2 files changed, 80 insertions, 4 deletions
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index bcbf1c43ed42..6a384f04755a 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -335,6 +335,18 @@ enum perf_event_type {
 	PERF_EVENT_FORK			= 7,
 
 	/*
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u32				pid, tid;
+	 *	u64				value;
+	 *	{ u64		time_enabled;	} && PERF_FORMAT_ENABLED
+	 *	{ u64		time_running;	} && PERF_FORMAT_RUNNING
+	 *	{ u64		parent_id;	} && PERF_FORMAT_ID
+	 * };
+	 */
+	PERF_EVENT_READ			= 8,
+
+	/*
 	 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
 	 * will be PERF_SAMPLE_*
 	 *
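
For context, here is a minimal sketch (not part of the patch) of how a user-space consumer of the mmap ring buffer might decode one of these PERF_EVENT_READ records. The struct and helper names, and the assumption that the three read_format bits are bits 0-2, are illustrative only; check the perf_counter ABI headers actually in use.

/* Hypothetical user-space decoder for a PERF_EVENT_READ record.
 * Layout follows the comment above: header, pid/tid, value, then one
 * optional u64 per enabled read_format flag, in the order listed. */
#include <stdint.h>
#include <stdio.h>

struct read_event_fixed {
	uint32_t type;		/* PERF_EVENT_READ */
	uint16_t misc;
	uint16_t size;		/* total record size, incl. optional u64s */
	uint32_t pid, tid;
	uint64_t value;
};

/* Assumed flag values (illustrative). */
#define FMT_TOTAL_TIME_ENABLED	(1U << 0)
#define FMT_TOTAL_TIME_RUNNING	(1U << 1)
#define FMT_ID			(1U << 2)

static void decode_read_event(const void *rec, unsigned int read_format)
{
	const struct read_event_fixed *e = rec;
	const uint64_t *opt = (const uint64_t *)(e + 1);

	printf("READ pid=%u tid=%u value=%llu\n",
	       e->pid, e->tid, (unsigned long long)e->value);
	if (read_format & FMT_TOTAL_TIME_ENABLED)
		printf("  time_enabled=%llu\n", (unsigned long long)*opt++);
	if (read_format & FMT_TOTAL_TIME_RUNNING)
		printf("  time_running=%llu\n", (unsigned long long)*opt++);
	if (read_format & FMT_ID)
		printf("  id=%llu\n", (unsigned long long)*opt++);
}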
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 02994a719e27..a72c20e91953 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2624,6 +2624,66 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 }
 
 /*
+ * read event
+ */
+
+struct perf_read_event {
+	struct perf_event_header	header;
+
+	u32				pid;
+	u32				tid;
+	u64				value;
+	u64				format[3];
+};
+
+static void
+perf_counter_read_event(struct perf_counter *counter,
+			struct task_struct *task)
+{
+	struct perf_output_handle handle;
+	struct perf_read_event event = {
+		.header = {
+			.type = PERF_EVENT_READ,
+			.misc = 0,
+			.size = sizeof(event) - sizeof(event.format),
+		},
+		.pid = perf_counter_pid(counter, task),
+		.tid = perf_counter_tid(counter, task),
+		.value = atomic64_read(&counter->count),
+	};
+	int ret, i = 0;
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		event.header.size += sizeof(u64);
+		event.format[i++] = counter->total_time_enabled;
+	}
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		event.header.size += sizeof(u64);
+		event.format[i++] = counter->total_time_running;
+	}
+
+	if (counter->attr.read_format & PERF_FORMAT_ID) {
+		u64 id;
+
+		event.header.size += sizeof(u64);
+		if (counter->parent)
+			id = counter->parent->id;
+		else
+			id = counter->id;
+
+		event.format[i++] = id;
+	}
+
+	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
+	if (ret)
+		return;
+
+	perf_output_copy(&handle, &event, event.header.size);
+	perf_output_end(&handle);
+}
+
+/*
  * fork tracking
  */
 
@@ -3985,10 +4045,13 @@ static int inherit_group(struct perf_counter *parent_counter,
 }
 
 static void sync_child_counter(struct perf_counter *child_counter,
-			       struct perf_counter *parent_counter)
+			       struct task_struct *child)
 {
+	struct perf_counter *parent_counter = child_counter->parent;
 	u64 child_val;
 
+	perf_counter_read_event(child_counter, child);
+
 	child_val = atomic64_read(&child_counter->count);
 
 	/*
@@ -4017,7 +4080,8 @@ static void sync_child_counter(struct perf_counter *child_counter,
 
 static void
 __perf_counter_exit_task(struct perf_counter *child_counter,
-			 struct perf_counter_context *child_ctx)
+			 struct perf_counter_context *child_ctx,
+			 struct task_struct *child)
 {
 	struct perf_counter *parent_counter;
 
@@ -4031,7 +4095,7 @@ __perf_counter_exit_task(struct perf_counter *child_counter,
 	 * counters need to be zapped - but otherwise linger.
 	 */
 	if (parent_counter) {
-		sync_child_counter(child_counter, parent_counter);
+		sync_child_counter(child_counter, child);
 		free_counter(child_counter);
 	}
 }
@@ -4093,7 +4157,7 @@ void perf_counter_exit_task(struct task_struct *child)
 again:
 	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
 				 list_entry)
-		__perf_counter_exit_task(child_counter, child_ctx);
+		__perf_counter_exit_task(child_counter, child_ctx, child);
 
 	/*
 	 * If the last counter was a group counter, it will have appended all
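
As a sanity check on the variable-size layout, here is a small helper (again not part of the patch) that mirrors the header.size arithmetic in perf_counter_read_event() above, computing the record size a consumer should expect for a given read_format. The flag values are the same illustrative assumptions as in the decoder sketch earlier.

#include <stddef.h>
#include <stdint.h>

#define FMT_TOTAL_TIME_ENABLED	(1U << 0)
#define FMT_TOTAL_TIME_RUNNING	(1U << 1)
#define FMT_ID			(1U << 2)

/* Expected size of one PERF_EVENT_READ record: an 8-byte header,
 * pid/tid (2 * u32), value (u64), plus one u64 per enabled flag.
 * This mirrors sizeof(event) - sizeof(event.format) plus the
 * conditional additions in perf_counter_read_event(). */
static size_t read_event_size(unsigned int read_format)
{
	size_t size = 8 + 2 * sizeof(uint32_t) + sizeof(uint64_t);

	if (read_format & FMT_TOTAL_TIME_ENABLED)
		size += sizeof(uint64_t);
	if (read_format & FMT_TOTAL_TIME_RUNNING)
		size += sizeof(uint64_t);
	if (read_format & FMT_ID)
		size += sizeof(uint64_t);
	return size;
}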