author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-06-25 05:27:12 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-06-25 15:39:08 -0400
commit		e6e18ec79b023d5fe84226cef533cf0e3770ce93 (patch)
tree		6fc1bd9afd21454864abe2aec6a0e35e17d47f04 /kernel/perf_counter.c
parent		bfbd3381e63aa2a14c6706afb50ce4630aa0d9a2 (diff)
perf_counter: Rework the sample ABI
The PERF_EVENT_READ implementation made me realize we don't
actually need the sample_type in the output sample, since
we already have that in the perf_counter_attr information.

Therefore, remove the PERF_EVENT_MISC_OVERFLOW bit and the
event->type overloading, and simply put counter overflow
samples in a PERF_EVENT_SAMPLE type.

This also fixes the issue that event->type was only 32 bits
wide while sample_type had 64 usable bits.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
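
For context, a minimal reader-side sketch of what the reworked ABI
means (illustrative only, not kernel or tools code): a sample is now
recognized purely by header.type == PERF_EVENT_SAMPLE, and its field
layout is reconstructed from the attr.sample_type the counter was
opened with, not from bits in the header. Types and PERF_* constants
are the <linux/perf_counter.h> ones of this era; decode_sample() is a
hypothetical helper, and only the first few fields are decoded.

#include <stdio.h>
#include <linux/perf_counter.h>	/* struct perf_event_header, PERF_* */

static void decode_sample(const struct perf_event_header *hdr,
			  __u64 sample_type)
{
	const __u64 *p = (const __u64 *)(hdr + 1);

	/*
	 * Pre-patch, a reader had to test hdr->misc for
	 * PERF_EVENT_MISC_OVERFLOW and treat hdr->type as a truncated
	 * 32-bit copy of sample_type.  Post-patch the type is simply
	 * PERF_EVENT_SAMPLE and sample_type comes from the attr.
	 */
	if (hdr->type != PERF_EVENT_SAMPLE)
		return;				/* mmap/comm/... records */

	if (sample_type & PERF_SAMPLE_IP)
		printf("ip   %#llx\n", (unsigned long long)*p++);

	if (sample_type & PERF_SAMPLE_TID) {
		const __u32 *t = (const __u32 *)p;
		printf("pid  %u  tid %u\n", t[0], t[1]);
		p++;				/* pid+tid share one u64 */
	}

	if (sample_type & PERF_SAMPLE_TIME)
		printf("time %llu\n", (unsigned long long)*p++);

	/* PERF_SAMPLE_ADDR, _ID, _CPU, _PERIOD, _GROUP and _CALLCHAIN
	 * follow in the same fixed bit order, mirroring the header.size
	 * accounting in perf_counter_output() below. */
}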
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c | 36 +++++++++++++++---------------------
1 file changed, 15 insertions(+), 21 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 385ca51c6e60..f2f232696587 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2575,15 +2575,14 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		u32 cpu, reserved;
 	} cpu_entry;
 
-	header.type = 0;
+	header.type = PERF_EVENT_SAMPLE;
 	header.size = sizeof(header);
 
-	header.misc = PERF_EVENT_MISC_OVERFLOW;
+	header.misc = 0;
 	header.misc |= perf_misc_flags(data->regs);
 
 	if (sample_type & PERF_SAMPLE_IP) {
 		ip = perf_instruction_pointer(data->regs);
-		header.type |= PERF_SAMPLE_IP;
 		header.size += sizeof(ip);
 	}
 
@@ -2592,7 +2591,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		tid_entry.pid = perf_counter_pid(counter, current);
 		tid_entry.tid = perf_counter_tid(counter, current);
 
-		header.type |= PERF_SAMPLE_TID;
 		header.size += sizeof(tid_entry);
 	}
 
@@ -2602,34 +2600,25 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		 */
 		time = sched_clock();
 
-		header.type |= PERF_SAMPLE_TIME;
 		header.size += sizeof(u64);
 	}
 
-	if (sample_type & PERF_SAMPLE_ADDR) {
-		header.type |= PERF_SAMPLE_ADDR;
+	if (sample_type & PERF_SAMPLE_ADDR)
 		header.size += sizeof(u64);
-	}
 
-	if (sample_type & PERF_SAMPLE_ID) {
-		header.type |= PERF_SAMPLE_ID;
+	if (sample_type & PERF_SAMPLE_ID)
 		header.size += sizeof(u64);
-	}
 
 	if (sample_type & PERF_SAMPLE_CPU) {
-		header.type |= PERF_SAMPLE_CPU;
 		header.size += sizeof(cpu_entry);
 
 		cpu_entry.cpu = raw_smp_processor_id();
 	}
 
-	if (sample_type & PERF_SAMPLE_PERIOD) {
-		header.type |= PERF_SAMPLE_PERIOD;
+	if (sample_type & PERF_SAMPLE_PERIOD)
 		header.size += sizeof(u64);
-	}
 
 	if (sample_type & PERF_SAMPLE_GROUP) {
-		header.type |= PERF_SAMPLE_GROUP;
 		header.size += sizeof(u64) +
 			counter->nr_siblings * sizeof(group_entry);
 	}
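
To make the size accounting concrete, a worked example with a
hypothetical attr.sample_type of IP | TID | PERIOD on a 64-bit
kernel (struct sizes assumed from the definitions in this function):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* sizeof(struct perf_event_header): u32 type + u16 misc + u16 size */
	uint16_t size = 8;

	size += sizeof(uint64_t);	/* PERF_SAMPLE_IP: ip          */
	size += 2 * sizeof(uint32_t);	/* PERF_SAMPLE_TID: tid_entry  */
	size += sizeof(uint64_t);	/* PERF_SAMPLE_PERIOD: one u64 */

	assert(size == 32);		/* header.size on the wire */
	return 0;
}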
@@ -2639,10 +2628,9 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 
 		if (callchain) {
 			callchain_size = (1 + callchain->nr) * sizeof(u64);
-
-			header.type |= PERF_SAMPLE_CALLCHAIN;
 			header.size += callchain_size;
-		}
+		} else
+			header.size += sizeof(u64);
 	}
 
 	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
@@ -2693,8 +2681,14 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		}
 	}
 
-	if (callchain)
-		perf_output_copy(&handle, callchain, callchain_size);
+	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
+		if (callchain)
+			perf_output_copy(&handle, callchain, callchain_size);
+		else {
+			u64 nr = 0;
+			perf_output_put(&handle, nr);
+		}
+	}
 
 	perf_output_end(&handle);
 }
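
A consequence of the last two hunks: with PERF_SAMPLE_CALLCHAIN
requested, the callchain field is now always present, and an
unavailable chain is encoded as an explicit zero count instead of the
field being silently dropped. Per the callchain_size computation
above, the wire format is one u64 count followed by that many u64
instruction pointers. Continuing the reader sketch from earlier
(skip_callchain() is a hypothetical helper):

static const __u64 *skip_callchain(const __u64 *p)
{
	__u64 i, nr = *p++;	/* nr == 0 when no chain was captured */

	for (i = 0; i < nr; i++)
		printf("  callchain[%llu] = %#llx\n",
		       (unsigned long long)i, (unsigned long long)p[i]);

	return p + nr;		/* first u64 past the chain */
}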