diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-03-23 13:22:10 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-04-06 03:30:27 -0400 |
commit | 7b732a75047738e4f85438ed2f9cd34bf5f2a19a (patch) | |
tree | bae36de785ac819ceef6fa5e1b7884a4a421cc3c /arch/powerpc | |
parent | b09d2501ed3d294619cbfbcf828ad39324d0e548 (diff) |
perf_counter: new output ABI - part 1
Impact: Rework the perfcounter output ABI
use sys_read() only for instant data and provide mmap() output for all
async overflow data.
The first mmap() determines the size of the output buffer. The mmap()
size must be a PAGE_SIZE multiple of 1+pages, where pages must be a
power of 2 or 0. Further mmap()s of the same fd must have the same
size. Once all maps are gone, you can again mmap() with a new size.
In case of 0 extra pages there is no data output and the first page
only contains meta data.
When there are data pages, a poll() event will be generated for each
full page of data. Furthermore, the output is circular. This means
that although 1 page is a valid configuration, it's useless, since
we'll start overwriting it the instant we report a full page.
Future work will focus on the output format (currently maintained)
where we'll likely want each entry denoted by a header which includes a
type and length.
Further future work will allow splice() on the fd, also containing the
async overflow data -- splice() would be mutually exclusive with
mmap() of the data.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Orig-LKML-Reference: <20090323172417.470536358@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc')
-rw-r--r-- | arch/powerpc/kernel/perf_counter.c | 9 |
1 files changed, 3 insertions, 6 deletions
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index e4349281b07d..d48596ab6557 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c | |||
@@ -417,8 +417,7 @@ void hw_perf_restore(u64 disable) | |||
417 | atomic64_set(&counter->hw.prev_count, val); | 417 | atomic64_set(&counter->hw.prev_count, val); |
418 | counter->hw.idx = hwc_index[i] + 1; | 418 | counter->hw.idx = hwc_index[i] + 1; |
419 | write_pmc(counter->hw.idx, val); | 419 | write_pmc(counter->hw.idx, val); |
420 | if (counter->user_page) | 420 | perf_counter_update_userpage(counter); |
421 | perf_counter_update_userpage(counter); | ||
422 | } | 421 | } |
423 | mb(); | 422 | mb(); |
424 | cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; | 423 | cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; |
@@ -574,8 +573,7 @@ static void power_perf_disable(struct perf_counter *counter) | |||
574 | ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr); | 573 | ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr); |
575 | write_pmc(counter->hw.idx, 0); | 574 | write_pmc(counter->hw.idx, 0); |
576 | counter->hw.idx = 0; | 575 | counter->hw.idx = 0; |
577 | if (counter->user_page) | 576 | perf_counter_update_userpage(counter); |
578 | perf_counter_update_userpage(counter); | ||
579 | break; | 577 | break; |
580 | } | 578 | } |
581 | } | 579 | } |
@@ -702,8 +700,7 @@ static void record_and_restart(struct perf_counter *counter, long val, | |||
702 | write_pmc(counter->hw.idx, val); | 700 | write_pmc(counter->hw.idx, val); |
703 | atomic64_set(&counter->hw.prev_count, val); | 701 | atomic64_set(&counter->hw.prev_count, val); |
704 | atomic64_set(&counter->hw.period_left, left); | 702 | atomic64_set(&counter->hw.period_left, left); |
705 | if (counter->user_page) | 703 | perf_counter_update_userpage(counter); |
706 | perf_counter_update_userpage(counter); | ||
707 | 704 | ||
708 | /* | 705 | /* |
709 | * Finally record data if requested. | 706 | * Finally record data if requested. |