author     Linus Torvalds <torvalds@linux-foundation.org>  2012-05-22 21:18:55 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-22 21:18:55 -0400
commit     2ff2b289a695807e291e1ed9f639d8a3ba5f4254
tree       e4b7f44e5cc1582ba2be8aeba221f4841f4c86a6 /arch/arm/kernel
parent     88d6ae8dc33af12fe1c7941b1fae2767374046fd
parent     73787190d04a34e6da745da893b3ae8bedde418f
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf changes from Ingo Molnar:
"Lots of changes:
- (much) improved assembly annotation support in perf report, with
jump visualization, searching, navigation, visual output
improvements and more.
- kernel support for AMD IBS PMU hardware features. Notably 'perf
record -e cycles:p' and 'perf top -e cycles:p' should work without
skid now, like PEBS does on the Intel side, because it takes
advantage of IBS transparently.
- the libtraceevent library: it is the first step towards unifying
tracing tooling and perf, and it also gives a tracing library for
external tools like powertop to rely on.
- infrastructure: various improvements and refactoring of the UI
modules and related code.
- infrastructure: cleanup and simplification of the profiling
targets code (--uid, --pid, --tid, --cpu, --all-cpus, etc.).
- tons of robustness fixes all around.
- various ftrace updates: speedups, cleanups, robustness
improvements.
- typing 'make' in tools/ will now give you a menu of projects to
build and a short help text to explain what each does.
- ... and lots of other changes I forgot to list.
The 'perf record make bzImage' + 'perf report' regression you
reported should be fixed."
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (166 commits)
tracing: Remove kernel_lock annotations
tracing: Fix initial buffer_size_kb state
ring-buffer: Merge separate resize loops
perf evsel: Create events initially disabled -- again
perf tools: Split term type into value type and term type
perf hists: Fix callchain ip printf format
perf target: Add uses_mmap field
ftrace: Remove selecting FRAME_POINTER with FUNCTION_TRACER
ftrace/x86: Have x86 ftrace use the ftrace_modify_all_code()
ftrace: Make ftrace_modify_all_code() global for archs to use
ftrace: Return record ip addr for ftrace_location()
ftrace: Consolidate ftrace_location() and ftrace_text_reserved()
ftrace: Speed up search by skipping pages by address
ftrace: Remove extra helper functions
ftrace: Sort all function addresses, not just per page
tracing: change CPU ring buffer state from tracing_cpumask
tracing: Check return value of tracing_dentry_percpu()
ring-buffer: Reset head page before running self test
ring-buffer: Add integrity check at end of iter read
ring-buffer: Make addition of pages in ring buffer atomic
...
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/perf_event_v6.c      4
-rw-r--r--  arch/arm/kernel/perf_event_v7.c      4
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c  8
3 files changed, 4 insertions, 12 deletions
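
The diffs below adapt the ARM PMU interrupt handlers to the new perf_sample_data_init() signature, which takes the sampling period as a third argument and is now called per overflowed event rather than once per interrupt. A minimal sketch of the updated helper, with the field list abridged (the real definition lives in include/linux/perf_event.h):

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* sketch (abridged): remaining fields are filled in later by
	 * the sampling core before the sample is emitted */
	data->addr   = addr;
	data->raw    = NULL;	/* no raw payload by default */
	data->period = period;	/* previously assigned by hand via data.period */
}

Taking the period at init time removes the failure mode where a caller forgets the manual 'data.period = ...' assignment, and it explains why each handler now invokes the helper inside the per-event loop, where hwc->last_period of the individual overflowed counter is known.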
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index b78af0cc6ef3..ab627a740fa3 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -489,8 +489,6 @@ armv6pmu_handle_irq(int irq_num,
 	 */
 	armv6_pmcr_write(pmcr);
 
-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
@@ -509,7 +507,7 @@ armv6pmu_handle_irq(int irq_num,
 
 		hwc = &event->hw;
 		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 00755d82e2f2..d3c536068162 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1077,8 +1077,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	 */
 	regs = get_irq_regs();
 
-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
@@ -1097,7 +1095,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 
 		hwc = &event->hw;
 		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 71a21e6712f5..e34e7254e652 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -248,8 +248,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 
 	regs = get_irq_regs();
 
-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
@@ -263,7 +261,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 
 		hwc = &event->hw;
 		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
@@ -588,8 +586,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 
 	regs = get_irq_regs();
 
-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
@@ -603,7 +599,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 
 		hwc = &event->hw;
 		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 