diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-04-08 09:01:33 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-04-08 13:05:56 -0400 |
commit | 78f13e9525ba777da25c4ddab89f28e9366a8b7c (patch) | |
tree | d23d43df02330f39e9c31901df9956d2e58a3474 /arch | |
parent | 4d855457d84b819fefcd1cd1b0a2a0a0ec475c07 (diff) |
perf_counter: allow for data addresses to be recorded
Paul suggested we allow for data addresses to be recorded along with
the traditional IPs as power can provide these.
For now, only the software pagefault events provide data addresses,
but in the future power might as well for some events.
x86 doesn't seem capable of providing this at the moment.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090408130409.394816925@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/kernel/perf_counter.c | 2 | ||||
-rw-r--r-- | arch/powerpc/mm/fault.c | 8 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/perf_counter.c | 2 | ||||
-rw-r--r-- | arch/x86/mm/fault.c | 8 |
4 files changed, 12 insertions, 8 deletions
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 0697ade84dd3..c9d019f19074 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c | |||
@@ -749,7 +749,7 @@ static void record_and_restart(struct perf_counter *counter, long val, | |||
749 | * Finally record data if requested. | 749 | * Finally record data if requested. |
750 | */ | 750 | */ |
751 | if (record) | 751 | if (record) |
752 | perf_counter_overflow(counter, 1, regs); | 752 | perf_counter_overflow(counter, 1, regs, 0); |
753 | } | 753 | } |
754 | 754 | ||
755 | /* | 755 | /* |
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 17bbf6f91fbe..ac0e112031b2 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c | |||
@@ -171,7 +171,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, | |||
171 | die("Weird page fault", regs, SIGSEGV); | 171 | die("Weird page fault", regs, SIGSEGV); |
172 | } | 172 | } |
173 | 173 | ||
174 | perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs); | 174 | perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs, address); |
175 | 175 | ||
176 | /* When running in the kernel we expect faults to occur only to | 176 | /* When running in the kernel we expect faults to occur only to |
177 | * addresses in user space. All other faults represent errors in the | 177 | * addresses in user space. All other faults represent errors in the |
@@ -312,7 +312,8 @@ good_area: | |||
312 | } | 312 | } |
313 | if (ret & VM_FAULT_MAJOR) { | 313 | if (ret & VM_FAULT_MAJOR) { |
314 | current->maj_flt++; | 314 | current->maj_flt++; |
315 | perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MAJ, 1, 0, regs); | 315 | perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MAJ, 1, 0, |
316 | regs, address); | ||
316 | #ifdef CONFIG_PPC_SMLPAR | 317 | #ifdef CONFIG_PPC_SMLPAR |
317 | if (firmware_has_feature(FW_FEATURE_CMO)) { | 318 | if (firmware_has_feature(FW_FEATURE_CMO)) { |
318 | preempt_disable(); | 319 | preempt_disable(); |
@@ -322,7 +323,8 @@ good_area: | |||
322 | #endif | 323 | #endif |
323 | } else { | 324 | } else { |
324 | current->min_flt++; | 325 | current->min_flt++; |
325 | perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MIN, 1, 0, regs); | 326 | perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MIN, 1, 0, |
327 | regs, address); | ||
326 | } | 328 | } |
327 | up_read(&mm->mmap_sem); | 329 | up_read(&mm->mmap_sem); |
328 | return 0; | 330 | return 0; |
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 1116a41bc7b5..0fcbaab83f9b 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c | |||
@@ -800,7 +800,7 @@ again: | |||
800 | continue; | 800 | continue; |
801 | 801 | ||
802 | perf_save_and_restart(counter); | 802 | perf_save_and_restart(counter); |
803 | if (perf_counter_overflow(counter, nmi, regs)) | 803 | if (perf_counter_overflow(counter, nmi, regs, 0)) |
804 | __pmc_generic_disable(counter, &counter->hw, bit); | 804 | __pmc_generic_disable(counter, &counter->hw, bit); |
805 | } | 805 | } |
806 | 806 | ||
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index f2d3324d9215..6f9df2babe48 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -1045,7 +1045,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
1045 | if (unlikely(error_code & PF_RSVD)) | 1045 | if (unlikely(error_code & PF_RSVD)) |
1046 | pgtable_bad(regs, error_code, address); | 1046 | pgtable_bad(regs, error_code, address); |
1047 | 1047 | ||
1048 | perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs); | 1048 | perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs, address); |
1049 | 1049 | ||
1050 | /* | 1050 | /* |
1051 | * If we're in an interrupt, have no user context or are running | 1051 | * If we're in an interrupt, have no user context or are running |
@@ -1142,10 +1142,12 @@ good_area: | |||
1142 | 1142 | ||
1143 | if (fault & VM_FAULT_MAJOR) { | 1143 | if (fault & VM_FAULT_MAJOR) { |
1144 | tsk->maj_flt++; | 1144 | tsk->maj_flt++; |
1145 | perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MAJ, 1, 0, regs); | 1145 | perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MAJ, 1, 0, |
1146 | regs, address); | ||
1146 | } else { | 1147 | } else { |
1147 | tsk->min_flt++; | 1148 | tsk->min_flt++; |
1148 | perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MIN, 1, 0, regs); | 1149 | perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MIN, 1, 0, |
1150 | regs, address); | ||
1149 | } | 1151 | } |
1150 | 1152 | ||
1151 | check_v8086_mode(regs, address, tsk); | 1153 | check_v8086_mode(regs, address, tsk); |