author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-06-10 15:02:22 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-06-10 20:39:02 -0400
commit	df1a132bf3d3508f863336c80a27806a2ac947e0
tree	2aa26b9c5d0528e816a80bd3b58c9b2442670d5c /arch
parent	ea1900e571d40a3ce60c835c2f21e1fd8c5cb663
perf_counter: Introduce struct for sample data
For easy extension of the sample data, put it in a structure.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
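The new container and the updated helper prototype live in include/linux/perf_counter.h, outside this arch-limited diff. The following is a minimal sketch inferred from the call sites below; the field names .regs and .addr are the ones the arch code initializes, and the exact header contents are not shown here:

	/* Sketch of the sample-data container assumed by the callers below. */
	struct perf_sample_data {
		struct pt_regs	*regs;	/* interrupted register state */
		u64		addr;	/* sampled data address, 0 when unused */
	};

	/* One pointer now replaces the separate regs/addr arguments. */
	extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
					 struct perf_sample_data *data);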
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/kernel/perf_counter.c	10
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	15
2 files changed, 18 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 4786ad9a2887..5e0bf399c433 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -1001,7 +1001,11 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	 * Finally record data if requested.
 	 */
 	if (record) {
-		addr = 0;
+		struct perf_sample_data data = {
+			.regs = regs,
+			.addr = 0,
+		};
+
 		if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
 			/*
 			 * The user wants a data address recorded.
@@ -1016,9 +1020,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
 			sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
 				POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
 			if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
-				addr = mfspr(SPRN_SDAR);
+				data.addr = mfspr(SPRN_SDAR);
 		}
-		if (perf_counter_overflow(counter, nmi, regs, addr)) {
+		if (perf_counter_overflow(counter, nmi, &data)) {
 			/*
 			 * Interrupts are coming too fast - throttle them
 			 * by setting the counter to 0, so it will be
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 240ca5630632..82a23d487f92 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -1173,11 +1173,14 @@ static void intel_pmu_reset(void)
  */
 static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
+	struct perf_sample_data data;
 	struct cpu_hw_counters *cpuc;
-	struct cpu_hw_counters;
 	int bit, cpu, loops;
 	u64 ack, status;
 
+	data.regs = regs;
+	data.addr = 0;
+
 	cpu = smp_processor_id();
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
@@ -1210,7 +1213,7 @@ again:
 		if (!intel_pmu_save_and_restart(counter))
 			continue;
 
-		if (perf_counter_overflow(counter, 1, regs, 0))
+		if (perf_counter_overflow(counter, 1, &data))
 			intel_pmu_disable_counter(&counter->hw, bit);
 	}
 
@@ -1230,12 +1233,16 @@ again:
 
 static int amd_pmu_handle_irq(struct pt_regs *regs)
 {
-	int cpu, idx, handled = 0;
+	struct perf_sample_data data;
 	struct cpu_hw_counters *cpuc;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
+	int cpu, idx, handled = 0;
 	u64 val;
 
+	data.regs = regs;
+	data.addr = 0;
+
 	cpu = smp_processor_id();
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
@@ -1256,7 +1263,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 		if (!x86_perf_counter_set_period(counter, hwc, idx))
 			continue;
 
-		if (perf_counter_overflow(counter, 1, regs, 0))
+		if (perf_counter_overflow(counter, 1, &data))
 			amd_pmu_disable_counter(hwc, idx);
 	}
 
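Why this helps: extending the sample payload later only means adding a field to the structure and filling it in where the sample is produced; every perf_counter_overflow() call site keeps the same three-argument shape. A self-contained sketch of the pattern, using hypothetical stand-in types rather than real kernel code:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the kernel types -- illustration only. */
	struct pt_regs { uintptr_t ip; };
	struct perf_counter { const char *name; };

	struct perf_sample_data {
		struct pt_regs *regs;
		uint64_t addr;
		/* A future field (e.g. a sample period) slots in here without
		 * touching any perf_counter_overflow() call site. */
	};

	/* Stub consumer; the real function emits a sample record and returns
	 * nonzero when the counter should be throttled. */
	static int perf_counter_overflow(struct perf_counter *counter, int nmi,
					 struct perf_sample_data *data)
	{
		printf("%s: nmi=%d ip=%#lx addr=%#llx\n", counter->name, nmi,
		       (unsigned long)data->regs->ip,
		       (unsigned long long)data->addr);
		return 0;
	}

	int main(void)
	{
		struct pt_regs regs = { .ip = 0x1000 };
		struct perf_counter counter = { .name = "cycles" };
		struct perf_sample_data data = { .regs = &regs, .addr = 0 };

		/* Same call shape as the handlers in the diff above. */
		return perf_counter_overflow(&counter, 1, &data);
	}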