author    Paul Mackerras <paulus@samba.org>   2009-05-13 23:31:48 -0400
committer Ingo Molnar <mingo@elte.hu>         2009-05-15 10:38:57 -0400
commit    0bbd0d4be8d5d3676c126e06e3c75c16def00441 (patch)
tree      c84a75a2c452c98a07d807b96eacff564940dd0d /arch/powerpc/kernel
parent    9d23a90a67261e73b2fcac04d8ca963c6b496afb (diff)
perf_counter: powerpc: supply more precise information on counter overflow events
This uses values from the MMCRA, SIAR and SDAR registers on powerpc to supply
more precise information for overflow events, including a data address when
PERF_RECORD_ADDR is specified.

Since POWER6 uses different bit positions in MMCRA from earlier processors,
this converts the struct power_pmu limited_pmc5_6 field, which only had 0/1
values, into a flags field and defines bit values for its previous use
(PPMU_LIMITED_PMC5_6) and a new flag (PPMU_ALT_SIPR) to indicate that the
processor uses the POWER6 bit positions rather than the earlier positions.
It also adds definitions in reg.h for the new and old positions of the bit
that indicates that the SIAR and SDAR values come from the same instruction.

For the data address, the SDAR value is supplied if we are not doing
instruction sampling.  In that case there is no guarantee that the address
given in the PERF_RECORD_ADDR subrecord will correspond to the instruction
whose address is given in the PERF_RECORD_IP subrecord.

If instruction sampling is enabled (e.g. because this counter is counting a
marked instruction event), then we only supply the SDAR value for the
PERF_RECORD_ADDR subrecord if it corresponds to the instruction whose address
is in the PERF_RECORD_IP subrecord.  Otherwise we supply 0.

[ Impact: support more PMU hardware features on PowerPC ]

Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18955.37028.48861.555309@drongo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
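As orientation for the diff below, here is a minimal, hypothetical sketch of the
limited_pmc5_6-to-flags conversion the message describes.  The flag names match the
commit message, but the bit values, the struct name and its layout are placeholders:
the real definitions live in the powerpc perf_counter header, which is outside this
diffstat.

/*
 * Hypothetical sketch only -- not the actual kernel header.
 * The old "int limited_pmc5_6" field becomes a flags word, with one bit
 * keeping its previous meaning and one marking the POWER6-style MMCRA layout.
 */
#define PPMU_LIMITED_PMC5_6     0x01    /* PMC5/6 have limited function */
#define PPMU_ALT_SIPR           0x02    /* SIPR/SIHV/SDSYNC in POWER6 positions */

struct power_pmu_sketch {
        /* ...counter-description fields and callbacks elided... */
        int flags;                      /* replaces: int limited_pmc5_6; */
};

/* A POWER6-style PMU description would then set both bits: */
static const struct power_pmu_sketch power6_pmu_sketch = {
        .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
};

Packing both properties into one word lets later PMU descriptions advertise further
capabilities without adding another field per feature.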
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/perf_counter.c  84
-rw-r--r--  arch/powerpc/kernel/power5+-pmu.c    2
-rw-r--r--  arch/powerpc/kernel/power6-pmu.c     2
3 files changed, 82 insertions, 6 deletions
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 8d4cafc84b82..6baae5a5c331 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -17,6 +17,7 @@
 #include <asm/pmc.h>
 #include <asm/machdep.h>
 #include <asm/firmware.h>
+#include <asm/ptrace.h>
 
 struct cpu_hw_counters {
         int n_counters;
@@ -310,7 +311,8 @@ static void power_pmu_read(struct perf_counter *counter)
  */
 static int is_limited_pmc(int pmcnum)
 {
-        return ppmu->limited_pmc5_6 && (pmcnum == 5 || pmcnum == 6);
+        return (ppmu->flags & PPMU_LIMITED_PMC5_6)
+                && (pmcnum == 5 || pmcnum == 6);
 }
 
 static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
@@ -860,7 +862,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
          * If this machine has limited counters, check whether this
          * event could go on a limited counter.
          */
-        if (ppmu->limited_pmc5_6) {
+        if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
                 if (can_go_on_limited_pmc(counter, ev, flags)) {
                         flags |= PPMU_LIMITED_PMC_OK;
                 } else if (ppmu->limited_pmc_event(ev)) {
@@ -933,6 +935,7 @@ static void record_and_restart(struct perf_counter *counter, long val,
         u64 period = counter->hw.irq_period;
         s64 prev, delta, left;
         int record = 0;
+        u64 addr, mmcra, sdsync;
 
         /* we don't have to worry about interrupts here */
         prev = atomic64_read(&counter->hw.prev_count);
@@ -963,8 +966,76 @@ static void record_and_restart(struct perf_counter *counter, long val,
         /*
          * Finally record data if requested.
          */
-        if (record)
-                perf_counter_overflow(counter, nmi, regs, 0);
+        if (record) {
+                addr = 0;
+                if (counter->hw_event.record_type & PERF_RECORD_ADDR) {
+                        /*
+                         * The user wants a data address recorded.
+                         * If we're not doing instruction sampling,
+                         * give them the SDAR (sampled data address).
+                         * If we are doing instruction sampling, then only
+                         * give them the SDAR if it corresponds to the
+                         * instruction pointed to by SIAR; this is indicated
+                         * by the [POWER6_]MMCRA_SDSYNC bit in MMCRA.
+                         */
+                        mmcra = regs->dsisr;
+                        sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
+                                POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
+                        if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+                                addr = mfspr(SPRN_SDAR);
+                }
+                perf_counter_overflow(counter, nmi, regs, addr);
+        }
+}
+
+/*
+ * Called from generic code to get the misc flags (i.e. processor mode)
+ * for an event.
+ */
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+        unsigned long mmcra;
+
+        if (TRAP(regs) != 0xf00) {
+                /* not a PMU interrupt */
+                return user_mode(regs) ? PERF_EVENT_MISC_USER :
+                        PERF_EVENT_MISC_KERNEL;
+        }
+
+        mmcra = regs->dsisr;
+        if (ppmu->flags & PPMU_ALT_SIPR) {
+                if (mmcra & POWER6_MMCRA_SIHV)
+                        return PERF_EVENT_MISC_HYPERVISOR;
+                return (mmcra & POWER6_MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
+                        PERF_EVENT_MISC_KERNEL;
+        }
+        if (mmcra & MMCRA_SIHV)
+                return PERF_EVENT_MISC_HYPERVISOR;
+        return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
+                PERF_EVENT_MISC_KERNEL;
+}
+
+/*
+ * Called from generic code to get the instruction pointer
+ * for an event.
+ */
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+        unsigned long mmcra;
+        unsigned long ip;
+        unsigned long slot;
+
+        if (TRAP(regs) != 0xf00)
+                return regs->nip;       /* not a PMU interrupt */
+
+        ip = mfspr(SPRN_SIAR);
+        mmcra = regs->dsisr;
+        if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
+                slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
+                if (slot > 1)
+                        ip += 4 * (slot - 1);
+        }
+        return ip;
 }
 
 /*
@@ -984,6 +1055,11 @@ static void perf_counter_interrupt(struct pt_regs *regs)
                        mfspr(SPRN_PMC6));
 
         /*
+         * Overload regs->dsisr to store MMCRA so we only need to read it once.
+         */
+        regs->dsisr = mfspr(SPRN_MMCRA);
+
+        /*
          * If interrupts were soft-disabled when this PMU interrupt
          * occurred, treat it as an NMI.
          */
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index 3ac0654372ab..c6cdfc165d6e 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -625,6 +625,6 @@ struct power_pmu power5p_pmu = {
         .disable_pmc = power5p_disable_pmc,
         .n_generic = ARRAY_SIZE(power5p_generic_events),
         .generic_events = power5p_generic_events,
-        .limited_pmc5_6 = 1,
+        .flags = PPMU_LIMITED_PMC5_6,
         .limited_pmc_event = power5p_limited_pmc_event,
 };
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index ab7c615c458d..cd4fbe06c35d 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -485,6 +485,6 @@ struct power_pmu power6_pmu = {
         .disable_pmc = p6_disable_pmc,
         .n_generic = ARRAY_SIZE(power6_generic_events),
         .generic_events = power6_generic_events,
-        .limited_pmc5_6 = 1,
+        .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
         .limited_pmc_event = p6_limited_pmc_event,
 };