Diffstat (limited to 'arch')
 arch/powerpc/include/asm/perf_counter.h | 14 +++++++++++++-
 arch/powerpc/include/asm/reg.h          |  2 ++
 arch/powerpc/kernel/perf_counter.c      | 84 +++++++++++++++++++++++++++++++--
 arch/powerpc/kernel/power5+-pmu.c       |  2 +-
 arch/powerpc/kernel/power6-pmu.c        |  2 +-
 5 files changed, 97 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h
index ceea76a48e3d..1c60f0ca7920 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_counter.h
@@ -30,7 +30,7 @@ struct power_pmu {
 				u64 alt[]);
 	void		(*disable_pmc)(unsigned int pmc, u64 mmcr[]);
 	int		(*limited_pmc_event)(u64 event);
-	int		limited_pmc5_6;	/* PMC5 and PMC6 have limited function */
+	u32		flags;
 	int		n_generic;
 	int		*generic_events;
 };
@@ -38,12 +38,24 @@ struct power_pmu {
 extern struct power_pmu *ppmu;
 
 /*
+ * Values for power_pmu.flags
+ */
+#define PPMU_LIMITED_PMC5_6	1	/* PMC5/6 have limited function */
+#define PPMU_ALT_SIPR		2	/* uses alternate posn for SIPR/HV */
+
+/*
  * Values for flags to get_alternatives()
  */
 #define PPMU_LIMITED_PMC_OK	1	/* can put this on a limited PMC */
 #define PPMU_LIMITED_PMC_REQD	2	/* have to put this on a limited PMC */
 #define PPMU_ONLY_COUNT_RUN	4	/* only counting in run state */
 
+struct pt_regs;
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs)	perf_misc_flags(regs)
+
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+
 /*
  * The power_pmu.get_constraint function returns a 64-bit value and
  * a 64-bit mask that express the constraints between this event and
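
A note on the self-referential define added above: #define perf_misc_flags(regs) perf_misc_flags(regs) exists so that generic perf code can detect, via #ifndef, that this architecture supplies its own hooks and only fall back otherwise. Roughly, the generic side looks like this when no arch override is defined (a sketch of the generic header, not part of this patch):

	#ifndef perf_misc_flags
	#define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL)
	#define perf_instruction_pointer(regs)	instruction_pointer(regs)
	#endif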
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e8018d540e87..fb359b0a6937 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -492,11 +492,13 @@
 #define MMCR0_FCHV	0x00000001UL /* freeze conditions in hypervisor mode */
 #define SPRN_MMCR1	798
 #define SPRN_MMCRA	0x312
+#define   MMCRA_SDSYNC	0x80000000UL /* SDAR synced with SIAR */
 #define   MMCRA_SIHV	0x10000000UL /* state of MSR HV when SIAR set */
 #define   MMCRA_SIPR	0x08000000UL /* state of MSR PR when SIAR set */
 #define   MMCRA_SLOT	0x07000000UL /* SLOT bits (37-39) */
 #define   MMCRA_SLOT_SHIFT	24
 #define   MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
+#define POWER6_MMCRA_SDSYNC 0x0000080000000000ULL /* SDAR/SIAR synced */
 #define POWER6_MMCRA_SIHV	0x0000040000000000ULL
 #define POWER6_MMCRA_SIPR	0x0000020000000000ULL
 #define POWER6_MMCRA_THRM	0x00000020UL
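
POWER6 keeps its sampling-status bits (SDSYNC/SIHV/SIPR) at different MMCRA positions than older processors, which is why the new PPMU_ALT_SIPR flag exists: the right bit has to be picked at runtime. A sketch of a helper mirroring the selection the patch open-codes in perf_counter.c below (illustrative only; mmcra_sdsync_bit is not in the patch):

	/* illustrative, not in the patch: select the SDSYNC bit that
	 * matches the running CPU's MMCRA layout */
	static inline unsigned long mmcra_sdsync_bit(void)
	{
		return (ppmu->flags & PPMU_ALT_SIPR) ?
			POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
	}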
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 8d4cafc84b82..6baae5a5c331 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -17,6 +17,7 @@
 #include <asm/pmc.h>
 #include <asm/machdep.h>
 #include <asm/firmware.h>
+#include <asm/ptrace.h>
 
 struct cpu_hw_counters {
 	int n_counters;
@@ -310,7 +311,8 @@ static void power_pmu_read(struct perf_counter *counter)
  */
 static int is_limited_pmc(int pmcnum)
 {
-	return ppmu->limited_pmc5_6 && (pmcnum == 5 || pmcnum == 6);
+	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
+		&& (pmcnum == 5 || pmcnum == 6);
 }
 
 static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
@@ -860,7 +862,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 	 * If this machine has limited counters, check whether this
 	 * event could go on a limited counter.
 	 */
-	if (ppmu->limited_pmc5_6) {
+	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
 		if (can_go_on_limited_pmc(counter, ev, flags)) {
 			flags |= PPMU_LIMITED_PMC_OK;
 		} else if (ppmu->limited_pmc_event(ev)) {
@@ -933,6 +935,7 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	u64 period = counter->hw.irq_period;
 	s64 prev, delta, left;
 	int record = 0;
+	u64 addr, mmcra, sdsync;
 
 	/* we don't have to worry about interrupts here */
 	prev = atomic64_read(&counter->hw.prev_count);
@@ -963,8 +966,76 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	/*
 	 * Finally record data if requested.
 	 */
-	if (record)
-		perf_counter_overflow(counter, nmi, regs, 0);
+	if (record) {
+		addr = 0;
+		if (counter->hw_event.record_type & PERF_RECORD_ADDR) {
+			/*
+			 * The user wants a data address recorded.
+			 * If we're not doing instruction sampling,
+			 * give them the SDAR (sampled data address).
+			 * If we are doing instruction sampling, then only
+			 * give them the SDAR if it corresponds to the
+			 * instruction pointed to by SIAR; this is indicated
+			 * by the [POWER6_]MMCRA_SDSYNC bit in MMCRA.
+			 */
+			mmcra = regs->dsisr;
+			sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
+				POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
+			if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+				addr = mfspr(SPRN_SDAR);
+		}
+		perf_counter_overflow(counter, nmi, regs, addr);
+	}
+}
+
+/*
+ * Called from generic code to get the misc flags (i.e. processor mode)
+ * for an event.
+ */
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+	unsigned long mmcra;
+
+	if (TRAP(regs) != 0xf00) {
+		/* not a PMU interrupt */
+		return user_mode(regs) ? PERF_EVENT_MISC_USER :
+			PERF_EVENT_MISC_KERNEL;
+	}
+
+	mmcra = regs->dsisr;
+	if (ppmu->flags & PPMU_ALT_SIPR) {
+		if (mmcra & POWER6_MMCRA_SIHV)
+			return PERF_EVENT_MISC_HYPERVISOR;
+		return (mmcra & POWER6_MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
+			PERF_EVENT_MISC_KERNEL;
+	}
+	if (mmcra & MMCRA_SIHV)
+		return PERF_EVENT_MISC_HYPERVISOR;
+	return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
+		PERF_EVENT_MISC_KERNEL;
+}
+
+/*
+ * Called from generic code to get the instruction pointer
+ * for an event.
+ */
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+	unsigned long mmcra;
+	unsigned long ip;
+	unsigned long slot;
+
+	if (TRAP(regs) != 0xf00)
+		return regs->nip;	/* not a PMU interrupt */
+
+	ip = mfspr(SPRN_SIAR);
+	mmcra = regs->dsisr;
+	if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
+		slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
+		if (slot > 1)
+			ip += 4 * (slot - 1);
+	}
+	return ip;
 }
 
 /*
@@ -984,6 +1055,11 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 		       mfspr(SPRN_PMC6));
 
 	/*
+	 * Overload regs->dsisr to store MMCRA so we only need to read it once.
+	 */
+	regs->dsisr = mfspr(SPRN_MMCRA);
+
+	/*
 	 * If interrupts were soft-disabled when this PMU interrupt
 	 * occurred, treat it as an NMI.
 	 */
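
Two things worth noting about the hunks above. First, record_and_restart(), perf_misc_flags() and perf_instruction_pointer() all read MMCRA from regs->dsisr rather than from the SPR: perf_counter_interrupt() stashes it there once (last hunk), and DSISR carries no meaningful value for a PMU exception (TRAP(regs) == 0xf00), so the overload is safe and avoids repeated mfspr reads. Second, the SLOT adjustment assumes SIAR points at the first instruction of the sampled dispatch group, with the MMCRA SLOT field giving the position within that group; a worked example with invented values:

	/* hypothetical values, for illustration only */
	unsigned long siar = 0xc000000000012340UL;	/* start of group */
	unsigned long slot = 3;		/* sampled insn is 3rd in group */
	unsigned long ip = siar + 4 * (slot - 1);	/* 0xc000000000012348 */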
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index 3ac0654372ab..c6cdfc165d6e 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -625,6 +625,6 @@ struct power_pmu power5p_pmu = {
 	.disable_pmc		= power5p_disable_pmc,
 	.n_generic		= ARRAY_SIZE(power5p_generic_events),
 	.generic_events		= power5p_generic_events,
-	.limited_pmc5_6		= 1,
+	.flags			= PPMU_LIMITED_PMC5_6,
 	.limited_pmc_event	= power5p_limited_pmc_event,
 };
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index ab7c615c458d..cd4fbe06c35d 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -485,6 +485,6 @@ struct power_pmu power6_pmu = {
 	.disable_pmc		= p6_disable_pmc,
 	.n_generic		= ARRAY_SIZE(power6_generic_events),
 	.generic_events		= power6_generic_events,
-	.limited_pmc5_6		= 1,
+	.flags			= PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
 	.limited_pmc_event	= p6_limited_pmc_event,
 };
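
With the old limited_pmc5_6 int folded into a flags word, a backend now declares all of its quirks in one member. A hedged sketch of what a hypothetical future backend might set (the powerX names are invented for illustration; members not shown are left at zero in this fragment):

	/* hypothetical backend, illustrative only */
	struct power_pmu powerX_pmu = {
		.flags		= PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
	};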