author	Michael Ellerman <michael@ellerman.id.au>	2013-06-28 04:15:16 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-06-30 21:50:10 -0400
commit	330a1eb7775ba876dbd46b9885556e57f705e3d4 (patch)
tree	138693772ff043ec23e335a1ef42df7a7d5707ba /arch/powerpc
parent	2ac138ca21ad26c988ce7c91d27327f85beb7519 (diff)
powerpc/perf: Core EBB support for 64-bit book3s
Add support for EBB (Event Based Branches) on 64-bit book3s. See the
included documentation for more details.

EBBs are a feature which allows the hardware to branch directly to a
specified user space address when a PMU event overflows. This can be
used by programs for self-monitoring with no kernel involvement in the
inner loop.

Most of the logic is in the generic book3s code, primarily to avoid a
proliferation of PMU callbacks.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
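For readers unfamiliar with the interface: userspace requests an EBB event
through the regular perf_event_open() syscall, setting bit 63 of the raw
event config (EVENT_CONFIG_EBB_SHIFT below) and meeting the constraints
enforced by ebb_event_check() in this patch. A minimal sketch follows;
the raw event code passed in is CPU-specific and is the caller's problem,
so no particular value is assumed here.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_ebb_event(uint64_t raw_event)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	/* Bit 63 of config requests EBB (EVENT_CONFIG_EBB_SHIFT) */
	attr.config = raw_event | (1ull << 63);
	/* ebb_event_check(): the group leader must be pinned and exclusive */
	attr.pinned = 1;
	attr.exclusive = 1;
	/* ...and inherit, sample_period, enable_on_exec, freq must be 0 */

	/* pid 0, cpu -1: a per-task event, as PERF_ATTACH_TASK requires */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}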
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/perf_event_server.h	6
-rw-r--r--	arch/powerpc/include/asm/processor.h	3
-rw-r--r--	arch/powerpc/include/asm/reg.h	8
-rw-r--r--	arch/powerpc/include/asm/switch_to.h	14
-rw-r--r--	arch/powerpc/kernel/process.c	4
-rw-r--r--	arch/powerpc/perf/core-book3s.c	161
6 files changed, 182 insertions, 14 deletions
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index f265049dd7d6..2dd7bfc459be 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -60,6 +60,7 @@ struct power_pmu {
 #define PPMU_HAS_SSLOT		0x00000020 /* Has sampled slot in MMCRA */
 #define PPMU_HAS_SIER		0x00000040 /* Has SIER */
 #define PPMU_BHRB		0x00000080 /* has BHRB feature enabled */
+#define PPMU_EBB		0x00000100 /* supports event based branch */
 
 /*
  * Values for flags to get_alternatives()
@@ -68,6 +69,11 @@ struct power_pmu {
 #define PPMU_LIMITED_PMC_REQD	2	/* have to put this on a limited PMC */
 #define PPMU_ONLY_COUNT_RUN	4	/* only counting in run state */
 
+/*
+ * We use the event config bit 63 as a flag to request EBB.
+ */
+#define EVENT_CONFIG_EBB_SHIFT	63
+
 extern int register_power_pmu(struct power_pmu *);
 
 struct pt_regs;
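For illustration, a CPU-specific PMU driver opts in to EBB by setting
PPMU_EBB in its flags when registering; PMUs without the flag keep bit 63
of the event code free for other uses. A hedged sketch with placeholder
values, not a real driver:

/* Illustrative only: name and field values are placeholders. */
static struct power_pmu example_pmu = {
	.name		= "EXAMPLE",
	.n_counter	= 6,
	.flags		= PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
	/* .compute_mmcr, .get_constraint, etc. elided */
};

static int __init init_example_pmu(void)
{
	return register_power_pmu(&example_pmu);
}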
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 3f19df3cc7a3..47a35b08b963 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -287,8 +287,9 @@ struct thread_struct {
 	unsigned long	siar;
 	unsigned long	sdar;
 	unsigned long	sier;
-	unsigned long	mmcr0;
 	unsigned long	mmcr2;
+	unsigned 	mmcr0;
+	unsigned 	used_ebb;
 #endif
 };
 
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 362142b69d5b..5d7d9c2a5473 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -621,6 +621,9 @@
 #define MMCR0_PMXE	0x04000000UL /* performance monitor exception enable */
 #define MMCR0_FCECE	0x02000000UL /* freeze ctrs on enabled cond or event */
 #define MMCR0_TBEE	0x00400000UL /* time base exception enable */
+#define MMCR0_EBE	0x00100000UL /* Event based branch enable */
+#define MMCR0_PMCC	0x000c0000UL /* PMC control */
+#define MMCR0_PMCC_U6	0x00080000UL /* PMC1-6 are R/W by user (PR) */
 #define MMCR0_PMC1CE	0x00008000UL /* PMC1 count enable*/
 #define MMCR0_PMCjCE	0x00004000UL /* PMCj count enable*/
 #define MMCR0_TRIGGER	0x00002000UL /* TRIGGER enable */
@@ -674,6 +677,11 @@
 #define SIER_SIAR_VALID	0x0400000	/* SIAR contents valid */
 #define SIER_SDAR_VALID	0x0200000	/* SDAR contents valid */
 
+/* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */
+#define MMCR0_USER_MASK	(MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO)
+#define MMCR2_USER_MASK	0x4020100804020000UL /* (FC1P|FC2P|FC3P|FC4P|FC5P|FC6P) */
+#define SIER_USER_MASK	0x7fffffUL
+
 #define SPRN_PA6T_MMCR0 795
 #define PA6T_MMCR0_EN0	0x0000000000000001UL
 #define PA6T_MMCR0_EN1	0x0000000000000002UL
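With MMCR0_PMCC_U6 in force, PMC1-6 become readable and writable from
problem state. As a sketch, userspace can then sample a counter directly
in its inner loop; SPR 771 as the user-level alias of PMC1 comes from the
Power ISA, not from this patch, so treat it as an assumption:

static inline unsigned long read_user_pmc1(void)
{
	unsigned long val;

	/* SPR 771: assumed problem-state alias of PMC1 (Power ISA) */
	asm volatile("mfspr %0, 771" : "=r" (val));
	return val;
}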
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 200d763a0a67..49a13e0ef234 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -67,4 +67,18 @@ static inline void flush_spe_to_thread(struct task_struct *t)
 }
 #endif
 
+static inline void clear_task_ebb(struct task_struct *t)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+    /* EBB perf events are not inherited, so clear all EBB state. */
+    t->thread.bescr = 0;
+    t->thread.mmcr2 = 0;
+    t->thread.mmcr0 = 0;
+    t->thread.siar = 0;
+    t->thread.sdar = 0;
+    t->thread.sier = 0;
+    t->thread.used_ebb = 0;
+#endif
+}
+
 #endif /* _ASM_POWERPC_SWITCH_TO_H */
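The bescr field cleared above belongs to the EBB facility's Branch Event
Status and Control Register. For context, userspace arms EBB roughly as
sketched below; the SPR numbers (EBBHR = 804, BESCRS = 800) and the BESCR
bit positions are taken from the Power ISA rather than this patch, so
treat them all as assumptions:

#define SPR_EBBHR	804		/* Event Based Branch Handler Register */
#define SPR_BESCRS	800		/* set-form alias: writes OR into BESCR */
#define BESCR_GE	(1ull << 63)	/* global enable, assumed MSB */
#define BESCR_PME	(1ull << 32)	/* PM event enable, assumed position */

extern void ebb_handler(void);		/* hypothetical handler entry point */

static inline void arm_ebb(void)
{
	/* mtspr takes an immediate SPR number, hence the literals */
	asm volatile("mtspr 804, %0" :: "r" ((unsigned long)ebb_handler));
	asm volatile("mtspr 800, %0" :: "r" (BESCR_GE | BESCR_PME));
}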
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b0f3e3f77e72..f8a76e6207bd 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -916,7 +916,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	flush_altivec_to_thread(src);
 	flush_vsx_to_thread(src);
 	flush_spe_to_thread(src);
+
 	*dst = *src;
+
+	clear_task_ebb(dst);
+
 	return 0;
 }
 
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index c91dc43e04de..a3985aee77fe 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -77,6 +77,9 @@ static unsigned int freeze_events_kernel = MMCR0_FCS;
 #define MMCR0_PMCjCE	MMCR0_PMCnCE
 #define MMCR0_FC56	0
 #define MMCR0_PMAO	0
+#define MMCR0_EBE	0
+#define MMCR0_PMCC	0
+#define MMCR0_PMCC_U6	0
 
 #define SPRN_MMCRA	SPRN_MMCR2
 #define MMCRA_SAMPLE_ENABLE 0
@@ -104,6 +107,15 @@ static inline int siar_valid(struct pt_regs *regs)
 	return 1;
 }
 
+static bool is_ebb_event(struct perf_event *event) { return false; }
+static int ebb_event_check(struct perf_event *event) { return 0; }
+static void ebb_event_add(struct perf_event *event) { }
+static void ebb_switch_out(unsigned long mmcr0) { }
+static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
+{
+	return mmcr0;
+}
+
 static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
 static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
 void power_pmu_flush_branch_stack(void) {}
@@ -464,6 +476,89 @@ void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
 	return;
 }
 
+static bool is_ebb_event(struct perf_event *event)
+{
+	/*
+	 * This could be a per-PMU callback, but we'd rather avoid the cost. We
+	 * check that the PMU supports EBB, meaning those that don't can still
+	 * use bit 63 of the event code for something else if they wish.
+	 */
+	return (ppmu->flags & PPMU_EBB) &&
+	       ((event->attr.config >> EVENT_CONFIG_EBB_SHIFT) & 1);
+}
+
+static int ebb_event_check(struct perf_event *event)
+{
+	struct perf_event *leader = event->group_leader;
+
+	/* Event and group leader must agree on EBB */
+	if (is_ebb_event(leader) != is_ebb_event(event))
+		return -EINVAL;
+
+	if (is_ebb_event(event)) {
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			return -EINVAL;
+
+		if (!leader->attr.pinned || !leader->attr.exclusive)
+			return -EINVAL;
+
+		if (event->attr.inherit || event->attr.sample_period ||
+		    event->attr.enable_on_exec || event->attr.freq)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void ebb_event_add(struct perf_event *event)
+{
+	if (!is_ebb_event(event) || current->thread.used_ebb)
+		return;
+
+	/*
+	 * IFF this is the first time we've added an EBB event, set
+	 * PMXE in the user MMCR0 so we can detect when it's cleared by
+	 * userspace. We need this so that we can context switch while
+	 * userspace is in the EBB handler (where PMXE is 0).
+	 */
+	current->thread.used_ebb = 1;
+	current->thread.mmcr0 |= MMCR0_PMXE;
+}
+
+static void ebb_switch_out(unsigned long mmcr0)
+{
+	if (!(mmcr0 & MMCR0_EBE))
+		return;
+
+	current->thread.siar = mfspr(SPRN_SIAR);
+	current->thread.sier = mfspr(SPRN_SIER);
+	current->thread.sdar = mfspr(SPRN_SDAR);
+	current->thread.mmcr0 = mmcr0 & MMCR0_USER_MASK;
+	current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;
+}
+
+static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
+{
+	if (!ebb)
+		goto out;
+
+	/* Enable EBB and read/write to all 6 PMCs for userspace */
+	mmcr0 |= MMCR0_EBE | MMCR0_PMCC_U6;
+
+	/* Add any bits from the user reg, FC or PMAO */
+	mmcr0 |= current->thread.mmcr0;
+
+	/* Be careful not to set PMXE if userspace had it cleared */
+	if (!(current->thread.mmcr0 & MMCR0_PMXE))
+		mmcr0 &= ~MMCR0_PMXE;
+
+	mtspr(SPRN_SIAR, current->thread.siar);
+	mtspr(SPRN_SIER, current->thread.sier);
+	mtspr(SPRN_SDAR, current->thread.sdar);
+	mtspr(SPRN_MMCR2, current->thread.mmcr2);
+out:
+	return mmcr0;
+}
 #endif /* CONFIG_PPC64 */
 
 static void perf_event_interrupt(struct pt_regs *regs);
@@ -734,6 +829,13 @@ static void power_pmu_read(struct perf_event *event)
 
 	if (!event->hw.idx)
 		return;
+
+	if (is_ebb_event(event)) {
+		val = read_pmc(event->hw.idx);
+		local64_set(&event->hw.prev_count, val);
+		return;
+	}
+
 	/*
 	 * Performance monitor interrupts come even when interrupts
 	 * are soft-disabled, as long as interrupts are hard-enabled.
@@ -854,7 +956,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
 static void power_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
-	unsigned long flags, val;
+	unsigned long flags, mmcr0, val;
 
 	if (!ppmu)
 		return;
@@ -871,11 +973,11 @@ static void power_pmu_disable(struct pmu *pmu)
 	}
 
 	/*
-	 * Set the 'freeze counters' bit, clear PMAO/FC56.
+	 * Set the 'freeze counters' bit, clear EBE/PMCC/PMAO/FC56.
 	 */
-	val = mfspr(SPRN_MMCR0);
+	val = mmcr0 = mfspr(SPRN_MMCR0);
 	val |= MMCR0_FC;
-	val &= ~(MMCR0_PMAO | MMCR0_FC56);
+	val &= ~(MMCR0_EBE | MMCR0_PMCC | MMCR0_PMAO | MMCR0_FC56);
 
 	/*
 	 * The barrier is to make sure the mtspr has been
@@ -896,7 +998,10 @@ static void power_pmu_disable(struct pmu *pmu)
 
 		cpuhw->disabled = 1;
 		cpuhw->n_added = 0;
+
+		ebb_switch_out(mmcr0);
 	}
+
 	local_irq_restore(flags);
 }
 
@@ -911,15 +1016,15 @@ static void power_pmu_enable(struct pmu *pmu)
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
 	long i;
-	unsigned long val;
+	unsigned long val, mmcr0;
 	s64 left;
 	unsigned int hwc_index[MAX_HWEVENTS];
 	int n_lim;
 	int idx;
+	bool ebb;
 
 	if (!ppmu)
 		return;
-
 	local_irq_save(flags);
 
 	cpuhw = &__get_cpu_var(cpu_hw_events);
@@ -934,6 +1039,13 @@ static void power_pmu_enable(struct pmu *pmu)
 	cpuhw->disabled = 0;
 
 	/*
+	 * EBB requires an exclusive group and all events must have the EBB
+	 * flag set, or not set, so we can just check a single event. Also we
+	 * know we have at least one event.
+	 */
+	ebb = is_ebb_event(cpuhw->event[0]);
+
+	/*
 	 * If we didn't change anything, or only removed events,
 	 * no need to recalculate MMCR* settings and reset the PMCs.
 	 * Just reenable the PMU with the current MMCR* settings
@@ -1008,25 +1120,34 @@ static void power_pmu_enable(struct pmu *pmu)
 			++n_lim;
 			continue;
 		}
-		val = 0;
-		if (event->hw.sample_period) {
-			left = local64_read(&event->hw.period_left);
-			if (left < 0x80000000L)
-				val = 0x80000000L - left;
+
+		if (ebb)
+			val = local64_read(&event->hw.prev_count);
+		else {
+			val = 0;
+			if (event->hw.sample_period) {
+				left = local64_read(&event->hw.period_left);
+				if (left < 0x80000000L)
+					val = 0x80000000L - left;
+			}
+			local64_set(&event->hw.prev_count, val);
 		}
-		local64_set(&event->hw.prev_count, val);
+
 		event->hw.idx = idx;
 		if (event->hw.state & PERF_HES_STOPPED)
 			val = 0;
 		write_pmc(idx, val);
+
 		perf_event_update_userpage(event);
 	}
 	cpuhw->n_limited = n_lim;
 	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
 
  out_enable:
+	mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
+
 	mb();
-	write_mmcr0(cpuhw, cpuhw->mmcr[0]);
+	write_mmcr0(cpuhw, mmcr0);
 
 	/*
 	 * Enable instruction sampling if necessary
@@ -1124,6 +1245,8 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
 	event->hw.config = cpuhw->events[n0];
 
 nocheck:
+	ebb_event_add(event);
+
 	++cpuhw->n_events;
 	++cpuhw->n_added;
 
@@ -1484,6 +1607,11 @@ static int power_pmu_event_init(struct perf_event *event)
 		}
 	}
 
+	/* Extra checks for EBB */
+	err = ebb_event_check(event);
+	if (err)
+		return err;
+
 	/*
 	 * If this is in a group, check if it can go on with all the
 	 * other hardware events in the group. We assume the event
@@ -1523,6 +1651,13 @@ static int power_pmu_event_init(struct perf_event *event)
 	local64_set(&event->hw.period_left, event->hw.last_period);
 
 	/*
+	 * For EBB events we just context switch the PMC value, we don't do any
+	 * of the sample_period logic. We use hw.prev_count for this.
+	 */
+	if (is_ebb_event(event))
+		local64_set(&event->hw.prev_count, 0);
+
+	/*
 	 * See if we need to reserve the PMU.
 	 * If no events are currently in use, then we have to take a
 	 * mutex to ensure that we don't race with another task doing
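Putting the pieces together: a userspace EBB handler runs with PMXE clear
(which is why ebb_event_add() sets PMXE in the saved thread.mmcr0), and
MMCR0 is partially user-writable per MMCR0_USER_MASK. The sketch below
shows the rough shape of such a handler. It is not runnable as-is: entry
and exit need an assembly trampoline that saves volatile registers and
ends in rfebb, and the SPR numbers, BESCR bit position, and SAMPLE_PERIOD
are assumptions drawn from reg.h and the Power ISA, not from this patch.

/* Hedged sketch of the body of a userspace EBB handler. */
#define MMCR0_FC	0x80000000ul	/* freeze counters (reg.h) */
#define MMCR0_PMAO	0x00000080ul	/* PM alert occurred (reg.h) */
#define BESCR_PME	(1ull << 32)	/* assumed bit position */
#define SAMPLE_PERIOD	10000ul		/* hypothetical reload period */

static void ebb_handler_body(void)
{
	unsigned long val;

	/* Reload PMC1 (user SPR 771) so it overflows after SAMPLE_PERIOD */
	val = 0x80000000ul - SAMPLE_PERIOD;
	asm volatile("mtspr 771, %0" :: "r" (val));

	/* Clear PMAO and unfreeze; MMCR0 is user-writable via SPR 779 */
	asm volatile("mfspr %0, 779" : "=r" (val));
	val &= ~(MMCR0_PMAO | MMCR0_FC);
	asm volatile("mtspr 779, %0" :: "r" (val));

	/* Re-enable PM events via BESCRS (SPR 800); the trampoline then
	 * executes rfebb 1 to set BESCR.GE and return to the interrupted
	 * code via EBBRR. */
	asm volatile("mtspr 800, %0" :: "r" (BESCR_PME));
}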