author	David S. Miller <davem@davemloft.net>	2012-08-17 05:51:21 -0400
committer	David S. Miller <davem@davemloft.net>	2012-08-19 02:26:20 -0400
commit	3f1a20972239e3f66720c34d9009ae9cc9ddffba (patch)
tree	de2212c7170098c9b25e517407218d42fcaa0648
parent	7ac2ed286f9338ea6437831096cc36ce8395b6fc (diff)
sparc64: Prepare perf event layer for handling multiple PCR registers.
Make the per-cpu pcr save area an array instead of one u64.

Describe how many PCR and PIC registers the chip has in the
sparc_pmu descriptor.

Signed-off-by: David S. Miller <davem@davemloft.net>
 arch/sparc/kernel/perf_event.c | 72 ++++++++++++++++++++++++++-----------------
 1 file changed, 45 insertions(+), 27 deletions(-)
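At a glance, the patch turns the single saved %pcr value into a small per-cpu array and lets each chip descriptor declare how many PCR and PIC registers the hardware has. The touched declarations read as follows after the patch (fields this diff does not touch are elided):

	#define MAX_HWEVENTS			2
	#define MAX_PCRS			1

	struct cpu_hw_events {
		/* ... */
		/* Software copy of %pcr register(s) on this cpu. */
		u64			pcr[MAX_HWEVENTS];
		/* ... */
	};

	struct sparc_pmu {
		/* ... */
		int			max_hw_events;
		int			num_pcrs;
		int			num_pic_regs;
	};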
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 1ab676bd13f..9be089abb5d 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -54,6 +54,7 @@
  */
 
 #define MAX_HWEVENTS			2
+#define MAX_PCRS			1
 #define MAX_PERIOD			((1UL << 32) - 1)
 
 #define PIC_UPPER_INDEX			0
@@ -89,8 +90,8 @@ struct cpu_hw_events {
 	 */
 	int			current_idx[MAX_HWEVENTS];
 
-	/* Software copy of %pcr register on this cpu.  */
-	u64			pcr;
+	/* Software copy of %pcr register(s) on this cpu.  */
+	u64			pcr[MAX_HWEVENTS];
 
 	/* Enabled/disable state.  */
 	int			enabled;
@@ -156,6 +157,8 @@ struct sparc_pmu {
 #define SPARC_PMU_ALL_EXCLUDES_SAME	0x00000001
 #define SPARC_PMU_HAS_CONFLICTS		0x00000002
 	int		max_hw_events;
+	int		num_pcrs;
+	int		num_pic_regs;
 };
 
 static u32 sparc_default_read_pmc(int idx)
@@ -315,6 +318,8 @@ static const struct sparc_pmu ultra3_pmu = {
 	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
 			   SPARC_PMU_HAS_CONFLICTS),
 	.max_hw_events	= 2,
+	.num_pcrs	= 1,
+	.num_pic_regs	= 1,
 };
 
 /* Niagara1 is very limited.  The upper PIC is hard-locked to count
@@ -451,6 +456,8 @@ static const struct sparc_pmu niagara1_pmu = {
 	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
 			   SPARC_PMU_HAS_CONFLICTS),
 	.max_hw_events	= 2,
+	.num_pcrs	= 1,
+	.num_pic_regs	= 1,
 };
 
 static const struct perf_event_map niagara2_perfmon_event_map[] = {
@@ -586,6 +593,8 @@ static const struct sparc_pmu niagara2_pmu = {
 	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
 			   SPARC_PMU_HAS_CONFLICTS),
 	.max_hw_events	= 2,
+	.num_pcrs	= 1,
+	.num_pic_regs	= 1,
 };
 
 static const struct sparc_pmu *sparc_pmu __read_mostly;
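All three existing descriptors (ultra3, niagara1, niagara2) declare a single PCR/PIC pair, so behavior on current chips is unchanged; a later chip with one control register per counter would simply state larger counts. A hypothetical sketch of such a descriptor (illustrative only; its name and values are assumptions, and no such descriptor exists in this patch):

	/* Hypothetical descriptor for a chip with one PCR and one PIC
	 * per counter.  Event maps and bit layout elided. */
	static const struct sparc_pmu example_multi_pcr_pmu = {
		/* ... */
		.max_hw_events	= 4,
		.num_pcrs	= 4,
		.num_pic_regs	= 4,
	};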
@@ -615,12 +624,12 @@ static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_
 {
 	u64 val, mask = mask_for_index(idx);
 
-	val = cpuc->pcr;
+	val = cpuc->pcr[0];
 	val &= ~mask;
 	val |= hwc->config;
-	cpuc->pcr = val;
+	cpuc->pcr[0] = val;
 
-	pcr_ops->write_pcr(0, cpuc->pcr);
+	pcr_ops->write_pcr(0, cpuc->pcr[0]);
 }
 
 static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
@@ -629,12 +638,12 @@ static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw
 	u64 nop = nop_for_index(idx);
 	u64 val;
 
-	val = cpuc->pcr;
+	val = cpuc->pcr[0];
 	val &= ~mask;
 	val |= nop;
-	cpuc->pcr = val;
+	cpuc->pcr[0] = val;
 
-	pcr_ops->write_pcr(0, cpuc->pcr);
+	pcr_ops->write_pcr(0, cpuc->pcr[0]);
 }
 
 static u64 sparc_perf_event_update(struct perf_event *event,
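Note that the per-event enable/disable helpers above still address PCR 0 explicitly; generalizing them for chips with more than one PCR is presumably left to a follow-up patch. Reassembled from the hunk above, the enable helper after this patch reads:

	static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc,
						  struct hw_perf_event *hwc, int idx)
	{
		u64 val, mask = mask_for_index(idx);

		/* Update the software copy for this counter's bits,
		 * then push the whole register out to the hardware. */
		val = cpuc->pcr[0];
		val &= ~mask;
		val |= hwc->config;
		cpuc->pcr[0] = val;

		pcr_ops->write_pcr(0, cpuc->pcr[0]);
	}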
@@ -751,7 +760,7 @@ static void sparc_pmu_enable(struct pmu *pmu)
 	cpuc->enabled = 1;
 	barrier();
 
-	pcr = cpuc->pcr;
+	pcr = cpuc->pcr[0];
 	if (!cpuc->n_events) {
 		pcr = 0;
 	} else {
@@ -761,16 +770,16 @@ static void sparc_pmu_enable(struct pmu *pmu)
 		 * configuration, so just fetch the settings from the
 		 * first entry.
 		 */
-		cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
+		cpuc->pcr[0] = pcr | cpuc->event[0]->hw.config_base;
 	}
 
-	pcr_ops->write_pcr(0, cpuc->pcr);
+	pcr_ops->write_pcr(0, cpuc->pcr[0]);
 }
 
 static void sparc_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	u64 val;
+	int i;
 
 	if (!cpuc->enabled)
 		return;
@@ -778,12 +787,14 @@ static void sparc_pmu_disable(struct pmu *pmu)
 	cpuc->enabled = 0;
 	cpuc->n_added = 0;
 
-	val = cpuc->pcr;
-	val &= ~(sparc_pmu->user_bit | sparc_pmu->priv_bit |
-		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
-	cpuc->pcr = val;
+	for (i = 0; i < sparc_pmu->num_pcrs; i++) {
+		u64 val = cpuc->pcr[i];
 
-	pcr_ops->write_pcr(0, cpuc->pcr);
+		val &= ~(sparc_pmu->user_bit | sparc_pmu->priv_bit |
+			 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
+		cpuc->pcr[i] = val;
+		pcr_ops->write_pcr(i, cpuc->pcr[i]);
+	}
 }
 
 static int active_event_index(struct cpu_hw_events *cpuc,
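Stitched together from the two hunks above, the disable path now clears the user/priv/hypervisor/irq control bits in every PCR the chip declares and writes each register back, rather than touching only PCR 0 (blank lines between the hunks reconstructed from context):

	static void sparc_pmu_disable(struct pmu *pmu)
	{
		struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
		int i;

		if (!cpuc->enabled)
			return;

		cpuc->enabled = 0;
		cpuc->n_added = 0;

		/* Quiesce every PCR this chip has, keeping the
		 * software copies in sync with the hardware. */
		for (i = 0; i < sparc_pmu->num_pcrs; i++) {
			u64 val = cpuc->pcr[i];

			val &= ~(sparc_pmu->user_bit | sparc_pmu->priv_bit |
				 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
			cpuc->pcr[i] = val;
			pcr_ops->write_pcr(i, cpuc->pcr[i]);
		}
	}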
@@ -882,9 +893,11 @@ static DEFINE_MUTEX(pmc_grab_mutex);
 static void perf_stop_nmi_watchdog(void *unused)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int i;
 
 	stop_nmi_watchdog(NULL);
-	cpuc->pcr = pcr_ops->read_pcr(0);
+	for (i = 0; i < sparc_pmu->num_pcrs; i++)
+		cpuc->pcr[i] = pcr_ops->read_pcr(i);
 }
 
 void perf_event_grab_pmc(void)
@@ -1293,8 +1306,7 @@ static struct pmu pmu = {
 void perf_event_print_debug(void)
 {
 	unsigned long flags;
-	u64 pcr, pic;
-	int cpu;
+	int cpu, i;
 
 	if (!sparc_pmu)
 		return;
@@ -1303,12 +1315,13 @@ void perf_event_print_debug(void)
 
 	cpu = smp_processor_id();
 
-	pcr = pcr_ops->read_pcr(0);
-	pic = pcr_ops->read_pic(0);
-
 	pr_info("\n");
-	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
-		cpu, pcr, pic);
+	for (i = 0; i < sparc_pmu->num_pcrs; i++)
+		pr_info("CPU#%d: PCR%d[%016llx]\n",
+			cpu, i, pcr_ops->read_pcr(i));
+	for (i = 0; i < sparc_pmu->num_pic_regs; i++)
+		pr_info("CPU#%d: PIC%d[%016llx]\n",
+			cpu, i, pcr_ops->read_pic(i));
 
 	local_irq_restore(flags);
 }
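With the loops above, the debug dump prints one numbered line per PCR and per PIC instead of a single combined line. On a single-PCR chip the output would look along the lines of (register values here are placeholders):

	CPU#0: PCR0[0000000000000000]
	CPU#0: PIC0[0000000000000000]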
@@ -1344,8 +1357,9 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 	 * Do this before we peek at the counters to determine
 	 * overflow so we don't lose any events.
 	 */
-	if (sparc_pmu->irq_bit)
-		pcr_ops->write_pcr(0, cpuc->pcr);
+	if (sparc_pmu->irq_bit &&
+	    sparc_pmu->num_pcrs == 1)
+		pcr_ops->write_pcr(0, cpuc->pcr[0]);
 
 	for (i = 0; i < cpuc->n_events; i++) {
 		struct perf_event *event = cpuc->event[i];
@@ -1353,6 +1367,10 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 		struct hw_perf_event *hwc;
 		u64 val;
 
+		if (sparc_pmu->irq_bit &&
+		    sparc_pmu->num_pcrs > 1)
+			pcr_ops->write_pcr(idx, cpuc->pcr[idx]);
+
 		hwc = &event->hw;
 		val = sparc_perf_event_update(event, hwc, idx);
 		if (val & (1ULL << 31))
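Taken together, the last two hunks split the interrupt re-arm write: a chip with one shared PCR rewrites it once before scanning the counters, while a chip with per-counter PCRs rewrites each event's own register inside the loop. A sketch of the resulting handler body (the `idx` initialization falls between the two hunks and is reconstructed from context, so treat it as an assumption):

	if (sparc_pmu->irq_bit &&
	    sparc_pmu->num_pcrs == 1)
		pcr_ops->write_pcr(0, cpuc->pcr[0]);

	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *event = cpuc->event[i];
		int idx = cpuc->current_idx[i];	/* assumed; not shown in the diff */
		struct hw_perf_event *hwc;
		u64 val;

		/* Per-counter PCRs: re-arm this event's own register. */
		if (sparc_pmu->irq_bit &&
		    sparc_pmu->num_pcrs > 1)
			pcr_ops->write_pcr(idx, cpuc->pcr[idx]);

		hwc = &event->hw;
		val = sparc_perf_event_update(event, hwc, idx);
		/* ... overflow handling continues ... */
	}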