author     Scott Wood <scottwood@freescale.com>      2010-02-25 19:09:45 -0500
committer  Kumar Gala <galak@kernel.crashing.org>    2010-03-05 04:04:08 -0500
commit     a11106544f33c104706ae42d27219a409b67478e (patch)
tree       0ffb268e41d70951415cb3cc9526e6bc673229b1 /arch/powerpc
parent     9d6df3fdfc470c313b7ea13dbe8a63fb21ab7365 (diff)
powerpc/perf: e500 support
This implements perf_event support for the Freescale embedded performance
monitor, based on the existing perf_event.c that supports server/classic
chips.

Some limitations:
- Performance monitor interrupts are regular EE interrupts, and thus you
  can't profile places with interrupts disabled.  We may want to implement
  soft IRQ-disabling, with perfmon interrupts exempted and treated as NMIs.
- When trying to schedule multiple event groups at once, and using
  restricted events, situations could arise where scheduling fails even
  though it would be possible.  Consider three groups, each with two events.
  One group has restricted events, the others don't.  The two non-restricted
  groups are scheduled, then one is removed, which happens to occupy the two
  counters that can't do restricted events.  The remaining non-restricted
  group will not be moved to the non-restricted-capable counters to make
  room if the restricted group tries to be scheduled.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
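To make the second limitation concrete, here is a minimal user-space model of
the allocation order in fsl_emb_pmu_enable(): counters are taken top-down, and
restricted events may only use counters 0..n_restricted-1, as on e500.  This
is an illustration only, not kernel code; the group numbering is made up.

#include <stdio.h>

#define N_COUNTER    4
#define N_RESTRICTED 2                  /* counters 0 and 1, as on e500 */

static int counter[N_COUNTER];          /* 0 = free, else owning group id */

/* Mirror of the top-down search in fsl_emb_pmu_enable(): restricted
 * events may only land on the first N_RESTRICTED counters. */
static int alloc(int group, int restricted)
{
        int limit = restricted ? N_RESTRICTED : N_COUNTER;
        int i;

        for (i = limit - 1; i >= 0; i--) {
                if (!counter[i]) {
                        counter[i] = group;
                        return i;
                }
        }
        return -1;
}

int main(void)
{
        /* Two non-restricted groups of two events each. */
        alloc(1, 0); alloc(1, 0);       /* group 1 -> counters 3, 2 */
        alloc(2, 0); alloc(2, 0);       /* group 2 -> counters 1, 0 */

        /* Group 1 is removed, freeing the two counters that cannot
         * hold restricted events. */
        counter[3] = counter[2] = 0;

        /* A restricted group of two events now fails: group 2 still
         * occupies counters 0 and 1 and is never migrated. */
        printf("restricted slots: %d %d\n", alloc(3, 1), alloc(3, 1));
        return 0;
}

Both alloc() calls for the restricted group return -1 even though counters 3
and 2 are free, which is exactly the missed-migration case described above.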
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/perf_event.h          109
-rw-r--r--  arch/powerpc/include/asm/perf_event_fsl_emb.h   50
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h   110
-rw-r--r--  arch/powerpc/include/asm/reg_fsl_emb.h           2
-rw-r--r--  arch/powerpc/kernel/Makefile                     4
-rw-r--r--  arch/powerpc/kernel/cputable.c                   2
-rw-r--r--  arch/powerpc/kernel/e500-pmu.c                 129
-rw-r--r--  arch/powerpc/kernel/perf_event_fsl_emb.c       654
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype          10
9 files changed, 970 insertions, 100 deletions
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
index 3288ce3997e0..e6d4ce69b126 100644
--- a/arch/powerpc/include/asm/perf_event.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -1,110 +1,23 @@
 /*
- * Performance event support - PowerPC-specific definitions.
+ * Performance event support - hardware-specific disambiguation
  *
- * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ * For now this is a compile-time decision, but eventually it should be
+ * runtime.  This would allow multiplatform perf event support for e300 (fsl
+ * embedded perf counters) plus server/classic, and would accommodate
+ * devices other than the core which provide their own performance counters.
+ *
+ * Copyright 2010 Freescale Semiconductor, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
-#include <linux/types.h>
-
-#include <asm/hw_irq.h>
-
-#define MAX_HWEVENTS		8
-#define MAX_EVENT_ALTERNATIVES	8
-#define MAX_LIMITED_HWCOUNTERS	2
-
-/*
- * This struct provides the constants and functions needed to
- * describe the PMU on a particular POWER-family CPU.
- */
-struct power_pmu {
-	const char	*name;
-	int		n_counter;
-	int		max_alternatives;
-	unsigned long	add_fields;
-	unsigned long	test_adder;
-	int		(*compute_mmcr)(u64 events[], int n_ev,
-				unsigned int hwc[], unsigned long mmcr[]);
-	int		(*get_constraint)(u64 event_id, unsigned long *mskp,
-				unsigned long *valp);
-	int		(*get_alternatives)(u64 event_id, unsigned int flags,
-				u64 alt[]);
-	void		(*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
-	int		(*limited_pmc_event)(u64 event_id);
-	u32		flags;
-	int		n_generic;
-	int		*generic_events;
-	int		(*cache_events)[PERF_COUNT_HW_CACHE_MAX]
-			       [PERF_COUNT_HW_CACHE_OP_MAX]
-			       [PERF_COUNT_HW_CACHE_RESULT_MAX];
-};
-
-/*
- * Values for power_pmu.flags
- */
-#define PPMU_LIMITED_PMC5_6	1	/* PMC5/6 have limited function */
-#define PPMU_ALT_SIPR		2	/* uses alternate posn for SIPR/HV */
-
-/*
- * Values for flags to get_alternatives()
- */
-#define PPMU_LIMITED_PMC_OK	1	/* can put this on a limited PMC */
-#define PPMU_LIMITED_PMC_REQD	2	/* have to put this on a limited PMC */
-#define PPMU_ONLY_COUNT_RUN	4	/* only counting in run state */
-
-extern int register_power_pmu(struct power_pmu *);
 
-struct pt_regs;
-extern unsigned long perf_misc_flags(struct pt_regs *regs);
-extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
-
-#define PERF_EVENT_INDEX_OFFSET	1
-
-/*
- * Only override the default definitions in include/linux/perf_event.h
- * if we have hardware PMU support.
- */
 #ifdef CONFIG_PPC_PERF_CTRS
-#define perf_misc_flags(regs)	perf_misc_flags(regs)
+#include <asm/perf_event_server.h>
 #endif
 
-/*
- * The power_pmu.get_constraint function returns a 32/64-bit value and
- * a 32/64-bit mask that express the constraints between this event_id and
- * other events.
- *
- * The value and mask are divided up into (non-overlapping) bitfields
- * of three different types:
- *
- * Select field: this expresses the constraint that some set of bits
- * in MMCR* needs to be set to a specific value for this event_id.  For a
- * select field, the mask contains 1s in every bit of the field, and
- * the value contains a unique value for each possible setting of the
- * MMCR* bits.  The constraint checking code will ensure that two events
- * that set the same field in their masks have the same value in their
- * value dwords.
- *
- * Add field: this expresses the constraint that there can be at most
- * N events in a particular class.  A field of k bits can be used for
- * N <= 2^(k-1) - 1.  The mask has the most significant bit of the field
- * set (and the other bits 0), and the value has only the least significant
- * bit of the field set.  In addition, the 'add_fields' and 'test_adder'
- * in the struct power_pmu for this processor come into play.  The
- * add_fields value contains 1 in the LSB of the field, and the
- * test_adder contains 2^(k-1) - 1 - N in the field.
- *
- * NAND field: this expresses the constraint that you may not have events
- * in all of a set of classes.  (For example, on PPC970, you can't select
- * events from the FPU, ISU and IDU simultaneously, although any two are
- * possible.)  For N classes, the field is N+1 bits wide, and each class
- * is assigned one bit from the least-significant N bits.  The mask has
- * only the most-significant bit set, and the value has only the bit
- * for the event_id's class set.  The test_adder has the least significant
- * bit set in the field.
- *
- * If an event_id is not subject to the constraint expressed by a particular
- * field, then it will have 0 in both the mask and value for that field.
- */
+
+#ifdef CONFIG_FSL_EMB_PERF_EVENT
+#include <asm/perf_event_fsl_emb.h>
+#endif
diff --git a/arch/powerpc/include/asm/perf_event_fsl_emb.h b/arch/powerpc/include/asm/perf_event_fsl_emb.h
new file mode 100644
index 000000000000..718a9fa94e68
--- /dev/null
+++ b/arch/powerpc/include/asm/perf_event_fsl_emb.h
@@ -0,0 +1,50 @@
+/*
+ * Performance event support - Freescale embedded specific definitions.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+#include <asm/hw_irq.h>
+
+#define MAX_HWEVENTS 4
+
+/* event flags */
+#define FSL_EMB_EVENT_VALID      1
+#define FSL_EMB_EVENT_RESTRICTED 2
+
+/* upper half of event flags is PMLCb */
+#define FSL_EMB_EVENT_THRESHMUL 0x0000070000000000ULL
+#define FSL_EMB_EVENT_THRESH    0x0000003f00000000ULL
+
+struct fsl_emb_pmu {
+        const char      *name;
+        int             n_counter; /* total number of counters */
+
+        /*
+         * The number of contiguous counters starting at zero that
+         * can hold restricted events, or zero if there are no
+         * restricted events.
+         *
+         * This isn't a very flexible method of expressing constraints,
+         * but it's very simple and is adequate for existing chips.
+         */
+        int             n_restricted;
+
+        /* Returns event flags and PMLCb (FSL_EMB_EVENT_*) */
+        u64             (*xlate_event)(u64 event_id);
+
+        int             n_generic;
+        int             *generic_events;
+        int             (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_COUNT_HW_CACHE_OP_MAX]
+                               [PERF_COUNT_HW_CACHE_RESULT_MAX];
+};
+
+int register_fsl_emb_pmu(struct fsl_emb_pmu *);
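A quick user-space sketch of the encoding that xlate_event() returns:
validity flags in the low word and raw PMLCb bits in the high word, which the
driver later splits apart with write_pmlcb(i, event->hw.config >> 32).  The
threshold values below are arbitrary sample numbers, not taken from any chip
manual.

#include <stdint.h>
#include <stdio.h>

#define FSL_EMB_EVENT_VALID      1
#define FSL_EMB_EVENT_RESTRICTED 2
#define FSL_EMB_EVENT_THRESHMUL  0x0000070000000000ULL
#define FSL_EMB_EVENT_THRESH     0x0000003f00000000ULL

int main(void)
{
        /* A made-up xlate_event() result: valid, restricted, with a
         * threshold of 5 and a threshold multiple of 2 in PMLCb. */
        uint64_t config = FSL_EMB_EVENT_VALID | FSL_EMB_EVENT_RESTRICTED
                        | (5ULL << 32)   /* inside FSL_EMB_EVENT_THRESH */
                        | (2ULL << 40);  /* inside FSL_EMB_EVENT_THRESHMUL */

        /* Low word is consumed as flags; high word is written to PMLCb. */
        printf("flags = %#x, PMLCb = %#x\n",
               (unsigned)config, (unsigned)(config >> 32));
        return 0;
}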
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
new file mode 100644
index 000000000000..8f1df1208d23
--- /dev/null
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -0,0 +1,110 @@
+/*
+ * Performance event support - PowerPC classic/server specific definitions.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+#include <asm/hw_irq.h>
+
+#define MAX_HWEVENTS		8
+#define MAX_EVENT_ALTERNATIVES	8
+#define MAX_LIMITED_HWCOUNTERS	2
+
+/*
+ * This struct provides the constants and functions needed to
+ * describe the PMU on a particular POWER-family CPU.
+ */
+struct power_pmu {
+	const char	*name;
+	int		n_counter;
+	int		max_alternatives;
+	unsigned long	add_fields;
+	unsigned long	test_adder;
+	int		(*compute_mmcr)(u64 events[], int n_ev,
+				unsigned int hwc[], unsigned long mmcr[]);
+	int		(*get_constraint)(u64 event_id, unsigned long *mskp,
+				unsigned long *valp);
+	int		(*get_alternatives)(u64 event_id, unsigned int flags,
+				u64 alt[]);
+	void		(*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
+	int		(*limited_pmc_event)(u64 event_id);
+	u32		flags;
+	int		n_generic;
+	int		*generic_events;
+	int		(*cache_events)[PERF_COUNT_HW_CACHE_MAX]
+			       [PERF_COUNT_HW_CACHE_OP_MAX]
+			       [PERF_COUNT_HW_CACHE_RESULT_MAX];
+};
+
+/*
+ * Values for power_pmu.flags
+ */
+#define PPMU_LIMITED_PMC5_6	1	/* PMC5/6 have limited function */
+#define PPMU_ALT_SIPR		2	/* uses alternate posn for SIPR/HV */
+
+/*
+ * Values for flags to get_alternatives()
+ */
+#define PPMU_LIMITED_PMC_OK	1	/* can put this on a limited PMC */
+#define PPMU_LIMITED_PMC_REQD	2	/* have to put this on a limited PMC */
+#define PPMU_ONLY_COUNT_RUN	4	/* only counting in run state */
+
+extern int register_power_pmu(struct power_pmu *);
+
+struct pt_regs;
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+
+#define PERF_EVENT_INDEX_OFFSET	1
+
+/*
+ * Only override the default definitions in include/linux/perf_event.h
+ * if we have hardware PMU support.
+ */
+#ifdef CONFIG_PPC_PERF_CTRS
+#define perf_misc_flags(regs)	perf_misc_flags(regs)
+#endif
+
+/*
+ * The power_pmu.get_constraint function returns a 32/64-bit value and
+ * a 32/64-bit mask that express the constraints between this event_id and
+ * other events.
+ *
+ * The value and mask are divided up into (non-overlapping) bitfields
+ * of three different types:
+ *
+ * Select field: this expresses the constraint that some set of bits
+ * in MMCR* needs to be set to a specific value for this event_id.  For a
+ * select field, the mask contains 1s in every bit of the field, and
+ * the value contains a unique value for each possible setting of the
+ * MMCR* bits.  The constraint checking code will ensure that two events
+ * that set the same field in their masks have the same value in their
+ * value dwords.
+ *
+ * Add field: this expresses the constraint that there can be at most
+ * N events in a particular class.  A field of k bits can be used for
+ * N <= 2^(k-1) - 1.  The mask has the most significant bit of the field
+ * set (and the other bits 0), and the value has only the least significant
+ * bit of the field set.  In addition, the 'add_fields' and 'test_adder'
+ * in the struct power_pmu for this processor come into play.  The
+ * add_fields value contains 1 in the LSB of the field, and the
+ * test_adder contains 2^(k-1) - 1 - N in the field.
+ *
+ * NAND field: this expresses the constraint that you may not have events
+ * in all of a set of classes.  (For example, on PPC970, you can't select
+ * events from the FPU, ISU and IDU simultaneously, although any two are
+ * possible.)  For N classes, the field is N+1 bits wide, and each class
+ * is assigned one bit from the least-significant N bits.  The mask has
+ * only the most-significant bit set, and the value has only the bit
+ * for the event_id's class set.  The test_adder has the least significant
+ * bit set in the field.
+ *
+ * If an event_id is not subject to the constraint expressed by a particular
+ * field, then it will have 0 in both the mask and value for that field.
+ */
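The "add field" rule above is the subtle one, so here is the arithmetic
worked through for a 3-bit field at bit 0 that allows at most N = 2 events of
a class.  This is a stand-alone illustration of the encoding, not the
kernel's constraint checker; the field placement is chosen for readability.

#include <stdio.h>

#define FIELD_MSB   0x4   /* mask: most significant bit of the 3-bit field */
#define FIELD_LSB   0x1   /* each event's value: LSB of the field */
#define TEST_ADDER  0x1   /* 2^(k-1) - 1 - N = 4 - 1 - 2 */

static int fits(int n_events)
{
        unsigned long sum = 0;
        int i;

        for (i = 0; i < n_events; i++)
                sum += FIELD_LSB;       /* accumulate the event values */

        /* The class is over-committed once the carry reaches the
         * masked MSB: 1+1=2 and 2+1=3 stay clear, 3+1=4 sets bit 2. */
        return !((sum + TEST_ADDER) & FIELD_MSB);
}

int main(void)
{
        int n;

        for (n = 1; n <= 3; n++)
                printf("%d event(s): %s\n", n, fits(n) ? "ok" : "too many");
        return 0;
}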
diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h
index 0de404dfee8b..77bb71cfd991 100644
--- a/arch/powerpc/include/asm/reg_fsl_emb.h
+++ b/arch/powerpc/include/asm/reg_fsl_emb.h
@@ -31,7 +31,7 @@
 #define PMLCA_FCM0	0x08000000	/* Freeze when PMM==0 */
 #define PMLCA_CE	0x04000000	/* Condition Enable */
 
-#define PMLCA_EVENT_MASK 0x007f0000	/* Event field */
+#define PMLCA_EVENT_MASK 0x00ff0000	/* Event field */
 #define PMLCA_EVENT_SHIFT	16
 
 #define PMRN_PMLCB0	0x110	/* PM Local Control B0 */
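This change widens the PMLCA event field from 7 bits (0x007f0000) to 8
(0x00ff0000), which is presumably what lets init_e500_pmu() below report 256
events on e500mc: with the old mask, an event number of 128 or more would be
silently truncated when hw_perf_event_init() packs it with
(ev << 16) & PMLCA_EVENT_MASK.  A quick check with an arbitrary event number:

#include <stdio.h>

#define PMLCA_EVENT_MASK  0x00ff0000    /* was 0x007f0000: 7 -> 8 bits */
#define PMLCA_EVENT_SHIFT 16

int main(void)
{
        unsigned int ev = 200;          /* arbitrary event number >= 128 */
        unsigned int field = (ev << PMLCA_EVENT_SHIFT) & PMLCA_EVENT_MASK;

        /* With the old 7-bit mask this would decode back to 72. */
        printf("event %u -> PMLCA field %#x (decodes back to %u)\n",
               ev, field, field >> PMLCA_EVENT_SHIFT);
        return 0;
}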
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 93fd1629f10e..877326320e74 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -99,11 +99,15 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_callchain.o
+
 obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_event.o
 obj64-$(CONFIG_PPC_PERF_CTRS)	+= power4-pmu.o ppc970-pmu.o power5-pmu.o \
 				   power5+-pmu.o power6-pmu.o power7-pmu.o
 obj32-$(CONFIG_PPC_PERF_CTRS)	+= mpc7450-pmu.o
 
+obj-$(CONFIG_FSL_EMB_PERF_EVENT) += perf_event_fsl_emb.o
+obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o
+
 obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
 
 ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 2fc82bac3bbc..8af4949434b2 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1808,7 +1808,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.icache_bsize		= 64,
 		.dcache_bsize		= 64,
 		.num_pmcs		= 4,
-		.oprofile_cpu_type	= "ppc/e500",	/* xxx - galak, e500mc? */
+		.oprofile_cpu_type	= "ppc/e500mc",
 		.oprofile_type		= PPC_OPROFILE_FSL_EMB,
 		.cpu_setup		= __setup_cpu_e500mc,
 		.machine_check		= machine_check_e500,
diff --git a/arch/powerpc/kernel/e500-pmu.c b/arch/powerpc/kernel/e500-pmu.c
new file mode 100644
index 000000000000..7c07de0d8943
--- /dev/null
+++ b/arch/powerpc/kernel/e500-pmu.c
@@ -0,0 +1,129 @@
+/*
+ * Performance counter support for e500 family processors.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/string.h>
+#include <linux/perf_event.h>
+#include <asm/reg.h>
+#include <asm/cputable.h>
+
+/*
+ * Map of generic hardware event types to hardware events
+ * Zero if unsupported
+ */
+static int e500_generic_events[] = {
+        [PERF_COUNT_HW_CPU_CYCLES] = 1,
+        [PERF_COUNT_HW_INSTRUCTIONS] = 2,
+        [PERF_COUNT_HW_CACHE_MISSES] = 41, /* Data L1 cache reloads */
+        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12,
+        [PERF_COUNT_HW_BRANCH_MISSES] = 15,
+};
+
+#define C(x)	PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int e500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+        /*
+         * D-cache misses are not split into read/write/prefetch;
+         * use raw event 41.
+         */
+        [C(L1D)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+                [C(OP_READ)] = {        27,             0       },
+                [C(OP_WRITE)] = {       28,             0       },
+                [C(OP_PREFETCH)] = {    29,             0       },
+        },
+        [C(L1I)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+                [C(OP_READ)] = {        2,              60      },
+                [C(OP_WRITE)] = {       -1,             -1      },
+                [C(OP_PREFETCH)] = {    0,              0       },
+        },
+        /*
+         * Assuming LL means L2, it's not a good match for this model.
+         * It allocates only on L1 castout or explicit prefetch, and
+         * does not have separate read/write events (but it does have
+         * separate instruction/data events).
+         */
+        [C(LL)] = {             /*      RESULT_ACCESS   RESULT_MISS */
+                [C(OP_READ)] = {        0,              0       },
+                [C(OP_WRITE)] = {       0,              0       },
+                [C(OP_PREFETCH)] = {    0,              0       },
+        },
+        /*
+         * There are data/instruction MMU misses, but that's a miss on
+         * the chip's internal level-one TLB which is probably not
+         * what the user wants.  Instead, unified level-two TLB misses
+         * are reported here.
+         */
+        [C(DTLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
+                [C(OP_READ)] = {        26,             66      },
+                [C(OP_WRITE)] = {       -1,             -1      },
+                [C(OP_PREFETCH)] = {    -1,             -1      },
+        },
+        [C(BPU)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+                [C(OP_READ)] = {        12,             15      },
+                [C(OP_WRITE)] = {       -1,             -1      },
+                [C(OP_PREFETCH)] = {    -1,             -1      },
+        },
+};
+
+static int num_events = 128;
+
+/* Upper half of event id is PMLCb, for threshold events */
+static u64 e500_xlate_event(u64 event_id)
+{
+        u32 event_low = (u32)event_id;
+        u64 ret;
+
+        if (event_low >= num_events)
+                return 0;
+
+        ret = FSL_EMB_EVENT_VALID;
+
+        if (event_low >= 76 && event_low <= 81) {
+                ret |= FSL_EMB_EVENT_RESTRICTED;
+                ret |= event_id &
+                       (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH);
+        } else if (event_id &
+                   (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH)) {
+                /* Threshold requested on non-threshold event */
+                return 0;
+        }
+
+        return ret;
+}
+
+static struct fsl_emb_pmu e500_pmu = {
+        .name                   = "e500 family",
+        .n_counter              = 4,
+        .n_restricted           = 2,
+        .xlate_event            = e500_xlate_event,
+        .n_generic              = ARRAY_SIZE(e500_generic_events),
+        .generic_events         = e500_generic_events,
+        .cache_events           = &e500_cache_events,
+};
+
+static int init_e500_pmu(void)
+{
+        if (!cur_cpu_spec->oprofile_cpu_type)
+                return -ENODEV;
+
+        if (!strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500mc"))
+                num_events = 256;
+        else if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500"))
+                return -ENODEV;
+
+        return register_fsl_emb_pmu(&e500_pmu);
+}
+
+arch_initcall(init_e500_pmu);
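Since e500_xlate_event() accepts PMLCb threshold bits only on events 76
through 81, a user asking for one of those events via PERF_TYPE_RAW would
encode the threshold in the upper half of attr.config, roughly as follows.
This is a sketch: the event number and threshold value are arbitrary, and the
shift positions come from the FSL_EMB_EVENT_THRESH/THRESHMUL masks defined in
perf_event_fsl_emb.h.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t event_id  = 76;                 /* low half: raw event code */
        uint64_t thresh    = (uint64_t)16 << 32; /* FSL_EMB_EVENT_THRESH */
        uint64_t threshmul = (uint64_t)1  << 40; /* FSL_EMB_EVENT_THRESHMUL */

        uint64_t config = event_id | thresh | threshmul;

        /* This value would go into perf_event_attr.config, with
         * attr.type = PERF_TYPE_RAW. */
        printf("attr.config = %#llx\n", (unsigned long long)config);
        return 0;
}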
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
new file mode 100644
index 000000000000..369872f6cf78
--- /dev/null
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -0,0 +1,654 @@
+/*
+ * Performance event support - Freescale Embedded Performance Monitor
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/perf_event.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <asm/reg_fsl_emb.h>
+#include <asm/pmc.h>
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+#include <asm/ptrace.h>
+
+struct cpu_hw_events {
+        int n_events;
+        int disabled;
+        u8  pmcs_enabled;
+        struct perf_event *event[MAX_HWEVENTS];
+};
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+
+static struct fsl_emb_pmu *ppmu;
+
+/* Number of perf_events counting hardware events */
+static atomic_t num_events;
+/* Used to avoid races in calling reserve/release_pmc_hardware */
+static DEFINE_MUTEX(pmc_reserve_mutex);
+
+/*
+ * If interrupts were soft-disabled when a PMU interrupt occurs, treat
+ * it as an NMI.
+ */
+static inline int perf_intr_is_nmi(struct pt_regs *regs)
+{
+#ifdef __powerpc64__
+        return !regs->softe;
+#else
+        return 0;
+#endif
+}
+
+static void perf_event_interrupt(struct pt_regs *regs);
+
+/*
+ * Read one performance monitor counter (PMC).
+ */
+static unsigned long read_pmc(int idx)
+{
+        unsigned long val;
+
+        switch (idx) {
+        case 0:
+                val = mfpmr(PMRN_PMC0);
+                break;
+        case 1:
+                val = mfpmr(PMRN_PMC1);
+                break;
+        case 2:
+                val = mfpmr(PMRN_PMC2);
+                break;
+        case 3:
+                val = mfpmr(PMRN_PMC3);
+                break;
+        default:
+                printk(KERN_ERR "oops trying to read PMC%d\n", idx);
+                val = 0;
+        }
+        return val;
+}
+
+/*
+ * Write one PMC.
+ */
+static void write_pmc(int idx, unsigned long val)
+{
+        switch (idx) {
+        case 0:
+                mtpmr(PMRN_PMC0, val);
+                break;
+        case 1:
+                mtpmr(PMRN_PMC1, val);
+                break;
+        case 2:
+                mtpmr(PMRN_PMC2, val);
+                break;
+        case 3:
+                mtpmr(PMRN_PMC3, val);
+                break;
+        default:
+                printk(KERN_ERR "oops trying to write PMC%d\n", idx);
+        }
+
+        isync();
+}
+
+/*
+ * Write one local control A register
+ */
+static void write_pmlca(int idx, unsigned long val)
+{
+        switch (idx) {
+        case 0:
+                mtpmr(PMRN_PMLCA0, val);
+                break;
+        case 1:
+                mtpmr(PMRN_PMLCA1, val);
+                break;
+        case 2:
+                mtpmr(PMRN_PMLCA2, val);
+                break;
+        case 3:
+                mtpmr(PMRN_PMLCA3, val);
+                break;
+        default:
+                printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
+        }
+
+        isync();
+}
+
+/*
+ * Write one local control B register
+ */
+static void write_pmlcb(int idx, unsigned long val)
+{
+        switch (idx) {
+        case 0:
+                mtpmr(PMRN_PMLCB0, val);
+                break;
+        case 1:
+                mtpmr(PMRN_PMLCB1, val);
+                break;
+        case 2:
+                mtpmr(PMRN_PMLCB2, val);
+                break;
+        case 3:
+                mtpmr(PMRN_PMLCB3, val);
+                break;
+        default:
+                printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
+        }
+
+        isync();
+}
+
+static void fsl_emb_pmu_read(struct perf_event *event)
+{
+        s64 val, delta, prev;
+
+        /*
+         * Performance monitor interrupts come even when interrupts
+         * are soft-disabled, as long as interrupts are hard-enabled.
+         * Therefore we treat them like NMIs.
+         */
+        do {
+                prev = atomic64_read(&event->hw.prev_count);
+                barrier();
+                val = read_pmc(event->hw.idx);
+        } while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
+
+        /* The counters are only 32 bits wide */
+        delta = (val - prev) & 0xfffffffful;
+        atomic64_add(delta, &event->count);
+        atomic64_sub(delta, &event->hw.period_left);
+}
+
+/*
+ * Disable all events to prevent PMU interrupts and to allow
+ * events to be added or removed.
+ */
+void hw_perf_disable(void)
+{
+        struct cpu_hw_events *cpuhw;
+        unsigned long flags;
+
+        local_irq_save(flags);
+        cpuhw = &__get_cpu_var(cpu_hw_events);
+
+        if (!cpuhw->disabled) {
+                cpuhw->disabled = 1;
+
+                /*
+                 * Check if we ever enabled the PMU on this cpu.
+                 */
+                if (!cpuhw->pmcs_enabled) {
+                        ppc_enable_pmcs();
+                        cpuhw->pmcs_enabled = 1;
+                }
+
+                if (atomic_read(&num_events)) {
+                        /*
+                         * Set the 'freeze all counters' bit, and disable
+                         * interrupts.  The barrier is to make sure the
+                         * mtpmr has been executed and the PMU has frozen
+                         * the events before we return.
+                         */
+
+                        mtpmr(PMRN_PMGC0, PMGC0_FAC);
+                        isync();
+                }
+        }
+        local_irq_restore(flags);
+}
+
+/*
+ * Re-enable all events if disable == 0.
+ * If we were previously disabled and events were added, then
+ * put the new config on the PMU.
+ */
+void hw_perf_enable(void)
+{
+        struct cpu_hw_events *cpuhw;
+        unsigned long flags;
+
+        local_irq_save(flags);
+        cpuhw = &__get_cpu_var(cpu_hw_events);
+        if (!cpuhw->disabled)
+                goto out;
+
+        cpuhw->disabled = 0;
+        ppc_set_pmu_inuse(cpuhw->n_events != 0);
+
+        if (cpuhw->n_events > 0) {
+                mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
+                isync();
+        }
+
+ out:
+        local_irq_restore(flags);
+}
+
+static int collect_events(struct perf_event *group, int max_count,
+                          struct perf_event *ctrs[])
+{
+        int n = 0;
+        struct perf_event *event;
+
+        if (!is_software_event(group)) {
+                if (n >= max_count)
+                        return -1;
+                ctrs[n] = group;
+                n++;
+        }
+        list_for_each_entry(event, &group->sibling_list, group_entry) {
+                if (!is_software_event(event) &&
+                    event->state != PERF_EVENT_STATE_OFF) {
+                        if (n >= max_count)
+                                return -1;
+                        ctrs[n] = event;
+                        n++;
+                }
+        }
+        return n;
+}
+
+/* perf must be disabled, context locked on entry */
+static int fsl_emb_pmu_enable(struct perf_event *event)
+{
+        struct cpu_hw_events *cpuhw;
+        int ret = -EAGAIN;
+        int num_counters = ppmu->n_counter;
+        u64 val;
+        int i;
+
+        cpuhw = &get_cpu_var(cpu_hw_events);
+
+        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
+                num_counters = ppmu->n_restricted;
+
+        /*
+         * Allocate counters from top-down, so that restricted-capable
+         * counters are kept free as long as possible.
+         */
+        for (i = num_counters - 1; i >= 0; i--) {
+                if (cpuhw->event[i])
+                        continue;
+
+                break;
+        }
+
+        if (i < 0)
+                goto out;
+
+        event->hw.idx = i;
+        cpuhw->event[i] = event;
+        ++cpuhw->n_events;
+
+        val = 0;
+        if (event->hw.sample_period) {
+                s64 left = atomic64_read(&event->hw.period_left);
+                if (left < 0x80000000L)
+                        val = 0x80000000L - left;
+        }
+        atomic64_set(&event->hw.prev_count, val);
+        write_pmc(i, val);
+        perf_event_update_userpage(event);
+
+        write_pmlcb(i, event->hw.config >> 32);
+        write_pmlca(i, event->hw.config_base);
+
+        ret = 0;
+ out:
+        put_cpu_var(cpu_hw_events);
+        return ret;
+}
+
+/* perf must be disabled, context locked on entry */
+static void fsl_emb_pmu_disable(struct perf_event *event)
+{
+        struct cpu_hw_events *cpuhw;
+        int i = event->hw.idx;
+
+        if (i < 0)
+                goto out;
+
+        fsl_emb_pmu_read(event);
+
+        cpuhw = &get_cpu_var(cpu_hw_events);
+
+        WARN_ON(event != cpuhw->event[event->hw.idx]);
+
+        write_pmlca(i, 0);
+        write_pmlcb(i, 0);
+        write_pmc(i, 0);
+
+        cpuhw->event[i] = NULL;
+        event->hw.idx = -1;
+
+        /*
+         * TODO: if at least one restricted event exists, and we
+         * just freed up a non-restricted-capable counter, and
+         * there is a restricted-capable counter occupied by
+         * a non-restricted event, migrate that event to the
+         * vacated counter.
+         */
+
+        cpuhw->n_events--;
+
+ out:
+        put_cpu_var(cpu_hw_events);
+}
+
+/*
+ * Re-enable interrupts on a event after they were throttled
+ * because they were coming too fast.
+ *
+ * Context is locked on entry, but perf is not disabled.
+ */
+static void fsl_emb_pmu_unthrottle(struct perf_event *event)
+{
+        s64 val, left;
+        unsigned long flags;
+
+        if (event->hw.idx < 0 || !event->hw.sample_period)
+                return;
+        local_irq_save(flags);
+        perf_disable();
+        fsl_emb_pmu_read(event);
+        left = event->hw.sample_period;
+        event->hw.last_period = left;
+        val = 0;
+        if (left < 0x80000000L)
+                val = 0x80000000L - left;
+        write_pmc(event->hw.idx, val);
+        atomic64_set(&event->hw.prev_count, val);
+        atomic64_set(&event->hw.period_left, left);
+        perf_event_update_userpage(event);
+        perf_enable();
+        local_irq_restore(flags);
+}
+
+static struct pmu fsl_emb_pmu = {
+        .enable         = fsl_emb_pmu_enable,
+        .disable        = fsl_emb_pmu_disable,
+        .read           = fsl_emb_pmu_read,
+        .unthrottle     = fsl_emb_pmu_unthrottle,
+};
+
+/*
+ * Release the PMU if this is the last perf_event.
+ */
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+        if (!atomic_add_unless(&num_events, -1, 1)) {
+                mutex_lock(&pmc_reserve_mutex);
+                if (atomic_dec_return(&num_events) == 0)
+                        release_pmc_hardware();
+                mutex_unlock(&pmc_reserve_mutex);
+        }
+}
+
+/*
+ * Translate a generic cache event_id config to a raw event_id code.
+ */
+static int hw_perf_cache_event(u64 config, u64 *eventp)
+{
+        unsigned long type, op, result;
+        int ev;
+
+        if (!ppmu->cache_events)
+                return -EINVAL;
+
+        /* unpack config */
+        type = config & 0xff;
+        op = (config >> 8) & 0xff;
+        result = (config >> 16) & 0xff;
+
+        if (type >= PERF_COUNT_HW_CACHE_MAX ||
+            op >= PERF_COUNT_HW_CACHE_OP_MAX ||
+            result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+                return -EINVAL;
+
+        ev = (*ppmu->cache_events)[type][op][result];
+        if (ev == 0)
+                return -EOPNOTSUPP;
+        if (ev == -1)
+                return -EINVAL;
+        *eventp = ev;
+        return 0;
+}
+
+const struct pmu *hw_perf_event_init(struct perf_event *event)
+{
+        u64 ev;
+        struct perf_event *events[MAX_HWEVENTS];
+        int n;
+        int err;
+        int num_restricted;
+        int i;
+
+        switch (event->attr.type) {
+        case PERF_TYPE_HARDWARE:
+                ev = event->attr.config;
+                if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
+                        return ERR_PTR(-EOPNOTSUPP);
+                ev = ppmu->generic_events[ev];
+                break;
+
+        case PERF_TYPE_HW_CACHE:
+                err = hw_perf_cache_event(event->attr.config, &ev);
+                if (err)
+                        return ERR_PTR(err);
+                break;
+
+        case PERF_TYPE_RAW:
+                ev = event->attr.config;
+                break;
+
+        default:
+                return ERR_PTR(-EINVAL);
+        }
+
+        event->hw.config = ppmu->xlate_event(ev);
+        if (!(event->hw.config & FSL_EMB_EVENT_VALID))
+                return ERR_PTR(-EINVAL);
+
+        /*
+         * If this is in a group, check if it can go on with all the
+         * other hardware events in the group.  We assume the event
+         * hasn't been linked into its leader's sibling list at this point.
+         */
+        n = 0;
+        if (event->group_leader != event) {
+                n = collect_events(event->group_leader,
+                                   ppmu->n_counter - 1, events);
+                if (n < 0)
+                        return ERR_PTR(-EINVAL);
+        }
+
+        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
+                num_restricted = 0;
+                for (i = 0; i < n; i++) {
+                        if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED)
+                                num_restricted++;
+                }
+
+                if (num_restricted >= ppmu->n_restricted)
+                        return ERR_PTR(-EINVAL);
+        }
+
+        event->hw.idx = -1;
+
+        event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
+                                (u32)((ev << 16) & PMLCA_EVENT_MASK);
+
+        if (event->attr.exclude_user)
+                event->hw.config_base |= PMLCA_FCU;
+        if (event->attr.exclude_kernel)
+                event->hw.config_base |= PMLCA_FCS;
+        if (event->attr.exclude_idle)
+                return ERR_PTR(-ENOTSUPP);
+
+        event->hw.last_period = event->hw.sample_period;
+        atomic64_set(&event->hw.period_left, event->hw.last_period);
+
+        /*
+         * See if we need to reserve the PMU.
+         * If no events are currently in use, then we have to take a
+         * mutex to ensure that we don't race with another task doing
+         * reserve_pmc_hardware or release_pmc_hardware.
+         */
+        err = 0;
+        if (!atomic_inc_not_zero(&num_events)) {
+                mutex_lock(&pmc_reserve_mutex);
+                if (atomic_read(&num_events) == 0 &&
+                    reserve_pmc_hardware(perf_event_interrupt))
+                        err = -EBUSY;
+                else
+                        atomic_inc(&num_events);
+                mutex_unlock(&pmc_reserve_mutex);
+
+                mtpmr(PMRN_PMGC0, PMGC0_FAC);
+                isync();
+        }
+        event->destroy = hw_perf_event_destroy;
+
+        if (err)
+                return ERR_PTR(err);
+        return &fsl_emb_pmu;
+}
+
+/*
+ * A counter has overflowed; update its count and record
+ * things if requested.  Note that interrupts are hard-disabled
+ * here so there is no possibility of being interrupted.
+ */
+static void record_and_restart(struct perf_event *event, unsigned long val,
+                               struct pt_regs *regs, int nmi)
+{
+        u64 period = event->hw.sample_period;
+        s64 prev, delta, left;
+        int record = 0;
+
+        /* we don't have to worry about interrupts here */
+        prev = atomic64_read(&event->hw.prev_count);
+        delta = (val - prev) & 0xfffffffful;
+        atomic64_add(delta, &event->count);
+
+        /*
+         * See if the total period for this event has expired,
+         * and update for the next period.
+         */
+        val = 0;
+        left = atomic64_read(&event->hw.period_left) - delta;
+        if (period) {
+                if (left <= 0) {
+                        left += period;
+                        if (left <= 0)
+                                left = period;
+                        record = 1;
+                }
+                if (left < 0x80000000LL)
+                        val = 0x80000000LL - left;
+        }
+
+        /*
+         * Finally record data if requested.
+         */
+        if (record) {
+                struct perf_sample_data data = {
+                        .period = event->hw.last_period,
+                };
+
+                if (perf_event_overflow(event, nmi, &data, regs)) {
+                        /*
+                         * Interrupts are coming too fast - throttle them
+                         * by setting the event to 0, so it will be
+                         * at least 2^30 cycles until the next interrupt
+                         * (assuming each event counts at most 2 counts
+                         * per cycle).
+                         */
+                        val = 0;
+                        left = ~0ULL >> 1;
+                }
+        }
+
+        write_pmc(event->hw.idx, val);
+        atomic64_set(&event->hw.prev_count, val);
+        atomic64_set(&event->hw.period_left, left);
+        perf_event_update_userpage(event);
+}
+
+static void perf_event_interrupt(struct pt_regs *regs)
+{
+        int i;
+        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+        struct perf_event *event;
+        unsigned long val;
+        int found = 0;
+        int nmi;
+
+        nmi = perf_intr_is_nmi(regs);
+        if (nmi)
+                nmi_enter();
+        else
+                irq_enter();
+
+        for (i = 0; i < ppmu->n_counter; ++i) {
+                event = cpuhw->event[i];
+
+                val = read_pmc(i);
+                if ((int)val < 0) {
+                        if (event) {
+                                /* event has overflowed */
+                                found = 1;
+                                record_and_restart(event, val, regs, nmi);
+                        } else {
+                                /*
+                                 * Disabled counter is negative,
+                                 * reset it just in case.
+                                 */
+                                write_pmc(i, 0);
+                        }
+                }
+        }
+
+        /* PMM will keep counters frozen until we return from the interrupt. */
+        mtmsr(mfmsr() | MSR_PMM);
+        mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
+        isync();
+
+        if (nmi)
+                nmi_exit();
+        else
+                irq_exit();
+}
+
+void hw_perf_event_setup(int cpu)
+{
+        struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
+
+        memset(cpuhw, 0, sizeof(*cpuhw));
+}
+
+int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
+{
+        if (ppmu)
+                return -EBUSY;          /* something's already registered */
+
+        ppmu = pmu;
+        pr_info("%s performance monitor hardware support registered\n",
+                pmu->name);
+
+        return 0;
+}
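One detail worth calling out in fsl_emb_pmu_read() and record_and_restart():
because the counters are only 32 bits wide, the delta is computed as
(val - prev) & 0xffffffff, which stays correct even when the hardware counter
wraps between two reads.  A stand-alone check with made-up counter values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t prev = 0xfffffff0;     /* close to the 32-bit limit */
        uint64_t val  = 0x00000010;     /* counter wrapped past zero */

        /* Masking to 32 bits recovers the true advance: 0x20 = 32. */
        uint64_t delta = (val - prev) & 0xffffffffull;

        printf("delta = %llu\n", (unsigned long long)delta);
        return 0;
}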
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index fa0f690d3867..a8aae0b54579 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -144,6 +144,16 @@ config FSL_EMB_PERFMON
 	  and some e300 cores (c3 and c4). Select this only if your
 	  core supports the Embedded Performance Monitor APU
 
+config FSL_EMB_PERF_EVENT
+	bool
+	depends on FSL_EMB_PERFMON && PERF_EVENTS && !PPC_PERF_CTRS
+	default y
+
+config FSL_EMB_PERF_EVENT_E500
+	bool
+	depends on FSL_EMB_PERF_EVENT && E500
+	default y
+
 config 4xx
 	bool
 	depends on 40x || 44x