path: root/arch/x86/kernel/cpu/perf_event.c
author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2010-01-27 17:07:47 -0500
committer  Ingo Molnar <mingo@elte.hu>              2010-01-29 03:01:47 -0500
commit     1a6e21f791fe85b40a9ddbafe999ab8ccffc3f78
tree       2224e77f3b346e588e42b2e097abcc48ad6bf68c /arch/x86/kernel/cpu/perf_event.c
parent     ed8777fc132e589d48a0ba854fdbb5d8203b58e5
perf_events, x86: Clean up hw_perf_*_all() implementation
Put the recursion avoidance code in the generic hook instead of
replicating it in each implementation.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
LKML-Reference: <20100127221122.057507285@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
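The net effect, condensed from the hunks below (identifiers exactly as they appear in this file), is that the cpuc->enabled check, the flag update and the barrier() now live only in the generic hw_perf_disable()/hw_perf_enable() hooks, while the per-vendor *_all() callbacks only touch the hardware. A minimal sketch of the resulting disable path:

void hw_perf_disable(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (!x86_pmu_initialized())
                return;

        /* Recursion/nesting guard, formerly duplicated in every *_all() */
        if (!cpuc->enabled)
                return;

        cpuc->n_added = 0;
        cpuc->enabled = 0;
        barrier();              /* publish the disable before touching counters */

        x86_pmu.disable_all();  /* vendor callback, now guard-free */
}

The enable path mirrors this: the guard sits at the top of hw_perf_enable(), and cpuc->enabled is set (followed by barrier()) just before x86_pmu.enable_all() is called.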
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c  59
1 file changed, 14 insertions(+), 45 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 951213a51489..cf10839f20ea 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1099,15 +1099,8 @@ static int __hw_perf_event_init(struct perf_event *event)
 
 static void p6_pmu_disable_all(void)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         u64 val;
 
-        if (!cpuc->enabled)
-                return;
-
-        cpuc->enabled = 0;
-        barrier();
-
         /* p6 only has one enable register */
         rdmsrl(MSR_P6_EVNTSEL0, val);
         val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -1118,12 +1111,6 @@ static void intel_pmu_disable_all(void)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-        if (!cpuc->enabled)
-                return;
-
-        cpuc->enabled = 0;
-        barrier();
-
         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
@@ -1135,17 +1122,6 @@ static void amd_pmu_disable_all(void)
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         int idx;
 
-        if (!cpuc->enabled)
-                return;
-
-        cpuc->enabled = 0;
-        /*
-         * ensure we write the disable before we start disabling the
-         * events proper, so that amd_pmu_enable_event() does the
-         * right thing.
-         */
-        barrier();
-
         for (idx = 0; idx < x86_pmu.num_events; idx++) {
                 u64 val;
 
@@ -1166,23 +1142,20 @@ void hw_perf_disable(void)
         if (!x86_pmu_initialized())
                 return;
 
-        if (cpuc->enabled)
-                cpuc->n_added = 0;
+        if (!cpuc->enabled)
+                return;
+
+        cpuc->n_added = 0;
+        cpuc->enabled = 0;
+        barrier();
 
         x86_pmu.disable_all();
 }
 
 static void p6_pmu_enable_all(void)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         unsigned long val;
 
-        if (cpuc->enabled)
-                return;
-
-        cpuc->enabled = 1;
-        barrier();
-
         /* p6 only has one enable register */
         rdmsrl(MSR_P6_EVNTSEL0, val);
         val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -1193,12 +1166,6 @@ static void intel_pmu_enable_all(void)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-        if (cpuc->enabled)
-                return;
-
-        cpuc->enabled = 1;
-        barrier();
-
         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
 
         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
@@ -1217,12 +1184,6 @@ static void amd_pmu_enable_all(void)
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         int idx;
 
-        if (cpuc->enabled)
-                return;
-
-        cpuc->enabled = 1;
-        barrier();
-
         for (idx = 0; idx < x86_pmu.num_events; idx++) {
                 struct perf_event *event = cpuc->events[idx];
                 u64 val;
@@ -1417,6 +1378,10 @@ void hw_perf_enable(void)
 
         if (!x86_pmu_initialized())
                 return;
+
+        if (cpuc->enabled)
+                return;
+
         if (cpuc->n_added) {
                 /*
                  * apply assignment obtained either from
@@ -1461,6 +1426,10 @@ void hw_perf_enable(void)
                 cpuc->n_added = 0;
                 perf_events_lapic_init();
         }
+
+        cpuc->enabled = 1;
+        barrier();
+
         x86_pmu.enable_all();
 }
 
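The ordering still matters for the same reason the (now removed) AMD comment gave: the per-event enable path consults cpuc->enabled, so hw_perf_disable() must clear the flag and publish it with barrier() before disable_all() starts rewriting individual counters. A rough, paraphrased sketch of that consumer (not a verbatim quote of this tree; the exact body may differ):

static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        /* Only program the counter while the PMU is globally enabled;
         * this check is what the barrier() in hw_perf_disable() protects. */
        if (cpuc->enabled)
                x86_pmu_enable_event(hwc, idx);
}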