path: root/arch
author     Robert Richter <robert.richter@amd.com>  2009-04-29 06:47:04 -0400
committer  Ingo Molnar <mingo@elte.hu>  2009-04-29 08:51:04 -0400
commit     5f4ec28ffe77c840354cce1820a3436106e9e0f1 (patch)
tree       e8af6690949c71bd8412e27c58870f326d645673 /arch
parent     4aeb0b4239bb3b67ed402cb9cef3e000c892cadf (diff)
perf_counter, x86: rename struct pmc_x86_ops into struct x86_pmu
This patch renames struct pmc_x86_ops into struct x86_pmu. It introduces
a structure to describe an x86 model-specific pmu (performance monitoring
unit). It may contain ops and data. The new name of the structure fits
better, is shorter, and is thus easier to handle. Where appropriate,
names of functions and variables have been changed too.

[ Impact: cleanup ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-8-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
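For readers skimming the diff below: the pattern being renamed is a vendor-dispatch table, i.e. one struct that carries both function pointers (the ops) and model-specific data (MSR bases, event-map size), with a single pointer selected once at init time and used generically afterwards. The following is a minimal, self-contained sketch of that pattern under simplified assumptions; the names (demo_pmu, demo_intel_pmu, demo_intel_event_map) and fields are illustrative stand-ins, not the kernel's actual definitions.

```c
/*
 * Minimal illustration of the ops-plus-data dispatch pattern that
 * struct x86_pmu formalizes. All names here are hypothetical.
 */
#include <stdio.h>

struct demo_pmu {
        const char    *name;
        unsigned long eventsel;                  /* data: base MSR for event select */
        unsigned long perfctr;                   /* data: base MSR for counters */
        unsigned long (*event_map)(int event);   /* op: generic event id -> raw config */
        int           max_events;
};

/* Values taken from the Intel table in the diff below. */
static unsigned long demo_intel_event_map(int event)
{
        static const unsigned long map[] = { 0x003c, 0x00c0, 0x013c };
        return map[event];
}

static struct demo_pmu demo_intel_pmu = {
        .name       = "demo-intel",
        .eventsel   = 0x186,   /* MSR_ARCH_PERFMON_EVENTSEL0 */
        .perfctr    = 0x0c1,   /* MSR_ARCH_PERFMON_PERFCTR0 */
        .event_map  = demo_intel_event_map,
        .max_events = 3,
};

/* One model-specific instance is picked once, then used generically. */
static struct demo_pmu *demo_pmu;

int main(void)
{
        demo_pmu = &demo_intel_pmu;   /* vendor detection would go here */

        printf("%s: event 1 maps to config %#lx, counter base %#lx\n",
               demo_pmu->name, demo_pmu->event_map(1), demo_pmu->perfctr);
        return 0;
}
```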
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c | 135
1 file changed, 68 insertions(+), 67 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 95de980c74a0..808a1a113463 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -44,9 +44,9 @@ struct cpu_hw_counters {
 };
 
 /*
- * struct pmc_x86_ops - performance counter x86 ops
+ * struct x86_pmu - generic x86 pmu
  */
-struct pmc_x86_ops {
+struct x86_pmu {
         u64 (*save_disable_all)(void);
         void (*restore_all)(u64);
         u64 (*get_status)(u64);
@@ -60,7 +60,7 @@ struct pmc_x86_ops {
         int max_events;
 };
 
-static struct pmc_x86_ops *pmc_ops __read_mostly;
+static struct x86_pmu *x86_pmu __read_mostly;
 
 static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
         .enabled = 1,
@@ -82,12 +82,12 @@ static const u64 intel_perfmon_event_map[] =
         [PERF_COUNT_BUS_CYCLES] = 0x013c,
 };
 
-static u64 pmc_intel_event_map(int event)
+static u64 intel_pmu_event_map(int event)
 {
         return intel_perfmon_event_map[event];
 }
 
-static u64 pmc_intel_raw_event(u64 event)
+static u64 intel_pmu_raw_event(u64 event)
 {
 #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
 #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
@@ -114,12 +114,12 @@ static const u64 amd_perfmon_event_map[] =
         [PERF_COUNT_BRANCH_MISSES] = 0x00c5,
 };
 
-static u64 pmc_amd_event_map(int event)
+static u64 amd_pmu_event_map(int event)
 {
         return amd_perfmon_event_map[event];
 }
 
-static u64 pmc_amd_raw_event(u64 event)
+static u64 amd_pmu_raw_event(u64 event)
 {
 #define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
 #define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
@@ -184,12 +184,12 @@ static bool reserve_pmc_hardware(void)
         disable_lapic_nmi_watchdog();
 
         for (i = 0; i < nr_counters_generic; i++) {
-                if (!reserve_perfctr_nmi(pmc_ops->perfctr + i))
+                if (!reserve_perfctr_nmi(x86_pmu->perfctr + i))
                         goto perfctr_fail;
         }
 
         for (i = 0; i < nr_counters_generic; i++) {
-                if (!reserve_evntsel_nmi(pmc_ops->eventsel + i))
+                if (!reserve_evntsel_nmi(x86_pmu->eventsel + i))
                         goto eventsel_fail;
         }
 
@@ -197,13 +197,13 @@ static bool reserve_pmc_hardware(void)
 
 eventsel_fail:
         for (i--; i >= 0; i--)
-                release_evntsel_nmi(pmc_ops->eventsel + i);
+                release_evntsel_nmi(x86_pmu->eventsel + i);
 
         i = nr_counters_generic;
 
 perfctr_fail:
         for (i--; i >= 0; i--)
-                release_perfctr_nmi(pmc_ops->perfctr + i);
+                release_perfctr_nmi(x86_pmu->perfctr + i);
 
         if (nmi_watchdog == NMI_LOCAL_APIC)
                 enable_lapic_nmi_watchdog();
@@ -216,8 +216,8 @@ static void release_pmc_hardware(void)
         int i;
 
         for (i = 0; i < nr_counters_generic; i++) {
-                release_perfctr_nmi(pmc_ops->perfctr + i);
-                release_evntsel_nmi(pmc_ops->eventsel + i);
+                release_perfctr_nmi(x86_pmu->perfctr + i);
+                release_evntsel_nmi(x86_pmu->eventsel + i);
         }
 
         if (nmi_watchdog == NMI_LOCAL_APIC)
@@ -293,14 +293,14 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
          * Raw event type provide the config in the event structure
          */
         if (perf_event_raw(hw_event)) {
-                hwc->config |= pmc_ops->raw_event(perf_event_config(hw_event));
+                hwc->config |= x86_pmu->raw_event(perf_event_config(hw_event));
         } else {
-                if (perf_event_id(hw_event) >= pmc_ops->max_events)
+                if (perf_event_id(hw_event) >= x86_pmu->max_events)
                         return -EINVAL;
                 /*
                  * The generic map:
                  */
-                hwc->config |= pmc_ops->event_map(perf_event_id(hw_event));
+                hwc->config |= x86_pmu->event_map(perf_event_id(hw_event));
         }
 
         counter->destroy = hw_perf_counter_destroy;
@@ -308,7 +308,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
         return 0;
 }
 
-static u64 pmc_intel_save_disable_all(void)
+static u64 intel_pmu_save_disable_all(void)
 {
         u64 ctrl;
 
@@ -318,7 +318,7 @@ static u64 pmc_intel_save_disable_all(void)
         return ctrl;
 }
 
-static u64 pmc_amd_save_disable_all(void)
+static u64 amd_pmu_save_disable_all(void)
 {
         struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
         int enabled, idx;
@@ -327,7 +327,8 @@ static u64 pmc_amd_save_disable_all(void)
         cpuc->enabled = 0;
         /*
          * ensure we write the disable before we start disabling the
-         * counters proper, so that pcm_amd_enable() does the right thing.
+         * counters proper, so that amd_pmu_enable_counter() does the
+         * right thing.
          */
         barrier();
 
@@ -351,19 +352,19 @@ u64 hw_perf_save_disable(void)
         if (unlikely(!perf_counters_initialized))
                 return 0;
 
-        return pmc_ops->save_disable_all();
+        return x86_pmu->save_disable_all();
 }
 /*
  * Exported because of ACPI idle
  */
 EXPORT_SYMBOL_GPL(hw_perf_save_disable);
 
-static void pmc_intel_restore_all(u64 ctrl)
+static void intel_pmu_restore_all(u64 ctrl)
 {
         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
 }
 
-static void pmc_amd_restore_all(u64 ctrl)
+static void amd_pmu_restore_all(u64 ctrl)
 {
         struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
         int idx;
@@ -391,14 +392,14 @@ void hw_perf_restore(u64 ctrl)
         if (unlikely(!perf_counters_initialized))
                 return;
 
-        pmc_ops->restore_all(ctrl);
+        x86_pmu->restore_all(ctrl);
 }
 /*
  * Exported because of ACPI idle
  */
 EXPORT_SYMBOL_GPL(hw_perf_restore);
 
-static u64 pmc_intel_get_status(u64 mask)
+static u64 intel_pmu_get_status(u64 mask)
 {
         u64 status;
 
@@ -407,7 +408,7 @@ static u64 pmc_intel_get_status(u64 mask)
         return status;
 }
 
-static u64 pmc_amd_get_status(u64 mask)
+static u64 amd_pmu_get_status(u64 mask)
 {
         u64 status = 0;
         int idx;
@@ -432,15 +433,15 @@ static u64 hw_perf_get_status(u64 mask)
         if (unlikely(!perf_counters_initialized))
                 return 0;
 
-        return pmc_ops->get_status(mask);
+        return x86_pmu->get_status(mask);
 }
 
-static void pmc_intel_ack_status(u64 ack)
+static void intel_pmu_ack_status(u64 ack)
 {
         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
 }
 
-static void pmc_amd_ack_status(u64 ack)
+static void amd_pmu_ack_status(u64 ack)
 {
 }
 
@@ -449,16 +450,16 @@ static void hw_perf_ack_status(u64 ack)
         if (unlikely(!perf_counters_initialized))
                 return;
 
-        pmc_ops->ack_status(ack);
+        x86_pmu->ack_status(ack);
 }
 
-static void pmc_intel_enable(int idx, u64 config)
+static void intel_pmu_enable_counter(int idx, u64 config)
 {
         wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx,
                config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 
-static void pmc_amd_enable(int idx, u64 config)
+static void amd_pmu_enable_counter(int idx, u64 config)
 {
         struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 
@@ -474,15 +475,15 @@ static void hw_perf_enable(int idx, u64 config)
         if (unlikely(!perf_counters_initialized))
                 return;
 
-        pmc_ops->enable(idx, config);
+        x86_pmu->enable(idx, config);
 }
 
-static void pmc_intel_disable(int idx, u64 config)
+static void intel_pmu_disable_counter(int idx, u64 config)
 {
         wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config);
 }
 
-static void pmc_amd_disable(int idx, u64 config)
+static void amd_pmu_disable_counter(int idx, u64 config)
 {
         struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 
@@ -496,7 +497,7 @@ static void hw_perf_disable(int idx, u64 config)
         if (unlikely(!perf_counters_initialized))
                 return;
 
-        pmc_ops->disable(idx, config);
+        x86_pmu->disable(idx, config);
 }
 
 static inline void
@@ -613,11 +614,11 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 
         event = hwc->config & ARCH_PERFMON_EVENT_MASK;
 
-        if (unlikely(event == pmc_ops->event_map(PERF_COUNT_INSTRUCTIONS)))
+        if (unlikely(event == x86_pmu->event_map(PERF_COUNT_INSTRUCTIONS)))
                 return X86_PMC_IDX_FIXED_INSTRUCTIONS;
-        if (unlikely(event == pmc_ops->event_map(PERF_COUNT_CPU_CYCLES)))
+        if (unlikely(event == x86_pmu->event_map(PERF_COUNT_CPU_CYCLES)))
                 return X86_PMC_IDX_FIXED_CPU_CYCLES;
-        if (unlikely(event == pmc_ops->event_map(PERF_COUNT_BUS_CYCLES)))
+        if (unlikely(event == x86_pmu->event_map(PERF_COUNT_BUS_CYCLES)))
                 return X86_PMC_IDX_FIXED_BUS_CYCLES;
 
         return -1;
@@ -661,8 +662,8 @@ try_generic:
                         set_bit(idx, cpuc->used);
                         hwc->idx = idx;
                 }
-                hwc->config_base = pmc_ops->eventsel;
-                hwc->counter_base = pmc_ops->perfctr;
+                hwc->config_base = x86_pmu->eventsel;
+                hwc->counter_base = x86_pmu->perfctr;
         }
 
         perf_counters_lapic_init(hwc->nmi);
@@ -710,8 +711,8 @@ void perf_counter_print_debug(void)
         pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used);
 
         for (idx = 0; idx < nr_counters_generic; idx++) {
-                rdmsrl(pmc_ops->eventsel + idx, pmc_ctrl);
-                rdmsrl(pmc_ops->perfctr + idx, pmc_count);
+                rdmsrl(x86_pmu->eventsel + idx, pmc_ctrl);
+                rdmsrl(x86_pmu->perfctr + idx, pmc_count);
 
                 prev_left = per_cpu(prev_left[idx], cpu);
 
@@ -918,35 +919,35 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
         .priority = 1
 };
 
-static struct pmc_x86_ops pmc_intel_ops = {
-        .save_disable_all = pmc_intel_save_disable_all,
-        .restore_all = pmc_intel_restore_all,
-        .get_status = pmc_intel_get_status,
-        .ack_status = pmc_intel_ack_status,
-        .enable = pmc_intel_enable,
-        .disable = pmc_intel_disable,
+static struct x86_pmu intel_pmu = {
+        .save_disable_all = intel_pmu_save_disable_all,
+        .restore_all = intel_pmu_restore_all,
+        .get_status = intel_pmu_get_status,
+        .ack_status = intel_pmu_ack_status,
+        .enable = intel_pmu_enable_counter,
+        .disable = intel_pmu_disable_counter,
         .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
         .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
-        .event_map = pmc_intel_event_map,
-        .raw_event = pmc_intel_raw_event,
+        .event_map = intel_pmu_event_map,
+        .raw_event = intel_pmu_raw_event,
         .max_events = ARRAY_SIZE(intel_perfmon_event_map),
 };
 
-static struct pmc_x86_ops pmc_amd_ops = {
-        .save_disable_all = pmc_amd_save_disable_all,
-        .restore_all = pmc_amd_restore_all,
-        .get_status = pmc_amd_get_status,
-        .ack_status = pmc_amd_ack_status,
-        .enable = pmc_amd_enable,
-        .disable = pmc_amd_disable,
+static struct x86_pmu amd_pmu = {
+        .save_disable_all = amd_pmu_save_disable_all,
+        .restore_all = amd_pmu_restore_all,
+        .get_status = amd_pmu_get_status,
+        .ack_status = amd_pmu_ack_status,
+        .enable = amd_pmu_enable_counter,
+        .disable = amd_pmu_disable_counter,
         .eventsel = MSR_K7_EVNTSEL0,
         .perfctr = MSR_K7_PERFCTR0,
-        .event_map = pmc_amd_event_map,
-        .raw_event = pmc_amd_raw_event,
+        .event_map = amd_pmu_event_map,
+        .raw_event = amd_pmu_raw_event,
         .max_events = ARRAY_SIZE(amd_perfmon_event_map),
 };
 
-static struct pmc_x86_ops *pmc_intel_init(void)
+static struct x86_pmu *intel_pmu_init(void)
 {
         union cpuid10_edx edx;
         union cpuid10_eax eax;
@@ -977,10 +978,10 @@ static struct pmc_x86_ops *pmc_intel_init(void)
         nr_counters_fixed = edx.split.num_counters_fixed;
         counter_value_mask = (1ULL << eax.split.bit_width) - 1;
 
-        return &pmc_intel_ops;
+        return &intel_pmu;
 }
 
-static struct pmc_x86_ops *pmc_amd_init(void)
+static struct x86_pmu *amd_pmu_init(void)
 {
         nr_counters_generic = 4;
         nr_counters_fixed = 0;
@@ -989,22 +990,22 @@ static struct pmc_x86_ops *pmc_amd_init(void)
 
         pr_info("AMD Performance Monitoring support detected.\n");
 
-        return &pmc_amd_ops;
+        return &amd_pmu;
 }
 
 void __init init_hw_perf_counters(void)
 {
         switch (boot_cpu_data.x86_vendor) {
         case X86_VENDOR_INTEL:
-                pmc_ops = pmc_intel_init();
+                x86_pmu = intel_pmu_init();
                 break;
         case X86_VENDOR_AMD:
-                pmc_ops = pmc_amd_init();
+                x86_pmu = amd_pmu_init();
                 break;
         default:
                 return;
         }
-        if (!pmc_ops)
+        if (!x86_pmu)
                 return;
 
         pr_info("... num counters: %d\n", nr_counters_generic);