author    Robert Richter <robert.richter@amd.com>    2009-04-29 06:47:15 -0400
committer Ingo Molnar <mingo@elte.hu>                2009-04-29 08:51:09 -0400
commit    93904966934193204ad08e951f806d5631c29eb3 (patch)
tree      536d5b60ea48f44fa1373a5904254878a01dcca4 /arch
parent    bb775fc2d1dcd1aa6eafde37a8289ba2d80783aa (diff)
perf_counter, x86: rename cpuc->active_mask
This is to have a consistent naming scheme with cpuc->used.

[ Impact: cleanup ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-19-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 3f3ae477a7dc..9ec51a662db5 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -29,9 +29,9 @@ static u64 perf_counter_mask __read_mostly;
 struct cpu_hw_counters {
         struct perf_counter *counters[X86_PMC_IDX_MAX];
         unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+        unsigned long active[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
         unsigned long interrupts;
         u64 throttle_ctrl;
-        unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
         int enabled;
 };

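Both used and the renamed active field follow the kernel's standard bitmask idiom: BITS_TO_LONGS() rounds a bit count up to whole unsigned longs, and the bitops address individual bits inside that array. Below is a minimal, self-contained user-space sketch of that idiom; the PMC_IDX_MAX constant and the sketch_* helpers are illustrative stand-ins, not the kernel's implementations (the real set_bit()/clear_bit() are atomic).

    #include <limits.h>                     /* CHAR_BIT */

    #define PMC_IDX_MAX      64             /* stand-in for X86_PMC_IDX_MAX */
    #define BITS_PER_LONG    (CHAR_BIT * sizeof(long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* One bit per hardware counter, packed into unsigned longs. */
    static unsigned long active[BITS_TO_LONGS(PMC_IDX_MAX)];

    /* Non-atomic stand-ins for the kernel's set_bit()/clear_bit()/test_bit(). */
    static void sketch_set_bit(int nr, unsigned long *addr)
    {
            addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
    }

    static void sketch_clear_bit(int nr, unsigned long *addr)
    {
            addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
    }

    static int sketch_test_bit(int nr, const unsigned long *addr)
    {
            return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
    }
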
@@ -334,7 +334,7 @@ static u64 amd_pmu_save_disable_all(void)
         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                 u64 val;

-                if (!test_bit(idx, cpuc->active_mask))
+                if (!test_bit(idx, cpuc->active))
                         continue;
                 rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
                 if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
@@ -376,7 +376,7 @@ static void amd_pmu_restore_all(u64 ctrl)
         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                 u64 val;

-                if (!test_bit(idx, cpuc->active_mask))
+                if (!test_bit(idx, cpuc->active))
                         continue;
                 rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
                 if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
@@ -424,7 +424,7 @@ static void amd_pmu_enable_counter(int idx, u64 config)
 {
         struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

-        set_bit(idx, cpuc->active_mask);
+        set_bit(idx, cpuc->active);
         if (cpuc->enabled)
                 config |= ARCH_PERFMON_EVENTSEL0_ENABLE;

@@ -448,7 +448,7 @@ static void amd_pmu_disable_counter(int idx, u64 config)
 {
         struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

-        clear_bit(idx, cpuc->active_mask);
+        clear_bit(idx, cpuc->active);
         wrmsrl(MSR_K7_EVNTSEL0 + idx, config);

 }
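Read together, the hunks show the protocol around the bitmask: amd_pmu_enable_counter() sets the counter's bit before programming the event-select MSR, amd_pmu_disable_counter() clears it before the final write, and the save/restore loops use test_bit() to skip counters that were never activated. Below is a hedged sketch of that skip loop, reusing the stand-in helpers from the sketch above; the struct, the num_counters parameter, and the reduction of the MSR access to a comment are assumptions, since rdmsrl()/wrmsrl() only exist in kernel context.

    /* Illustrative skip loop in the style of amd_pmu_save_disable_all(). */
    struct sketch_cpu_hw_counters {
            unsigned long active[BITS_TO_LONGS(PMC_IDX_MAX)];
    };

    static void sketch_save_disable_all(struct sketch_cpu_hw_counters *cpuc,
                                        int num_counters)
    {
            int idx;

            for (idx = 0; idx < num_counters; idx++) {
                    if (!sketch_test_bit(idx, cpuc->active))
                            continue; /* never enabled: no MSR to touch */
                    /* kernel code would rdmsrl(MSR_K7_EVNTSEL0 + idx, val),
                     * clear ARCH_PERFMON_EVENTSEL0_ENABLE and write it back */
            }
    }

The rename itself changes no behavior: it only makes active a one-word sibling of used, which the commit message cites as the point of the cleanup.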