author		Robert Richter <robert.richter@amd.com>	2009-04-29 10:55:56 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-04-29 16:19:36 -0400
commit		43f6201a22dbf1c5abe1cab96b49bd56fa9df8f4 (patch)
tree		5e481766115902221ca1c3fbf8c547831c5fa794 /arch
parent		ab7ef2e50a557af92f4f90689f51fadadafc16b2 (diff)
perf_counter, x86: rename bitmasks to ->used_mask and ->active_mask
Standardize on explicitly mentioning '_mask' in fields that are not
plain flags but masks. This avoids typos like:

	if (cpuc->used)

(which could easily slip through review unnoticed), while if a
typo looks like this:

	if (cpuc->used_mask)

it might get noticed during review.

[ Impact: cleanup ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1241016956-24648-1-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
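To see why the old names were error-prone, here is a minimal user-space sketch
(not part of this patch; struct cpu_hw_counters, test_bit_simple() and IDX_MAX
are simplified stand-ins invented for the example, not the kernel definitions).
With a bitmap field, the typo 'if (cpuc->used)' still compiles: the array
decays to a non-NULL pointer, so the branch is always taken no matter what the
mask contains.

	/* Stand-ins for the kernel definitions, sized arbitrarily for the example. */
	#include <stdio.h>

	#define IDX_MAX		64
	#define BITS_PER_LONG	(8 * sizeof(long))
	#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	struct cpu_hw_counters {
		unsigned long used_mask[BITS_TO_LONGS(IDX_MAX)];
	};

	/* Simplified, non-atomic version of the kernel's test_bit(). */
	static int test_bit_simple(int nr, const unsigned long *addr)
	{
		return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
	}

	int main(void)
	{
		struct cpu_hw_counters hw = { { 0 } };	/* empty mask: nothing in use */
		struct cpu_hw_counters *cpuc = &hw;

		/*
		 * The typo: testing the bitmap itself instead of a bit in it.
		 * The array decays to a non-NULL pointer, so this condition is
		 * always true, even though no counter is in use.
		 */
		if (cpuc->used_mask)
			printf("typo branch taken although the mask is empty\n");

		/* The intended test: is counter 0 marked as used? */
		if (!test_bit_simple(0, cpuc->used_mask))
			printf("correct test: counter 0 is free\n");

		return 0;
	}

With '_mask' in the name, the same mistake reads as 'if (cpuc->used_mask)',
which, as the changelog argues, is more likely to stand out in review as a
missing test_bit().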
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 47e563bfd4cf..fc06f4d32644 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -28,8 +28,8 @@ static u64 perf_counter_mask __read_mostly;
 
 struct cpu_hw_counters {
 	struct perf_counter	*counters[X86_PMC_IDX_MAX];
-	unsigned long		used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
-	unsigned long		active[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+	unsigned long		used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	unsigned long		interrupts;
 	u64			throttle_ctrl;
 	int			enabled;
@@ -332,7 +332,7 @@ static u64 amd_pmu_save_disable_all(void)
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
 
-		if (!test_bit(idx, cpuc->active))
+		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
 		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
@@ -373,7 +373,7 @@ static void amd_pmu_restore_all(u64 ctrl)
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
 
-		if (!test_bit(idx, cpuc->active))
+		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
 		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
@@ -576,7 +576,7 @@ static int x86_pmu_enable(struct perf_counter *counter)
 		 * Try to get the fixed counter, if that is already taken
 		 * then try to get a generic counter:
 		 */
-		if (test_and_set_bit(idx, cpuc->used))
+		if (test_and_set_bit(idx, cpuc->used_mask))
 			goto try_generic;
 
 		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
@@ -590,14 +590,14 @@ static int x86_pmu_enable(struct perf_counter *counter)
 	} else {
 		idx = hwc->idx;
 		/* Try to get the previous generic counter again */
-		if (test_and_set_bit(idx, cpuc->used)) {
+		if (test_and_set_bit(idx, cpuc->used_mask)) {
 try_generic:
-			idx = find_first_zero_bit(cpuc->used,
+			idx = find_first_zero_bit(cpuc->used_mask,
 						  x86_pmu.num_counters);
 			if (idx == x86_pmu.num_counters)
 				return -EAGAIN;
 
-			set_bit(idx, cpuc->used);
+			set_bit(idx, cpuc->used_mask);
 			hwc->idx = idx;
 		}
 		hwc->config_base = x86_pmu.eventsel;
@@ -609,7 +609,7 @@ try_generic:
 	x86_pmu.disable(hwc, idx);
 
 	cpuc->counters[idx] = counter;
-	set_bit(idx, cpuc->active);
+	set_bit(idx, cpuc->active_mask);
 
 	x86_perf_counter_set_period(counter, hwc, idx);
 	x86_pmu.enable(hwc, idx);
@@ -643,7 +643,7 @@ void perf_counter_print_debug(void)
 		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
 		pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
 	}
-	pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used);
+	pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
@@ -677,7 +677,7 @@ static void x86_pmu_disable(struct perf_counter *counter)
 	 * Must be done before we disable, otherwise the nmi handler
 	 * could reenable again:
 	 */
-	clear_bit(idx, cpuc->active);
+	clear_bit(idx, cpuc->active_mask);
 	x86_pmu.disable(hwc, idx);
 
 	/*
@@ -692,7 +692,7 @@ static void x86_pmu_disable(struct perf_counter *counter)
 	 */
 	x86_perf_counter_update(counter, hwc, idx);
 	cpuc->counters[idx] = NULL;
-	clear_bit(idx, cpuc->used);
+	clear_bit(idx, cpuc->used_mask);
 }
 
 /*
@@ -741,7 +741,7 @@ again:
 		struct perf_counter *counter = cpuc->counters[bit];
 
 		clear_bit(bit, (unsigned long *) &status);
-		if (!test_bit(bit, cpuc->active))
+		if (!test_bit(bit, cpuc->active_mask))
 			continue;
 
 		intel_pmu_save_and_restart(counter);
@@ -779,7 +779,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 
 	++cpuc->interrupts;
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-		if (!test_bit(idx, cpuc->active))
+		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 		counter = cpuc->counters[idx];
 		hwc = &counter->hw;