about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRobert Richter <robert.richter@amd.com>2009-04-29 06:47:16 -0400
committerIngo Molnar <mingo@elte.hu>2009-04-29 08:51:10 -0400
commit095342389e2ed8deed07b3076f990260ce3c7c9f (patch)
tree53f2666d3d428cd4b4683a4b168a30f871896083
parent93904966934193204ad08e951f806d5631c29eb3 (diff)
perf_counter, x86: generic use of cpuc->active
cpuc->active will now be used to indicate an enabled counter which implies also valid pointers of cpuc->counters[]. In contrast, cpuc->used only locks the counter, but it can be still uninitialized. [ Impact: refactor and generalize code ] Signed-off-by: Robert Richter <robert.richter@amd.com> Cc: Paul Mackerras <paulus@samba.org> Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <1241002046-8832-20-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--arch/x86/kernel/cpu/perf_counter.c20
1 files changed, 9 insertions, 11 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 9ec51a662db5..f7fd4a355159 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -424,7 +424,6 @@ static void amd_pmu_enable_counter(int idx, u64 config)
424{ 424{
425 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 425 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
426 426
427 set_bit(idx, cpuc->active);
428 if (cpuc->enabled) 427 if (cpuc->enabled)
429 config |= ARCH_PERFMON_EVENTSEL0_ENABLE; 428 config |= ARCH_PERFMON_EVENTSEL0_ENABLE;
430 429
@@ -446,9 +445,6 @@ static void intel_pmu_disable_counter(int idx, u64 config)
446 445
447static void amd_pmu_disable_counter(int idx, u64 config) 446static void amd_pmu_disable_counter(int idx, u64 config)
448{ 447{
449 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
450
451 clear_bit(idx, cpuc->active);
452 wrmsrl(MSR_K7_EVNTSEL0 + idx, config); 448 wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
453 449
454} 450}
@@ -633,10 +629,7 @@ try_generic:
633 __x86_pmu_disable(counter, hwc, idx); 629 __x86_pmu_disable(counter, hwc, idx);
634 630
635 cpuc->counters[idx] = counter; 631 cpuc->counters[idx] = counter;
636 /* 632 set_bit(idx, cpuc->active);
637 * Make it visible before enabling the hw:
638 */
639 barrier();
640 633
641 x86_perf_counter_set_period(counter, hwc, idx); 634 x86_perf_counter_set_period(counter, hwc, idx);
642 __x86_pmu_enable(counter, hwc, idx); 635 __x86_pmu_enable(counter, hwc, idx);
@@ -700,10 +693,13 @@ static void x86_pmu_disable(struct perf_counter *counter)
700 struct hw_perf_counter *hwc = &counter->hw; 693 struct hw_perf_counter *hwc = &counter->hw;
701 unsigned int idx = hwc->idx; 694 unsigned int idx = hwc->idx;
702 695
696 /*
697 * Must be done before we disable, otherwise the nmi handler
698 * could reenable again:
699 */
700 clear_bit(idx, cpuc->active);
703 __x86_pmu_disable(counter, hwc, idx); 701 __x86_pmu_disable(counter, hwc, idx);
704 702
705 clear_bit(idx, cpuc->used);
706 cpuc->counters[idx] = NULL;
707 /* 703 /*
708 * Make sure the cleared pointer becomes visible before we 704 * Make sure the cleared pointer becomes visible before we
709 * (potentially) free the counter: 705 * (potentially) free the counter:
@@ -715,6 +711,8 @@ static void x86_pmu_disable(struct perf_counter *counter)
715 * that we are disabling: 711 * that we are disabling:
716 */ 712 */
717 x86_perf_counter_update(counter, hwc, idx); 713 x86_perf_counter_update(counter, hwc, idx);
714 cpuc->counters[idx] = NULL;
715 clear_bit(idx, cpuc->used);
718} 716}
719 717
720/* 718/*
@@ -763,7 +761,7 @@ again:
763 struct perf_counter *counter = cpuc->counters[bit]; 761 struct perf_counter *counter = cpuc->counters[bit];
764 762
765 clear_bit(bit, (unsigned long *) &status); 763 clear_bit(bit, (unsigned long *) &status);
766 if (!counter) 764 if (!test_bit(bit, cpuc->active))
767 continue; 765 continue;
768 766
769 intel_pmu_save_and_restart(counter); 767 intel_pmu_save_and_restart(counter);