author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2010-06-14 02:49:00 -0400
committer  Ingo Molnar <mingo@elte.hu>              2010-09-09 14:46:29 -0400
commit     33696fc0d141bbbcb12f75b69608ea83282e3117 (patch)
tree       72e08dba377d57eb7dd8c08a937a6de10e8af9c4 /kernel/perf_event.c
parent     24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7 (diff)
perf: Per PMU disable
Changes perf_disable() into perf_pmu_disable().
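
The new pair keeps the nesting semantics of the old global calls but tracks
the depth per PMU in a per-cpu counter, so only the outermost disable/enable
actually reaches the hardware. As a minimal sketch of the resulting calling
convention (the caller below is hypothetical and not part of this patch):

	/* Hypothetical caller: bracket reprogramming of one PMU. */
	static void reprogram_event(struct perf_event *event)
	{
		struct pmu *pmu = event->pmu;

		perf_pmu_disable(pmu);	/* count 0 -> 1: calls pmu->pmu_disable(pmu) */
		/* ... modify counter state while this PMU is stopped ... */
		perf_pmu_enable(pmu);	/* count 1 -> 0: calls pmu->pmu_enable(pmu) */
	}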
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	31
1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9a98ce953561..5ed0c06765bb 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -71,23 +71,20 @@ static atomic64_t perf_event_id;
  */
 static DEFINE_SPINLOCK(perf_resource_lock);
 
-void __weak hw_perf_disable(void)	{ barrier(); }
-void __weak hw_perf_enable(void)	{ barrier(); }
-
 void __weak perf_event_print_debug(void)	{ }
 
-static DEFINE_PER_CPU(int, perf_disable_count);
-
-void perf_disable(void)
+void perf_pmu_disable(struct pmu *pmu)
 {
-	if (!__get_cpu_var(perf_disable_count)++)
-		hw_perf_disable();
+	int *count = this_cpu_ptr(pmu->pmu_disable_count);
+	if (!(*count)++)
+		pmu->pmu_disable(pmu);
 }
 
-void perf_enable(void)
+void perf_pmu_enable(struct pmu *pmu)
 {
-	if (!--__get_cpu_var(perf_disable_count))
-		hw_perf_enable();
+	int *count = this_cpu_ptr(pmu->pmu_disable_count);
+	if (!--(*count))
+		pmu->pmu_enable(pmu);
 }
 
 static void get_ctx(struct perf_event_context *ctx)
@@ -4970,11 +4967,19 @@ static struct srcu_struct pmus_srcu;
 
 int perf_pmu_register(struct pmu *pmu)
 {
+	int ret;
+
 	mutex_lock(&pmus_lock);
+	ret = -ENOMEM;
+	pmu->pmu_disable_count = alloc_percpu(int);
+	if (!pmu->pmu_disable_count)
+		goto unlock;
 	list_add_rcu(&pmu->entry, &pmus);
+	ret = 0;
+unlock:
 	mutex_unlock(&pmus_lock);
 
-	return 0;
+	return ret;
 }
 
 void perf_pmu_unregister(struct pmu *pmu)
@@ -4984,6 +4989,8 @@ void perf_pmu_unregister(struct pmu *pmu)
 	mutex_unlock(&pmus_lock);
 
 	synchronize_srcu(&pmus_srcu);
+
+	free_percpu(pmu->pmu_disable_count);
 }
 
 struct pmu *perf_init_event(struct perf_event *event)
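
For reference, a hedged sketch of the driver side this interface implies: a
PMU now supplies its own enable/disable callbacks in struct pmu, and
perf_pmu_register() allocates the per-cpu nesting counter behind them. The
driver name and callback bodies below are assumptions for illustration, not
code from this patch:

	/* Hypothetical PMU driver wiring up the per-PMU callbacks. */
	static void mydrv_pmu_disable(struct pmu *pmu)
	{
		/* e.g. clear the hardware's global counter-enable bit */
	}

	static void mydrv_pmu_enable(struct pmu *pmu)
	{
		/* e.g. set the global counter-enable bit again */
	}

	static struct pmu mydrv_pmu = {
		.pmu_disable	= mydrv_pmu_disable,
		.pmu_enable	= mydrv_pmu_enable,
		/* ... event scheduling callbacks elided ... */
	};

	/*
	 * perf_pmu_register(&mydrv_pmu) allocates the per-cpu
	 * pmu_disable_count and can now fail with -ENOMEM;
	 * perf_pmu_unregister() frees it after synchronize_srcu().
	 */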