diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2010-06-14 02:49:00 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-09-09 14:46:29 -0400 |
commit | 33696fc0d141bbbcb12f75b69608ea83282e3117 (patch) | |
tree | 72e08dba377d57eb7dd8c08a937a6de10e8af9c4 /arch/sh/kernel/perf_event.c | |
parent | 24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7 (diff) |
perf: Per PMU disable
Changes perf_disable() into perf_pmu_disable().
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/sh/kernel/perf_event.c')
-rw-r--r-- | arch/sh/kernel/perf_event.c | 38 |
1 file changed, 20 insertions, 18 deletions
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index d042989ceb45..4bbe19058a58 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c | |||
@@ -232,7 +232,7 @@ static int sh_pmu_enable(struct perf_event *event) | |||
232 | int idx = hwc->idx; | 232 | int idx = hwc->idx; |
233 | int ret = -EAGAIN; | 233 | int ret = -EAGAIN; |
234 | 234 | ||
235 | perf_disable(); | 235 | perf_pmu_disable(event->pmu); |
236 | 236 | ||
237 | if (test_and_set_bit(idx, cpuc->used_mask)) { | 237 | if (test_and_set_bit(idx, cpuc->used_mask)) { |
238 | idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events); | 238 | idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events); |
@@ -253,7 +253,7 @@ static int sh_pmu_enable(struct perf_event *event) | |||
253 | perf_event_update_userpage(event); | 253 | perf_event_update_userpage(event); |
254 | ret = 0; | 254 | ret = 0; |
255 | out: | 255 | out: |
256 | perf_enable(); | 256 | perf_pmu_enable(event->pmu); |
257 | return ret; | 257 | return ret; |
258 | } | 258 | } |
259 | 259 | ||
@@ -285,7 +285,25 @@ static int sh_pmu_event_init(struct perf_event *event) | |||
285 | return err; | 285 | return err; |
286 | } | 286 | } |
287 | 287 | ||
288 | static void sh_pmu_pmu_enable(struct pmu *pmu) | ||
289 | { | ||
290 | if (!sh_pmu_initialized()) | ||
291 | return; | ||
292 | |||
293 | sh_pmu->enable_all(); | ||
294 | } | ||
295 | |||
296 | static void sh_pmu_pmu_disable(struct pmu *pmu) | ||
297 | { | ||
298 | if (!sh_pmu_initialized()) | ||
299 | return; | ||
300 | |||
301 | sh_pmu->disable_all(); | ||
302 | } | ||
303 | |||
288 | static struct pmu pmu = { | 304 | static struct pmu pmu = { |
305 | .pmu_enable = sh_pmu_pmu_enable, | ||
306 | .pmu_disable = sh_pmu_pmu_disable, | ||
289 | .event_init = sh_pmu_event_init, | 307 | .event_init = sh_pmu_event_init, |
290 | .enable = sh_pmu_enable, | 308 | .enable = sh_pmu_enable, |
291 | .disable = sh_pmu_disable, | 309 | .disable = sh_pmu_disable, |
@@ -316,22 +334,6 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | |||
316 | return NOTIFY_OK; | 334 | return NOTIFY_OK; |
317 | } | 335 | } |
318 | 336 | ||
319 | void hw_perf_enable(void) | ||
320 | { | ||
321 | if (!sh_pmu_initialized()) | ||
322 | return; | ||
323 | |||
324 | sh_pmu->enable_all(); | ||
325 | } | ||
326 | |||
327 | void hw_perf_disable(void) | ||
328 | { | ||
329 | if (!sh_pmu_initialized()) | ||
330 | return; | ||
331 | |||
332 | sh_pmu->disable_all(); | ||
333 | } | ||
334 | |||
335 | int __cpuinit register_sh_pmu(struct sh_pmu *pmu) | 337 | int __cpuinit register_sh_pmu(struct sh_pmu *pmu) |
336 | { | 338 | { |
337 | if (sh_pmu) | 339 | if (sh_pmu) |