author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-06-11 11:32:03 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-09-09 14:46:29 -0400
commit		24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7 (patch)
tree		a37d3a4cb101e3f67635a1920f447c9e9e8d8ab2	/arch/sh/kernel/perf_event.c
parent		9ed6060d286b1eb55974d09080f442f809408c42 (diff)
perf: Reduce perf_disable() usage
Since the current perf_disable() usage is only an optimization,
remove it for now. This eases the removal of the __weak
hw_perf_enable() interface.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/sh/kernel/perf_event.c')
-rw-r--r--	arch/sh/kernel/perf_event.c	11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 8cb206597e0c..d042989ceb45 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -230,11 +230,14 @@ static int sh_pmu_enable(struct perf_event *event)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
+	int ret = -EAGAIN;
+
+	perf_disable();
 
 	if (test_and_set_bit(idx, cpuc->used_mask)) {
 		idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
 		if (idx == sh_pmu->num_events)
-			return -EAGAIN;
+			goto out;
 
 		set_bit(idx, cpuc->used_mask);
 		hwc->idx = idx;
@@ -248,8 +251,10 @@ static int sh_pmu_enable(struct perf_event *event)
 	sh_pmu->enable(hwc, idx);
 
 	perf_event_update_userpage(event);
-
-	return 0;
+	ret = 0;
+out:
+	perf_enable();
+	return ret;
 }
 
 static void sh_pmu_read(struct perf_event *event)
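
For reference, here is a sketch of how sh_pmu_enable() reads after this patch, assembled from the two hunks above. The lines between the hunks (the counter programming that leads up to the sh_pmu->enable() call) are not part of this diff and are elided with a comment.

static int sh_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	int ret = -EAGAIN;

	/*
	 * perf_disable()/perf_enable() now bracket the whole counter
	 * allocation path, so every exit goes through the out: label.
	 */
	perf_disable();

	if (test_and_set_bit(idx, cpuc->used_mask)) {
		idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
		if (idx == sh_pmu->num_events)
			goto out;	/* no free counter: fail with -EAGAIN */

		set_bit(idx, cpuc->used_mask);
		hwc->idx = idx;
	}

	/* ... counter setup elided (unchanged by this patch) ... */

	sh_pmu->enable(hwc, idx);

	perf_event_update_userpage(event);
	ret = 0;
out:
	perf_enable();
	return ret;
}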