author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-06-16 08:37:10 -0400
committer	Ingo Molnar <mingo@elte.hu>			2010-09-09 14:46:30 -0400
commit		a4eaf7f14675cb512d69f0c928055e73d0c6d252 (patch)
tree		e8a0f631fc28d4bd9becd2e9e2c71743c64ee3ec /arch/arm/kernel/perf_event.c
parent		fa407f35e0298d841e4088f95a7f9cf6e725c6d5 (diff)
perf: Rework the PMU methods
Replace pmu::{enable,disable,start,stop,unthrottle} with
pmu::{add,del,start,stop}, all of which take a flags argument.
The new interface extends the capability to stop a counter while
keeping it scheduled on the PMU. We replace the throttled state with
the generic stopped state.
This also allows us to efficiently stop/start counters over certain
code paths (like IRQ handlers).
It also allows scheduling a counter without starting it, providing a
generic frozen state (useful for rotating stopped counters).
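For orientation, an abridged sketch of the reworked method table and its
flags, condensed from the include/linux/perf_event.h part of the full
patch (only the members relevant here are shown):

	#define PERF_EF_START	0x01	/* start the counter when adding    */
	#define PERF_EF_RELOAD	0x02	/* reload the counter when starting */
	#define PERF_EF_UPDATE	0x04	/* update the counter when stopping */

	struct pmu {
		/* ... */
		int  (*add)  (struct perf_event *event, int flags); /* honours PERF_EF_START  */
		void (*del)  (struct perf_event *event, int flags);
		void (*start)(struct perf_event *event, int flags); /* honours PERF_EF_RELOAD */
		void (*stop) (struct perf_event *event, int flags); /* honours PERF_EF_UPDATE */
		void (*read) (struct perf_event *event);
	};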
The stopped state is implemented in two different ways, depending on
how the architecture implemented the throttled state (variant 1a is
sketched after the list):
1) We disable the counter:
a) the pmu has per-counter enable bits, we flip that
b) we program a NOP event, preserving the counter state
2) We store the counter state and ignore all read/overflow events
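As an illustration of variant 1a, here is a minimal sketch of a stop
method for a hypothetical pmu with per-counter enable bits; the
example_* helpers are placeholders, but the PERF_HES_* state bits and
the PERF_EF_UPDATE handling follow the pattern the x86 side of this
patch uses:

	static void example_pmu_stop(struct perf_event *event, int flags)
	{
		struct hw_perf_event *hwc = &event->hw;

		if (!(hwc->state & PERF_HES_STOPPED)) {
			/*
			 * 1a: flip the per-counter enable bit; the counter
			 * state itself is preserved.
			 */
			example_counter_disable(hwc->idx);
			hwc->state |= PERF_HES_STOPPED;
		}

		if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
			/* fold the hardware count into event->count */
			example_event_update(event);
			hwc->state |= PERF_HES_UPTODATE;
		}
	}

The ARM code below takes a simpler route: it always updates the count
on stop (ignoring PERF_EF_UPDATE) because the period has to be
reprogrammed on start anyway.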
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/arm/kernel/perf_event.c')
-rw-r--r--	arch/arm/kernel/perf_event.c	96
1 files changed, 61 insertions, 35 deletions
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 3343f3f4b973..448cfa6b3ef0 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -221,46 +221,56 @@ again:
 }
 
 static void
-armpmu_disable(struct perf_event *event)
+armpmu_read(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
-
-	WARN_ON(idx < 0);
-
-	clear_bit(idx, cpuc->active_mask);
-	armpmu->disable(hwc, idx);
-
-	barrier();
 
-	armpmu_event_update(event, hwc, idx);
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
+	/* Don't read disabled counters! */
+	if (hwc->idx < 0)
+		return;
 
-	perf_event_update_userpage(event);
+	armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
-armpmu_read(struct perf_event *event)
+armpmu_stop(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
 
-	/* Don't read disabled counters! */
-	if (hwc->idx < 0)
+	if (!armpmu)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx);
+	/*
+	 * ARM pmu always has to update the counter, so ignore
+	 * PERF_EF_UPDATE, see comments in armpmu_start().
+	 */
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		armpmu->disable(hwc, hwc->idx);
+		barrier(); /* why? */
+		armpmu_event_update(event, hwc, hwc->idx);
+		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	}
 }
 
 static void
-armpmu_unthrottle(struct perf_event *event)
+armpmu_start(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
 
+	if (!armpmu)
+		return;
+
+	/*
+	 * ARM pmu always has to reprogram the period, so ignore
+	 * PERF_EF_RELOAD, see the comment below.
+	 */
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
 	/*
 	 * Set the period again. Some counters can't be stopped, so when we
-	 * were throttled we simply disabled the IRQ source and the counter
+	 * were stopped we simply disabled the IRQ source and the counter
 	 * may have been left counting. If we don't do this step then we may
 	 * get an interrupt too soon or *way* too late if the overflow has
 	 * happened since disabling.
@@ -269,8 +279,25 @@ armpmu_unthrottle(struct perf_event *event)
 	armpmu->enable(hwc, hwc->idx);
 }
 
+static void
+armpmu_del(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	WARN_ON(idx < 0);
+
+	clear_bit(idx, cpuc->active_mask);
+	armpmu_stop(event, PERF_EF_UPDATE);
+	cpuc->events[idx] = NULL;
+	clear_bit(idx, cpuc->used_mask);
+
+	perf_event_update_userpage(event);
+}
+
 static int
-armpmu_enable(struct perf_event *event)
+armpmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
@@ -295,11 +322,9 @@ armpmu_enable(struct perf_event *event)
 	cpuc->events[idx] = event;
 	set_bit(idx, cpuc->active_mask);
 
-	/* Set the period for the event. */
-	armpmu_event_set_period(event, hwc, idx);
-
-	/* Enable the event. */
-	armpmu->enable(hwc, idx);
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	if (flags & PERF_EF_START)
+		armpmu_start(event, PERF_EF_RELOAD);
 
 	/* Propagate our changes to the userspace mapping. */
 	perf_event_update_userpage(event);
@@ -534,7 +559,7 @@ static int armpmu_event_init(struct perf_event *event)
 	return err;
 }
 
-static void armpmu_pmu_enable(struct pmu *pmu)
+static void armpmu_enable(struct pmu *pmu)
 {
 	/* Enable all of the perf events on hardware. */
 	int idx;
@@ -555,20 +580,21 @@ static void armpmu_pmu_enable(struct pmu *pmu)
 	armpmu->start();
 }
 
-static void armpmu_pmu_disable(struct pmu *pmu)
+static void armpmu_disable(struct pmu *pmu)
 {
 	if (armpmu)
 		armpmu->stop();
 }
 
 static struct pmu pmu = {
-	.pmu_enable = armpmu_pmu_enable,
-	.pmu_disable= armpmu_pmu_disable,
+	.pmu_enable	= armpmu_enable,
+	.pmu_disable	= armpmu_disable,
 	.event_init	= armpmu_event_init,
-	.enable		= armpmu_enable,
-	.disable	= armpmu_disable,
-	.unthrottle	= armpmu_unthrottle,
-	.read		= armpmu_read,
+	.add		= armpmu_add,
+	.del		= armpmu_del,
+	.start		= armpmu_start,
+	.stop		= armpmu_stop,
+	.read		= armpmu_read,
 };
 
 /*