diff options
author | Will Deacon <will.deacon@arm.com> | 2011-07-19 06:57:30 -0400 |
---|---|---|
committer | Will Deacon <will.deacon@arm.com> | 2011-08-31 05:18:01 -0400 |
commit | 05d22fde3c0b86c8395d8f12ac01fbbc524d73ca (patch) | |
tree | e0ea6c8b2a90a8dc02c7bcea9288b9176b3f7b69 /arch | |
parent | ecf5a893211c26e02b9d4cfd6ba2183473ac0203 (diff) |
ARM: perf: allow armpmu to implement mode exclusion
Modern PMUs allow for mode exclusion, so we no longer wish to return
-EPERM if it is requested.
This patch provides a hook in the armpmu structure for implementing
mode exclusion. The hw_perf_event initialisation is slightly delayed so
that the backend code can update the structure if required.
Acked-by: Jamie Iles <jamie@jamieiles.com>
Reviewed-by: Jean Pihet <j-pihet@ti.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/kernel/perf_event.c | 44 |
1 file changed, 25 insertions, 19 deletions
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index c668c91d0c0a..5d60c9c25964 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -75,6 +75,8 @@ struct arm_pmu { | |||
75 | void (*disable)(struct hw_perf_event *evt, int idx); | 75 | void (*disable)(struct hw_perf_event *evt, int idx); |
76 | int (*get_event_idx)(struct cpu_hw_events *cpuc, | 76 | int (*get_event_idx)(struct cpu_hw_events *cpuc, |
77 | struct hw_perf_event *hwc); | 77 | struct hw_perf_event *hwc); |
78 | int (*set_event_filter)(struct hw_perf_event *evt, | ||
79 | struct perf_event_attr *attr); | ||
78 | u32 (*read_counter)(int idx); | 80 | u32 (*read_counter)(int idx); |
79 | void (*write_counter)(int idx, u32 val); | 81 | void (*write_counter)(int idx, u32 val); |
80 | void (*start)(void); | 82 | void (*start)(void); |
@@ -478,6 +480,13 @@ hw_perf_event_destroy(struct perf_event *event) | |||
478 | } | 480 | } |
479 | 481 | ||
480 | static int | 482 | static int |
483 | event_requires_mode_exclusion(struct perf_event_attr *attr) | ||
484 | { | ||
485 | return attr->exclude_idle || attr->exclude_user || | ||
486 | attr->exclude_kernel || attr->exclude_hv; | ||
487 | } | ||
488 | |||
489 | static int | ||
481 | __hw_perf_event_init(struct perf_event *event) | 490 | __hw_perf_event_init(struct perf_event *event) |
482 | { | 491 | { |
483 | struct hw_perf_event *hwc = &event->hw; | 492 | struct hw_perf_event *hwc = &event->hw; |
@@ -502,34 +511,31 @@ __hw_perf_event_init(struct perf_event *event) | |||
502 | } | 511 | } |
503 | 512 | ||
504 | /* | 513 | /* |
514 | * We don't assign an index until we actually place the event onto | ||
515 | * hardware. Use -1 to signify that we haven't decided where to put it | ||
516 | * yet. For SMP systems, each core has it's own PMU so we can't do any | ||
517 | * clever allocation or constraints checking at this point. | ||
518 | */ | ||
519 | hwc->idx = -1; | ||
520 | hwc->config_base = 0; | ||
521 | hwc->config = 0; | ||
522 | hwc->event_base = 0; | ||
523 | |||
524 | /* | ||
505 | * Check whether we need to exclude the counter from certain modes. | 525 | * Check whether we need to exclude the counter from certain modes. |
506 | * The ARM performance counters are on all of the time so if someone | ||
507 | * has asked us for some excludes then we have to fail. | ||
508 | */ | 526 | */ |
509 | if (event->attr.exclude_kernel || event->attr.exclude_user || | 527 | if ((!armpmu->set_event_filter || |
510 | event->attr.exclude_hv || event->attr.exclude_idle) { | 528 | armpmu->set_event_filter(hwc, &event->attr)) && |
529 | event_requires_mode_exclusion(&event->attr)) { | ||
511 | pr_debug("ARM performance counters do not support " | 530 | pr_debug("ARM performance counters do not support " |
512 | "mode exclusion\n"); | 531 | "mode exclusion\n"); |
513 | return -EPERM; | 532 | return -EPERM; |
514 | } | 533 | } |
515 | 534 | ||
516 | /* | 535 | /* |
517 | * We don't assign an index until we actually place the event onto | 536 | * Store the event encoding into the config_base field. |
518 | * hardware. Use -1 to signify that we haven't decided where to put it | ||
519 | * yet. For SMP systems, each core has it's own PMU so we can't do any | ||
520 | * clever allocation or constraints checking at this point. | ||
521 | */ | ||
522 | hwc->idx = -1; | ||
523 | |||
524 | /* | ||
525 | * Store the event encoding into the config_base field. config and | ||
526 | * event_base are unused as the only 2 things we need to know are | ||
527 | * the event mapping and the counter to use. The counter to use is | ||
528 | * also the indx and the config_base is the event type. | ||
529 | */ | 537 | */ |
530 | hwc->config_base = (unsigned long)mapping; | 538 | hwc->config_base |= (unsigned long)mapping; |
531 | hwc->config = 0; | ||
532 | hwc->event_base = 0; | ||
533 | 539 | ||
534 | if (!hwc->sample_period) { | 540 | if (!hwc->sample_period) { |
535 | hwc->sample_period = armpmu->max_period; | 541 | hwc->sample_period = armpmu->max_period; |