author     Stephane Eranian <eranian@google.com>      2011-01-03 11:20:01 -0500
committer  Ingo Molnar <mingo@elte.hu>                2011-01-07 09:08:51 -0500
commit     5632ab12e9e1fcd7e94058567e181d8f35e83798 (patch)
tree       70b077bd489e889bb049ded097119ea382a254f3 /kernel/perf_event.c
parent     0b3fcf178deefd7b64154c2c0760a2c63df0b74f (diff)
perf_events: Generalize use of event_filter_match()
Replace all occurrences of:
	event->cpu != -1 && event->cpu != smp_processor_id()
by a call to:
	!event_filter_match(event)
This makes the code more consistent and will make the cgroup
patch smaller.
Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <4d220593.2308e30a.48c5.ffff8ae9@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
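
For reference, event_filter_match() is the helper introduced by the parent
commit above; a minimal sketch of its definition in kernel/perf_event.c at
this point in the tree (the cgroup patch mentioned above later extends it):

	static inline int
	event_filter_match(struct perf_event *event)
	{
		/* An event bound to a CPU (event->cpu != -1) only matches
		 * on that CPU; event->cpu == -1 means "count on any CPU". */
		return event->cpu == -1 || event->cpu == smp_processor_id();
	}

By De Morgan's laws, !event_filter_match(event) expands to exactly the
open-coded test removed below, event->cpu != -1 && event->cpu !=
smp_processor_id(), so each replacement is behavior-preserving.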
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 2c14e3afdf0d..dcdb19ed83a6 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -949,7 +949,7 @@ static void __perf_install_in_context(void *info)
 
 	add_event_to_ctx(event, ctx);
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		goto unlock;
 
 	/*
@@ -1094,7 +1094,7 @@ static void __perf_event_enable(void *info)
 		goto unlock;
 	__perf_event_mark_enabled(event, ctx);
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		goto unlock;
 
 	/*
@@ -1441,7 +1441,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		if (event->state <= PERF_EVENT_STATE_OFF)
 			continue;
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		if (group_can_go_on(event, cpuctx, 1))
@@ -1473,7 +1473,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 		 * Listen to the 'cpu' scheduling filter constraint
 		 * of events:
 		 */
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		if (group_can_go_on(event, cpuctx, can_add_hw)) {
@@ -1700,7 +1700,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
 
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		hwc = &event->hw;
@@ -3899,7 +3899,7 @@ static int perf_event_task_match(struct perf_event *event)
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if (event->attr.comm || event->attr.mmap ||
@@ -4036,7 +4036,7 @@ static int perf_event_comm_match(struct perf_event *event)
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if (event->attr.comm)
@@ -4184,7 +4184,7 @@ static int perf_event_mmap_match(struct perf_event *event,
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if ((!executable && event->attr.mmap_data) ||