author     Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>   2009-09-25 01:51:17 -0400
committer  Ingo Molnar <mingo@elte.hu>                     2009-10-01 03:30:44 -0400
commit     8c9ed8e14c342ec5e7f27e7e498f62409a10eb29 (patch)
tree       9f4765f480a567a3a6139083bf334ae2f83f9055 /kernel/perf_event.c
parent     39a90a8ef17fe6fbf4b45e46e3c10d3b8b4a3dea (diff)
perf_event: Fix event group handling in __perf_event_sched_*()
Paul Mackerras says:
"Actually, looking at this more closely, it has to be a group
leader anyway since it's at the top level of ctx->group_list. In
fact I see four places where we do:
        list_for_each_entry(event, &ctx->group_list, group_entry) {
                if (event == event->group_leader)
                        ...
or the equivalent, three of which appear to have been introduced
by afedadf2 ("perf_counter: Optimize sched in/out of counters")
back in May by Peter Z.
As far as I can see the if () is superfluous in each case (a
singleton event will be a group of 1 and will have its
group_leader pointing to itself)."
[ See: http://marc.info/?l=linux-kernel&m=125361238901442&w=2 ]
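For illustration only, a simplified sketch of the control flow Paul describes (not the exact helpers in kernel/perf_event.c, and the sketch_ name is made up): scheduling out a group schedules out the leader and then walks the leader's sibling_list; for a singleton event that list is empty, so the group call degenerates to scheduling out the one event, which is why the extra check buys nothing:

        /*
         * Sketch: for a singleton event sibling_list is empty, so the
         * loop body never runs and this reduces to event_sched_out()
         * on the leader itself.
         */
        static void sketch_group_sched_out(struct perf_event *group_event,
                                           struct perf_cpu_context *cpuctx,
                                           struct perf_event_context *ctx)
        {
                struct perf_event *sibling;

                event_sched_out(group_event, cpuctx, ctx);
                list_for_each_entry(sibling, &group_event->sibling_list, group_entry)
                        event_sched_out(sibling, cpuctx, ctx);
        }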
And Peter Zijlstra points out this is a bugfix:
"The intent was to call event_sched_{in,out}() for single event
groups because that's cheaper than group_sched_{in,out}(),
however..
- as you noticed, I got the condition wrong, it should have read:
list_empty(&event->sibling_list)
- it failed to call group_can_go_on() which deals with ->exclusive.
- it also doesn't call hw_perf_group_sched_in() which might break
power."
[ See: http://marc.info/?l=linux-kernel&m=125369523318583&w=2 ]
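Writing out the fast path Peter had intended, purely to make the three points above concrete (hypothetical code, not what this patch applies; the patch simply drops the special case):

        /* Intended condition: a real singleton has no siblings. */
        if (list_empty(&event->sibling_list)) {
                /* It would still have to honour ->exclusive via group_can_go_on()... */
                if (group_can_go_on(event, cpuctx, 1))
                        event_sched_in(event, cpuctx, ctx, cpu);
                /*
                 * ...and it would still bypass hw_perf_group_sched_in(),
                 * which architectures such as POWER rely on to schedule a
                 * group onto the PMU as a unit.
                 */
        } else {
                if (group_can_go_on(event, cpuctx, 1))
                        group_sched_in(event, cpuctx, ctx, cpu);
        }

Since group_sched_{in,out}() already handles the singleton case correctly, the cheaper fast path is not worth keeping.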
Changelog v1->v2:
- Fix the patch title, as Peter Zijlstra suggested
- Remove the comments and WARN_ON_ONCE(), as Peter Zijlstra
suggested
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <4ABC5A55.7000208@cn.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--   kernel/perf_event.c   30
1 file changed, 8 insertions, 22 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 0f86feb6db0c..e50543db642a 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1030,14 +1030,10 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
 	update_context_time(ctx);
 
 	perf_disable();
-	if (ctx->nr_active) {
-		list_for_each_entry(event, &ctx->group_list, group_entry) {
-			if (event != event->group_leader)
-				event_sched_out(event, cpuctx, ctx);
-			else
-				group_sched_out(event, cpuctx, ctx);
-		}
-	}
+	if (ctx->nr_active)
+		list_for_each_entry(event, &ctx->group_list, group_entry)
+			group_sched_out(event, cpuctx, ctx);
+
 	perf_enable();
  out:
 	spin_unlock(&ctx->lock);
@@ -1258,12 +1254,8 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 		if (event->cpu != -1 && event->cpu != cpu)
 			continue;
 
-		if (event != event->group_leader)
-			event_sched_in(event, cpuctx, ctx, cpu);
-		else {
-			if (group_can_go_on(event, cpuctx, 1))
-				group_sched_in(event, cpuctx, ctx, cpu);
-		}
+		if (group_can_go_on(event, cpuctx, 1))
+			group_sched_in(event, cpuctx, ctx, cpu);
 
 		/*
 		 * If this pinned group hasn't been scheduled,
@@ -1291,15 +1283,9 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 		if (event->cpu != -1 && event->cpu != cpu)
 			continue;
 
-		if (event != event->group_leader) {
-			if (event_sched_in(event, cpuctx, ctx, cpu))
-				can_add_hw = 0;
-		} else {
-			if (group_can_go_on(event, cpuctx, can_add_hw)) {
-				if (group_sched_in(event, cpuctx, ctx, cpu))
-					can_add_hw = 0;
-			}
-		}
+		if (group_can_go_on(event, cpuctx, can_add_hw))
+			if (group_sched_in(event, cpuctx, ctx, cpu))
+				can_add_hw = 0;
 	}
 	perf_enable();
  out:
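Read linearly, the scheduling-out path after this patch (taken from the new side of the first hunk above, with the enclosing function context assumed) is simply:

        perf_disable();
        if (ctx->nr_active)
                list_for_each_entry(event, &ctx->group_list, group_entry)
                        group_sched_out(event, cpuctx, ctx);

        perf_enable();

Every entry on ctx->group_list is a group leader, so group_sched_out() covers singleton and multi-event groups alike, and the sched-in loops now go through group_can_go_on() and the group path uniformly.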