diff options
author | Ingo Molnar <mingo@elte.hu> | 2011-09-26 06:53:42 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2011-09-26 06:54:28 -0400 |
commit | ed3982cf3748b657ffb79d9d1c2e4a562661db2d (patch) | |
tree | 6e3654f460e23aa1b1512896aa3f03886a69be1b /kernel/events | |
parent | cba9bd22a5f8f857534b9a7f3fb3cafa0ac5fb75 (diff) | |
parent | d93dc5c4478c1fd5de85a3e8aece9aad7bbae044 (diff) |
Merge commit 'v3.1-rc7' into perf/core
Merge reason: Pick up the latest upstream fixes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/events')
-rw-r--r-- | kernel/events/core.c | 67 |
1 file changed, 55 insertions, 12 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c index adc3ef37b7e..d1a1bee3522 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -400,14 +400,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode) | |||
400 | local_irq_restore(flags); | 400 | local_irq_restore(flags); |
401 | } | 401 | } |
402 | 402 | ||
403 | static inline void perf_cgroup_sched_out(struct task_struct *task) | 403 | static inline void perf_cgroup_sched_out(struct task_struct *task, |
404 | struct task_struct *next) | ||
404 | { | 405 | { |
405 | perf_cgroup_switch(task, PERF_CGROUP_SWOUT); | 406 | struct perf_cgroup *cgrp1; |
407 | struct perf_cgroup *cgrp2 = NULL; | ||
408 | |||
409 | /* | ||
410 | * we come here when we know perf_cgroup_events > 0 | ||
411 | */ | ||
412 | cgrp1 = perf_cgroup_from_task(task); | ||
413 | |||
414 | /* | ||
415 | * next is NULL when called from perf_event_enable_on_exec() | ||
416 | * that will systematically cause a cgroup_switch() | ||
417 | */ | ||
418 | if (next) | ||
419 | cgrp2 = perf_cgroup_from_task(next); | ||
420 | |||
421 | /* | ||
422 | * only schedule out current cgroup events if we know | ||
423 | * that we are switching to a different cgroup. Otherwise, | ||
424 | * do not touch the cgroup events. | ||
425 | */ | ||
426 | if (cgrp1 != cgrp2) | ||
427 | perf_cgroup_switch(task, PERF_CGROUP_SWOUT); | ||
406 | } | 428 | } |
407 | 429 | ||
408 | static inline void perf_cgroup_sched_in(struct task_struct *task) | 430 | static inline void perf_cgroup_sched_in(struct task_struct *prev, |
431 | struct task_struct *task) | ||
409 | { | 432 | { |
410 | perf_cgroup_switch(task, PERF_CGROUP_SWIN); | 433 | struct perf_cgroup *cgrp1; |
434 | struct perf_cgroup *cgrp2 = NULL; | ||
435 | |||
436 | /* | ||
437 | * we come here when we know perf_cgroup_events > 0 | ||
438 | */ | ||
439 | cgrp1 = perf_cgroup_from_task(task); | ||
440 | |||
441 | /* prev can never be NULL */ | ||
442 | cgrp2 = perf_cgroup_from_task(prev); | ||
443 | |||
444 | /* | ||
445 | * only need to schedule in cgroup events if we are changing | ||
446 | * cgroup during ctxsw. Cgroup events were not scheduled | ||
447 | * out of ctxsw out if that was not the case. | ||
448 | */ | ||
449 | if (cgrp1 != cgrp2) | ||
450 | perf_cgroup_switch(task, PERF_CGROUP_SWIN); | ||
411 | } | 451 | } |
412 | 452 | ||
413 | static inline int perf_cgroup_connect(int fd, struct perf_event *event, | 453 | static inline int perf_cgroup_connect(int fd, struct perf_event *event, |
@@ -519,11 +559,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) | |||
519 | { | 559 | { |
520 | } | 560 | } |
521 | 561 | ||
522 | static inline void perf_cgroup_sched_out(struct task_struct *task) | 562 | static inline void perf_cgroup_sched_out(struct task_struct *task, |
563 | struct task_struct *next) | ||
523 | { | 564 | { |
524 | } | 565 | } |
525 | 566 | ||
526 | static inline void perf_cgroup_sched_in(struct task_struct *task) | 567 | static inline void perf_cgroup_sched_in(struct task_struct *prev, |
568 | struct task_struct *task) | ||
527 | { | 569 | { |
528 | } | 570 | } |
529 | 571 | ||
@@ -1989,7 +2031,7 @@ void __perf_event_task_sched_out(struct task_struct *task, | |||
1989 | * cgroup event are system-wide mode only | 2031 | * cgroup event are system-wide mode only |
1990 | */ | 2032 | */ |
1991 | if (atomic_read(&__get_cpu_var(perf_cgroup_events))) | 2033 | if (atomic_read(&__get_cpu_var(perf_cgroup_events))) |
1992 | perf_cgroup_sched_out(task); | 2034 | perf_cgroup_sched_out(task, next); |
1993 | } | 2035 | } |
1994 | 2036 | ||
1995 | static void task_ctx_sched_out(struct perf_event_context *ctx) | 2037 | static void task_ctx_sched_out(struct perf_event_context *ctx) |
@@ -2154,7 +2196,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx, | |||
2154 | * accessing the event control register. If a NMI hits, then it will | 2196 | * accessing the event control register. If a NMI hits, then it will |
2155 | * keep the event running. | 2197 | * keep the event running. |
2156 | */ | 2198 | */ |
2157 | void __perf_event_task_sched_in(struct task_struct *task) | 2199 | void __perf_event_task_sched_in(struct task_struct *prev, |
2200 | struct task_struct *task) | ||
2158 | { | 2201 | { |
2159 | struct perf_event_context *ctx; | 2202 | struct perf_event_context *ctx; |
2160 | int ctxn; | 2203 | int ctxn; |
@@ -2172,7 +2215,7 @@ void __perf_event_task_sched_in(struct task_struct *task) | |||
2172 | * cgroup event are system-wide mode only | 2215 | * cgroup event are system-wide mode only |
2173 | */ | 2216 | */ |
2174 | if (atomic_read(&__get_cpu_var(perf_cgroup_events))) | 2217 | if (atomic_read(&__get_cpu_var(perf_cgroup_events))) |
2175 | perf_cgroup_sched_in(task); | 2218 | perf_cgroup_sched_in(prev, task); |
2176 | } | 2219 | } |
2177 | 2220 | ||
2178 | static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) | 2221 | static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) |
@@ -2428,7 +2471,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx) | |||
2428 | * ctxswin cgroup events which are already scheduled | 2471 | * ctxswin cgroup events which are already scheduled |
2429 | * in. | 2472 | * in. |
2430 | */ | 2473 | */ |
2431 | perf_cgroup_sched_out(current); | 2474 | perf_cgroup_sched_out(current, NULL); |
2432 | 2475 | ||
2433 | raw_spin_lock(&ctx->lock); | 2476 | raw_spin_lock(&ctx->lock); |
2434 | task_ctx_sched_out(ctx); | 2477 | task_ctx_sched_out(ctx); |
@@ -3354,8 +3397,8 @@ static int perf_event_index(struct perf_event *event) | |||
3354 | } | 3397 | } |
3355 | 3398 | ||
3356 | static void calc_timer_values(struct perf_event *event, | 3399 | static void calc_timer_values(struct perf_event *event, |
3357 | u64 *running, | 3400 | u64 *enabled, |
3358 | u64 *enabled) | 3401 | u64 *running) |
3359 | { | 3402 | { |
3360 | u64 now, ctx_time; | 3403 | u64 now, ctx_time; |
3361 | 3404 | ||