Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/core.c | 63
1 files changed, 53 insertions, 10 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b8785e26ee1c..45847fbb599a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -399,14 +399,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 	local_irq_restore(flags);
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+					 struct task_struct *next)
 {
-	perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+	struct perf_cgroup *cgrp1;
+	struct perf_cgroup *cgrp2 = NULL;
+
+	/*
+	 * we come here when we know perf_cgroup_events > 0
+	 */
+	cgrp1 = perf_cgroup_from_task(task);
+
+	/*
+	 * next is NULL when called from perf_event_enable_on_exec()
+	 * that will systematically cause a cgroup_switch()
+	 */
+	if (next)
+		cgrp2 = perf_cgroup_from_task(next);
+
+	/*
+	 * only schedule out current cgroup events if we know
+	 * that we are switching to a different cgroup. Otherwise,
+	 * do no touch the cgroup events.
+	 */
+	if (cgrp1 != cgrp2)
+		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+					struct task_struct *task)
 {
-	perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+	struct perf_cgroup *cgrp1;
+	struct perf_cgroup *cgrp2 = NULL;
+
+	/*
+	 * we come here when we know perf_cgroup_events > 0
+	 */
+	cgrp1 = perf_cgroup_from_task(task);
+
+	/* prev can never be NULL */
+	cgrp2 = perf_cgroup_from_task(prev);
+
+	/*
+	 * only need to schedule in cgroup events if we are changing
+	 * cgroup during ctxsw. Cgroup events were not scheduled
+	 * out of ctxsw out if that was not the case.
+	 */
+	if (cgrp1 != cgrp2)
+		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
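
The hunk above is the heart of the change: both helpers now look up the perf_cgroup of the outgoing and the incoming task and only call perf_cgroup_switch() when the two differ. A rough user-space sketch of that check (illustrative only; fake_task, fake_cgroup_switch() and the integer cgroup ids are stand-ins, not kernel APIs) shows how a same-cgroup context switch avoids touching the PMU entirely:

/*
 * Illustrative sketch, not kernel code: fake_cgroup_switch() stands in
 * for perf_cgroup_switch(), an integer id stands in for the pointer
 * returned by perf_cgroup_from_task().
 */
#include <stdio.h>

struct fake_task {
	const char *name;
	int cgroup_id;			/* stand-in for the task's perf_cgroup */
};

static int pmu_writes;			/* counts simulated perf_cgroup_switch() calls */

static void fake_cgroup_switch(const struct fake_task *t, const char *mode)
{
	pmu_writes++;
	printf("cgroup switch (%s) for %s\n", mode, t->name);
}

/* mirrors the shape of the new perf_cgroup_sched_out(task, next) */
static void sched_out(const struct fake_task *task, const struct fake_task *next)
{
	/* a NULL next never matches, so the switch is forced */
	if (!next || task->cgroup_id != next->cgroup_id)
		fake_cgroup_switch(task, "out");
}

/* mirrors the shape of the new perf_cgroup_sched_in(prev, task) */
static void sched_in(const struct fake_task *prev, const struct fake_task *task)
{
	if (task->cgroup_id != prev->cgroup_id)
		fake_cgroup_switch(task, "in");
}

int main(void)
{
	struct fake_task a = { "A", 1 }, b = { "B", 1 }, c = { "C", 2 };

	sched_out(&a, &b);	/* A -> B, same cgroup: skipped */
	sched_in(&a, &b);
	sched_out(&b, &c);	/* B -> C, cgroup changes: two simulated writes */
	sched_in(&b, &c);
	printf("pmu writes: %d\n", pmu_writes);		/* prints 2, not 4 */
	return 0;
}

Run as a normal C program, the sketch reports two simulated PMU writes instead of four, which is the saving the patch is after on workloads that bounce between tasks in the same cgroup.
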
@@ -518,11 +558,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
 {
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+					 struct task_struct *next)
 {
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+					struct task_struct *task)
 {
 }
 
@@ -1988,7 +2030,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 	 * cgroup event are system-wide mode only
 	 */
 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-		perf_cgroup_sched_out(task);
+		perf_cgroup_sched_out(task, next);
 }
 
 static void task_ctx_sched_out(struct perf_event_context *ctx)
@@ -2153,7 +2195,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void __perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *prev,
+				struct task_struct *task)
 {
 	struct perf_event_context *ctx;
 	int ctxn;
@@ -2171,7 +2214,7 @@ void __perf_event_task_sched_in(struct task_struct *task)
 	 * cgroup event are system-wide mode only
 	 */
 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-		perf_cgroup_sched_in(task);
+		perf_cgroup_sched_in(prev, task);
 }
 
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
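
These hunks thread the extra task pointer through the two call sites: the sched-out path now receives the incoming task (next) and the sched-in path receives the outgoing one (prev), so each side can perform the same cgroup comparison. The comment in perf_cgroup_sched_in() relies on the two sites staying symmetric: if the out path skipped because the cgroup did not change, the in path must skip too. A small stand-alone sketch (hypothetical names, user-space stand-ins, not kernel code) asserts that invariant across a few simulated switches:

/*
 * Illustrative sketch: if the switch-out site skips its work because
 * the cgroup did not change, the switch-in site skips as well, and the
 * "cgroup events are programmed" state stays consistent.
 */
#include <assert.h>
#include <stdio.h>

static int events_active = 1;	/* cgroup events of the current task are programmed */

static void sched_out_site(int cur_cgrp, int next_cgrp)
{
	if (cur_cgrp != next_cgrp) {
		assert(events_active);
		events_active = 0;	/* models perf_cgroup_switch(SWOUT) */
	}
}

static void sched_in_site(int prev_cgrp, int cur_cgrp)
{
	if (cur_cgrp != prev_cgrp) {
		assert(!events_active);
		events_active = 1;	/* models perf_cgroup_switch(SWIN) */
	}
}

int main(void)
{
	/* task sequence A(cgrp 1) -> B(cgrp 1) -> C(cgrp 2) -> A(cgrp 1) */
	int cgrp[] = { 1, 1, 2, 1 };

	for (int i = 0; i + 1 < 4; i++) {
		sched_out_site(cgrp[i], cgrp[i + 1]);
		sched_in_site(cgrp[i], cgrp[i + 1]);
		assert(events_active);	/* invariant holds after every switch */
	}
	printf("state stayed consistent across all switches\n");
	return 0;
}
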
@@ -2427,7 +2470,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	 * ctxswin cgroup events which are already scheduled
 	 * in.
 	 */
-	perf_cgroup_sched_out(current);
+	perf_cgroup_sched_out(current, NULL);
 
 	raw_spin_lock(&ctx->lock);
 	task_ctx_sched_out(ctx);
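
perf_event_enable_on_exec() is the one caller that wants the old unconditional behaviour, which it gets by passing NULL as next: cgrp2 then stays NULL, the pointer comparison can never succeed, and the cgroup events are always scheduled out before the context is rebuilt. A tiny illustration of that convention (made-up names, not kernel code):

#include <stdio.h>

struct fake_cgroup { int id; };

/* models perf_cgroup_sched_out() after cgrp1/cgrp2 have been resolved */
static void sched_out(struct fake_cgroup *cgrp1, struct fake_cgroup *cgrp2)
{
	/* cgrp2 is NULL on the exec path, so the test below always fires */
	if (cgrp1 != cgrp2)
		printf("cgroup %d: events scheduled out\n", cgrp1->id);
	else
		printf("cgroup %d: switch skipped\n", cgrp1->id);
}

int main(void)
{
	struct fake_cgroup g = { 7 };

	sched_out(&g, &g);	/* ordinary switch within one cgroup: skipped */
	sched_out(&g, NULL);	/* enable-on-exec path: always switches */
	return 0;
}
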