| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-09-07 16:00:11 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-09-07 16:00:11 -0400 |
| commit | b0fb422281c8c09c8dcf03ca44ec343f0ff9df0b (patch) | |
| tree | 85866700657ec97d8eae343146dd61519771f699 /kernel | |
| parent | 54d6d5374491387eafe7f6e05e065232071cc4f0 (diff) | |
| parent | 20afc60f892d285fde179ead4b24e6a7938c2f1b (diff) | |
Merge branch 'perf-fixes-for-linus' of git://tesla.tglx.de/git/linux-2.6-tip
* 'perf-fixes-for-linus' of git://tesla.tglx.de/git/linux-2.6-tip:
x86, perf: Check that current->mm is alive before getting user callchain
perf_event: Fix broken calc_timer_values()
perf events: Fix slow and broken cgroup context switch code
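
Of the three fixes, the cgroup context-switch change accounts for nearly all of the diff below: perf_cgroup_switch() deschedules and reschedules every cgroup event on the CPU, and it used to run on every context switch even when the outgoing and incoming tasks were in the same cgroup. The fix compares the two tasks' cgroups and skips the switch when they match. A minimal userspace sketch of that skip-if-unchanged pattern, using hypothetical stand-in types rather than the kernel's task_struct and perf_cgroup:

```c
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's perf_cgroup / task_struct. */
struct cgroup { const char *name; };
struct task   { const char *comm; struct cgroup *cgrp; };

/* Models the expensive perf_cgroup_switch(): it deschedules and
 * reschedules all cgroup events on the CPU, so every avoided call
 * is a real saving on the context-switch path. */
static void cgroup_switch(struct task *task, const char *mode)
{
        printf("cgroup_switch(%s, %s)\n", task->comm, mode);
}

/* The fix: pay for a switch only when the outgoing and incoming
 * tasks are in different cgroups. next == NULL forces the switch,
 * mirroring the perf_event_enable_on_exec() call site. */
static void sched_out(struct task *task, struct task *next)
{
        struct cgroup *cgrp1 = task->cgrp;
        struct cgroup *cgrp2 = next ? next->cgrp : NULL;

        if (cgrp1 != cgrp2)
                cgroup_switch(task, "SWOUT");
}

int main(void)
{
        struct cgroup web = { "web" }, db = { "db" };
        struct task a = { "a", &web }, b = { "b", &web }, c = { "c", &db };

        sched_out(&a, &b);   /* same cgroup: fast path, no switch */
        sched_out(&a, &c);   /* different cgroup: switch happens  */
        sched_out(&a, NULL); /* exec path: always switches        */
        return 0;
}
```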
Diffstat (limited to 'kernel')

| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/events/core.c | 67 |
| -rw-r--r-- | kernel/sched.c | 2 |

2 files changed, 56 insertions(+), 13 deletions(-)
```diff
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b8785e26ee1c..0f857782d06f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -399,14 +399,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
         local_irq_restore(flags);
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+                                         struct task_struct *next)
 {
-        perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+        struct perf_cgroup *cgrp1;
+        struct perf_cgroup *cgrp2 = NULL;
+
+        /*
+         * we come here when we know perf_cgroup_events > 0
+         */
+        cgrp1 = perf_cgroup_from_task(task);
+
+        /*
+         * next is NULL when called from perf_event_enable_on_exec()
+         * that will systematically cause a cgroup_switch()
+         */
+        if (next)
+                cgrp2 = perf_cgroup_from_task(next);
+
+        /*
+         * only schedule out current cgroup events if we know
+         * that we are switching to a different cgroup. Otherwise,
+         * do no touch the cgroup events.
+         */
+        if (cgrp1 != cgrp2)
+                perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+                                        struct task_struct *task)
 {
-        perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+        struct perf_cgroup *cgrp1;
+        struct perf_cgroup *cgrp2 = NULL;
+
+        /*
+         * we come here when we know perf_cgroup_events > 0
+         */
+        cgrp1 = perf_cgroup_from_task(task);
+
+        /* prev can never be NULL */
+        cgrp2 = perf_cgroup_from_task(prev);
+
+        /*
+         * only need to schedule in cgroup events if we are changing
+         * cgroup during ctxsw. Cgroup events were not scheduled
+         * out of ctxsw out if that was not the case.
+         */
+        if (cgrp1 != cgrp2)
+                perf_cgroup_switch(task, PERF_CGROUP_SWIN);
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -518,11 +558,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
 {
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+                                         struct task_struct *next)
 {
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+                                        struct task_struct *task)
 {
 }
 
@@ -1988,7 +2030,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
          * cgroup event are system-wide mode only
          */
         if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-                perf_cgroup_sched_out(task);
+                perf_cgroup_sched_out(task, next);
 }
 
 static void task_ctx_sched_out(struct perf_event_context *ctx)
@@ -2153,7 +2195,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void __perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *prev,
+                                struct task_struct *task)
 {
         struct perf_event_context *ctx;
         int ctxn;
@@ -2171,7 +2214,7 @@ void __perf_event_task_sched_in(struct task_struct *task)
          * cgroup event are system-wide mode only
          */
         if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-                perf_cgroup_sched_in(task);
+                perf_cgroup_sched_in(prev, task);
 }
 
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
@@ -2427,7 +2470,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
          * ctxswin cgroup events which are already scheduled
          * in.
          */
-        perf_cgroup_sched_out(current);
+        perf_cgroup_sched_out(current, NULL);
 
         raw_spin_lock(&ctx->lock);
         task_ctx_sched_out(ctx);
@@ -3353,8 +3396,8 @@ static int perf_event_index(struct perf_event *event)
 }
 
 static void calc_timer_values(struct perf_event *event,
-                              u64 *running,
-                              u64 *enabled)
+                              u64 *enabled,
+                              u64 *running)
 {
         u64 now, ctx_time;
 
```
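The calc_timer_values() hunk just above is the "perf_event: Fix broken calc_timer_values()" change: callers pass &enabled first and &running second, but the prototype declared the out-parameters in the opposite order. Both are u64 *, so the compiler accepts the transposition silently and the two times simply come back swapped. A self-contained sketch of that failure mode (function and variable names here are illustrative, not the kernel's):

```c
#include <assert.h>
#include <stdint.h>

/* Buggy prototype: out-parameter names transposed vs. the call sites. */
static void calc_buggy(uint64_t *running, uint64_t *enabled)
{
        *enabled = 100; /* aliases the caller's 'running' slot...  */
        *running = 60;  /* ...and this one the caller's 'enabled'. */
}

/* Fixed prototype: matches what callers actually pass. */
static void calc_fixed(uint64_t *enabled, uint64_t *running)
{
        *enabled = 100;
        *running = 60;
}

int main(void)
{
        uint64_t enabled, running;

        /* Every call site passes (&enabled, &running). */
        calc_buggy(&enabled, &running);
        assert(enabled == 60 && running == 100); /* silently swapped */

        calc_fixed(&enabled, &running);
        assert(enabled == 100 && running == 60); /* as intended */
        return 0;
}
```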
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index ccacdbdecf45..0408cdc6d572 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3065,7 +3065,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
         local_irq_disable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-        perf_event_task_sched_in(current);
+        perf_event_task_sched_in(prev, current);
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
         local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
```
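The one-line kernel/sched.c change feeds the cgroup comparison: by the time finish_task_switch() runs, current is already the incoming task, and the outgoing task survives only as the prev argument, so it has to be forwarded through perf_event_task_sched_in() explicitly. A small sketch of why the call site must thread prev through (stand-in types again, not the kernel's):

```c
#include <stdio.h>

struct task { const char *comm; int cgroup_id; };

/* The hook now carries both sides of the switch, so the callee can
 * compare per-task state (here an id; in the kernel, perf cgroups). */
static void perf_sched_in_hook(struct task *prev, struct task *curr)
{
        if (prev->cgroup_id != curr->cgroup_id)
                printf("cgroup switch %s -> %s\n", prev->comm, curr->comm);
}

/* Models finish_task_switch(): once we are running as 'curr', the
 * outgoing task exists only as this function's argument, so it must
 * be forwarded here or the information is lost. */
static void finish_switch(struct task *prev, struct task *curr)
{
        perf_sched_in_hook(prev, curr);
}

int main(void)
{
        struct task a = { "a", 1 }, b = { "b", 1 }, c = { "c", 2 };

        finish_switch(&a, &b); /* same cgroup: hook stays quiet */
        finish_switch(&b, &c); /* crossing cgroups: hook fires  */
        return 0;
}
```

Widening the hook's signature also means every remaining one-argument caller fails to compile, which is how a change like this propagates safely through the scheduler.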
