author     Linus Torvalds <torvalds@linux-foundation.org>   2011-09-07 16:00:11 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-09-07 16:00:11 -0400
commit     b0fb422281c8c09c8dcf03ca44ec343f0ff9df0b
tree       85866700657ec97d8eae343146dd61519771f699
parent     54d6d5374491387eafe7f6e05e065232071cc4f0
parent     20afc60f892d285fde179ead4b24e6a7938c2f1b
Merge branch 'perf-fixes-for-linus' of git://tesla.tglx.de/git/linux-2.6-tip
* 'perf-fixes-for-linus' of git://tesla.tglx.de/git/linux-2.6-tip:
x86, perf: Check that current->mm is alive before getting user callchain
perf_event: Fix broken calc_timer_values()
perf events: Fix slow and broken cgroup context switch code
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c |  3
-rw-r--r--  include/linux/perf_event.h       | 24
-rw-r--r--  kernel/events/core.c             | 67
-rw-r--r--  kernel/sched.c                   |  2
4 files changed, 74 insertions, 22 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4ee3abf20ed6..cfa62ec090ec 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1900,6 +1900,9 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
 	perf_callchain_store(entry, regs->ip);
 
+	if (!current->mm)
+		return;
+
 	if (perf_callchain_user32(regs, entry))
 		return;
 
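Kernel threads run with current->mm == NULL, so the user-callchain walk above has to bail out before it ever dereferences user stack memory. A minimal sketch of the same guard pattern, with illustrative names (task_ctx, walk_user_stack) rather than kernel API:

struct task_ctx {
	void *mm;	/* NULL for a kernel thread: no user address space */
};

static void walk_user_stack(const struct task_ctx *t)
{
	if (!t->mm)
		return;	/* nothing to unwind; avoid touching user memory */

	/* ... copy and record user stack frames here ... */
}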
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 245bafdafd5e..c816075c01ce 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -944,8 +944,10 @@ extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
-extern void __perf_event_task_sched_in(struct task_struct *task);
-extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+extern void __perf_event_task_sched_in(struct task_struct *prev,
+				       struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *prev,
+					struct task_struct *next);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -1059,17 +1061,20 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 
 extern struct jump_label_key perf_sched_events;
 
-static inline void perf_event_task_sched_in(struct task_struct *task)
+static inline void perf_event_task_sched_in(struct task_struct *prev,
+					    struct task_struct *task)
 {
 	if (static_branch(&perf_sched_events))
-		__perf_event_task_sched_in(task);
+		__perf_event_task_sched_in(prev, task);
 }
 
-static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+static inline void perf_event_task_sched_out(struct task_struct *prev,
+					     struct task_struct *next)
 {
 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
-	__perf_event_task_sched_out(task, next);
+	if (static_branch(&perf_sched_events))
+		__perf_event_task_sched_out(prev, next);
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1139,10 +1144,11 @@ extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else
 static inline void
-perf_event_task_sched_in(struct task_struct *task) { }
+perf_event_task_sched_in(struct task_struct *prev,
+			 struct task_struct *task) { }
 static inline void
-perf_event_task_sched_out(struct task_struct *task,
+perf_event_task_sched_out(struct task_struct *prev,
 			  struct task_struct *next) { }
 static inline int perf_event_init_task(struct task_struct *child) { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child) { }
 static inline void perf_event_free_task(struct task_struct *task) { }
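With this change both scheduler hooks are gated behind static_branch(&perf_sched_events), a jump-label test that is patched out of the instruction stream when no perf events exist, so an ordinary context switch pays almost nothing. A rough userspace model of that fast-path guard, using a plain flag (sched_events_active) where the kernel uses jump labels; all names here are illustrative:

#include <stdbool.h>

static bool sched_events_active;	/* stands in for perf_sched_events */

static void __sched_out_slow(int prev, int next)
{
	(void)prev; (void)next;	/* expensive bookkeeping lives here */
}

static inline void sched_out_hook(int prev, int next)
{
	if (sched_events_active)	/* kernel: static_branch(), a patched jump */
		__sched_out_slow(prev, next);
}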
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b8785e26ee1c..0f857782d06f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -399,14 +399,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 	local_irq_restore(flags);
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+					 struct task_struct *next)
 {
-	perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+	struct perf_cgroup *cgrp1;
+	struct perf_cgroup *cgrp2 = NULL;
+
+	/*
+	 * we come here when we know perf_cgroup_events > 0
+	 */
+	cgrp1 = perf_cgroup_from_task(task);
+
+	/*
+	 * next is NULL when called from perf_event_enable_on_exec()
+	 * that will systematically cause a cgroup_switch()
+	 */
+	if (next)
+		cgrp2 = perf_cgroup_from_task(next);
+
+	/*
+	 * only schedule out current cgroup events if we know
+	 * that we are switching to a different cgroup. Otherwise,
+	 * do no touch the cgroup events.
+	 */
+	if (cgrp1 != cgrp2)
+		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+					struct task_struct *task)
 {
-	perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+	struct perf_cgroup *cgrp1;
+	struct perf_cgroup *cgrp2 = NULL;
+
+	/*
+	 * we come here when we know perf_cgroup_events > 0
+	 */
+	cgrp1 = perf_cgroup_from_task(task);
+
+	/* prev can never be NULL */
+	cgrp2 = perf_cgroup_from_task(prev);
+
+	/*
+	 * only need to schedule in cgroup events if we are changing
+	 * cgroup during ctxsw. Cgroup events were not scheduled
+	 * out of ctxsw out if that was not the case.
+	 */
+	if (cgrp1 != cgrp2)
+		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
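The rewritten helpers only fall through to perf_cgroup_switch() when the outgoing and incoming tasks sit in different cgroups, and a NULL next (the perf_event_enable_on_exec() caller) compares unequal on purpose so it always forces the switch. A compact model of that comparison, with struct task and cgroup fields as illustrative stand-ins for the kernel's types:

#include <stddef.h>

struct cgroup { int id; };
struct task   { struct cgroup *cg; };

static void cgroup_sched_out(struct task *task, struct task *next)
{
	struct cgroup *c1 = task->cg;
	struct cgroup *c2 = next ? next->cg : NULL;	/* NULL next always switches */

	if (c1 != c2)
		;	/* ... schedule this cgroup's events out here ... */
}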
@@ -518,11 +558,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
 {
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+					 struct task_struct *next)
 {
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+					struct task_struct *task)
 {
 }
 
@@ -1988,7 +2030,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 	 * cgroup event are system-wide mode only
 	 */
 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-		perf_cgroup_sched_out(task);
+		perf_cgroup_sched_out(task, next);
 }
 
 static void task_ctx_sched_out(struct perf_event_context *ctx)
@@ -2153,7 +2195,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void __perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *prev,
+				struct task_struct *task)
 {
 	struct perf_event_context *ctx;
 	int ctxn;
@@ -2171,7 +2214,7 @@ void __perf_event_task_sched_in(struct task_struct *task)
 	 * cgroup event are system-wide mode only
 	 */
 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-		perf_cgroup_sched_in(task);
+		perf_cgroup_sched_in(prev, task);
 }
 
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
@@ -2427,7 +2470,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	 * ctxswin cgroup events which are already scheduled
 	 * in.
 	 */
-	perf_cgroup_sched_out(current);
+	perf_cgroup_sched_out(current, NULL);
 
 	raw_spin_lock(&ctx->lock);
 	task_ctx_sched_out(ctx);
@@ -3353,8 +3396,8 @@ static int perf_event_index(struct perf_event *event)
 }
 
 static void calc_timer_values(struct perf_event *event,
-				u64 *running,
-				u64 *enabled)
+				u64 *enabled,
+				u64 *running)
 {
 	u64 now, ctx_time;
 
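The calc_timer_values() fix is purely an argument-order bug: the function filled in both values correctly, but it declared its out-parameters as (running, enabled) while its callers passed (&enabled, &running), so readers saw the two times exchanged. A standalone illustration of how such a swap compiles silently, since both pointers have the same type (names here are made up for the demo):

#include <stdio.h>

static void get_times(unsigned long long *running,
		      unsigned long long *enabled)
{
	*enabled = 1000;	/* ns the event was enabled */
	*running = 600;		/* ns it was actually counting */
}

int main(void)
{
	unsigned long long enabled = 0, running = 0;

	get_times(&enabled, &running);	/* caller assumes (enabled, running) order */
	printf("enabled=%llu running=%llu\n", enabled, running);
	/* prints enabled=600 running=1000: the two results are silently swapped */
	return 0;
}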
diff --git a/kernel/sched.c b/kernel/sched.c
index ccacdbdecf45..0408cdc6d572 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3065,7 +3065,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_disable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-	perf_event_task_sched_in(current);
+	perf_event_task_sched_in(prev, current);
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
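The scheduler side is just plumbing: finish_task_switch() already holds the outgoing task as prev, and forwarding it into perf_event_task_sched_in() is what lets the cgroup comparison above see both ends of the context switch.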