author		Linus Torvalds <torvalds@linux-foundation.org>	2011-09-07 16:00:11 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-09-07 16:00:11 -0400
commit		b0fb422281c8c09c8dcf03ca44ec343f0ff9df0b (patch)
tree		85866700657ec97d8eae343146dd61519771f699
parent		54d6d5374491387eafe7f6e05e065232071cc4f0 (diff)
parent		20afc60f892d285fde179ead4b24e6a7938c2f1b (diff)
Merge branch 'perf-fixes-for-linus' of git://tesla.tglx.de/git/linux-2.6-tip
* 'perf-fixes-for-linus' of git://tesla.tglx.de/git/linux-2.6-tip:
  x86, perf: Check that current->mm is alive before getting user callchain
  perf_event: Fix broken calc_timer_values()
  perf events: Fix slow and broken cgroup context switch code
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	3
-rw-r--r--	include/linux/perf_event.h		24
-rw-r--r--	kernel/events/core.c			67
-rw-r--r--	kernel/sched.c				2
4 files changed, 74 insertions(+), 22 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4ee3abf20ed6..cfa62ec090ec 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1900,6 +1900,9 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
 	perf_callchain_store(entry, regs->ip);
 
+	if (!current->mm)
+		return;
+
 	if (perf_callchain_user32(regs, entry))
 		return;
 
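
The x86 hunk above guards the user-space callchain walk: a kernel thread, or a task whose mm has already been torn down on exit, has no user page tables to follow, so the walk is skipped when current->mm is NULL. A minimal user-space sketch of the same guard pattern, using hypothetical stand-in types rather than the kernel's, looks like this:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures; not the real ones. */
struct mm_struct { int dummy; };

struct task {
	const char *comm;
	struct mm_struct *mm;	/* NULL for kernel threads / exiting tasks */
};

/* Model of the fixed perf_callchain_user(): bail out early when the
 * task has no user address space to walk. */
static void record_user_callchain(struct task *t)
{
	if (!t->mm) {
		printf("%s: no mm, skipping user callchain\n", t->comm);
		return;
	}
	printf("%s: walking user stack\n", t->comm);
}

int main(void)
{
	struct mm_struct mm = { 0 };
	struct task user_task = { "worker", &mm };
	struct task kthread   = { "kworker/0:1", NULL };

	record_user_callchain(&user_task);
	record_user_callchain(&kthread);
	return 0;
}
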
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 245bafdafd5e..c816075c01ce 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -944,8 +944,10 @@ extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
-extern void __perf_event_task_sched_in(struct task_struct *task);
-extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+extern void __perf_event_task_sched_in(struct task_struct *prev,
+				       struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *prev,
+					struct task_struct *next);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -1059,17 +1061,20 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 
 extern struct jump_label_key perf_sched_events;
 
-static inline void perf_event_task_sched_in(struct task_struct *task)
+static inline void perf_event_task_sched_in(struct task_struct *prev,
+					    struct task_struct *task)
 {
 	if (static_branch(&perf_sched_events))
-		__perf_event_task_sched_in(task);
+		__perf_event_task_sched_in(prev, task);
 }
 
-static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+static inline void perf_event_task_sched_out(struct task_struct *prev,
+					     struct task_struct *next)
 {
 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
-	__perf_event_task_sched_out(task, next);
+	if (static_branch(&perf_sched_events))
+		__perf_event_task_sched_out(prev, next);
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1139,10 +1144,11 @@ extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else
 static inline void
-perf_event_task_sched_in(struct task_struct *task)			{ }
+perf_event_task_sched_in(struct task_struct *prev,
+			 struct task_struct *task)			{ }
 static inline void
-perf_event_task_sched_out(struct task_struct *task,
+perf_event_task_sched_out(struct task_struct *prev,
 			  struct task_struct *next)			{ }
 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
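
The header changes do two things: both wrappers now carry the outgoing task (prev) through to the slow path, and perf_event_task_sched_out() only drops into __perf_event_task_sched_out() when the perf_sched_events key says at least one event exists. A rough user-space sketch of that gating pattern follows; the plain bool here is only a stand-in for the kernel's static_branch()/jump-label machinery, and the names are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct task { const char *comm; };

/* Stand-in for the jump-label key 'perf_sched_events'; in the kernel this
 * compiles down to a patched branch, here it is just a flag. */
static bool perf_sched_events;

static void __slow_sched_out(struct task *prev, struct task *next)
{
	printf("slow path: %s -> %s\n", prev->comm, next->comm);
}

/* Model of the fixed inline wrapper: the expensive call is skipped
 * entirely on context switches while no perf events are active. */
static inline void sched_out(struct task *prev, struct task *next)
{
	if (perf_sched_events)
		__slow_sched_out(prev, next);
}

int main(void)
{
	struct task a = { "a" }, b = { "b" };

	sched_out(&a, &b);		/* no events: nothing happens */
	perf_sched_events = true;	/* an event was created */
	sched_out(&a, &b);		/* slow path runs */
	return 0;
}
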
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b8785e26ee1c..0f857782d06f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -399,14 +399,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 	local_irq_restore(flags);
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+					 struct task_struct *next)
 {
-	perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+	struct perf_cgroup *cgrp1;
+	struct perf_cgroup *cgrp2 = NULL;
+
+	/*
+	 * we come here when we know perf_cgroup_events > 0
+	 */
+	cgrp1 = perf_cgroup_from_task(task);
+
+	/*
+	 * next is NULL when called from perf_event_enable_on_exec()
+	 * that will systematically cause a cgroup_switch()
+	 */
+	if (next)
+		cgrp2 = perf_cgroup_from_task(next);
+
+	/*
+	 * only schedule out current cgroup events if we know
+	 * that we are switching to a different cgroup. Otherwise,
+	 * do no touch the cgroup events.
+	 */
+	if (cgrp1 != cgrp2)
+		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+					struct task_struct *task)
 {
-	perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+	struct perf_cgroup *cgrp1;
+	struct perf_cgroup *cgrp2 = NULL;
+
+	/*
+	 * we come here when we know perf_cgroup_events > 0
+	 */
+	cgrp1 = perf_cgroup_from_task(task);
+
+	/* prev can never be NULL */
+	cgrp2 = perf_cgroup_from_task(prev);
+
+	/*
+	 * only need to schedule in cgroup events if we are changing
+	 * cgroup during ctxsw. Cgroup events were not scheduled
+	 * out of ctxsw out if that was not the case.
+	 */
+	if (cgrp1 != cgrp2)
+		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
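
The rewritten helpers above only pay for perf_cgroup_switch() when the outgoing and incoming tasks really belong to different perf cgroups; switching between two tasks in the same cgroup leaves the events untouched, which is what makes the common case cheap again. A small stand-alone model of that comparison is sketched below, with a plain pointer standing in for perf_cgroup_from_task() (names and values are illustrative, not the kernel's):

#include <stdio.h>

struct cgroup { const char *name; };
struct task   { const char *comm; struct cgroup *cgrp; };

static int switches;	/* counts how often the expensive path runs */

static void cgroup_switch(struct task *t)
{
	switches++;
	printf("cgroup switch for %s (%d total)\n", t->comm, switches);
}

/* Model of the fixed perf_cgroup_sched_out(): next may be NULL (the
 * enable-on-exec path), which forces the switch unconditionally. */
static void sched_out(struct task *task, struct task *next)
{
	struct cgroup *c1 = task->cgrp;
	struct cgroup *c2 = next ? next->cgrp : NULL;

	if (c1 != c2)
		cgroup_switch(task);
}

int main(void)
{
	struct cgroup web = { "web" }, db = { "db" };
	struct task t1 = { "t1", &web }, t2 = { "t2", &web }, t3 = { "t3", &db };

	sched_out(&t1, &t2);	/* same cgroup: no switch */
	sched_out(&t2, &t3);	/* different cgroup: switch */
	sched_out(&t3, NULL);	/* exec path: always switch */
	return 0;
}
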
@@ -518,11 +558,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
 {
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+					 struct task_struct *next)
 {
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+					struct task_struct *task)
 {
 }
 
@@ -1988,7 +2030,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 	 * cgroup event are system-wide mode only
 	 */
 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-		perf_cgroup_sched_out(task);
+		perf_cgroup_sched_out(task, next);
 }
 
 static void task_ctx_sched_out(struct perf_event_context *ctx)
@@ -2153,7 +2195,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 	 * accessing the event control register. If a NMI hits, then it will
 	 * keep the event running.
 	 */
-void __perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *prev,
+				struct task_struct *task)
 {
 	struct perf_event_context *ctx;
 	int ctxn;
@@ -2171,7 +2214,7 @@ void __perf_event_task_sched_in(struct task_struct *task)
 	 * cgroup event are system-wide mode only
 	 */
 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-		perf_cgroup_sched_in(task);
+		perf_cgroup_sched_in(prev, task);
 }
 
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
@@ -2427,7 +2470,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	 * ctxswin cgroup events which are already scheduled
 	 * in.
 	 */
-	perf_cgroup_sched_out(current);
+	perf_cgroup_sched_out(current, NULL);
 
 	raw_spin_lock(&ctx->lock);
 	task_ctx_sched_out(ctx);
@@ -3353,8 +3396,8 @@ static int perf_event_index(struct perf_event *event)
 }
 
 static void calc_timer_values(struct perf_event *event,
-				u64 *running,
-				u64 *enabled)
+				u64 *enabled,
+				u64 *running)
 {
 	u64 now, ctx_time;
 
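
The calc_timer_values() hunk is purely an argument-order fix: the body fills in the enabled time first and the running time second, but the prototype had the out-parameters declared the other way round, so callers passing (&enabled, &running) got the two values swapped. C matches out-parameters by position, so nothing warns about the mismatch. A small sketch of the failure mode, with made-up values and hypothetical helper names, is below:

#include <stdio.h>
#include <stdint.h>

/* Buggy prototype order: the body stores the enabled time through the
 * second pointer and the running time through the first, but the caller
 * below passes the addresses the other way round. */
static void calc_times_buggy(uint64_t *running, uint64_t *enabled)
{
	*enabled = 1000;	/* total enabled time (made-up value) */
	*running = 400;		/* total running time (made-up value) */
}

/* Fixed prototype order matching the caller. */
static void calc_times_fixed(uint64_t *enabled, uint64_t *running)
{
	*enabled = 1000;
	*running = 400;
}

int main(void)
{
	uint64_t enabled, running;

	calc_times_buggy(&enabled, &running);
	printf("buggy: enabled=%llu running=%llu\n",	/* prints 400 / 1000 */
	       (unsigned long long)enabled, (unsigned long long)running);

	calc_times_fixed(&enabled, &running);
	printf("fixed: enabled=%llu running=%llu\n",	/* prints 1000 / 400 */
	       (unsigned long long)enabled, (unsigned long long)running);
	return 0;
}
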
diff --git a/kernel/sched.c b/kernel/sched.c
index ccacdbdecf45..0408cdc6d572 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3065,7 +3065,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_disable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-	perf_event_task_sched_in(current);
+	perf_event_task_sched_in(prev, current);
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */