aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJiri Olsa <jolsa@redhat.com>2012-05-23 07:13:02 -0400
committerIngo Molnar <mingo@kernel.org>2012-05-23 11:40:51 -0400
commitab0cce560ef177bdc7a8f73e9962be9d829a7b2c (patch)
treed9875b21c42862201624eb7d9ea2c36f4ea4035d
parent26252ea675663d1bc6747125fcaa2b7cc4ed8a03 (diff)
Revert "sched, perf: Use a single callback into the scheduler"
This reverts commit cb04ff9ac424 ("sched, perf: Use a single callback into the scheduler"). Before this change was introduced, the process switch worked like this (wrt. perf event schedule): schedule (prev, next) - schedule out all perf events for prev - switch to next - schedule in all perf events for current (next) After the commit, the process switch looks like: schedule (prev, next) - schedule out all perf events for prev - schedule in all perf events for (next) - switch to next The problem is, that after we schedule perf events in, the pmu is enabled and we can receive events even before we make the switch to next - so "current" still being prev process (event SAMPLE data are filled based on the value of the "current" process). That's exactly what we see for test__PERF_RECORD test. We receive SAMPLES with PID of the process that our tracee is scheduled from. Discussed with Peter Zijlstra: > Bah!, yeah I guess reverting is the right thing for now. Sad > though. > > So by having the two hooks we have a black-spot between them > where we receive no events at all, this black-spot covers the > hand-over of current and we thus don't receive the 'wrong' > events. > > I rather liked we could do away with both that black-spot and > clean up the code a little, but apparently people rely on it. Signed-off-by: Jiri Olsa <jolsa@redhat.com> Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: acme@redhat.com Cc: paulus@samba.org Cc: cjashfor@linux.vnet.ibm.com Cc: fweisbec@gmail.com Cc: eranian@google.com Link: http://lkml.kernel.org/r/20120523111302.GC1638@m.brq.redhat.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--include/linux/perf_event.h24
-rw-r--r--kernel/events/core.c14
-rw-r--r--kernel/sched/core.c9
3 files changed, 30 insertions, 17 deletions
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8adf70e9e3cc..f32578634d9d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1084,8 +1084,10 @@ extern void perf_pmu_unregister(struct pmu *pmu);
1084 1084
1085extern int perf_num_counters(void); 1085extern int perf_num_counters(void);
1086extern const char *perf_pmu_name(void); 1086extern const char *perf_pmu_name(void);
1087extern void __perf_event_task_sched(struct task_struct *prev, 1087extern void __perf_event_task_sched_in(struct task_struct *prev,
1088 struct task_struct *next); 1088 struct task_struct *task);
1089extern void __perf_event_task_sched_out(struct task_struct *prev,
1090 struct task_struct *next);
1089extern int perf_event_init_task(struct task_struct *child); 1091extern int perf_event_init_task(struct task_struct *child);
1090extern void perf_event_exit_task(struct task_struct *child); 1092extern void perf_event_exit_task(struct task_struct *child);
1091extern void perf_event_free_task(struct task_struct *task); 1093extern void perf_event_free_task(struct task_struct *task);
@@ -1205,13 +1207,20 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
1205 1207
1206extern struct static_key_deferred perf_sched_events; 1208extern struct static_key_deferred perf_sched_events;
1207 1209
1208static inline void perf_event_task_sched(struct task_struct *prev, 1210static inline void perf_event_task_sched_in(struct task_struct *prev,
1209 struct task_struct *task) 1211 struct task_struct *task)
1210{ 1212{
1213 if (static_key_false(&perf_sched_events.key))
1214 __perf_event_task_sched_in(prev, task);
1215}
1216
1217static inline void perf_event_task_sched_out(struct task_struct *prev,
1218 struct task_struct *next)
1219{
1211 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); 1220 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
1212 1221
1213 if (static_key_false(&perf_sched_events.key)) 1222 if (static_key_false(&perf_sched_events.key))
1214 __perf_event_task_sched(prev, task); 1223 __perf_event_task_sched_out(prev, next);
1215} 1224}
1216 1225
1217extern void perf_event_mmap(struct vm_area_struct *vma); 1226extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1286,8 +1295,11 @@ extern void perf_event_disable(struct perf_event *event);
1286extern void perf_event_task_tick(void); 1295extern void perf_event_task_tick(void);
1287#else 1296#else
1288static inline void 1297static inline void
1289perf_event_task_sched(struct task_struct *prev, 1298perf_event_task_sched_in(struct task_struct *prev,
1290 struct task_struct *task) { } 1299 struct task_struct *task) { }
1300static inline void
1301perf_event_task_sched_out(struct task_struct *prev,
1302 struct task_struct *next) { }
1291static inline int perf_event_init_task(struct task_struct *child) { return 0; } 1303static inline int perf_event_init_task(struct task_struct *child) { return 0; }
1292static inline void perf_event_exit_task(struct task_struct *child) { } 1304static inline void perf_event_exit_task(struct task_struct *child) { }
1293static inline void perf_event_free_task(struct task_struct *task) { } 1305static inline void perf_event_free_task(struct task_struct *task) { }
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 91a445925855..5b06cbbf6931 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2039,8 +2039,8 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2039 * accessing the event control register. If a NMI hits, then it will 2039 * accessing the event control register. If a NMI hits, then it will
2040 * not restart the event. 2040 * not restart the event.
2041 */ 2041 */
2042static void __perf_event_task_sched_out(struct task_struct *task, 2042void __perf_event_task_sched_out(struct task_struct *task,
2043 struct task_struct *next) 2043 struct task_struct *next)
2044{ 2044{
2045 int ctxn; 2045 int ctxn;
2046 2046
@@ -2279,8 +2279,8 @@ static void perf_branch_stack_sched_in(struct task_struct *prev,
2279 * accessing the event control register. If a NMI hits, then it will 2279 * accessing the event control register. If a NMI hits, then it will
2280 * keep the event running. 2280 * keep the event running.
2281 */ 2281 */
2282static void __perf_event_task_sched_in(struct task_struct *prev, 2282void __perf_event_task_sched_in(struct task_struct *prev,
2283 struct task_struct *task) 2283 struct task_struct *task)
2284{ 2284{
2285 struct perf_event_context *ctx; 2285 struct perf_event_context *ctx;
2286 int ctxn; 2286 int ctxn;
@@ -2305,12 +2305,6 @@ static void __perf_event_task_sched_in(struct task_struct *prev,
2305 perf_branch_stack_sched_in(prev, task); 2305 perf_branch_stack_sched_in(prev, task);
2306} 2306}
2307 2307
2308void __perf_event_task_sched(struct task_struct *prev, struct task_struct *next)
2309{
2310 __perf_event_task_sched_out(prev, next);
2311 __perf_event_task_sched_in(prev, next);
2312}
2313
2314static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) 2308static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2315{ 2309{
2316 u64 frequency = event->attr.sample_freq; 2310 u64 frequency = event->attr.sample_freq;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 13c38837f2cd..0533a688ce22 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1913,7 +1913,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
1913 struct task_struct *next) 1913 struct task_struct *next)
1914{ 1914{
1915 sched_info_switch(prev, next); 1915 sched_info_switch(prev, next);
1916 perf_event_task_sched(prev, next); 1916 perf_event_task_sched_out(prev, next);
1917 fire_sched_out_preempt_notifiers(prev, next); 1917 fire_sched_out_preempt_notifiers(prev, next);
1918 prepare_lock_switch(rq, next); 1918 prepare_lock_switch(rq, next);
1919 prepare_arch_switch(next); 1919 prepare_arch_switch(next);
@@ -1956,6 +1956,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
1956 */ 1956 */
1957 prev_state = prev->state; 1957 prev_state = prev->state;
1958 finish_arch_switch(prev); 1958 finish_arch_switch(prev);
1959#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1960 local_irq_disable();
1961#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
1962 perf_event_task_sched_in(prev, current);
1963#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1964 local_irq_enable();
1965#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
1959 finish_lock_switch(rq, prev); 1966 finish_lock_switch(rq, prev);
1960 finish_arch_post_lock_switch(); 1967 finish_arch_post_lock_switch();
1961 1968