diff options
author | Jiri Olsa <jolsa@redhat.com> | 2012-05-23 07:13:02 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2012-05-23 11:40:51 -0400 |
commit | ab0cce560ef177bdc7a8f73e9962be9d829a7b2c (patch) | |
tree | d9875b21c42862201624eb7d9ea2c36f4ea4035d /kernel | |
parent | 26252ea675663d1bc6747125fcaa2b7cc4ed8a03 (diff) |
Revert "sched, perf: Use a single callback into the scheduler"
This reverts commit cb04ff9ac424 ("sched, perf: Use a single
callback into the scheduler").
Before this change was introduced, the process switch worked
like this (wrt. perf event scheduling):
schedule (prev, next)
- schedule out all perf events for prev
- switch to next
- schedule in all perf events for current (next)
After the commit, the process switch looks like:
schedule (prev, next)
- schedule out all perf events for prev
- schedule in all perf events for (next)
- switch to next
The problem is that after we schedule perf events in, the pmu
is enabled and we can receive events even before we make the
switch to next - so "current" is still the prev process (event
SAMPLE data are filled in based on the value of the "current"
process).
That's exactly what we see for the test__PERF_RECORD test. We
receive SAMPLEs with the PID of the process that our tracee is
scheduled from.
Discussed with Peter Zijlstra:
> Bah!, yeah I guess reverting is the right thing for now. Sad
> though.
>
> So by having the two hooks we have a black-spot between them
> where we receive no events at all, this black-spot covers the
> hand-over of current and we thus don't receive the 'wrong'
> events.
>
> I rather liked we could do away with both that black-spot and
> clean up the code a little, but apparently people rely on it.
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: acme@redhat.com
Cc: paulus@samba.org
Cc: cjashfor@linux.vnet.ibm.com
Cc: fweisbec@gmail.com
Cc: eranian@google.com
Link: http://lkml.kernel.org/r/20120523111302.GC1638@m.brq.redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/events/core.c | 14 | ||||
-rw-r--r-- | kernel/sched/core.c | 9 |
2 files changed, 12 insertions, 11 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c index 91a445925855..5b06cbbf6931 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -2039,8 +2039,8 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn, | |||
2039 | * accessing the event control register. If a NMI hits, then it will | 2039 | * accessing the event control register. If a NMI hits, then it will |
2040 | * not restart the event. | 2040 | * not restart the event. |
2041 | */ | 2041 | */ |
2042 | static void __perf_event_task_sched_out(struct task_struct *task, | 2042 | void __perf_event_task_sched_out(struct task_struct *task, |
2043 | struct task_struct *next) | 2043 | struct task_struct *next) |
2044 | { | 2044 | { |
2045 | int ctxn; | 2045 | int ctxn; |
2046 | 2046 | ||
@@ -2279,8 +2279,8 @@ static void perf_branch_stack_sched_in(struct task_struct *prev, | |||
2279 | * accessing the event control register. If a NMI hits, then it will | 2279 | * accessing the event control register. If a NMI hits, then it will |
2280 | * keep the event running. | 2280 | * keep the event running. |
2281 | */ | 2281 | */ |
2282 | static void __perf_event_task_sched_in(struct task_struct *prev, | 2282 | void __perf_event_task_sched_in(struct task_struct *prev, |
2283 | struct task_struct *task) | 2283 | struct task_struct *task) |
2284 | { | 2284 | { |
2285 | struct perf_event_context *ctx; | 2285 | struct perf_event_context *ctx; |
2286 | int ctxn; | 2286 | int ctxn; |
@@ -2305,12 +2305,6 @@ static void __perf_event_task_sched_in(struct task_struct *prev, | |||
2305 | perf_branch_stack_sched_in(prev, task); | 2305 | perf_branch_stack_sched_in(prev, task); |
2306 | } | 2306 | } |
2307 | 2307 | ||
2308 | void __perf_event_task_sched(struct task_struct *prev, struct task_struct *next) | ||
2309 | { | ||
2310 | __perf_event_task_sched_out(prev, next); | ||
2311 | __perf_event_task_sched_in(prev, next); | ||
2312 | } | ||
2313 | |||
2314 | static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) | 2308 | static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) |
2315 | { | 2309 | { |
2316 | u64 frequency = event->attr.sample_freq; | 2310 | u64 frequency = event->attr.sample_freq; |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 13c38837f2cd..0533a688ce22 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -1913,7 +1913,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev, | |||
1913 | struct task_struct *next) | 1913 | struct task_struct *next) |
1914 | { | 1914 | { |
1915 | sched_info_switch(prev, next); | 1915 | sched_info_switch(prev, next); |
1916 | perf_event_task_sched(prev, next); | 1916 | perf_event_task_sched_out(prev, next); |
1917 | fire_sched_out_preempt_notifiers(prev, next); | 1917 | fire_sched_out_preempt_notifiers(prev, next); |
1918 | prepare_lock_switch(rq, next); | 1918 | prepare_lock_switch(rq, next); |
1919 | prepare_arch_switch(next); | 1919 | prepare_arch_switch(next); |
@@ -1956,6 +1956,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) | |||
1956 | */ | 1956 | */ |
1957 | prev_state = prev->state; | 1957 | prev_state = prev->state; |
1958 | finish_arch_switch(prev); | 1958 | finish_arch_switch(prev); |
1959 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW | ||
1960 | local_irq_disable(); | ||
1961 | #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ | ||
1962 | perf_event_task_sched_in(prev, current); | ||
1963 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW | ||
1964 | local_irq_enable(); | ||
1965 | #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ | ||
1959 | finish_lock_switch(rq, prev); | 1966 | finish_lock_switch(rq, prev); |
1960 | finish_arch_post_lock_switch(); | 1967 | finish_arch_post_lock_switch(); |
1961 | 1968 | ||