author     Ingo Molnar <mingo@elte.hu>             2005-09-10 03:26:16 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2005-09-10 13:06:23 -0400
commit     67f9a619e7460b7d07284a9d0745727a77d3ade6
tree       d76bcca7ad5e9430150ebcdb391180bf2e0878e8 /kernel
parent     d79fc0fc6645b0cf5cd980da76942ca6d6300fa4
[PATCH] sched: fix SMT scheduler latency bug
William Weston reported unusually high scheduling latencies on his x86 HT
box, on the -RT kernel. I managed to reproduce it on my HT box and the
latency tracer shows the incident in action:
_------=> CPU#
/ _-----=> irqs-off
| / _----=> need-resched
|| / _---=> hardirq/softirq
||| / _--=> preempt-depth
|||| /
||||| delay
cmd pid ||||| time | caller
\ / ||||| \ | /
du-2803 3Dnh2 0us : __trace_start_sched_wakeup (try_to_wake_up)
..............................................................
... we are running on CPU#3, PID 2778 gets woken to CPU#1: ...
..............................................................
du-2803 3Dnh2 0us : __trace_start_sched_wakeup <<...>-2778> (73 1)
du-2803 3Dnh2 0us : _raw_spin_unlock (try_to_wake_up)
................................................
... still on CPU#3, we send an IPI to CPU#1: ...
................................................
du-2803 3Dnh1 0us : resched_task (try_to_wake_up)
du-2803 3Dnh1 1us : smp_send_reschedule (try_to_wake_up)
du-2803 3Dnh1 1us : send_IPI_mask_bitmask (smp_send_reschedule)
du-2803 3Dnh1 2us : _raw_spin_unlock_irqrestore (try_to_wake_up)
...............................................
... 1 usec later, the IPI arrives on CPU#1: ...
...............................................
<idle>-0 1Dnh. 2us : smp_reschedule_interrupt (c0100c5a 0 0)
So far so good; this is the normal wakeup/preemption mechanism. But here
comes the scheduler anomaly on CPU#1:
<idle>-0 1Dnh. 2us : preempt_schedule_irq (need_resched)
<idle>-0 1Dnh. 2us : preempt_schedule_irq (need_resched)
<idle>-0 1Dnh. 3us : __schedule (preempt_schedule_irq)
<idle>-0 1Dnh. 3us : profile_hit (__schedule)
<idle>-0 1Dnh1 3us : sched_clock (__schedule)
<idle>-0 1Dnh1 4us : _raw_spin_lock_irq (__schedule)
<idle>-0 1Dnh1 4us : _raw_spin_lock_irqsave (__schedule)
<idle>-0 1Dnh2 5us : _raw_spin_unlock (__schedule)
<idle>-0 1Dnh1 5us : preempt_schedule (__schedule)
<idle>-0 1Dnh1 6us : _raw_spin_lock (__schedule)
<idle>-0 1Dnh2 6us : find_next_bit (__schedule)
<idle>-0 1Dnh2 6us : _raw_spin_lock (__schedule)
<idle>-0 1Dnh3 7us : find_next_bit (__schedule)
<idle>-0 1Dnh3 7us : find_next_bit (__schedule)
<idle>-0 1Dnh3 8us : _raw_spin_unlock (__schedule)
<idle>-0 1Dnh2 8us : preempt_schedule (__schedule)
<idle>-0 1Dnh2 8us : find_next_bit (__schedule)
<idle>-0 1Dnh2 9us : trace_stop_sched_switched (__schedule)
<idle>-0 1Dnh2 9us : _raw_spin_lock (trace_stop_sched_switched)
<idle>-0 1Dnh3 10us : trace_stop_sched_switched <<...>-2778> (73 8c)
<idle>-0 1Dnh3 10us : _raw_spin_unlock (trace_stop_sched_switched)
<idle>-0 1Dnh1 10us : _raw_spin_unlock (__schedule)
<idle>-0 1Dnh. 11us : local_irq_enable_noresched (preempt_schedule_irq)
<idle>-0 1Dnh. 11us < (0)
We didn't pick up pid 2778! It only gets scheduled much later:
<...>-2778 1Dnh2 412us : __switch_to (__schedule)
<...>-2778 1Dnh2 413us : __schedule <<idle>-0> (8c 73)
<...>-2778 1Dnh2 413us : _raw_spin_unlock (__schedule)
<...>-2778 1Dnh1 413us : trace_stop_sched_switched (__schedule)
<...>-2778 1Dnh1 414us : _raw_spin_lock (trace_stop_sched_switched)
<...>-2778 1Dnh2 414us : trace_stop_sched_switched <<...>-2778> (73 1)
<...>-2778 1Dnh2 414us : _raw_spin_unlock (trace_stop_sched_switched)
<...>-2778 1Dnh1 415us : trace_stop_sched_switched (__schedule)
The reason for this anomaly is the following code in dependent_sleeper():
        /*
         * If a user task with lower static priority than the
         * running task on the SMT sibling is trying to schedule,
         * delay it till there is proportionately less timeslice
         * left of the sibling task to prevent a lower priority
         * task from using an unfair proportion of the
         * physical cpu's resources. -ck
         */
        [...]
        if (((smt_curr->time_slice * (100 - sd->per_cpu_gain) /
                100) > task_timeslice(p)))
                        ret = 1;
Note that in contrast to the comment above, we don't actually do the check
based on static priority; we do the check based on timeslices. But
timeslices go up and down, and even highprio tasks can randomly have very
low timeslices (just before their next refill) and can thus be judged as
'lowprio' by the above piece of code. This condition is clearly buggy.
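To make the failure mode concrete, here is a minimal user-space sketch
(plain C, not kernel code) that plugs purely illustrative numbers into the
old condition; per_cpu_gain = 25 and both timeslice values are assumptions
for the example, not taken from the trace above:

#include <stdio.h>

int main(void)
{
        /* all values below are illustrative assumptions */
        unsigned long per_cpu_gain = 25;          /* assumed SMT per_cpu_gain, in percent */
        unsigned long smt_curr_time_slice = 600;  /* ms left on a higher-static-prio CPU hog */
        unsigned long task_timeslice_p = 100;     /* full timeslice of the waking task p */

        /* the old check: p is judged purely by timeslices, never by priority */
        if ((smt_curr_time_slice * (100 - per_cpu_gain) / 100) > task_timeslice_p)
                printf("p gets delayed (ret = 1), even if it would preempt the hog\n");
        else
                printf("p is allowed to schedule\n");
        return 0;
}

Here 600 * 75 / 100 = 450 > 100, so the waking task is delayed no matter
what its priority is.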
The correct test is to check for static_prio _and_ to check for the
preemption priority. Even on different static priority levels, a
higher-prio interactive task should not be delayed due to a
higher-static-prio CPU hog.
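Here 'preemption priority' means the dynamic-priority comparison the
scheduler already uses when deciding whether a freshly woken task preempts
the current one; assuming the usual O(1)-scheduler definition of the macro,
it boils down to:

/*
 * assumed definition, as in kernel/sched.c of this era: p preempts the
 * runqueue's current task iff its dynamic prio is better (numerically lower)
 */
#define TASK_PREEMPTS_CURR(p, rq) \
        ((p)->prio < (rq)->curr->prio)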
There is a symmetric bug in the 'kick SMT sibling' code of this function as
well, which can be solved in a similar way.
The patch below (against the current scheduler queue in -mm) fixes both
bugs. I have built and boot-tested this on x86 SMT, and nice +20 tasks
still get properly throttled - so the dependent-sleeper logic is still in
action.
btw., these bugs pessimised the SMT scheduler because the 'delay wakeup'
property was applied too liberally, so this fix is likely a throughput
improvement as well.
I separated out a smt_slice() function to make the code easier to read.
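As a quick sanity check of what smt_slice() models, here is a standalone
sketch with made-up numbers (200 ms of slice left and an assumed
per_cpu_gain of 25): the sibling is credited with only 75% of its remaining
slice.

#include <stdio.h>

/* same arithmetic as the new smt_slice() helper, on plain numbers */
static unsigned long smt_slice_ms(unsigned long time_slice_ms,
                                  unsigned long per_cpu_gain)
{
        return time_slice_ms * (100 - per_cpu_gain) / 100;
}

int main(void)
{
        /* illustrative values only: 200 ms left, assumed per_cpu_gain of 25 */
        printf("%lu ms\n", smt_slice_ms(200, 25));      /* prints "150 ms" */
        return 0;
}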
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
 kernel/sched.c | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6da13bba3e23..c61ee3451a04 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2650,6 +2650,16 @@ static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
          */
 }
 
+/*
+ * number of 'lost' timeslices this task wont be able to fully
+ * utilize, if another task runs on a sibling. This models the
+ * slowdown effect of other tasks running on siblings:
+ */
+static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
+{
+        return p->time_slice * (100 - sd->per_cpu_gain) / 100;
+}
+
 static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
 {
         struct sched_domain *tmp, *sd = NULL;
@@ -2714,8 +2724,9 @@ static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
                             (sd->per_cpu_gain * DEF_TIMESLICE / 100))
                                 ret = 1;
                 } else
-                        if (((smt_curr->time_slice * (100 - sd->per_cpu_gain) /
-                                100) > task_timeslice(p)))
+                        if (smt_curr->static_prio < p->static_prio &&
+                                !TASK_PREEMPTS_CURR(p, smt_rq) &&
+                                smt_slice(smt_curr, sd) > task_timeslice(p))
                                 ret = 1;
 
 check_smt_task:
@@ -2737,8 +2748,8 @@ check_smt_task:
                             (sd->per_cpu_gain * DEF_TIMESLICE / 100))
                                 resched_task(smt_curr);
                 } else {
-                        if ((p->time_slice * (100 - sd->per_cpu_gain) / 100) >
-                                task_timeslice(smt_curr))
+                        if (TASK_PREEMPTS_CURR(p, smt_rq) &&
+                                smt_slice(p, sd) > task_timeslice(smt_curr))
                                 resched_task(smt_curr);
                         else
                                 wakeup_busy_runqueue(smt_rq);