| author | Nick Piggin <npiggin@suse.de> | 2006-10-11 04:21:52 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-10-11 14:14:22 -0400 |
| commit | beed33a816204cb402c69266475b6a60a2433ceb (patch) | |
| tree | 4eaa7e5a1ccf2960d1478774cdfcab671384accb /kernel | |
| parent | f33d9bd50478c9a969b65f58feb6b69a3ad478cb (diff) | |
[PATCH] sched: likely profiling
This likely profiling is pretty fun. I found a few possible problems
in sched.c.

This patch may not be measurable on its own, but when I measured long
ago, no-op'ing the (un)likely hints cost a couple of percent on
scheduler-heavy benchmarks, so it all adds up.
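For context, the kernel's branch hints are thin wrappers around GCC's __builtin_expect(), defined in include/linux/compiler.h. A minimal user-space sketch of the idea (not the kernel's exact definitions) looks like this:

```c
#include <stdio.h>

/* Sketch of the kernel's branch hints: __builtin_expect() tells GCC
 * which way a branch usually goes, so it can keep the expected path
 * as straight-line fall-through code. */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

static int process_item(int err)
{
	if (unlikely(err)) {		/* error path, moved off the hot path */
		fprintf(stderr, "error %d\n", err);
		return -1;
	}
	return 0;			/* expected case: falls straight through */
}

int main(void)
{
	return process_item(0);
}
```

A wrong hint is worse than none, since the compiler pessimizes the path that actually runs; catching such hints is exactly what profiling the annotations is for.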
Tweak some branch hints:

- The 2nd 64 bits of the bitmask are likely to be populated, because
  they contain the first 28 bits (nearly 3/4) of the normal priorities
  (ratio of 669669:691 ~= 1000:1); see the bitmap sketch after this list.
- It isn't unlikely that a context switch switches to another process:
  we might be switching very rapidly to and from the idle process
  (ratios of 475815:419004 and 471330:423544). Let the branch predictor
  decide.
- preempt_enable is very often called under a nested preempt_disable or
  with interrupts disabled (ratio of 3567760:87965 ~= 40:1).
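The first bullet refers to the O(1) scheduler's 140-bit priority bitmap (MAX_PRIO is 140; priorities 0-99 are realtime, 100-139 are normal). A user-space sketch of the lookup, modeled on sched_find_first_bit() from include/asm-generic/bitops/sched.h (simplified, not verbatim; the helper name here is mine), shows why the second 64-bit word is the expected hit when only normal-priority tasks are runnable:

```c
#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* The 140 priority bits span three 64-bit words. Word 0 holds only
 * realtime priorities (0-63); word 1 holds priorities 64-127, which
 * includes 28 of the 40 normal levels (100-127) -- so with no RT task
 * runnable, word 1 is the common hit, hence likely(b[1]).
 * Assumes at least one bit is set (the scheduler only scans the
 * bitmap when a runnable task exists). */
static int find_first_set_prio(const unsigned long b[3])
{
	if (unlikely(b[0]))			/* RT priorities 0-63 */
		return __builtin_ctzl(b[0]);
	if (likely(b[1]))			/* priorities 64-127 */
		return __builtin_ctzl(b[1]) + 64;
	return __builtin_ctzl(b[2]) + 128;	/* priorities 128-139 */
}

int main(void)
{
	/* one normal-priority task at prio 120 */
	unsigned long b[3] = { 0, 1UL << (120 - 64), 0 };
	printf("first runnable prio: %d\n", find_first_set_prio(b));
	return 0;
}
```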
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Daniel Walker <dwalker@mvista.com>
Cc: Hua Zhong <hzhong@gmail.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
 kernel/sched.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 53608a59d6e3..094b5687eef6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1822,14 +1822,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	struct mm_struct *mm = next->mm;
 	struct mm_struct *oldmm = prev->active_mm;
 
-	if (unlikely(!mm)) {
+	if (!mm) {
 		next->active_mm = oldmm;
 		atomic_inc(&oldmm->mm_count);
 		enter_lazy_tlb(oldmm, next);
 	} else
 		switch_mm(oldmm, mm, next);
 
-	if (unlikely(!prev->mm)) {
+	if (!prev->mm) {
 		prev->active_mm = NULL;
 		WARN_ON(rq->prev_mm);
 		rq->prev_mm = oldmm;
@@ -3491,7 +3491,7 @@ asmlinkage void __sched preempt_schedule(void)
 	 * If there is a non-zero preempt_count or interrupts are disabled,
 	 * we do not want to preempt the current task. Just return..
 	 */
-	if (unlikely(ti->preempt_count || irqs_disabled()))
+	if (likely(ti->preempt_count || irqs_disabled()))
 		return;
 
 need_resched:
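The ratios quoted in the message come from counting, at run time, how often each annotated branch actually went the hinted way. A hypothetical user-space sketch of that counting idea follows; the identifiers branch_stat and likely_check are illustrative, not taken from the real likely-profiling patch:

```c
#include <stdio.h>

/* Hypothetical instrumentation sketch: wrap a hinted condition so
 * every evaluation bumps a taken/not-taken counter, then compare the
 * counts against the annotation. A likely() whose "not taken" count
 * dominates (or an unlikely() whose "taken" count does) is a bad hint. */
struct branch_stat {
	const char *site;
	unsigned long taken, not_taken;
};

static int likely_check(int cond, struct branch_stat *st)
{
	if (cond)
		st->taken++;
	else
		st->not_taken++;
	return cond;
}

int main(void)
{
	static struct branch_stat st = {
		.site = "preempt_count || irqs_disabled",
	};
	int i;

	for (i = 0; i < 1000; i++)
		likely_check(i % 40 != 0, &st);	/* roughly 40:1, like the quote */
	printf("%s: %lu:%lu\n", st.site, st.taken, st.not_taken);
	return 0;
}
```

By this measure, the preempt_schedule() early return fires about 40 times for every real preemption, which is why the patch flips its hint from unlikely() to likely().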