author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-10-10 15:42:31 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-10-10 15:42:31 -0400 |
commit | b11ce8a26d26ed9019a8803aa90d580b52f23e79 (patch) | |
tree | 332f7b59487335229119c0ede371af3a9783d577 /kernel/sched_idletask.c | |
parent | f6bccf695431da0e9bd773550ae91b8cb9ffb227 (diff) | |
parent | a5d8c3483a6e19aca95ef6a2c5890e33bfa5b293 (diff) | |
Merge branch 'sched-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (38 commits)
sched debug: add name to sched_domain sysctl entries
sched: sync wakeups vs avg_overlap
sched: remove redundant code in cpu_cgroup_create()
sched_rt.c: resch needed in rt_rq_enqueue() for the root rt_rq
cpusets: scan_for_empty_cpusets(), cpuset doesn't seem to be so const
sched: minor optimizations in wake_affine and select_task_rq_fair
sched: maintain only task entities in cfs_rq->tasks list
sched: fixup buddy selection
sched: more sanity checks on the bandwidth settings
sched: add some comments to the bandwidth code
sched: fixlet for group load balance
sched: rework wakeup preemption
CFS scheduler: documentation about scheduling policies
sched: clarify ifdef tangle
sched: fix list traversal to use _rcu variant
sched: turn off WAKEUP_OVERLAP
sched: wakeup preempt when small overlap
kernel/cpu.c: create a CPU_STARTING cpu_chain notifier
kernel/cpu.c: Move the CPU_DYING notifiers
sched: fix __load_balance_iterator() for cfq with only one task
...
Diffstat (limited to 'kernel/sched_idletask.c')
-rw-r--r-- | kernel/sched_idletask.c | 6 |
1 file changed, 3 insertions, 3 deletions
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 3a4f92dbbe66..dec4ccabe2f5 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -14,7 +14,7 @@ static int select_task_rq_idle(struct task_struct *p, int sync)
 /*
  * Idle tasks are unconditionally rescheduled:
  */
-static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p)
+static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sync)
 {
 	resched_task(rq->idle);
 }
@@ -76,7 +76,7 @@ static void switched_to_idle(struct rq *rq, struct task_struct *p,
 	if (running)
 		resched_task(rq->curr);
 	else
-		check_preempt_curr(rq, p);
+		check_preempt_curr(rq, p, 0);
 }
 
 static void prio_changed_idle(struct rq *rq, struct task_struct *p,
@@ -93,7 +93,7 @@ static void prio_changed_idle(struct rq *rq, struct task_struct *p,
 	if (p->prio > oldprio)
 		resched_task(rq->curr);
 	} else
-		check_preempt_curr(rq, p);
+		check_preempt_curr(rq, p, 0);
 }
 
 /*
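The substance of these hunks is an interface change: check_preempt_curr() and the per-class callbacks behind it now carry a third `sync` argument (a synchronous-wakeup hint), and call sites that have no wakeup context pass 0. Below is a minimal, compilable userspace sketch of that callback shape; the struct layouts, the resched_task() stub, and the main() driver are simplified stand-ins for illustration, not the kernel's real definitions.

```c
#include <stdio.h>

/* Simplified stand-ins for the kernel's struct task_struct / struct rq. */
struct task_struct {
	int prio;
	const char *comm;
};

struct rq {
	struct task_struct *curr;
	struct task_struct *idle;
};

/* Stub: the real resched_task() marks the task for rescheduling. */
static void resched_task(struct task_struct *p)
{
	printf("resched_task(%s)\n", p->comm);
}

/*
 * After this merge the preemption-check callback carries a sync-wakeup
 * hint.  The idle class ignores it: anything runnable preempts idle.
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sync)
{
	(void)p;
	(void)sync;
	resched_task(rq->idle);
}

int main(void)
{
	struct task_struct idle = { .prio = 140, .comm = "swapper" };
	struct task_struct waking = { .prio = 120, .comm = "worker" };
	struct rq rq = { .curr = &idle, .idle = &idle };

	/* Callers with no wakeup context (e.g. priority changes) pass sync == 0. */
	check_preempt_curr_idle(&rq, &waking, 0);
	return 0;
}
```

The idle class simply discards the hint; in this series it is the fair class's wakeup-preemption path that actually consumes it (see the "sched: wakeup preempt when small overlap" and "sched: sync wakeups vs avg_overlap" entries in the commit list above).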