aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorAndrew Morton <akpm@linux-foundation.org>2007-05-08 23:27:06 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-08 23:41:15 -0400
commitd5f9f942c601fdebe57f7805e4b4fbad9c28ada8 (patch)
tree6a212ceabd844fbaf517a13a9f885be5ab1c59b5 /kernel
parentda4e8ca376a1b3dca470eba14dcec321a6a27b8b (diff)
revert 'sched: redundant reschedule when set_user_nice() boosts a prio of a task from the "expired" array'
Revert commit bd53f96ca54a21c07e7a0ae1886fa623d370b85f.

Con says:

This is no good, sorry. The one I saw originally was with the staircase
deadline cpu scheduler in situ and was different.

 #define TASK_PREEMPTS_CURR(p, rq) \
-	((p)->prio < (rq)->curr->prio)
+	(((p)->prio < (rq)->curr->prio) && ((p)->array == (rq)->active))

This will fail to wake up a runqueue for a task that has been migrated to
the expired array of a runqueue which is otherwise idle, which can happen
with smp balancing.

Cc: Dmitry Adamushko <dmitry.adamushko@gmail.com> Cc: Con Kolivas <kernel@kolivas.org> Cc: Ingo Molnar <mingo@elte.hu> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched.c34
1 file changed, 18 insertions, 16 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index a3a04085e794..66bd7ff23f18 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -169,7 +169,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
169 (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1)) 169 (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
170 170
171#define TASK_PREEMPTS_CURR(p, rq) \ 171#define TASK_PREEMPTS_CURR(p, rq) \
172 (((p)->prio < (rq)->curr->prio) && ((p)->array == (rq)->active)) 172 ((p)->prio < (rq)->curr->prio)
173 173
174#define SCALE_PRIO(x, prio) \ 174#define SCALE_PRIO(x, prio) \
175 max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE) 175 max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
@@ -4076,13 +4076,13 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
4076 struct prio_array *array; 4076 struct prio_array *array;
4077 unsigned long flags; 4077 unsigned long flags;
4078 struct rq *rq; 4078 struct rq *rq;
4079 int delta; 4079 int oldprio;
4080 4080
4081 BUG_ON(prio < 0 || prio > MAX_PRIO); 4081 BUG_ON(prio < 0 || prio > MAX_PRIO);
4082 4082
4083 rq = task_rq_lock(p, &flags); 4083 rq = task_rq_lock(p, &flags);
4084 4084
4085 delta = prio - p->prio; 4085 oldprio = p->prio;
4086 array = p->array; 4086 array = p->array;
4087 if (array) 4087 if (array)
4088 dequeue_task(p, array); 4088 dequeue_task(p, array);
@@ -4098,11 +4098,13 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
4098 enqueue_task(p, array); 4098 enqueue_task(p, array);
4099 /* 4099 /*
4100 * Reschedule if we are currently running on this runqueue and 4100 * Reschedule if we are currently running on this runqueue and
4101 * our priority decreased, or if our priority became higher 4101 * our priority decreased, or if we are not currently running on
4102 * than the current's. 4102 * this runqueue and our priority is higher than the current's
4103 */ 4103 */
4104 if (TASK_PREEMPTS_CURR(p, rq) || 4104 if (task_running(rq, p)) {
4105 (delta > 0 && task_running(rq, p))) 4105 if (p->prio > oldprio)
4106 resched_task(rq->curr);
4107 } else if (TASK_PREEMPTS_CURR(p, rq))
4106 resched_task(rq->curr); 4108 resched_task(rq->curr);
4107 } 4109 }
4108 task_rq_unlock(rq, &flags); 4110 task_rq_unlock(rq, &flags);
@@ -4150,12 +4152,10 @@ void set_user_nice(struct task_struct *p, long nice)
4150 enqueue_task(p, array); 4152 enqueue_task(p, array);
4151 inc_raw_weighted_load(rq, p); 4153 inc_raw_weighted_load(rq, p);
4152 /* 4154 /*
4153 * Reschedule if we are currently running on this runqueue and 4155 * If the task increased its priority or is running and
4154 * our priority decreased, or if our priority became higher 4156 * lowered its priority, then reschedule its CPU:
4155 * than the current's.
4156 */ 4157 */
4157 if (TASK_PREEMPTS_CURR(p, rq) || 4158 if (delta < 0 || (delta > 0 && task_running(rq, p)))
4158 (delta > 0 && task_running(rq, p)))
4159 resched_task(rq->curr); 4159 resched_task(rq->curr);
4160 } 4160 }
4161out_unlock: 4161out_unlock:
@@ -4382,11 +4382,13 @@ recheck:
4382 __activate_task(p, rq); 4382 __activate_task(p, rq);
4383 /* 4383 /*
4384 * Reschedule if we are currently running on this runqueue and 4384 * Reschedule if we are currently running on this runqueue and
4385 * our priority decreased, or our priority became higher 4385 * our priority decreased, or if we are not currently running on
4386 * than the current's. 4386 * this runqueue and our priority is higher than the current's
4387 */ 4387 */
4388 if (TASK_PREEMPTS_CURR(p, rq) || 4388 if (task_running(rq, p)) {
4389 (task_running(rq, p) && p->prio > oldprio)) 4389 if (p->prio > oldprio)
4390 resched_task(rq->curr);
4391 } else if (TASK_PREEMPTS_CURR(p, rq))
4390 resched_task(rq->curr); 4392 resched_task(rq->curr);
4391 } 4393 }
4392 __task_rq_unlock(rq); 4394 __task_rq_unlock(rq);