author     Ingo Molnar <mingo@elte.hu>    2007-07-09 12:51:59 -0400
committer  Ingo Molnar <mingo@elte.hu>    2007-07-09 12:51:59 -0400
commit     f2ac58ee617fd9f6cd9922fbcd291b661d7c9954
tree       6eee6329575aafba0efe055b8f8f3c39674b1196
parent     45bf76df4814a4cd1c57226ae001c464467cb656
sched: remove sleep_type
remove the sleep_type heuristics from the core scheduler - scheduling
policy is implemented in the scheduling-policy modules. (and CFS does
not use this type of sleep-type heuristics)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c | 91
1 file changed, 2 insertions(+), 89 deletions(-)
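For context, sleep_type is a per-task tag from the pre-CFS O(1) scheduler, used to weight interactivity credit. The enum itself lives in include/linux/sched.h, so its removal does not show up in this kernel/-limited diffstat. The sketch below is a from-memory reconstruction of that era's definition, not part of this diff:

/*
 * From-memory sketch of the pre-CFS definition in include/linux/sched.h;
 * this patch removes its users in kernel/sched.c, and the enum itself
 * goes away on the sched.h side of the series (not shown here).
 */
enum sleep_type {
	SLEEP_NORMAL,		/* default tag while running */
	SLEEP_NONINTERACTIVE,	/* involuntary sleep, e.g. blocked on I/O */
	SLEEP_INTERACTIVE,	/* ordinary wakeup; credited for runqueue wait */
	SLEEP_INTERRUPTED,	/* woken by an interrupt; treated as interactive */
};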
diff --git a/kernel/sched.c b/kernel/sched.c
index 6e5a89ba4f76..26795adab3ad 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -990,33 +990,8 @@ static int recalc_task_prio(struct task_struct *p, unsigned long long now)
 			 * with one single large enough sleep.
 			 */
 			p->sleep_avg = ceiling;
-			/*
-			 * Using INTERACTIVE_SLEEP() as a ceiling places a
-			 * nice(0) task 1ms sleep away from promotion, and
-			 * gives it 700ms to round-robin with no chance of
-			 * being demoted. This is more than generous, so
-			 * mark this sleep as non-interactive to prevent the
-			 * on-runqueue bonus logic from intervening should
-			 * this task not receive cpu immediately.
-			 */
-			p->sleep_type = SLEEP_NONINTERACTIVE;
 		} else {
 			/*
-			 * Tasks waking from uninterruptible sleep are
-			 * limited in their sleep_avg rise as they
-			 * are likely to be waiting on I/O
-			 */
-			if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
-				if (p->sleep_avg >= ceiling)
-					sleep_time = 0;
-				else if (p->sleep_avg + sleep_time >=
-					 ceiling) {
-					p->sleep_avg = ceiling;
-					sleep_time = 0;
-				}
-			}
-
-			/*
 			 * This code gives a bonus to interactive tasks.
 			 *
 			 * The boost works by updating the 'average sleep time'
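The block deleted above capped the sleep credit of tasks whose last sleep was flagged non-interactive. A minimal standalone paraphrase of that clamp follows; the helper name and shape are invented for illustration, and the original additionally required p->mm (skipping kernel threads) and mutated p->sleep_avg and sleep_time in place:

/*
 * Standalone paraphrase of the clamp deleted above: a task whose last
 * sleep was flagged non-interactive may not ride its sleep credit past
 * the interactivity ceiling. Returns the sleep time actually credited.
 */
static unsigned long clamp_noninteractive_sleep(unsigned long *sleep_avg,
						unsigned long sleep_time,
						unsigned long ceiling)
{
	if (*sleep_avg >= ceiling)
		return 0;			/* already capped: no credit */
	if (*sleep_avg + sleep_time >= ceiling) {
		*sleep_avg = ceiling;		/* land exactly on the cap */
		return 0;
	}
	return sleep_time;			/* below the cap: full credit */
}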
@@ -1069,29 +1044,6 @@ static void activate_task(struct task_struct *p, struct rq *rq, int local)
 	}
 
 	p->prio = recalc_task_prio(p, now);
-
-	/*
-	 * This checks to make sure it's not an uninterruptible task
-	 * that is now waking up.
-	 */
-	if (p->sleep_type == SLEEP_NORMAL) {
-		/*
-		 * Tasks which were woken up by interrupts (ie. hw events)
-		 * are most likely of interactive nature. So we give them
-		 * the credit of extending their sleep time to the period
-		 * of time they spend on the runqueue, waiting for execution
-		 * on a CPU, first time around:
-		 */
-		if (in_interrupt())
-			p->sleep_type = SLEEP_INTERRUPTED;
-		else {
-			/*
-			 * Normal first-time wakeups get a credit too for
-			 * on-runqueue time, but it will be weighted down:
-			 */
-			p->sleep_type = SLEEP_INTERACTIVE;
-		}
-	}
 	p->timestamp = now;
 out:
 	__activate_task(p, rq);
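The hunk above deletes wakeup classification from activate_task(): a task still tagged SLEEP_NORMAL was re-tagged according to what woke it, so interrupt-driven (hardware-event) wakeups earned full credit for their subsequent runqueue wait, while normal wakeups earned a weighted-down credit. Condensed to its decision (the standalone helper shape is invented; in_interrupt() is the kernel's real "running in hard/soft interrupt context?" predicate):

/*
 * Condensed decision from the classification deleted above: a wakeup
 * issued from interrupt context is presumed interactive in nature and
 * gets the stronger credit tag.
 */
static enum sleep_type classify_wakeup(void)
{
	return in_interrupt() ? SLEEP_INTERRUPTED : SLEEP_INTERACTIVE;
}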
@@ -1641,23 +1593,8 @@ out_set_cpu:
 
 out_activate:
 #endif /* CONFIG_SMP */
-	if (old_state == TASK_UNINTERRUPTIBLE) {
+	if (old_state == TASK_UNINTERRUPTIBLE)
 		rq->nr_uninterruptible--;
-		/*
-		 * Tasks on involuntary sleep don't earn
-		 * sleep_avg beyond just interactive state.
-		 */
-		p->sleep_type = SLEEP_NONINTERACTIVE;
-	} else
-
-	/*
-	 * Tasks that have marked their sleep as noninteractive get
-	 * woken up with their sleep average not weighted in an
-	 * interactive way.
-	 */
-		if (old_state & TASK_NONINTERACTIVE)
-			p->sleep_type = SLEEP_NONINTERACTIVE;
-
 
 	activate_task(p, rq, cpu == this_cpu);
 	/*
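The deleted else branch honoured TASK_NONINTERACTIVE, a state flag a sleeper could OR into its sleep state to opt out of interactivity credit at the wait site. A hypothetical wait site follows, using the standard wait-queue API of the time; the waitqueue and condition names are invented for illustration:

/*
 * Hypothetical wait site: sleep interruptibly, but tell the O(1)
 * scheduler not to weight this sleep as interactive on wakeup.
 */
DEFINE_WAIT(wait);

prepare_to_wait(&my_waitqueue, &wait,
		TASK_INTERRUPTIBLE | TASK_NONINTERACTIVE);
if (!my_condition)
	schedule();
finish_wait(&my_waitqueue, &wait);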
@@ -3533,12 +3470,6 @@ EXPORT_SYMBOL(sub_preempt_count);
 
 #endif
 
-static inline int interactive_sleep(enum sleep_type sleep_type)
-{
-	return (sleep_type == SLEEP_INTERACTIVE ||
-		sleep_type == SLEEP_INTERRUPTED);
-}
-
 /*
  * schedule() is the main scheduler function.
  */
@@ -3549,7 +3480,7 @@ asmlinkage void __sched schedule(void)
 	struct list_head *queue;
 	unsigned long long now;
 	unsigned long run_time;
-	int cpu, idx, new_prio;
+	int cpu, idx;
 	long *switch_count;
 	struct rq *rq;
 
@@ -3642,24 +3573,6 @@ need_resched_nonpreemptible:
 	queue = array->queue + idx;
 	next = list_entry(queue->next, struct task_struct, run_list);
 
-	if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
-		unsigned long long delta = now - next->timestamp;
-		if (unlikely((long long)(now - next->timestamp) < 0))
-			delta = 0;
-
-		if (next->sleep_type == SLEEP_INTERACTIVE)
-			delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
-
-		array = next->array;
-		new_prio = recalc_task_prio(next, next->timestamp + delta);
-
-		if (unlikely(next->prio != new_prio)) {
-			dequeue_task(next, array);
-			next->prio = new_prio;
-			enqueue_task(next, array);
-		}
-	}
-	next->sleep_type = SLEEP_NORMAL;
 switch_tasks:
 	if (next == rq->idle)
 		schedstat_inc(rq, sched_goidle);
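This last hunk removes the requeue pass from schedule(): before switching, a task that had slept interactively got its runqueue wait folded back into recalc_task_prio(), with SLEEP_INTERACTIVE wait scaled down via ON_RUNQUEUE_WEIGHT using fixed-point integer math. A small userspace demonstration of that scaling follows; it assumes ON_RUNQUEUE_WEIGHT was 30 in that era's kernel/sched.c, which is worth checking against the pre-patch source:

#include <stdio.h>

/*
 * Demonstration of the fixed-point scaling in the deleted block:
 *     delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
 * 30 * 128 / 100 = 38 in integer math, so the credit is
 * delta * 38 / 128, roughly 29.7% of the wait: close to the intended
 * 30% without using floating point in the kernel.
 */
#define ON_RUNQUEUE_WEIGHT 30	/* assumed value from that era's sched.c */

int main(void)
{
	unsigned long long delta = 1000000ULL;	/* 1 ms of runqueue wait, in ns */
	unsigned long long credited =
		delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;

	printf("wait %llu ns -> credited %llu ns (~%.1f%%)\n",
	       delta, credited, 100.0 * credited / delta);
	return 0;
}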