Diffstat (limited to 'kernel/sched.c')
-rw-r--r--   kernel/sched.c   47
1 file changed, 1 insertion(+), 46 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8e44da609c9b..f5a204b46655 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -894,7 +894,7 @@ static inline int __normal_prio(struct task_struct *p)
 {
 	int bonus, prio;
 
-	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
+	bonus = 0;
 
 	prio = p->static_prio - bonus;
 	if (prio < MAX_RT_PRIO)
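
With the bonus pinned at zero, __normal_prio() degenerates into clamping the task's static priority into the non-real-time range. A minimal standalone sketch of the resulting behaviour; the priority constants and the clamping below the visible context are assumptions based on the 2.6-era O(1) scheduler, not part of this diff:

/*
 * Sketch only: constants assumed from 2.6-era <linux/sched.h>,
 * not taken from this patch.
 */
#define MAX_RT_PRIO	100
#define MAX_PRIO	(MAX_RT_PRIO + 40)

static int normal_prio_sketch(int static_prio)
{
	int bonus = 0;			/* was CURRENT_BONUS(p) - MAX_BONUS / 2 */
	int prio = static_prio - bonus;

	if (prio < MAX_RT_PRIO)
		prio = MAX_RT_PRIO;
	if (prio > MAX_PRIO - 1)
		prio = MAX_PRIO - 1;
	return prio;
}

For a nice-0 task (static priority 120) this now always yields 120, however long the task has slept.
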
@@ -970,42 +970,6 @@ static inline void __activate_idle_task(struct task_struct *p, struct rq *rq)
  */
 static int recalc_task_prio(struct task_struct *p, unsigned long long now)
 {
-	/* Caller must always ensure 'now >= p->timestamp' */
-	unsigned long sleep_time = now - p->timestamp;
-
-	if (batch_task(p))
-		sleep_time = 0;
-
-	if (likely(sleep_time > 0)) {
-		/*
-		 * This ceiling is set to the lowest priority that would allow
-		 * a task to be reinserted into the active array on timeslice
-		 * completion.
-		 */
-		unsigned long ceiling = INTERACTIVE_SLEEP(p);
-
-		if (p->mm && sleep_time > ceiling && p->sleep_avg < ceiling) {
-			/*
-			 * Prevents user tasks from achieving best priority
-			 * with one single large enough sleep.
-			 */
-			p->sleep_avg = ceiling;
-		} else {
-			/*
-			 * This code gives a bonus to interactive tasks.
-			 *
-			 * The boost works by updating the 'average sleep time'
-			 * value here, based on ->timestamp. The more time a
-			 * task spends sleeping, the higher the average gets -
-			 * and the higher the priority boost gets as well.
-			 */
-			p->sleep_avg += sleep_time;
-
-		}
-		if (p->sleep_avg > NS_MAX_SLEEP_AVG)
-			p->sleep_avg = NS_MAX_SLEEP_AVG;
-	}
-
 	return effective_prio(p);
 }
 
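
The body removed here is the interactivity estimator itself: wall-clock time slept since ->timestamp is credited to ->sleep_avg, capped at INTERACTIVE_SLEEP(p) for user tasks so a single long sleep cannot jump straight to best priority, and clamped to NS_MAX_SLEEP_AVG; SCHED_BATCH tasks accrue nothing. A standalone sketch of that removed accounting, using hypothetical stand-ins for the kernel types and constants:

/*
 * Sketch only: the struct, the cap value and the ceiling parameter
 * are illustrative stand-ins, not the kernel's definitions.
 */
struct task_sketch {
	unsigned long long timestamp;	/* when the task last ran or slept */
	unsigned long sleep_avg;	/* rolling "average sleep time" (ns) */
	int is_batch;			/* SCHED_BATCH: never gets a boost */
	int has_mm;			/* user task, i.e. p->mm != NULL */
};

#define SKETCH_NS_MAX_SLEEP_AVG	1000000000UL	/* placeholder cap */

static void credit_sleep_sketch(struct task_sketch *p, unsigned long long now,
				unsigned long ceiling)
{
	unsigned long sleep_time = now - p->timestamp;

	if (p->is_batch)
		sleep_time = 0;
	if (sleep_time == 0)
		return;

	if (p->has_mm && sleep_time > ceiling && p->sleep_avg < ceiling)
		/* one huge sleep must not reach best priority in one step */
		p->sleep_avg = ceiling;
	else
		/* the longer the sleep, the bigger the later priority bonus */
		p->sleep_avg += sleep_time;

	if (p->sleep_avg > SKETCH_NS_MAX_SLEEP_AVG)
		p->sleep_avg = SKETCH_NS_MAX_SLEEP_AVG;
}

After this hunk only the return effective_prio(p) line remains, so a task's sleep history no longer feeds its dynamic priority here.
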
@@ -3560,9 +3524,6 @@ switch_tasks:
 	clear_tsk_need_resched(prev);
 	rcu_qsctr_inc(task_cpu(prev));
 
-	prev->sleep_avg -= run_time;
-	if ((long)prev->sleep_avg <= 0)
-		prev->sleep_avg = 0;
 	prev->timestamp = prev->last_ran = now;
 
 	sched_info_switch(prev, next);
@@ -4204,11 +4165,6 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
 	p->normal_prio = normal_prio(p);
 	/* we are holding p->pi_lock already */
 	p->prio = rt_mutex_getprio(p);
-	/*
-	 * SCHED_BATCH tasks are treated as perpetual CPU hogs:
-	 */
-	if (policy == SCHED_BATCH)
-		p->sleep_avg = 0;
 	set_load_weight(p);
 }
 
@@ -4931,7 +4887,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	unsigned long flags;
 
 	idle->timestamp = sched_clock();
-	idle->sleep_avg = 0;
 	idle->array = NULL;
 	idle->prio = idle->normal_prio = MAX_PRIO;
 	idle->state = TASK_RUNNING;
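
The last three hunks remove writers of ->sleep_avg rather than readers: schedule() stops charging run_time against it at context switch, __setscheduler() stops zeroing it for SCHED_BATCH tasks, and init_idle() stops initialising it for the idle task. With the bonus forced to zero and recalc_task_prio() reduced to effective_prio(), these updates no longer influence the priority calculation touched above, which is presumably why they are dropped together with it.
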