Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c  52
1 file changed, 27 insertions, 25 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 582faed3d360..cb146219d532 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -686,33 +686,35 @@ static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
 static int recalc_task_prio(task_t *p, unsigned long long now)
 {
 	/* Caller must always ensure 'now >= p->timestamp' */
-	unsigned long long __sleep_time = now - p->timestamp;
-	unsigned long sleep_time;
+	unsigned long sleep_time = now - p->timestamp;
 
 	if (batch_task(p))
 		sleep_time = 0;
-	else {
-		if (__sleep_time > NS_MAX_SLEEP_AVG)
-			sleep_time = NS_MAX_SLEEP_AVG;
-		else
-			sleep_time = (unsigned long)__sleep_time;
-	}
 
 	if (likely(sleep_time > 0)) {
 		/*
-		 * User tasks that sleep a long time are categorised as
-		 * idle. They will only have their sleep_avg increased to a
-		 * level that makes them just interactive priority to stay
-		 * active yet prevent them suddenly becoming cpu hogs and
-		 * starving other processes.
+		 * This ceiling is set to the lowest priority that would allow
+		 * a task to be reinserted into the active array on timeslice
+		 * completion.
 		 */
-		if (p->mm && sleep_time > INTERACTIVE_SLEEP(p)) {
-			unsigned long ceiling;
-
-			ceiling = JIFFIES_TO_NS(MAX_SLEEP_AVG -
-				DEF_TIMESLICE);
-			if (p->sleep_avg < ceiling)
-				p->sleep_avg = ceiling;
+		unsigned long ceiling = INTERACTIVE_SLEEP(p);
+
+		if (p->mm && sleep_time > ceiling && p->sleep_avg < ceiling) {
+			/*
+			 * Prevents user tasks from achieving best priority
+			 * with one single large enough sleep.
+			 */
+			p->sleep_avg = ceiling;
+			/*
+			 * Using INTERACTIVE_SLEEP() as a ceiling places a
+			 * nice(0) task 1ms sleep away from promotion, and
+			 * gives it 700ms to round-robin with no chance of
+			 * being demoted. This is more than generous, so
+			 * mark this sleep as non-interactive to prevent the
+			 * on-runqueue bonus logic from intervening should
+			 * this task not receive cpu immediately.
+			 */
+			p->sleep_type = SLEEP_NONINTERACTIVE;
 		} else {
 			/*
 			 * Tasks waking from uninterruptible sleep are
@@ -720,12 +722,12 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
 			 * are likely to be waiting on I/O
 			 */
 			if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
-				if (p->sleep_avg >= INTERACTIVE_SLEEP(p))
+				if (p->sleep_avg >= ceiling)
 					sleep_time = 0;
 				else if (p->sleep_avg + sleep_time >=
-						INTERACTIVE_SLEEP(p)) {
-					p->sleep_avg = INTERACTIVE_SLEEP(p);
+					 ceiling) {
+					p->sleep_avg = ceiling;
 					sleep_time = 0;
 				}
 			}
 
@@ -739,9 +741,9 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
 			 */
 			p->sleep_avg += sleep_time;
 
-			if (p->sleep_avg > NS_MAX_SLEEP_AVG)
-				p->sleep_avg = NS_MAX_SLEEP_AVG;
 		}
+		if (p->sleep_avg > NS_MAX_SLEEP_AVG)
+			p->sleep_avg = NS_MAX_SLEEP_AVG;
 	}
 
 	return effective_prio(p);
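
The net effect of the three hunks is easiest to see in isolation: a single very long sleep now lifts sleep_avg only to the INTERACTIVE_SLEEP(p) ceiling rather than JIFFIES_TO_NS(MAX_SLEEP_AVG - DEF_TIMESLICE), and the NS_MAX_SLEEP_AVG clamp now runs after both branches instead of only the else path. The standalone model below is a minimal sketch of that behaviour, not kernel code: apply_sleep(), CEILING_NS and NS_MAX_SLEEP are invented stand-ins for recalc_task_prio(), INTERACTIVE_SLEEP(p) and NS_MAX_SLEEP_AVG, the constants are arbitrary example values, and the batch-task, p->mm and SLEEP_NONINTERACTIVE paths are left out.

#include <stdio.h>

/* Invented stand-in constants; the kernel derives the real values from
 * the task's static priority and HZ. */
#define CEILING_NS   900000000ULL   /* stand-in for INTERACTIVE_SLEEP(p) */
#define NS_MAX_SLEEP 1000000000ULL  /* stand-in for NS_MAX_SLEEP_AVG */

/* Apply one sleep interval to sleep_avg the way the patched code does:
 * a single sleep longer than the ceiling raises sleep_avg only to the
 * ceiling, and the global cap applies on every path. */
static unsigned long long apply_sleep(unsigned long long sleep_avg,
				      unsigned long long sleep_time)
{
	if (sleep_time > CEILING_NS && sleep_avg < CEILING_NS)
		return CEILING_NS;        /* one big sleep: clamp to ceiling */
	sleep_avg += sleep_time;          /* otherwise accumulate normally */
	if (sleep_avg > NS_MAX_SLEEP)     /* cap now hit on both branches */
		sleep_avg = NS_MAX_SLEEP;
	return sleep_avg;
}

int main(void)
{
	/* A fresh task sleeping 5s is held at the ceiling, not the max. */
	printf("%llu\n", apply_sleep(0ULL, 5000000000ULL));        /* 900000000 */
	/* Short sleeps still accumulate up to the cap as before. */
	printf("%llu\n", apply_sleep(950000000ULL, 100000000ULL)); /* 1000000000 */
	return 0;
}

Moving the NS_MAX_SLEEP_AVG clamp out of the else branch is also what lets the old __sleep_time pre-clamp go away: sleep_avg is now capped after accumulation on every path, so no separate bound on the raw sleep interval is needed.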