author    Ingo Molnar <mingo@elte.hu>    2007-07-09 12:51:59 -0400
committer Ingo Molnar <mingo@elte.hu>    2007-07-09 12:51:59 -0400
commit    14531189f0a1071b928586e9e1a89eceac91d95f (patch)
tree      bb5ddb4a284e42b76a2f9378788ddb0ec4c8689f
parent    71f8bd4600521fecb08644072052b85853a5a615 (diff)
sched: clean up __normal_prio() position
Clean up: move __normal_prio() ahead of normal_prio(). No code changed.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  kernel/sched.c  58
1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 683d2a524e61..5cd069b77fd7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -672,35 +672,6 @@ static inline void resched_task(struct task_struct *p)
 #include "sched_stats.h"
 
 /*
- * __normal_prio - return the priority that is based on the static
- * priority but is modified by bonuses/penalties.
- *
- * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
- * into the -5 ... 0 ... +5 bonus/penalty range.
- *
- * We use 25% of the full 0...39 priority range so that:
- *
- * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
- * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
- *
- * Both properties are important to certain workloads.
- */
-
-static inline int __normal_prio(struct task_struct *p)
-{
-	int bonus, prio;
-
-	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
-
-	prio = p->static_prio - bonus;
-	if (prio < MAX_RT_PRIO)
-		prio = MAX_RT_PRIO;
-	if (prio > MAX_PRIO-1)
-		prio = MAX_PRIO-1;
-	return prio;
-}
-
-/*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
  * of tasks with abnormal "nice" values across CPUs the contribution that
  * each task makes to its run queue's load is weighted according to its
@@ -803,6 +774,35 @@ enqueue_task_head(struct task_struct *p, struct prio_array *array)
 }
 
 /*
+ * __normal_prio - return the priority that is based on the static
+ * priority but is modified by bonuses/penalties.
+ *
+ * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
+ * into the -5 ... 0 ... +5 bonus/penalty range.
+ *
+ * We use 25% of the full 0...39 priority range so that:
+ *
+ * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
+ * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
+ *
+ * Both properties are important to certain workloads.
+ */
+
+static inline int __normal_prio(struct task_struct *p)
+{
+	int bonus, prio;
+
+	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
+
+	prio = p->static_prio - bonus;
+	if (prio < MAX_RT_PRIO)
+		prio = MAX_RT_PRIO;
+	if (prio > MAX_PRIO-1)
+		prio = MAX_PRIO-1;
+	return prio;
+}
+
+/*
  * Calculate the expected normal priority: i.e. priority
  * without taking RT-inheritance into account. Might be
  * boosted by interactivity modifiers. Changes upon fork,
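
For reference, the bonus/clamp arithmetic that the moved __normal_prio() performs can be exercised outside the kernel. Below is a minimal user-space sketch, assuming the representative O(1)-scheduler constants of this era (MAX_RT_PRIO = 100, MAX_PRIO = MAX_RT_PRIO + 40, MAX_BONUS = 10) and taking a precomputed bonus argument in place of the CURRENT_BONUS(p) sleep-average macro; normal_prio_sketch() is a hypothetical stand-in, not kernel code.

#include <stdio.h>

/* Assumed constants, matching the usual O(1)-scheduler values. */
#define MAX_RT_PRIO	100			/* priorities 0..99 are RT */
#define MAX_PRIO	(MAX_RT_PRIO + 40)	/* nice -20..+19 -> 100..139 */
#define MAX_BONUS	10			/* sleep-average bonus range 0..10 */

/*
 * Hypothetical stand-in for __normal_prio(): 'bonus' replaces
 * CURRENT_BONUS(p), which maps the task's sleep average to 0..MAX_BONUS.
 */
static int normal_prio_sketch(int static_prio, int bonus)
{
	int prio;

	/* center 0..10 into a -5..+5 bonus/penalty, as the comment describes */
	prio = static_prio - (bonus - MAX_BONUS / 2);

	/* clamp into the non-RT range 100..139 */
	if (prio < MAX_RT_PRIO)
		prio = MAX_RT_PRIO;
	if (prio > MAX_PRIO - 1)
		prio = MAX_PRIO - 1;
	return prio;
}

int main(void)
{
	/* property 1: best-case nice +19 vs. worst-case nice 0 */
	printf("nice +19, max bonus: %d\n", normal_prio_sketch(139, MAX_BONUS)); /* 134 */
	printf("nice   0, no bonus : %d\n", normal_prio_sketch(120, 0));          /* 125 */
	/* property 2: best-case nice 0 vs. worst-case nice -20 */
	printf("nice   0, max bonus: %d\n", normal_prio_sketch(120, MAX_BONUS));  /* 115 */
	printf("nice -20, no bonus : %d\n", normal_prio_sketch(100, 0));          /* 105 */
	return 0;
}

Since lower numbers mean stronger priority, 134 > 125 and 115 > 105 demonstrate the two properties the moved comment promises: a maximally interactive nice +19 task never outranks a bonus-less nice 0 CPU hog, and a nice 0 task never outranks a nice -20 CPU hog, because the +/-5 bonus spans only 25% of the 40-level nice range.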