summaryrefslogtreecommitdiffstats
path: root/kernel/sched
diff options
context:
space:
mode:
authorHenrik Austad <henrik@austad.us>2015-09-09 11:00:41 -0400
committerIngo Molnar <mingo@kernel.org>2015-09-18 03:23:13 -0400
commit20f9cd2acb1d74a8bf4b4087267f586e6ecdbc03 (patch)
tree0fbd33d1065457c055134c5149bc2650491291a8 /kernel/sched
parent0c6a5b4319928b769ba81eff45bd679737a29ba1 (diff)
sched/core: Make policy-testing consistent
Most of the policy-tests are done via the <class>_policy() helpers with
the notable exception of idle. A new wrapper for valid_policy() has also
been added to improve readability in set_load_weight(). This commit does
not change the logical behavior of the scheduler core.

Signed-off-by: Henrik Austad <henrik@austad.us>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1441810841-4756-1-git-send-email-henrik@austad.us
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/core.c9
-rw-r--r--kernel/sched/sched.h9
2 files changed, 12 insertions, 6 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6ab415aa15c4..1b30b5b24a4a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -817,7 +817,7 @@ static void set_load_weight(struct task_struct *p)
 	/*
 	 * SCHED_IDLE tasks get minimal weight:
 	 */
-	if (p->policy == SCHED_IDLE) {
+	if (idle_policy(p->policy)) {
 		load->weight = scale_load(WEIGHT_IDLEPRIO);
 		load->inv_weight = WMULT_IDLEPRIO;
 		return;
@@ -3733,10 +3733,7 @@ recheck:
 	} else {
 		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
 
-		if (policy != SCHED_DEADLINE &&
-		    policy != SCHED_FIFO && policy != SCHED_RR &&
-		    policy != SCHED_NORMAL && policy != SCHED_BATCH &&
-		    policy != SCHED_IDLE)
+		if (!valid_policy(policy))
 			return -EINVAL;
 	}
 
@@ -3792,7 +3789,7 @@ recheck:
 		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
 		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
 		 */
-		if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
+		if (idle_policy(p->policy) && !idle_policy(policy)) {
 			if (!can_nice(p, task_nice(p)))
 				return -EPERM;
 		}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 167ab4844ee6..3845a711c65e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -84,6 +84,10 @@ static inline void update_cpu_load_active(struct rq *this_rq) { }
  */
 #define RUNTIME_INF	((u64)~0ULL)
 
+static inline int idle_policy(int policy)
+{
+	return policy == SCHED_IDLE;
+}
 static inline int fair_policy(int policy)
 {
 	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
@@ -98,6 +102,11 @@ static inline int dl_policy(int policy)
 {
 	return policy == SCHED_DEADLINE;
 }
+static inline bool valid_policy(int policy)
+{
+	return idle_policy(policy) || fair_policy(policy) ||
+		rt_policy(policy) || dl_policy(policy);
+}
 
 static inline int task_has_rt_policy(struct task_struct *p)
 {