about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2007-07-09 12:51:59 -0400
committerIngo Molnar <mingo@elte.hu>2007-07-09 12:51:59 -0400
commite05606d3301525aa67b081ad9fccade2b31ab35a (patch)
tree2a3e5a477dfca70ce32f3ea8dbc5e16034c98c23
parent138a8aeb5b9e5c5abd5e5ec22b6d1848e7e9c50b (diff)
sched: clean up the rt priority macros
clean up the rt priority macros, pointed out by Andrew Morton.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--include/linux/sched.h61
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/sched.c22
3 files changed, 54 insertions(+), 31 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3e7f1890e55d..4dcc61cca00a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -525,31 +525,6 @@ struct signal_struct {
525#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
526#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
527
528
529/*
530 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
531 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
532 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
533 * values are inverted: lower p->prio value means higher priority.
534 *
535 * The MAX_USER_RT_PRIO value allows the actual maximum
536 * RT priority to be separate from the value exported to
537 * user-space. This allows kernel threads to set their
538 * priority to a value higher than any user task. Note:
539 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
540 */
541
542#define MAX_USER_RT_PRIO 100
543#define MAX_RT_PRIO MAX_USER_RT_PRIO
544
545#define MAX_PRIO (MAX_RT_PRIO + 40)
546
547#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
548#define rt_task(p) rt_prio((p)->prio)
549#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
550#define is_rt_policy(p) ((p) != SCHED_NORMAL && (p) != SCHED_BATCH)
551#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy))
552
528/*
529 * Some day this will be a full-fledged user tracking system..
530 */
@@ -1164,6 +1139,42 @@ struct task_struct {
1139#endif
1140};
1141
1142/*
1143 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
1144 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
1145 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
1146 * values are inverted: lower p->prio value means higher priority.
1147 *
1148 * The MAX_USER_RT_PRIO value allows the actual maximum
1149 * RT priority to be separate from the value exported to
1150 * user-space. This allows kernel threads to set their
1151 * priority to a value higher than any user task. Note:
1152 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
1153 */
1154
1155#define MAX_USER_RT_PRIO 100
1156#define MAX_RT_PRIO MAX_USER_RT_PRIO
1157
1158#define MAX_PRIO (MAX_RT_PRIO + 40)
1159#define DEFAULT_PRIO (MAX_RT_PRIO + 20)
1160
1161static inline int rt_prio(int prio)
1162{
1163 if (unlikely(prio < MAX_RT_PRIO))
1164 return 1;
1165 return 0;
1166}
1167
1168static inline int rt_task(struct task_struct *p)
1169{
1170 return rt_prio(p->prio);
1171}
1172
1173static inline int batch_task(struct task_struct *p)
1174{
1175 return p->policy == SCHED_BATCH;
1176}
1177
1178static inline pid_t process_group(struct task_struct *tsk)
1179{
1180	return tsk->signal->pgrp;
diff --git a/kernel/exit.c b/kernel/exit.c
index 6c7699240327..8fd7acd7bbd0 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -290,7 +290,7 @@ static void reparent_to_kthreadd(void)
290	/* Set the exit signal to SIGCHLD so we signal init on exit */
291	current->exit_signal = SIGCHLD;
292
293	if (!has_rt_policy(current) && (task_nice(current) < 0))
293	if (task_nice(current) < 0)
294		set_user_nice(current, 0);
295	/* cpus_allowed? */
296	/* rt_priority? */
diff --git a/kernel/sched.c b/kernel/sched.c
index d9ed9274bf0a..53c0ee742f69 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -220,6 +220,18 @@ static inline unsigned int task_timeslice(struct task_struct *p)
220	return static_prio_timeslice(p->static_prio);
221}
222
223static inline int rt_policy(int policy)
224{
225 if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
226 return 1;
227 return 0;
228}
229
230static inline int task_has_rt_policy(struct task_struct *p)
231{
232 return rt_policy(p->policy);
233}
234
235/*
236 * This is the priority-queue data structure of the RT scheduling class:
237 */
@@ -698,7 +710,7 @@ static inline int __normal_prio(struct task_struct *p)
710
711static void set_load_weight(struct task_struct *p)
712{
701	if (has_rt_policy(p)) {
713	if (task_has_rt_policy(p)) {
714#ifdef CONFIG_SMP
715		if (p == task_rq(p)->migration_thread)
716			/*
@@ -749,7 +761,7 @@ static inline int normal_prio(struct task_struct *p)
761{
762	int prio;
763
752	if (has_rt_policy(p))
764	if (task_has_rt_policy(p))
765		prio = MAX_RT_PRIO-1 - p->rt_priority;
766	else
767		prio = __normal_prio(p);
@@ -4051,7 +4063,7 @@ void set_user_nice(struct task_struct *p, long nice)
4063	 * it wont have any effect on scheduling until the task is
4064	 * not SCHED_NORMAL/SCHED_BATCH:
4065	 */
4054	if (has_rt_policy(p)) {
4066	if (task_has_rt_policy(p)) {
4067		p->static_prio = NICE_TO_PRIO(nice);
4068		goto out_unlock;
4069	}
@@ -4240,14 +4252,14 @@ recheck:
4252		(p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
4253		(!p->mm && param->sched_priority > MAX_RT_PRIO-1))
4254		return -EINVAL;
4243	if (is_rt_policy(policy) != (param->sched_priority != 0))
4255	if (rt_policy(policy) != (param->sched_priority != 0))
4256		return -EINVAL;
4257
4258	/*
4259	 * Allow unprivileged RT tasks to decrease priority:
4260	 */
4261	if (!capable(CAP_SYS_NICE)) {
4250		if (is_rt_policy(policy)) {
4262		if (rt_policy(policy)) {
4263			unsigned long rlim_rtprio;
4264			unsigned long flags;