author    | Ingo Molnar <mingo@elte.hu> | 2007-07-09 12:51:59 -0400
committer | Ingo Molnar <mingo@elte.hu> | 2007-07-09 12:51:59 -0400
commit    | e05606d3301525aa67b081ad9fccade2b31ab35a (patch)
tree      | 2a3e5a477dfca70ce32f3ea8dbc5e16034c98c23 /kernel/sched.c
parent    | 138a8aeb5b9e5c5abd5e5ec22b6d1848e7e9c50b (diff)
sched: clean up the rt priority macros
clean up the rt priority macros, pointed out by Andrew Morton.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 22
1 files changed, 17 insertions, 5 deletions
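The patch below replaces the old is_rt_policy()/has_rt_policy() macros with two static inline helpers, rt_policy() and task_has_rt_policy(), both of which reduce to a check for SCHED_FIFO or SCHED_RR. As a rough userspace sketch of that same check (illustrative only, not kernel code: it borrows the helper's name and applies the test to the calling process via POSIX sched_getscheduler()):

```c
/*
 * Userspace illustration only -- not kernel code. It mirrors the test the
 * new rt_policy() helper performs and applies it to the calling process.
 */
#include <sched.h>
#include <stdio.h>

static int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

int main(void)
{
	int policy = sched_getscheduler(0);	/* 0 means the calling process */

	if (policy == -1) {
		perror("sched_getscheduler");
		return 1;
	}
	printf("current policy %d is %s\n", policy,
	       rt_policy(policy) ? "realtime (SCHED_FIFO/SCHED_RR)" : "not realtime");
	return 0;
}
```

In the kernel proper the comparison also carries unlikely() branch hints, as the first hunk below shows.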
diff --git a/kernel/sched.c b/kernel/sched.c
index d9ed9274bf0a..53c0ee742f69 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -220,6 +220,18 @@ static inline unsigned int task_timeslice(struct task_struct *p)
 	return static_prio_timeslice(p->static_prio);
 }
 
+static inline int rt_policy(int policy)
+{
+	if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
+		return 1;
+	return 0;
+}
+
+static inline int task_has_rt_policy(struct task_struct *p)
+{
+	return rt_policy(p->policy);
+}
+
 /*
  * This is the priority-queue data structure of the RT scheduling class:
  */
@@ -698,7 +710,7 @@ static inline int __normal_prio(struct task_struct *p)
 
 static void set_load_weight(struct task_struct *p)
 {
-	if (has_rt_policy(p)) {
+	if (task_has_rt_policy(p)) {
 #ifdef CONFIG_SMP
 		if (p == task_rq(p)->migration_thread)
 			/*
@@ -749,7 +761,7 @@ static inline int normal_prio(struct task_struct *p)
 {
 	int prio;
 
-	if (has_rt_policy(p))
+	if (task_has_rt_policy(p))
 		prio = MAX_RT_PRIO-1 - p->rt_priority;
 	else
 		prio = __normal_prio(p);
@@ -4051,7 +4063,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	 * it wont have any effect on scheduling until the task is
 	 * not SCHED_NORMAL/SCHED_BATCH:
 	 */
-	if (has_rt_policy(p)) {
+	if (task_has_rt_policy(p)) {
 		p->static_prio = NICE_TO_PRIO(nice);
 		goto out_unlock;
 	}
@@ -4240,14 +4252,14 @@ recheck:
 	    (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
 	    (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
 		return -EINVAL;
-	if (is_rt_policy(policy) != (param->sched_priority != 0))
+	if (rt_policy(policy) != (param->sched_priority != 0))
 		return -EINVAL;
 
 	/*
 	 * Allow unprivileged RT tasks to decrease priority:
 	 */
 	if (!capable(CAP_SYS_NICE)) {
-		if (is_rt_policy(policy)) {
+		if (rt_policy(policy)) {
 			unsigned long rlim_rtprio;
 			unsigned long flags;
 
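For context on the arithmetic left untouched by the normal_prio() hunk: prio = MAX_RT_PRIO-1 - p->rt_priority inverts the user-visible RT priority onto the kernel's internal scale, where a lower prio value means a higher effective priority. A standalone sketch of that mapping, assuming MAX_RT_PRIO is 100 as in mainline include/linux/sched.h (the constant is not defined by this patch):

```c
/*
 * Illustration only -- not part of the patch. Prints the rt_priority -> prio
 * mapping used in normal_prio() above, assuming MAX_RT_PRIO == 100.
 */
#include <stdio.h>

#define MAX_RT_PRIO 100	/* assumed to match include/linux/sched.h of this era */

int main(void)
{
	/* RT priorities run from 1 to MAX_RT_PRIO-1; sample a few of them. */
	for (int rt_priority = 1; rt_priority < MAX_RT_PRIO; rt_priority += 49)
		printf("rt_priority %2d -> prio %2d\n",
		       rt_priority, MAX_RT_PRIO - 1 - rt_priority);
	return 0;
}
```

With those values, rt_priority 1 maps to prio 98, and rt_priority 99, the largest value the EINVAL checks in the final hunk accept for a task with an mm (assuming MAX_USER_RT_PRIO is also 100), maps to prio 0.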