 kernel/sched.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 4aac8aa16037..97017356669a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -136,7 +136,7 @@ static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
 
 static inline int rt_policy(int policy)
 {
-	if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
+	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
 		return 1;
 	return 0;
 }
@@ -4433,7 +4433,7 @@ static inline void schedule_debug(struct task_struct *prev)
 	 * schedule() atomically, we ignore that path for now.
 	 * Otherwise, whine if we are scheduling when we should not be.
 	 */
-	if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state))
+	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
 		__schedule_bug(prev);
 
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
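
Both hunks apply the same cleanup: two unlikely() annotations are merged into a single one spanning the whole compound condition. unlikely() is the kernel's wrapper around GCC's __builtin_expect() (defined in include/linux/compiler.h); it hints that the expression is expected to evaluate false, letting the compiler move the cold path out of the hot instruction stream. Annotating the combined condition once states that expectation directly, instead of hinting each operand separately. Below is a minimal user-space sketch of the idiom; the macro definitions and SCHED_* constant values here are illustrative stand-ins rather than copies of the kernel headers.

	#include <stdio.h>

	/* User-space stand-ins for the kernel's branch-prediction hints.
	 * __builtin_expect() tells the compiler which value the expression
	 * is expected to take, so it can lay out the likely path first. */
	#define likely(x)   __builtin_expect(!!(x), 1)
	#define unlikely(x) __builtin_expect(!!(x), 0)

	/* Illustrative policy constants; the real definitions live in the
	 * kernel's scheduler headers. */
	#define SCHED_FIFO 1
	#define SCHED_RR   2

	/* Mirrors the patched rt_policy(): one unlikely() covering the
	 * whole compound condition, not one per operand. */
	static inline int rt_policy(int policy)
	{
		if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
			return 1;
		return 0;
	}

	int main(void)
	{
		printf("rt_policy(SCHED_RR) = %d\n", rt_policy(SCHED_RR)); /* 1 */
		printf("rt_policy(0)        = %d\n", rt_policy(0));        /* 0 */
		return 0;
	}

The generated code is equivalent either way for most compilers; the single-hint form simply reads as one statement of intent ("the RT case is rare") and avoids implying two independently rare events.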
