Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 53608a59d6e3..3399701c680e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -160,15 +160,6 @@
 #define TASK_PREEMPTS_CURR(p, rq) \
 	((p)->prio < (rq)->curr->prio)
 
-/*
- * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
- * to time slice values: [800ms ... 100ms ... 5ms]
- *
- * The higher a thread's priority, the bigger timeslices
- * it gets during one round of execution. But even the lowest
- * priority thread gets MIN_TIMESLICE worth of execution time.
- */
-
 #define SCALE_PRIO(x, prio) \
 	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
 
@@ -180,6 +171,15 @@ static unsigned int static_prio_timeslice(int static_prio)
 	return SCALE_PRIO(DEF_TIMESLICE, static_prio);
 }
 
+/*
+ * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
+ * to time slice values: [800ms ... 100ms ... 5ms]
+ *
+ * The higher a thread's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority thread gets MIN_TIMESLICE worth of execution time.
+ */
+
 static inline unsigned int task_timeslice(struct task_struct *p)
 {
 	return static_prio_timeslice(p->static_prio);
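The [800ms ... 100ms ... 5ms] mapping in the relocated comment can be checked numerically. Below is a minimal userspace sketch, not the kernel code itself: it assumes the 2.6-era constants (MAX_RT_PRIO 100, MAX_PRIO 140, MAX_USER_PRIO 40, DEF_TIMESLICE 100 ms, MIN_TIMESLICE 5 ms) and that the head of static_prio_timeslice(), elided from this hunk, scales negative-nice tasks from a 4x base slice, which is what the 800 ms endpoint implies.

#include <stdio.h>

/* Assumed 2.6-era constants, expressed in milliseconds rather than
 * jiffies so the output is HZ-independent. */
#define MAX_RT_PRIO	100
#define MAX_PRIO	(MAX_RT_PRIO + 40)
#define MAX_USER_PRIO	40
#define DEF_TIMESLICE	100	/* ms */
#define MIN_TIMESLICE	5	/* ms */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define max(a, b)	((a) > (b) ? (a) : (b))

/* Same shape as SCALE_PRIO() in the hunk above. */
#define SCALE_PRIO(x, prio) \
	max((x) * (MAX_PRIO - (prio)) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)

/* Assumed body of static_prio_timeslice(): negative-nice tasks scale
 * from a 4x larger base, stretching the top of the range to 800 ms. */
static unsigned int static_prio_timeslice(int static_prio)
{
	if (static_prio < NICE_TO_PRIO(0))
		return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
	return SCALE_PRIO(DEF_TIMESLICE, static_prio);
}

int main(void)
{
	printf("nice -20 -> %u ms\n", static_prio_timeslice(NICE_TO_PRIO(-20))); /* 800 */
	printf("nice   0 -> %u ms\n", static_prio_timeslice(NICE_TO_PRIO(0)));   /* 100 */
	printf("nice  19 -> %u ms\n", static_prio_timeslice(NICE_TO_PRIO(19)));  /*   5 */
	return 0;
}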
@@ -1822,14 +1822,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	struct mm_struct *mm = next->mm;
 	struct mm_struct *oldmm = prev->active_mm;
 
-	if (unlikely(!mm)) {
+	if (!mm) {
 		next->active_mm = oldmm;
 		atomic_inc(&oldmm->mm_count);
 		enter_lazy_tlb(oldmm, next);
 	} else
 		switch_mm(oldmm, mm, next);
 
-	if (unlikely(!prev->mm)) {
+	if (!prev->mm) {
 		prev->active_mm = NULL;
 		WARN_ON(rq->prev_mm);
 		rq->prev_mm = oldmm;
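The two hunks above drop unlikely() rather than flip it: kernel threads run with mm == NULL and borrow the previous task's active_mm (lazy TLB), so on workloads that switch through kernel threads often, neither side of these branches is rare, and no static hint beats the CPU's dynamic predictor. For reference, a sketch of how the annotations expand; these match the definitions in include/linux/compiler.h:

/* likely()/unlikely() are thin wrappers around GCC's branch hint.
 * __builtin_expect() steers basic-block layout toward the expected
 * path, so a wrong hint pessimizes the common case -- worse than
 * leaving the branch unannotated. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)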
@@ -3491,7 +3491,7 @@ asmlinkage void __sched preempt_schedule(void)
 	 * If there is a non-zero preempt_count or interrupts are disabled,
 	 * we do not want to preempt the current task. Just return..
 	 */
-	if (unlikely(ti->preempt_count || irqs_disabled()))
+	if (likely(ti->preempt_count || irqs_disabled()))
 		return;
 
 need_resched:
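Here the hint is inverted rather than dropped: when preempt_schedule() is reached with a non-zero preempt_count or with interrupts disabled, the early return is evidently the common outcome at this call site. A condensed sketch of the surrounding function, assuming the 2.6-era shape of preempt_schedule() with the CONFIG_PREEMPT_BKL details elided:

asmlinkage void __sched preempt_schedule(void)
{
	struct thread_info *ti = current_thread_info();

	/* Common case after this patch: we cannot preempt right now. */
	if (likely(ti->preempt_count || irqs_disabled()))
		return;

need_resched:
	/* PREEMPT_ACTIVE keeps schedule() from recursing into us. */
	add_preempt_count(PREEMPT_ACTIVE);
	schedule();
	sub_preempt_count(PREEMPT_ACTIVE);

	/* We may have been set need_resched again while the count was
	 * dropping; if so, go around for another pass. */
	barrier();
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
		goto need_resched;
}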