Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	34
1 file changed, 26 insertions, 8 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index cca93cc0dd7d..0dc757246d89 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -57,6 +57,16 @@
 #include <asm/unistd.h>
 
 /*
+ * Scheduler clock - returns current time in nanosec units.
+ * This is default implementation.
+ * Architectures and sub-architectures can override this.
+ */
+unsigned long long __attribute__((weak)) sched_clock(void)
+{
+	return (unsigned long long)jiffies * (1000000000 / HZ);
+}
+
+/*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
  * and back.
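
The sched_clock() added above is deliberately a weak symbol: the jiffies-based version is only a fallback, and any architecture that links in its own non-weak sched_clock() replaces it. A minimal userspace sketch of the same link-time mechanism, with HZ and jiffies stood in by made-up demo constants:

/* weak_demo.c - illustrative only; HZ and jiffies are demo stand-ins */
#include <stdio.h>

#define HZ 250				/* assumed tick rate for the demo */
static unsigned long jiffies = 1000;	/* stand-in for the kernel tick counter */

/*
 * Default implementation, jiffies resolution (4 ms per tick at HZ=250).
 * Any other object file that defines a non-weak sched_clock() wins at
 * link time - that is how an architecture overrides this fallback.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)jiffies * (1000000000 / HZ);
}

int main(void)
{
	printf("sched_clock() = %llu ns\n", sched_clock());	/* 4000000000 */
	return 0;
}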
@@ -1843,6 +1853,13 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	struct mm_struct *mm = next->mm;
 	struct mm_struct *oldmm = prev->active_mm;
 
+	/*
+	 * For paravirt, this is coupled with an exit in switch_to to
+	 * combine the page table reload and the switch backend into
+	 * one hypercall.
+	 */
+	arch_enter_lazy_cpu_mode();
+
 	if (!mm) {
 		next->active_mm = oldmm;
 		atomic_inc(&oldmm->mm_count);
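
arch_enter_lazy_cpu_mode() opens a window in which a paravirt backend may queue CPU-state updates instead of trapping for each one; the matching exit in switch_to flushes the queue, so the page table reload and the switch backend cost one hypercall rather than several. A toy sketch of that batching pattern - the issue()/flush() queue here is invented for illustration and is not the real paravirt interface:

/* lazy_batch_demo.c - a sketch of deferring work while in "lazy mode" */
#include <stdio.h>

#define MAX_PENDING 8

static int pending[MAX_PENDING];
static int npending;
static int lazy;

static void hypercall_one(int op)	/* stand-in for one expensive trap */
{
	printf("hypercall(%d)\n", op);
}

static void lazy_mode_flush(void)	/* one combined trap for the batch */
{
	printf("flushing %d ops in a single batch\n", npending);
	npending = 0;
}

static void issue(int op)
{
	if (lazy && npending < MAX_PENDING)
		pending[npending++] = op;	/* defer while batching */
	else
		hypercall_one(op);		/* immediate otherwise */
}

int main(void)
{
	lazy = 1;		/* analogue of arch_enter_lazy_cpu_mode() */
	issue(1);		/* e.g. the page table reload */
	issue(2);		/* e.g. the register-state switch */
	lazy = 0;		/* the paired exit in switch_to */
	lazy_mode_flush();
	return 0;
}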
@@ -2887,14 +2904,16 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 static void update_load(struct rq *this_rq)
 {
 	unsigned long this_load;
-	int i, scale;
+	unsigned int i, scale;
 
 	this_load = this_rq->raw_weighted_load;
 
 	/* Update our load: */
-	for (i = 0, scale = 1; i < 3; i++, scale <<= 1) {
+	for (i = 0, scale = 1; i < 3; i++, scale += scale) {
 		unsigned long old_load, new_load;
 
+		/* scale is effectively 1 << i now, and >> i divides by scale */
+
 		old_load = this_rq->cpu_load[i];
 		new_load = this_load;
 		/*
@@ -2904,7 +2923,7 @@ static void update_load(struct rq *this_rq)
 		 */
 		if (new_load > old_load)
 			new_load += scale-1;
-		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) / scale;
+		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
 	}
 }
 
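The two hunks above are one optimization: the loop keeps scale equal to 1 << i (scale += scale doubles it each pass), so dividing the weighted sum by scale is exactly a right shift by i, and the shift saves a division per index per tick. A standalone sketch with a made-up constant load, showing the three cpu_load[] entries tracking it at progressively slower rates:

/* load_decay_demo.c - the cpu_load[] update, lifted into userspace */
#include <stdio.h>

int main(void)
{
	unsigned long cpu_load[3] = { 0, 0, 0 };
	unsigned long this_load = 2048;		/* hypothetical raw_weighted_load */
	unsigned int i, scale, tick;

	for (tick = 0; tick < 4; tick++) {
		for (i = 0, scale = 1; i < 3; i++, scale += scale) {
			unsigned long old_load = cpu_load[i];
			unsigned long new_load = this_load;

			/* round up when ramping, as in the kernel loop */
			if (new_load > old_load)
				new_load += scale - 1;
			/* scale == 1 << i, so ">> i" is the old "/ scale" */
			cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
		}
		printf("tick %u: %lu %lu %lu\n", tick,
		       cpu_load[0], cpu_load[1], cpu_load[2]);
	}
	return 0;
}

On the first tick this prints 2048 1024 512: the higher indexes react more slowly, which is the point of keeping several load averages.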
@@ -4193,13 +4212,12 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
 }
 
 /**
- * sched_setscheduler - change the scheduling policy and/or RT priority of
- * a thread.
+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
  * @p: the task in question.
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  *
- * NOTE: the task may be already dead
+ * NOTE that the task may be already dead.
  */
 int sched_setscheduler(struct task_struct *p, int policy,
 		       struct sched_param *param)
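
The kernel-side sched_setscheduler() shown here backs the system call of the same name, so the glibc wrapper exercises it directly. A minimal caller - it assumes the process has CAP_SYS_NICE and fails with EPERM otherwise:

/* fifo_demo.c - switch the calling thread to SCHED_FIFO */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param param = { .sched_priority = 10 };

	/* pid 0 means the calling process */
	if (sched_setscheduler(0, SCHED_FIFO, &param) == -1) {
		perror("sched_setscheduler");
		return 1;
	}
	printf("running SCHED_FIFO at priority %d\n", param.sched_priority);
	return 0;
}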
@@ -4567,7 +4585,7 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
 /**
  * sys_sched_yield - yield the current processor to other threads.
  *
- * this function yields the current CPU by moving the calling thread
+ * This function yields the current CPU by moving the calling thread
  * to the expired array. If there are no other threads running on this
  * CPU then this function will return.
  */
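
From userspace this path is reached through sched_yield(2). A trivial demonstration; as the comment above notes, each call returns immediately when nothing else is runnable on the CPU:

/* yield_demo.c - exercise sys_sched_yield() via the libc wrapper */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		sched_yield();	/* requeue to the expired array, pick next */
	printf("yielded three times\n");
	return 0;
}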
@@ -4694,7 +4712,7 @@ EXPORT_SYMBOL(cond_resched_softirq);
 /**
  * yield - yield the current processor to other threads.
  *
- * this is a shortcut for kernel-space yielding - it marks the
+ * This is a shortcut for kernel-space yielding - it marks the
  * thread runnable and calls sys_sched_yield().
  */
 void __sched yield(void)
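
For reference, the body behind this comment is just the two steps the comment names; in kernels of this vintage it reads roughly as follows (quoted from memory, so treat as approximate):

void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	sys_sched_yield();
}
EXPORT_SYMBOL(yield);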