author    | Ingo Molnar <mingo@elte.hu> | 2009-01-21 10:37:27 -0500
committer | Ingo Molnar <mingo@elte.hu> | 2009-01-21 10:37:27 -0500
commit    | 77835492ed489c0b870f82f4c50687bd267acc0a (patch)
tree      | d80903ce1b8dd30aa44ccfc756616ad4d6c74d63 /kernel/sched.c
parent    | af37501c792107c2bde1524bdae38d9a247b841a (diff)
parent    | 1de9e8e70f5acc441550ca75433563d91b269bbe (diff)
Merge commit 'v2.6.29-rc2' into perfcounters/core
Conflicts:
include/linux/syscalls.h
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 43
1 file changed, 25 insertions, 18 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index ce9fecab5f02..40d70d9c0af3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1343,8 +1343,8 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
  * slice expiry etc.
  */
 
-#define WEIGHT_IDLEPRIO		2
-#define WMULT_IDLEPRIO		(1 << 31)
+#define WEIGHT_IDLEPRIO		3
+#define WMULT_IDLEPRIO		1431655765
 
 /*
  * Nice levels are multiplicative, with a gentle 10% change for every
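A note on the new constants: the idle-class weight/multiplier pair follows the same convention as the precalculated prio_to_wmult[] table in this file, where each multiplier is 2^32 divided by the weight. With the weight raised from 2 to 3, the matching multiplier becomes 2^32 / 3 = 1431655765, just as the old (1 << 31) paired with a weight of 2. A quick, self-contained arithmetic check (illustrative user-space code, not part of the patch):

```c
#include <stdio.h>
#include <stdint.h>

/* Check the idle-class weight/multiplier pairing against the
 * 2^32 / weight rule used for the precalculated inverse weights
 * in kernel/sched.c. */
int main(void)
{
	uint64_t wmult_const = 1ULL << 32;

	/* old pair: weight 2, multiplier (1 << 31) = 2147483648 */
	printf("2^32 / 2 = %llu\n", (unsigned long long)(wmult_const / 2));
	/* new pair: weight 3, multiplier 1431655765 */
	printf("2^32 / 3 = %llu\n", (unsigned long long)(wmult_const / 3));
	return 0;
}
```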
@@ -4509,7 +4509,7 @@ void __kprobes sub_preempt_count(int val)
 	/*
 	 * Underflow?
 	 */
-	if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
+	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
 		return;
 	/*
 	 * Is the spinlock portion underflowing?
@@ -5196,7 +5196,7 @@ int can_nice(const struct task_struct *p, const int nice)
  * sys_setpriority is a more generic, but much slower function that
  * does similar things.
  */
-asmlinkage long sys_nice(int increment)
+SYSCALL_DEFINE1(nice, int, increment)
 {
 	long nice, retval;
 
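The remaining hunks in this file are the same mechanical conversion: each open-coded `asmlinkage long sys_*(...)` definition becomes a `SYSCALL_DEFINEn(...)` invocation, where n is the argument count and the arguments are passed as type/name pairs. As a rough sketch only (the real macros in include/linux/syscalls.h do more, including symbol aliasing and, on some 64-bit architectures, wrapper variants that sign-extend 32-bit arguments), the macro still ends up emitting a `long sys_<name>(...)` definition; the function body is untouched by the conversion:

```c
/* Simplified, user-space-compilable illustration only; it is NOT the
 * real kernel macro, just the shape of what it expands to. */
#include <stdio.h>

#define asmlinkage		/* stand-in for the kernel attribute */
#define SYSCALL_DEFINE1(name, type1, arg1) \
	asmlinkage long sys_##name(type1 arg1)

/* Expands to: long sys_nice(int increment) { ... } */
SYSCALL_DEFINE1(nice, int, increment)
{
	return increment;	/* dummy body for the sketch */
}

int main(void)
{
	printf("sys_nice(5) -> %ld\n", sys_nice(5));
	return 0;
}
```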
@@ -5503,8 +5503,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  */
-asmlinkage long
-sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
+		struct sched_param __user *, param)
 {
 	/* negative values for policy are not valid */
 	if (policy < 0)
@@ -5518,7 +5518,7 @@ sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @pid: the pid in question.
  * @param: structure containing the new RT priority.
  */
-asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 {
 	return do_sched_setscheduler(pid, -1, param);
 }
@@ -5527,7 +5527,7 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
  * @pid: the pid in question.
  */
-asmlinkage long sys_sched_getscheduler(pid_t pid)
+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
 {
 	struct task_struct *p;
 	int retval;
@@ -5552,7 +5552,7 @@ asmlinkage long sys_sched_getscheduler(pid_t pid)
  * @pid: the pid in question.
  * @param: structure containing the RT priority.
  */
-asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
 	struct sched_param lp;
 	struct task_struct *p;
@@ -5670,8 +5670,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to the new cpu mask
  */
-asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
+		unsigned long __user *, user_mask_ptr)
 {
 	cpumask_var_t new_mask;
 	int retval;
@@ -5718,8 +5718,8 @@ out_unlock:
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to hold the current cpu mask
  */
-asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+		unsigned long __user *, user_mask_ptr)
 {
 	int ret;
 	cpumask_var_t mask;
@@ -5748,7 +5748,7 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 */
-asmlinkage long sys_sched_yield(void)
+SYSCALL_DEFINE0(sched_yield)
 {
 	struct rq *rq = this_rq_lock();
 
@@ -5889,7 +5889,7 @@ long __sched io_schedule_timeout(long timeout)
 * this syscall returns the maximum rt_priority that can be used
 * by a given scheduling class.
 */
-asmlinkage long sys_sched_get_priority_max(int policy)
+SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
 {
 	int ret = -EINVAL;
 
@@ -5914,7 +5914,7 @@ asmlinkage long sys_sched_get_priority_max(int policy)
 * this syscall returns the minimum rt_priority that can be used
 * by a given scheduling class.
 */
-asmlinkage long sys_sched_get_priority_min(int policy)
+SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
 {
 	int ret = -EINVAL;
 
@@ -5939,8 +5939,8 @@ asmlinkage long sys_sched_get_priority_min(int policy)
 * this syscall writes the default timeslice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 */
-asmlinkage
-long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
+		struct timespec __user *, interval)
 {
 	struct task_struct *p;
 	unsigned int time_slice;
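From user space none of this is visible; the syscall ABI and the libc prototypes stay the same. As an illustration of the call converted just above, the POSIX sched_rr_get_interval() wrapper fills a timespec with the task's round-robin timeslice (per the comment above, a value of 0 means infinity). A minimal caller, assuming glibc:

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <sched.h>
#include <time.h>
#include <unistd.h>

/* Query the round-robin timeslice of the calling process via the
 * libc wrapper for the sched_rr_get_interval() syscall. */
int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(getpid(), &ts) != 0) {
		perror("sched_rr_get_interval");
		return 1;
	}
	/* Per the kernel comment above, 0 means "infinity". */
	printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
```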
@@ -9120,6 +9120,13 @@ static int tg_schedulable(struct task_group *tg, void *data)
 		runtime = d->rt_runtime;
 	}
 
+#ifdef CONFIG_USER_SCHED
+	if (tg == &root_task_group) {
+		period = global_rt_period();
+		runtime = global_rt_runtime();
+	}
+#endif
+
 	/*
 	 * Cannot have more runtime than the period.
 	 */