author		Ingo Molnar <mingo@elte.hu>	2009-02-07 12:31:54 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-07 12:31:54 -0500
commit		673f8205914a12e928c65afbcd78ae748f78df53 (patch)
tree		38c60215646d079fab3bff812e094e914960c7ec /kernel/sched.c
parent		cf47b8f3d96b0b8b10b557444a28b3ca4024ff82 (diff)
parent		ae1a25da8448271a99745da03100d5299575a269 (diff)
Merge branch 'linus' into core/locking
Conflicts:
	fs/btrfs/locking.c
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c | 57 +++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 37 insertions(+), 20 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 589e7308c615..186c6fd08acf 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1323,8 +1323,8 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
  * slice expiry etc.
  */
 
-#define WEIGHT_IDLEPRIO		2
-#define WMULT_IDLEPRIO		(1 << 31)
+#define WEIGHT_IDLEPRIO		3
+#define WMULT_IDLEPRIO		1431655765
 
 /*
  * Nice levels are multiplicative, with a gentle 10% change for every
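A note on the hunk above: the WMULT_* constants are precomputed fixed-point inverses (2^32/weight) that let the scheduler replace divisions by a task's load weight with multiplications. Both the old and the new pairs satisfy that relation; the substantive change is bumping the idle-priority weight from 2 to 3. A quick self-contained check (illustrative snippet, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* inverse load weight, as used by the scheduler's
	 * fixed-point arithmetic: wmult = 2^32 / weight */
	printf("%llu\n", (1ULL << 32) / 3);	/* 1431655765, the new WMULT_IDLEPRIO */
	printf("%llu\n", (1ULL << 32) / 2);	/* 2147483648 == 1 << 31, the old one */
	return 0;
}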
@@ -2266,6 +2266,16 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	if (!sched_feat(SYNC_WAKEUPS))
 		sync = 0;
 
+	if (!sync) {
+		if (current->se.avg_overlap < sysctl_sched_migration_cost &&
+			  p->se.avg_overlap < sysctl_sched_migration_cost)
+			sync = 1;
+	} else {
+		if (current->se.avg_overlap >= sysctl_sched_migration_cost ||
+			  p->se.avg_overlap >= sysctl_sched_migration_cost)
+			sync = 0;
+	}
+
 #ifdef CONFIG_SMP
 	if (sched_feat(LB_WAKEUP_UPDATE)) {
 		struct sched_domain *sd;
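The try_to_wake_up() hunk above derives the sync hint from observed scheduling behaviour instead of trusting the caller: the two branches collapse to a single rule. An equivalent sketch (not the patch's literal form):

	/* treat the wakeup as synchronous iff both the waker (current) and
	 * the wakee (p) show short average overlap with other tasks */
	sync = current->se.avg_overlap < sysctl_sched_migration_cost &&
	       p->se.avg_overlap < sysctl_sched_migration_cost;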
@@ -4440,7 +4450,7 @@ void __kprobes sub_preempt_count(int val)
 	/*
 	 * Underflow?
 	 */
-	if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
+	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
 		return;
 	/*
 	 * Is the spinlock portion underflowing?
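In the sub_preempt_count() hunk, the underflow warning stops crediting the big kernel lock. Side by side, with the semantics spelled out:

	/* before: with the BKL held, kernel_locked() is 1, so one level of
	 * preempt_count() was reserved for the lock and the warning already
	 * fired when val equalled the full count */
	if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
		return;

	/* after: only subtracting more than the whole count warns */
	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
		return;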
@@ -4752,8 +4762,8 @@ EXPORT_SYMBOL(default_wake_function);
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-			     int nr_exclusive, int sync, void *key)
+void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+			int nr_exclusive, int sync, void *key)
 {
 	wait_queue_t *curr, *next;
 
@@ -5191,7 +5201,7 @@ int can_nice(const struct task_struct *p, const int nice)
  * sys_setpriority is a more generic, but much slower function that
  * does similar things.
  */
-asmlinkage long sys_nice(int increment)
+SYSCALL_DEFINE1(nice, int, increment)
 {
 	long nice, retval;
 
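This and the remaining sys_*() hunks below are mechanical conversions to the SYSCALL_DEFINEn() macros merged for 2.6.29. Roughly, as a simplified sketch of include/linux/syscalls.h from that era (the real macros go through intermediate SYSC_/SyS_ names and aliasing):

/* SYSCALL_DEFINE1(nice, int, increment) yields the familiar symbol: */
asmlinkage long sys_nice(int increment);

/*
 * On architectures that select CONFIG_HAVE_SYSCALL_WRAPPERS (e.g. s390),
 * an extra wrapper receives every argument as a register-wide long and
 * casts it down, so narrow arguments are correctly sign-extended on
 * 64-bit kernels (the CVE-2009-0029 class of bugs):
 */
asmlinkage long SyS_nice(long increment)
{
	return sys_nice((int)increment);
}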
@@ -5498,8 +5508,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  */
-asmlinkage long
-sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
+		struct sched_param __user *, param)
 {
 	/* negative values for policy are not valid */
 	if (policy < 0)
@@ -5513,7 +5523,7 @@ sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @pid: the pid in question.
  * @param: structure containing the new RT priority.
  */
-asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 {
 	return do_sched_setscheduler(pid, -1, param);
 }
@@ -5522,7 +5532,7 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
  * @pid: the pid in question.
  */
-asmlinkage long sys_sched_getscheduler(pid_t pid)
+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
 {
 	struct task_struct *p;
 	int retval;
@@ -5547,7 +5557,7 @@ asmlinkage long sys_sched_getscheduler(pid_t pid)
  * @pid: the pid in question.
  * @param: structure containing the RT priority.
  */
-asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
 	struct sched_param lp;
 	struct task_struct *p;
@@ -5665,8 +5675,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to the new cpu mask
  */
-asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
+		unsigned long __user *, user_mask_ptr)
 {
 	cpumask_var_t new_mask;
 	int retval;
@@ -5713,8 +5723,8 @@ out_unlock:
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to hold the current cpu mask
  */
-asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+		unsigned long __user *, user_mask_ptr)
 {
 	int ret;
 	cpumask_var_t mask;
@@ -5743,7 +5753,7 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
  * This function yields the current CPU to other tasks. If there are no
  * other threads running on this CPU then this function will return.
  */
-asmlinkage long sys_sched_yield(void)
+SYSCALL_DEFINE0(sched_yield)
 {
 	struct rq *rq = this_rq_lock();
 
@@ -5884,7 +5894,7 @@ long __sched io_schedule_timeout(long timeout)
  * this syscall returns the maximum rt_priority that can be used
  * by a given scheduling class.
  */
-asmlinkage long sys_sched_get_priority_max(int policy)
+SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
 {
 	int ret = -EINVAL;
 
@@ -5909,7 +5919,7 @@ asmlinkage long sys_sched_get_priority_max(int policy)
  * this syscall returns the minimum rt_priority that can be used
  * by a given scheduling class.
  */
-asmlinkage long sys_sched_get_priority_min(int policy)
+SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
 {
 	int ret = -EINVAL;
 
@@ -5934,8 +5944,8 @@ asmlinkage long sys_sched_get_priority_min(int policy)
  * this syscall writes the default timeslice value of a given process
  * into the user-space timespec buffer. A value of '0' means infinity.
  */
-asmlinkage
-long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
+		struct timespec __user *, interval)
 {
 	struct task_struct *p;
 	unsigned int time_slice;
@@ -9115,6 +9125,13 @@ static int tg_schedulable(struct task_group *tg, void *data)
 		runtime = d->rt_runtime;
 	}
 
+#ifdef CONFIG_USER_SCHED
+	if (tg == &root_task_group) {
+		period = global_rt_period();
+		runtime = global_rt_runtime();
+	}
+#endif
+
 	/*
 	 * Cannot have more runtime than the period.
 	 */
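Finally, the tg_schedulable() hunk makes the root task group validate against the global RT bandwidth limits when CONFIG_USER_SCHED is set. For context, the helpers it calls are defined earlier in kernel/sched.c along these lines (quoted from memory of that kernel era, so treat as a sketch):

static inline u64 global_rt_period(void)
{
	/* sysctl value is in microseconds; bandwidth math is in ns */
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	/* a negative sysctl value means "no limit" */
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}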