author    | Jiri Kosina <jkosina@suse.cz> | 2011-04-26 04:22:15 -0400
committer | Jiri Kosina <jkosina@suse.cz> | 2011-04-26 04:22:59 -0400
commit    | 07f9479a40cc778bc1462ada11f95b01360ae4ff (patch)
tree      | 0676cf38df3844004bb3ebfd99dfa67a4a8998f5 /kernel/sched.c
parent    | 9d5e6bdb3013acfb311ab407eeca0b6a6a3dedbf (diff)
parent    | cd2e49e90f1cae7726c9a2c54488d881d7f1cd1c (diff)
Merge branch 'master' into for-next
Fast-forwarded to current state of Linus' tree as there are patches to be
applied for files that didn't exist on the old branch.
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 46
1 file changed, 37 insertions, 9 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index a172494a9a63..312f8b95c2d4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2309,7 +2309,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
  * Cause a process which is running on another CPU to enter
  * kernel-mode, without any delay. (to get signals handled.)
  *
- * NOTE: this function doesnt have to take the runqueue lock,
+ * NOTE: this function doesn't have to take the runqueue lock,
  * because all it wants to ensure is that the remote task enters
  * the kernel. If the IPI races and the task has been migrated
  * to another CPU then no harm is done and the purpose has been
@@ -4111,6 +4111,16 @@ need_resched:
 					try_to_wake_up_local(to_wakeup);
 			}
 			deactivate_task(rq, prev, DEQUEUE_SLEEP);
+
+			/*
+			 * If we are going to sleep and we have plugged IO queued, make
+			 * sure to submit it to avoid deadlocks.
+			 */
+			if (blk_needs_flush_plug(prev)) {
+				raw_spin_unlock(&rq->lock);
+				blk_schedule_flush_plug(prev);
+				raw_spin_lock(&rq->lock);
+			}
 		}
 		switch_count = &prev->nvcsw;
 	}
@@ -4892,8 +4902,11 @@ static bool check_same_owner(struct task_struct *p)
 
 	rcu_read_lock();
 	pcred = __task_cred(p);
-	match = (cred->euid == pcred->euid ||
-		 cred->euid == pcred->uid);
+	if (cred->user->user_ns == pcred->user->user_ns)
+		match = (cred->euid == pcred->euid ||
+			 cred->euid == pcred->uid);
+	else
+		match = false;
 	rcu_read_unlock();
 	return match;
 }
@@ -4984,7 +4997,7 @@ recheck:
 	 */
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	/*
-	 * To be able to change p->policy safely, the apropriate
+	 * To be able to change p->policy safely, the appropriate
 	 * runqueue lock must be held.
 	 */
 	rq = __task_rq_lock(p);
@@ -4998,6 +5011,17 @@ recheck:
 		return -EINVAL;
 	}
 
+	/*
+	 * If not changing anything there's no need to proceed further:
+	 */
+	if (unlikely(policy == p->policy && (!rt_policy(policy) ||
+			param->sched_priority == p->rt_priority))) {
+
+		__task_rq_unlock(rq);
+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		return 0;
+	}
+
 #ifdef CONFIG_RT_GROUP_SCHED
 	if (user) {
 		/*
@@ -5221,7 +5245,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 		goto out_free_cpus_allowed;
 	}
 	retval = -EPERM;
-	if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
+	if (!check_same_owner(p) && !task_ns_capable(p, CAP_SYS_NICE))
 		goto out_unlock;
 
 	retval = security_task_setscheduler(p);
@@ -5460,6 +5484,8 @@ EXPORT_SYMBOL(yield);
  * yield_to - yield the current processor to another thread in
  * your thread group, or accelerate that thread toward the
  * processor it's on.
+ * @p: target task
+ * @preempt: whether task preemption is allowed or not
  *
  * It's the caller's job to ensure that the target task struct
  * can't go away on us before we can do any checks.
@@ -5525,6 +5551,7 @@ void __sched io_schedule(void)
 
 	delayacct_blkio_start();
 	atomic_inc(&rq->nr_iowait);
+	blk_flush_plug(current);
 	current->in_iowait = 1;
 	schedule();
 	current->in_iowait = 0;
@@ -5540,6 +5567,7 @@ long __sched io_schedule_timeout(long timeout)
 
 	delayacct_blkio_start();
 	atomic_inc(&rq->nr_iowait);
+	blk_flush_plug(current);
 	current->in_iowait = 1;
 	ret = schedule_timeout(timeout);
 	current->in_iowait = 0;
@@ -5688,7 +5716,7 @@ void show_state_filter(unsigned long state_filter)
 	do_each_thread(g, p) {
 		/*
 		 * reset the NMI-timeout, listing all files on a slow
-		 * console might take alot of time:
+		 * console might take a lot of time:
 		 */
 		touch_nmi_watchdog();
 		if (!state_filter || (p->state & state_filter))
@@ -6303,6 +6331,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 #endif
 	}
+
+	update_max_interval();
+
 	return NOTIFY_OK;
 }
 
@@ -8434,7 +8465,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se;
-	struct rq *rq;
 	int i;
 
 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8447,8 +8477,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	tg->shares = NICE_0_LOAD;
 
 	for_each_possible_cpu(i) {
-		rq = cpu_rq(i);
-
 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
 				      GFP_KERNEL, cpu_to_node(i));
 		if (!cfs_rq)
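
The hunks in schedule(), io_schedule() and io_schedule_timeout() above come from the block-layer plugging work picked up by this merge: a task that still has plugged I/O queued must have that plug flushed before it sleeps, or it (or another task) can end up waiting on I/O that was never dispatched. Below is a minimal, hypothetical caller sketch, not part of this commit, assuming the 2.6.39 on-stack plugging API from <linux/blkdev.h>; the function name example_plugged_io() is invented for illustration.

/*
 * Illustrative sketch only -- not part of this commit. Assumes the
 * 2.6.39 on-stack plugging API (blk_start_plug()/blk_finish_plug()).
 */
#include <linux/blkdev.h>
#include <linux/sched.h>

static void example_plugged_io(void)
{
	struct blk_plug plug;

	/*
	 * Queue subsequent bios on current->plug instead of sending each
	 * one to the request queue immediately.
	 */
	blk_start_plug(&plug);

	/* ... submit_bio() calls accumulate on the per-task plug list ... */

	/*
	 * If this task blocks before blk_finish_plug(), schedule() now
	 * sees blk_needs_flush_plug(prev) and dispatches the pending
	 * requests via blk_schedule_flush_plug(); io_schedule() likewise
	 * calls blk_flush_plug(current) before sleeping. Either way the
	 * queued I/O reaches the device, avoiding the deadlock mentioned
	 * in the hunk comment. (Real callers normally sleep on a wait
	 * queue in TASK_UNINTERRUPTIBLE rather than calling io_schedule()
	 * directly; this is simplified.)
	 */
	io_schedule();

	/* Dispatch anything still sitting on the plug. */
	blk_finish_plug(&plug);
}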