Diffstat (limited to 'kernel/sched.c')

 -rw-r--r--  kernel/sched.c | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index a172494a9a63..f592ce6f8616 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4115,6 +4115,16 @@ need_resched:
 		switch_count = &prev->nvcsw;
 	}
 
+	/*
+	 * If we are going to sleep and we have plugged IO queued, make
+	 * sure to submit it to avoid deadlocks.
+	 */
+	if (prev->state != TASK_RUNNING && blk_needs_flush_plug(prev)) {
+		raw_spin_unlock(&rq->lock);
+		blk_flush_plug(prev);
+		raw_spin_lock(&rq->lock);
+	}
+
 	pre_schedule(rq, prev);
 
 	if (unlikely(!rq->nr_running))
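Context for this hunk (not part of the diff): the deadlock the new comment refers to comes from the on-stack request plugging this same series adds to the block layer, where a submitter batches requests on its own plug roughly like this, assuming the blk_start_plug()/blk_finish_plug() interface from that series; submit_my_bios() is a hypothetical placeholder:

	struct blk_plug plug;

	blk_start_plug(&plug);		/* requests now collect on the task's plug list */
	submit_my_bios();		/* hypothetical: queue a batch of bios */
	blk_finish_plug(&plug);		/* normally flushes the batch to the device */

If the task blocks while its plug still holds requests (for instance on something that can only complete once that very IO is issued), nothing would ever flush the batch; the check added above submits the plugged IO before the task sleeps.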
@@ -4892,8 +4902,11 @@ static bool check_same_owner(struct task_struct *p)
 
 	rcu_read_lock();
 	pcred = __task_cred(p);
-	match = (cred->euid == pcred->euid ||
-		 cred->euid == pcred->uid);
+	if (cred->user->user_ns == pcred->user->user_ns)
+		match = (cred->euid == pcred->euid ||
+			 cred->euid == pcred->uid);
+	else
+		match = false;
 	rcu_read_unlock();
 	return match;
 }
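For readability, the whole helper after this hunk would read roughly as follows (a sketch: the local declarations above the hunk are assumed from the surrounding kernel source and are not part of this diff):

	static bool check_same_owner(struct task_struct *p)
	{
		const struct cred *cred = current_cred(), *pcred;
		bool match;

		rcu_read_lock();
		pcred = __task_cred(p);
		/* uid comparison only makes sense within one user namespace */
		if (cred->user->user_ns == pcred->user->user_ns)
			match = (cred->euid == pcred->euid ||
				 cred->euid == pcred->uid);
		else
			match = false;
		rcu_read_unlock();
		return match;
	}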
@@ -5221,7 +5234,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 		goto out_free_cpus_allowed;
 	}
 	retval = -EPERM;
-	if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
+	if (!check_same_owner(p) && !task_ns_capable(p, CAP_SYS_NICE))
 		goto out_unlock;
 
 	retval = security_task_setscheduler(p);
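The effect of this one-line change is that CAP_SYS_NICE is now evaluated against the target task's user namespace rather than only the initial namespace. A sketch of what a task_ns_capable()-style helper boils down to (names and exact definition assumed from the user-namespace series, not shown in this diff):

	/* Sketch: does current hold @cap in the user namespace owning @t's creds? */
	static inline bool task_ns_capable(struct task_struct *t, int cap)
	{
		return ns_capable(task_cred_xxx(t, user)->user_ns, cap);
	}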
@@ -5460,6 +5473,8 @@ EXPORT_SYMBOL(yield);
  * yield_to - yield the current processor to another thread in
  * your thread group, or accelerate that thread toward the
  * processor it's on.
+ * @p: target task
+ * @preempt: whether task preemption is allowed or not
  *
  * It's the caller's job to ensure that the target task struct
  * can't go away on us before we can do any checks.
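How the newly documented parameters are used in practice (illustrative only; other_task is a hypothetical pointer the caller already holds a reference to, per the kernel-doc note above):

	/* Boost other_task toward its CPU; 'true' allows it to preempt us. */
	yield_to(other_task, true);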
@@ -5525,6 +5540,7 @@ void __sched io_schedule(void)
 
 	delayacct_blkio_start();
 	atomic_inc(&rq->nr_iowait);
+	blk_flush_plug(current);
 	current->in_iowait = 1;
 	schedule();
 	current->in_iowait = 0;
@@ -5540,6 +5556,7 @@ long __sched io_schedule_timeout(long timeout)
 
 	delayacct_blkio_start();
 	atomic_inc(&rq->nr_iowait);
+	blk_flush_plug(current);
 	current->in_iowait = 1;
 	ret = schedule_timeout(timeout);
 	current->in_iowait = 0;
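Why the flush also sits in these two helpers (illustrative only; the wait loop and the done flag are hypothetical, not from this patch): a task typically calls io_schedule() while waiting for IO it may itself have plugged, so the request must leave current->plug before the task sleeps on its completion:

	atomic_t done = ATOMIC_INIT(0);		/* hypothetical flag set by the IO completion */

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&done))
			break;
		io_schedule();			/* now also flushes current->plug */
	}
	__set_current_state(TASK_RUNNING);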
@@ -8434,7 +8451,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se;
-	struct rq *rq;
 	int i;
 
 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8447,8 +8463,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	tg->shares = NICE_0_LOAD;
 
 	for_each_possible_cpu(i) {
-		rq = cpu_rq(i);
-
 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
 				      GFP_KERNEL, cpu_to_node(i));
 		if (!cfs_rq)