Diffstat (limited to 'kernel/sched.c')
-rw-r--r--    kernel/sched.c    74
1 files changed, 52 insertions, 22 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 2f76e06bea58..e88689522e66 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -676,6 +676,7 @@ inline void update_rq_clock(struct rq *rq)
 
 /**
  * runqueue_is_locked
+ * @cpu: the processor in question.
  *
  * Returns true if the current cpu runqueue is locked.
  * This interface allows printk to be called with the runqueue lock
@@ -780,7 +781,7 @@ static int sched_feat_open(struct inode *inode, struct file *filp)
 	return single_open(filp, sched_feat_show, NULL);
 }
 
-static struct file_operations sched_feat_fops = {
+static const struct file_operations sched_feat_fops = {
 	.open		= sched_feat_open,
 	.write		= sched_feat_write,
 	.read		= seq_read,
@@ -2311,7 +2312,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 {
 	int cpu, orig_cpu, this_cpu, success = 0;
 	unsigned long flags;
-	struct rq *rq;
+	struct rq *rq, *orig_rq;
 
 	if (!sched_feat(SYNC_WAKEUPS))
 		wake_flags &= ~WF_SYNC;
@@ -2319,7 +2320,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	this_cpu = get_cpu();
 
 	smp_wmb();
-	rq = task_rq_lock(p, &flags);
+	rq = orig_rq = task_rq_lock(p, &flags);
 	update_rq_clock(rq);
 	if (!(p->state & state))
 		goto out;
@@ -2350,6 +2351,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	set_task_cpu(p, cpu);
 
 	rq = task_rq_lock(p, &flags);
+
+	if (rq != orig_rq)
+		update_rq_clock(rq);
+
 	WARN_ON(p->state != TASK_WAKING);
 	cpu = task_cpu(p);
 
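Taken together, the three try_to_wake_up() hunks above make sure the runqueue clock is fresh on whichever runqueue the task finally lands on: re-taking the task's runqueue lock after set_task_cpu() may return a different rq whose clock was never updated under this wakeup. A minimal sketch of the resulting pattern, using only identifiers visible in the hunks (the elided middle section releases the lock and picks the wakeup CPU):

	rq = orig_rq = task_rq_lock(p, &flags);	/* lock and remember the original rq */
	update_rq_clock(rq);			/* clock is now fresh for orig_rq only */

	/* ... lock dropped, wakeup CPU chosen, set_task_cpu(p, cpu) ... */

	rq = task_rq_lock(p, &flags);		/* may be a different runqueue now */
	if (rq != orig_rq)
		update_rq_clock(rq);		/* refresh the new rq's clock as well */
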
@@ -2515,22 +2520,17 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	__sched_fork(p);
 
 	/*
-	 * Make sure we do not leak PI boosting priority to the child.
-	 */
-	p->prio = current->normal_prio;
-
-	/*
 	 * Revert to default priority/policy on fork if requested.
 	 */
 	if (unlikely(p->sched_reset_on_fork)) {
-		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR)
+		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
 			p->policy = SCHED_NORMAL;
-
-		if (p->normal_prio < DEFAULT_PRIO)
-			p->prio = DEFAULT_PRIO;
+			p->normal_prio = p->static_prio;
+		}
 
 		if (PRIO_TO_NICE(p->static_prio) < 0) {
 			p->static_prio = NICE_TO_PRIO(0);
+			p->normal_prio = p->static_prio;
 			set_load_weight(p);
 		}
 
@@ -2541,6 +2541,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
 		p->sched_reset_on_fork = 0;
 	}
 
+	/*
+	 * Make sure we do not leak PI boosting priority to the child.
+	 */
+	p->prio = current->normal_prio;
+
 	if (!rt_prio(p->prio))
 		p->sched_class = &fair_sched_class;
 
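Read together, the two sched_fork() hunks reorder the fork-time priority handling: the reset-on-fork path now recomputes normal_prio from static_prio itself, and only afterwards is p->prio taken from the parent's normal_prio so a PI-boosted parent priority is not inherited. Roughly, the function now reads as follows (assembled from the hunks above; unrelated lines elided):

	__sched_fork(p);

	/*
	 * Revert to default priority/policy on fork if requested.
	 */
	if (unlikely(p->sched_reset_on_fork)) {
		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
			p->policy = SCHED_NORMAL;
			p->normal_prio = p->static_prio;
		}

		if (PRIO_TO_NICE(p->static_prio) < 0) {
			p->static_prio = NICE_TO_PRIO(0);
			p->normal_prio = p->static_prio;
			set_load_weight(p);
		}
		/* ... */
		p->sched_reset_on_fork = 0;
	}

	/*
	 * Make sure we do not leak PI boosting priority to the child.
	 */
	p->prio = current->normal_prio;

	if (!rt_prio(p->prio))
		p->sched_class = &fair_sched_class;
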
@@ -2581,8 +2586,6 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	BUG_ON(p->state != TASK_RUNNING);
 	update_rq_clock(rq);
 
-	p->prio = effective_prio(p);
-
 	if (!p->sched_class->task_new || !current->se.on_rq) {
 		activate_task(rq, p, 0);
 	} else {
@@ -3658,6 +3661,7 @@ static void update_group_power(struct sched_domain *sd, int cpu)
 
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
+ * @sd: The sched_domain whose statistics are to be updated.
  * @group: sched_group whose statistics are to be updated.
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
@@ -6720,9 +6724,6 @@ EXPORT_SYMBOL(yield);
 /*
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
- *
- * But don't do that if it is a deliberate, throttling IO wait (this task
- * has set its backing_dev_info: the queue against which it should throttle)
  */
 void __sched io_schedule(void)
 {
@@ -10312,7 +10313,7 @@ static int sched_rt_global_constraints(void)
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 int sched_rt_handler(struct ctl_table *table, int write,
-		struct file *filp, void __user *buffer, size_t *lenp,
+		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
 {
 	int ret;
@@ -10323,7 +10324,7 @@ int sched_rt_handler(struct ctl_table *table, int write,
 	old_period = sysctl_sched_rt_period;
 	old_runtime = sysctl_sched_rt_runtime;
 
-	ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 
 	if (!ret && write) {
 		ret = sched_rt_global_constraints();
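The two sched_rt_handler() hunks above track a change to the sysctl proc_handler calling convention: handlers no longer receive a struct file * and pass only the table, write flag, user buffer, length and offset down to proc_dointvec(). As a hedged illustration, a handler written against the new signature would look roughly like this (the handler name is hypothetical and not part of this patch):

static int my_int_handler(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* same forwarding pattern as sched_rt_handler() above */
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
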
@@ -10377,8 +10378,7 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 }
 
 static int
-cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		      struct task_struct *tsk)
+cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
 	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
@@ -10388,15 +10388,45 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	if (tsk->sched_class != &fair_sched_class)
 		return -EINVAL;
 #endif
+	return 0;
+}
 
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+		      struct task_struct *tsk, bool threadgroup)
+{
+	int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
+	if (retval)
+		return retval;
+	if (threadgroup) {
+		struct task_struct *c;
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+			retval = cpu_cgroup_can_attach_task(cgrp, c);
+			if (retval) {
+				rcu_read_unlock();
+				return retval;
+			}
+		}
+		rcu_read_unlock();
+	}
 	return 0;
 }
 
 static void
 cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		  struct cgroup *old_cont, struct task_struct *tsk)
+		  struct cgroup *old_cont, struct task_struct *tsk,
+		  bool threadgroup)
 {
 	sched_move_task(tsk);
+	if (threadgroup) {
+		struct task_struct *c;
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+			sched_move_task(c);
+		}
+		rcu_read_unlock();
+	}
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED