Diffstat (limited to 'kernel/sched/core.c')
 -rw-r--r--  kernel/sched/core.c | 69
 1 file changed, 26 insertions(+), 43 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f9123a82cbb6..57bd333bc4ab 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1016,13 +1016,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 		rq_clock_skip_update(rq, true);
 }
 
-static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
-
-void register_task_migration_notifier(struct notifier_block *n)
-{
-	atomic_notifier_chain_register(&task_migration_notifier, n);
-}
-
 #ifdef CONFIG_SMP
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
@@ -1053,18 +1046,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	trace_sched_migrate_task(p, new_cpu);
 
 	if (task_cpu(p) != new_cpu) {
-		struct task_migration_notifier tmn;
-
 		if (p->sched_class->migrate_task_rq)
 			p->sched_class->migrate_task_rq(p, new_cpu);
 		p->se.nr_migrations++;
 		perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
-
-		tmn.task = p;
-		tmn.from_cpu = task_cpu(p);
-		tmn.to_cpu = new_cpu;
-
-		atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
 	}
 
 	__set_task_cpu(p, new_cpu);
@@ -3315,15 +3300,18 @@ static void __setscheduler_params(struct task_struct *p,
 
 /* Actually do priority change: must hold pi & rq lock. */
 static void __setscheduler(struct rq *rq, struct task_struct *p,
-			   const struct sched_attr *attr)
+			   const struct sched_attr *attr, bool keep_boost)
 {
 	__setscheduler_params(p, attr);
 
 	/*
-	 * If we get here, there was no pi waiters boosting the
-	 * task. It is safe to use the normal prio.
+	 * Keep a potential priority boosting if called from
+	 * sched_setscheduler().
 	 */
-	p->prio = normal_prio(p);
+	if (keep_boost)
+		p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
+	else
+		p->prio = normal_prio(p);
 
 	if (dl_prio(p->prio))
 		p->sched_class = &dl_sched_class;
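Note: the keep_boost path defers to rt_mutex_get_effective_prio() so that a task currently boosted by a priority-inheritance waiter is never weakened while its normal parameters are updated. Below is only a small userspace sketch of that clamp (illustrative names and plain ints, not the kernel implementation; in the kernel a numerically lower prio is the stronger priority):

#include <stdio.h>

/*
 * Sketch of the "keep the boost" clamp with plain ints standing in for
 * p->prio values (lower number == stronger priority). While a PI waiter
 * is boosting the task, its effective priority must not become
 * numerically larger (weaker) than the top waiter's priority.
 */
static int effective_prio(int has_pi_waiters, int top_waiter_prio, int normal_prio)
{
	if (!has_pi_waiters)
		return normal_prio;	/* nothing to preserve */
	if (top_waiter_prio < normal_prio)
		return top_waiter_prio;	/* keep the stronger boosted prio */
	return normal_prio;
}

int main(void)
{
	/* Boosted to 20 by a waiter, user asks for a weaker 40: stays at 20. */
	printf("%d\n", effective_prio(1, 20, 40));
	/* No waiters: the requested normal priority is used as-is. */
	printf("%d\n", effective_prio(0, 20, 40));
	return 0;
}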
@@ -3423,7 +3411,7 @@ static int __sched_setscheduler(struct task_struct *p,
 	int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
 		      MAX_RT_PRIO - 1 - attr->sched_priority;
 	int retval, oldprio, oldpolicy = -1, queued, running;
-	int policy = attr->sched_policy;
+	int new_effective_prio, policy = attr->sched_policy;
 	unsigned long flags;
 	const struct sched_class *prev_class;
 	struct rq *rq;
@@ -3605,15 +3593,14 @@ change:
 	oldprio = p->prio;
 
 	/*
-	 * Special case for priority boosted tasks.
-	 *
-	 * If the new priority is lower or equal (user space view)
-	 * than the current (boosted) priority, we just store the new
+	 * Take priority boosted tasks into account. If the new
+	 * effective priority is unchanged, we just store the new
 	 * normal parameters and do not touch the scheduler class and
 	 * the runqueue. This will be done when the task deboost
 	 * itself.
 	 */
-	if (rt_mutex_check_prio(p, newprio)) {
+	new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
+	if (new_effective_prio == oldprio) {
 		__setscheduler_params(p, attr);
 		task_rq_unlock(rq, p, &flags);
 		return 0;
@@ -3627,7 +3614,7 @@ change:
 		put_prev_task(rq, p);
 
 	prev_class = p->sched_class;
-	__setscheduler(rq, p, attr);
+	__setscheduler(rq, p, attr, true);
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
@@ -7012,27 +6999,23 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
 	unsigned long flags;
 	long cpu = (long)hcpu;
 	struct dl_bw *dl_b;
+	bool overflow;
+	int cpus;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
+	switch (action) {
 	case CPU_DOWN_PREPARE:
-		/* explicitly allow suspend */
-		if (!(action & CPU_TASKS_FROZEN)) {
-			bool overflow;
-			int cpus;
-
-			rcu_read_lock_sched();
-			dl_b = dl_bw_of(cpu);
+		rcu_read_lock_sched();
+		dl_b = dl_bw_of(cpu);
 
-			raw_spin_lock_irqsave(&dl_b->lock, flags);
-			cpus = dl_bw_cpus(cpu);
-			overflow = __dl_overflow(dl_b, cpus, 0, 0);
-			raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+		raw_spin_lock_irqsave(&dl_b->lock, flags);
+		cpus = dl_bw_cpus(cpu);
+		overflow = __dl_overflow(dl_b, cpus, 0, 0);
+		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
-			rcu_read_unlock_sched();
+		rcu_read_unlock_sched();
 
-			if (overflow)
-				return notifier_from_errno(-EBUSY);
-		}
+		if (overflow)
+			return notifier_from_errno(-EBUSY);
 		cpuset_update_active_cpus(false);
 		break;
 	case CPU_DOWN_PREPARE_FROZEN:
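Note: with the suspend special case gone, CPU_DOWN_PREPARE always runs the deadline-bandwidth admission check before the CPU goes away. Below is a simplified, self-contained model of the question __dl_overflow() answers here (illustrative arithmetic only, not the kernel's fixed-point bandwidth code):

#include <stdio.h>
#include <stdbool.h>

/*
 * Simplified model: per_cpu_bw is the bandwidth share each CPU may give
 * to SCHED_DEADLINE tasks, total_bw is the bandwidth already admitted in
 * the root domain, and cpus is how many CPUs would remain after the
 * unplug. Returning true corresponds to refusing the hotplug with -EBUSY
 * in cpuset_cpu_inactive().
 */
static bool dl_would_overflow(unsigned long long per_cpu_bw,
			      unsigned long long total_bw, int cpus)
{
	return per_cpu_bw * cpus < total_bw;
}

int main(void)
{
	/* Three CPUs remain, 50 units allowed per CPU, 120 admitted: fits. */
	printf("%d\n", dl_would_overflow(50, 120, 3));
	/* Only two CPUs remain for the same admitted load: refuse. */
	printf("%d\n", dl_would_overflow(50, 120, 2));
	return 0;
}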
@@ -7361,7 +7344,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 	queued = task_on_rq_queued(p);
 	if (queued)
 		dequeue_task(rq, p, 0);
-	__setscheduler(rq, p, &attr);
+	__setscheduler(rq, p, &attr, false);
 	if (queued) {
 		enqueue_task(rq, p, 0);
 		resched_curr(rq);