Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	|	54 ++++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 40 insertions(+), 14 deletions(-)
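In short: struct rq gains a per-runqueue online flag, new helpers set_rq_online()/set_rq_offline() flip that flag and notify every scheduling class through rq_online/rq_offline hooks, and the open-coded join_domain/leave_domain loops in rq_attach_root() are replaced by calls to those helpers.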
diff --git a/kernel/sched.c b/kernel/sched.c
index dc0be113f41d..f0ed81b71282 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -529,6 +529,7 @@ struct rq {
 	int push_cpu;
 	/* cpu of this runqueue: */
 	int cpu;
+	int online;
 
 	struct task_struct *migration_thread;
 	struct list_head migration_queue;
@@ -1498,6 +1499,8 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 #endif
 
 #define sched_class_highest (&rt_sched_class)
+#define for_each_class(class) \
+   for (class = sched_class_highest; class; class = class->next)
 
 static inline void inc_load(struct rq *rq, const struct task_struct *p)
 {
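The new for_each_class() macro walks the singly linked list of scheduling classes, starting at the highest-priority class (rt_sched_class) and following each class's next pointer until it reaches NULL. A minimal usage sketch (the loop body is hypothetical; set_rq_online() below shows the real pattern):

	const struct sched_class *class;

	for_each_class(class) {
		/* visit each class, highest-priority (RT) first */
	}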
@@ -6065,6 +6068,36 @@ static void unregister_sched_domain_sysctl(void)
 }
 #endif
 
+static void set_rq_online(struct rq *rq)
+{
+	if (!rq->online) {
+		const struct sched_class *class;
+
+		cpu_set(rq->cpu, rq->rd->online);
+		rq->online = 1;
+
+		for_each_class(class) {
+			if (class->rq_online)
+				class->rq_online(rq);
+		}
+	}
+}
+
+static void set_rq_offline(struct rq *rq)
+{
+	if (rq->online) {
+		const struct sched_class *class;
+
+		for_each_class(class) {
+			if (class->rq_offline)
+				class->rq_offline(rq);
+		}
+
+		cpu_clear(rq->cpu, rq->rd->online);
+		rq->online = 0;
+	}
+}
+
 /*
  * migration_call - callback that gets triggered when a CPU is added.
  * Here we can start up the necessary migration thread for the new CPU.
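The rq_online/rq_offline members consulted by these helpers are added to struct sched_class outside this file (the diffstat above is limited to kernel/sched.c). A minimal sketch of how a scheduling class might fill them in; the names and bodies here are hypothetical, not part of this patch:

	static void rq_online_example(struct rq *rq)
	{
		/* hypothetical: advertise rq->cpu in class-private state */
	}

	static void rq_offline_example(struct rq *rq)
	{
		/* hypothetical: retract rq->cpu before it leaves the domain */
	}

	static const struct sched_class example_sched_class = {
		/* ...the usual enqueue/dequeue/pick_next methods elided... */
		.rq_online	= rq_online_example,
		.rq_offline	= rq_offline_example,
	};

Classes that don't care simply leave the hooks NULL; the for_each_class() loops above skip them.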
@@ -6102,7 +6135,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_lock_irqsave(&rq->lock, flags);
 		if (rq->rd) {
 			BUG_ON(!cpu_isset(cpu, rq->rd->span));
-			cpu_set(cpu, rq->rd->online);
+
+			set_rq_online(rq);
 		}
 		spin_unlock_irqrestore(&rq->lock, flags);
 		break;
@@ -6163,7 +6197,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_lock_irqsave(&rq->lock, flags);
 		if (rq->rd) {
 			BUG_ON(!cpu_isset(cpu, rq->rd->span));
-			cpu_clear(cpu, rq->rd->online);
+			set_rq_offline(rq);
 		}
 		spin_unlock_irqrestore(&rq->lock, flags);
 		break;
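Both hotplug paths now go through the helpers: CPU_ONLINE calls set_rq_online() and CPU_DOWN_PREPARE calls set_rq_offline(), in each case under rq->lock and only after the BUG_ON() has confirmed the CPU belongs to the root domain's span. Because each helper checks rq->online first, a repeated call on the same runqueue is a no-op.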
@@ -6385,20 +6419,16 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 {
 	unsigned long flags;
-	const struct sched_class *class;
 
 	spin_lock_irqsave(&rq->lock, flags);
 
 	if (rq->rd) {
 		struct root_domain *old_rd = rq->rd;
 
-		for (class = sched_class_highest; class; class = class->next) {
-			if (class->leave_domain)
-				class->leave_domain(rq);
-		}
+		if (cpu_isset(rq->cpu, old_rd->online))
+			set_rq_offline(rq);
 
 		cpu_clear(rq->cpu, old_rd->span);
-		cpu_clear(rq->cpu, old_rd->online);
 
 		if (atomic_dec_and_test(&old_rd->refcount))
 			kfree(old_rd);
@@ -6409,12 +6439,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 
 	cpu_set(rq->cpu, rd->span);
 	if (cpu_isset(rq->cpu, cpu_online_map))
-		cpu_set(rq->cpu, rd->online);
-
-	for (class = sched_class_highest; class; class = class->next) {
-		if (class->join_domain)
-			class->join_domain(rq);
-	}
+		set_rq_online(rq);
 
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
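With this, rq_attach_root() no longer open-codes the class loops or manipulates rd->online directly: detaching from the old root domain calls set_rq_offline() only if the CPU is still marked online there, attaching calls set_rq_online() only if the CPU is in cpu_online_map, and the rq->online flag keeps the class callbacks from firing twice in either direction.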
@@ -7824,6 +7849,7 @@ void __init sched_init(void)
 		rq->next_balance = jiffies;
 		rq->push_cpu = 0;
 		rq->cpu = i;
+		rq->online = 0;
 		rq->migration_thread = NULL;
 		INIT_LIST_HEAD(&rq->migration_queue);
 		rq_attach_root(rq, &def_root_domain);
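Initializing rq->online to 0 keeps the flag consistent with rd->online from the start, so the first transition through set_rq_online() (via rq_attach_root() on def_root_domain for any CPU already in cpu_online_map) actually performs the class notifications instead of being skipped as a repeat.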