-rw-r--r--  include/linux/sched.h |  4
-rw-r--r--  kernel/sched.c        | 54
-rw-r--r--  kernel/sched_rt.c     | 24
3 files changed, 60 insertions(+), 22 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ea2857b99596..d25acf600a32 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -903,8 +903,8 @@ struct sched_class {
 	void (*set_cpus_allowed)(struct task_struct *p,
 				 const cpumask_t *newmask);
 
-	void (*join_domain)(struct rq *rq);
-	void (*leave_domain)(struct rq *rq);
+	void (*rq_online)(struct rq *rq);
+	void (*rq_offline)(struct rq *rq);
 
 	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
 			       int running);
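
The hook rename above keeps the signatures unchanged, so a scheduling class only has to swap its method names and struct initializer entries. A minimal sketch of the shape a class implementation now takes (the "example" identifiers are hypothetical, for illustration only; the real rt class wiring appears in kernel/sched_rt.c below):

/* Hypothetical stubs -- not part of this patch. */
static void rq_online_example(struct rq *rq)
{
	/* rq->lock is held; publish this runqueue's state globally */
}

static void rq_offline_example(struct rq *rq)
{
	/* rq->lock is held; withdraw this runqueue from global state */
}

static const struct sched_class example_sched_class = {
	/* ... other methods ... */
	.rq_online	= rq_online_example,
	.rq_offline	= rq_offline_example,
};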
diff --git a/kernel/sched.c b/kernel/sched.c
index dc0be113f41d..f0ed81b71282 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -529,6 +529,7 @@ struct rq {
 	int push_cpu;
 	/* cpu of this runqueue: */
 	int cpu;
+	int online;
 
 	struct task_struct *migration_thread;
 	struct list_head migration_queue;
@@ -1498,6 +1499,8 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 #endif
 
 #define sched_class_highest (&rt_sched_class)
+#define for_each_class(class) \
+	for (class = sched_class_highest; class; class = class->next)
 
 static inline void inc_load(struct rq *rq, const struct task_struct *p)
 {
@@ -6065,6 +6068,36 @@ static void unregister_sched_domain_sysctl(void)
 }
 #endif
 
+static void set_rq_online(struct rq *rq)
+{
+	if (!rq->online) {
+		const struct sched_class *class;
+
+		cpu_set(rq->cpu, rq->rd->online);
+		rq->online = 1;
+
+		for_each_class(class) {
+			if (class->rq_online)
+				class->rq_online(rq);
+		}
+	}
+}
+
+static void set_rq_offline(struct rq *rq)
+{
+	if (rq->online) {
+		const struct sched_class *class;
+
+		for_each_class(class) {
+			if (class->rq_offline)
+				class->rq_offline(rq);
+		}
+
+		cpu_clear(rq->cpu, rq->rd->online);
+		rq->online = 0;
+	}
+}
+
 /*
  * migration_call - callback that gets triggered when a CPU is added.
  * Here we can start up the necessary migration thread for the new CPU.
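
Worth noting is the ordering inside the two new helpers: set_rq_online() raises rq->online before walking the classes, while set_rq_offline() walks the classes first and clears the flag afterwards, so in both directions the per-class hooks run with rq->online == 1. The rq->online guards added to kernel/sched_rt.c below rely on this; rt_clear_overload(), for example, would otherwise early-return during the offline transition and leave a stale bit in rd->rto_mask. A condensed, illustrative restatement (not patch code):

/* Both transitions run the class hooks while rq->online == 1, so the
 * !rq->online early-returns in per-class code fire only for updates
 * attempted outside a transition, never during one. */
static void transition_sketch(struct rq *rq, int online)
{
	if (online) {
		rq->online = 1;		/* flag first ...              */
		/* ... then class->rq_online(rq) for each class      */
	} else {
		/* class->rq_offline(rq) for each class first ...    */
		rq->online = 0;		/* ... then clear the flag     */
	}
}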
@@ -6102,7 +6135,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_lock_irqsave(&rq->lock, flags);
 		if (rq->rd) {
 			BUG_ON(!cpu_isset(cpu, rq->rd->span));
-			cpu_set(cpu, rq->rd->online);
+
+			set_rq_online(rq);
 		}
 		spin_unlock_irqrestore(&rq->lock, flags);
 		break;
@@ -6163,7 +6197,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_lock_irqsave(&rq->lock, flags);
 		if (rq->rd) {
 			BUG_ON(!cpu_isset(cpu, rq->rd->span));
-			cpu_clear(cpu, rq->rd->online);
+			set_rq_offline(rq);
 		}
 		spin_unlock_irqrestore(&rq->lock, flags);
 		break;
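
Both hotplug directions now funnel through the new helpers under the same rq->lock protection. Condensed, the notifier these two hunks live in looks roughly like this (a sketch trimmed to the branches the patch touches; the other cases and the BUG_ON checks are omitted):

static int migration_call_sketch(struct notifier_block *nfb,
				 unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	switch (action) {
	case CPU_ONLINE:
		spin_lock_irqsave(&rq->lock, flags);
		if (rq->rd)		/* was: cpu_set(cpu, rq->rd->online) */
			set_rq_online(rq);
		spin_unlock_irqrestore(&rq->lock, flags);
		break;
	case CPU_DEAD:
		spin_lock_irqsave(&rq->lock, flags);
		if (rq->rd)		/* was: cpu_clear(cpu, rq->rd->online) */
			set_rq_offline(rq);
		spin_unlock_irqrestore(&rq->lock, flags);
		break;
	}
	return NOTIFY_OK;
}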
@@ -6385,20 +6419,16 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 {
 	unsigned long flags;
-	const struct sched_class *class;
 
 	spin_lock_irqsave(&rq->lock, flags);
 
 	if (rq->rd) {
 		struct root_domain *old_rd = rq->rd;
 
-		for (class = sched_class_highest; class; class = class->next) {
-			if (class->leave_domain)
-				class->leave_domain(rq);
-		}
+		if (cpu_isset(rq->cpu, old_rd->online))
+			set_rq_offline(rq);
 
 		cpu_clear(rq->cpu, old_rd->span);
-		cpu_clear(rq->cpu, old_rd->online);
 
 		if (atomic_dec_and_test(&old_rd->refcount))
 			kfree(old_rd);
@@ -6409,12 +6439,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 
 	cpu_set(rq->cpu, rd->span);
 	if (cpu_isset(rq->cpu, cpu_online_map))
-		cpu_set(rq->cpu, rd->online);
-
-	for (class = sched_class_highest; class; class = class->next) {
-		if (class->join_domain)
-			class->join_domain(rq);
-	}
+		set_rq_online(rq);
 
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -7824,6 +7849,7 @@ void __init sched_init(void)
 		rq->next_balance = jiffies;
 		rq->push_cpu = 0;
 		rq->cpu = i;
+		rq->online = 0;
 		rq->migration_thread = NULL;
 		INIT_LIST_HEAD(&rq->migration_queue);
 		rq_attach_root(rq, &def_root_domain);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 44b06d75416e..e4821593d4de 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -12,6 +12,9 @@ static inline int rt_overloaded(struct rq *rq)
 
 static inline void rt_set_overload(struct rq *rq)
 {
+	if (!rq->online)
+		return;
+
 	cpu_set(rq->cpu, rq->rd->rto_mask);
 	/*
 	 * Make sure the mask is visible before we set
@@ -26,6 +29,9 @@ static inline void rt_set_overload(struct rq *rq)
 
 static inline void rt_clear_overload(struct rq *rq)
 {
+	if (!rq->online)
+		return;
+
 	/* the order here really doesn't matter */
 	atomic_dec(&rq->rd->rto_count);
 	cpu_clear(rq->cpu, rq->rd->rto_mask);
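
With these two guards in place an offline runqueue can no longer publish or retract itself in rd->rto_mask; its overload state stays local until rq_online_rt() (the renamed hook further down) re-asserts it from rq->rt.overloaded. Pulled out into one hypothetical helper for clarity (illustrative only; the real code also maintains rd->rto_count):

/* Root-domain-wide overload state is touched only while online;
 * the local rq->rt.overloaded flag keeps tracking the condition. */
static inline void publish_overload_sketch(struct rq *rq, int overloaded)
{
	if (!rq->online)	/* the new guard: stay local */
		return;
	if (overloaded)
		cpu_set(rq->cpu, rq->rd->rto_mask);
	else
		cpu_clear(rq->cpu, rq->rd->rto_mask);
}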
@@ -394,7 +400,10 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
 		struct rq *rq = rq_of_rt_rq(rt_rq);
 		rt_rq->highest_prio = rt_se_prio(rt_se);
-		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_se_prio(rt_se));
+
+		if (rq->online)
+			cpupri_set(&rq->rd->cpupri, rq->cpu,
+				   rt_se_prio(rt_se));
 	}
 #endif
 #ifdef CONFIG_SMP
@@ -448,7 +457,10 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
 	if (rt_rq->highest_prio != highest_prio) {
 		struct rq *rq = rq_of_rt_rq(rt_rq);
-		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio);
+
+		if (rq->online)
+			cpupri_set(&rq->rd->cpupri, rq->cpu,
+				   rt_rq->highest_prio);
 	}
 
 	update_rt_migration(rq_of_rt_rq(rt_rq));
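
The cpupri updates get the same treatment in both inc_rt_tasks() and dec_rt_tasks(): the per-rq highest_prio bookkeeping is unconditional, while the root-domain-wide cpupri vector only hears about online CPUs. Condensed into one hypothetical helper (not patch code):

/* Local priority state is updated unconditionally; the global
 * cpupri publish is gated on the runqueue being online. */
static void set_highest_prio_sketch(struct rt_rq *rt_rq, int prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	rt_rq->highest_prio = prio;	/* local: always  */
	if (rq->online)			/* global: gated  */
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}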
@@ -1154,7 +1166,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 }
 
 /* Assumes rq->lock is held */
-static void join_domain_rt(struct rq *rq)
+static void rq_online_rt(struct rq *rq)
 {
 	if (rq->rt.overloaded)
 		rt_set_overload(rq);
@@ -1163,7 +1175,7 @@ static void join_domain_rt(struct rq *rq)
 }
 
 /* Assumes rq->lock is held */
-static void leave_domain_rt(struct rq *rq)
+static void rq_offline_rt(struct rq *rq)
 {
 	if (rq->rt.overloaded)
 		rt_clear_overload(rq);
@@ -1331,8 +1343,8 @@ static const struct sched_class rt_sched_class = {
 	.load_balance		= load_balance_rt,
 	.move_one_task		= move_one_task_rt,
 	.set_cpus_allowed	= set_cpus_allowed_rt,
-	.join_domain		= join_domain_rt,
-	.leave_domain		= leave_domain_rt,
+	.rq_online		= rq_online_rt,
+	.rq_offline		= rq_offline_rt,
 	.pre_schedule		= pre_schedule_rt,
 	.post_schedule		= post_schedule_rt,
 	.task_wake_up		= task_wake_up_rt,