-rw-r--r--  kernel/sched.c     69
-rw-r--r--  kernel/sched_rt.c  26
2 files changed, 64 insertions, 31 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 2723d7a4a42d..93309c3034de 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -487,14 +487,14 @@ struct rt_rq {
  */
 struct root_domain {
 	atomic_t refcount;
-	cpumask_t span;
-	cpumask_t online;
+	cpumask_var_t span;
+	cpumask_var_t online;
 
 	/*
 	 * The "RT overload" flag: it gets set if a CPU has more than
 	 * one runnable RT task.
 	 */
-	cpumask_t rto_mask;
+	cpumask_var_t rto_mask;
 	atomic_t rto_count;
 #ifdef CONFIG_SMP
 	struct cpupri cpupri;
@@ -6444,7 +6444,7 @@ static void set_rq_online(struct rq *rq)
 	if (!rq->online) {
 		const struct sched_class *class;
 
-		cpu_set(rq->cpu, rq->rd->online);
+		cpumask_set_cpu(rq->cpu, rq->rd->online);
 		rq->online = 1;
 
 		for_each_class(class) {
@@ -6464,7 +6464,7 @@ static void set_rq_offline(struct rq *rq)
 			class->rq_offline(rq);
 		}
 
-		cpu_clear(rq->cpu, rq->rd->online);
+		cpumask_clear_cpu(rq->cpu, rq->rd->online);
 		rq->online = 0;
 	}
 }
@@ -6505,7 +6505,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		rq = cpu_rq(cpu);
 		spin_lock_irqsave(&rq->lock, flags);
 		if (rq->rd) {
-			BUG_ON(!cpu_isset(cpu, rq->rd->span));
+			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 
 			set_rq_online(rq);
 		}
@@ -6567,7 +6567,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		rq = cpu_rq(cpu);
 		spin_lock_irqsave(&rq->lock, flags);
 		if (rq->rd) {
-			BUG_ON(!cpu_isset(cpu, rq->rd->span));
+			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 			set_rq_offline(rq);
 		}
 		spin_unlock_irqrestore(&rq->lock, flags);
@@ -6768,6 +6768,14 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 	return 1;
 }
 
+static void free_rootdomain(struct root_domain *rd)
+{
+	free_cpumask_var(rd->rto_mask);
+	free_cpumask_var(rd->online);
+	free_cpumask_var(rd->span);
+	kfree(rd);
+}
+
 static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 {
 	unsigned long flags;
@@ -6777,38 +6785,60 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	if (rq->rd) {
 		struct root_domain *old_rd = rq->rd;
 
-		if (cpu_isset(rq->cpu, old_rd->online))
+		if (cpumask_test_cpu(rq->cpu, old_rd->online))
 			set_rq_offline(rq);
 
-		cpu_clear(rq->cpu, old_rd->span);
+		cpumask_clear_cpu(rq->cpu, old_rd->span);
 
 		if (atomic_dec_and_test(&old_rd->refcount))
-			kfree(old_rd);
+			free_rootdomain(old_rd);
 	}
 
 	atomic_inc(&rd->refcount);
 	rq->rd = rd;
 
-	cpu_set(rq->cpu, rd->span);
-	if (cpu_isset(rq->cpu, cpu_online_map))
+	cpumask_set_cpu(rq->cpu, rd->span);
+	if (cpumask_test_cpu(rq->cpu, cpu_online_mask))
 		set_rq_online(rq);
 
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
 
-static void init_rootdomain(struct root_domain *rd)
+static int init_rootdomain(struct root_domain *rd, bool bootmem)
 {
 	memset(rd, 0, sizeof(*rd));
 
-	cpus_clear(rd->span);
-	cpus_clear(rd->online);
+	if (bootmem) {
+		alloc_bootmem_cpumask_var(&def_root_domain.span);
+		alloc_bootmem_cpumask_var(&def_root_domain.online);
+		alloc_bootmem_cpumask_var(&def_root_domain.rto_mask);
+		cpupri_init(&rd->cpupri);
+		return 0;
+	}
+
+	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+		goto free_rd;
+	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+		goto free_span;
+	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+		goto free_online;
 
 	cpupri_init(&rd->cpupri);
+	return 0;
+
+free_online:
+	free_cpumask_var(rd->online);
+free_span:
+	free_cpumask_var(rd->span);
+free_rd:
+	kfree(rd);
+	return -ENOMEM;
 }
 
 static void init_defrootdomain(void)
 {
-	init_rootdomain(&def_root_domain);
+	init_rootdomain(&def_root_domain, true);
+
 	atomic_set(&def_root_domain.refcount, 1);
 }
 
@@ -6820,7 +6850,10 @@ static struct root_domain *alloc_rootdomain(void)
 	if (!rd)
 		return NULL;
 
-	init_rootdomain(rd);
+	if (init_rootdomain(rd, false) != 0) {
+		kfree(rd);
+		return NULL;
+	}
 
 	return rd;
 }
@@ -7632,7 +7665,7 @@ free_sched_groups:
 #ifdef CONFIG_NUMA
 error:
 	free_sched_groups(cpu_map, tmpmask);
-	kfree(rd);
+	free_rootdomain(rd);
 	goto free_tmpmask;
 #endif
 }
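
Note on the kernel/sched.c hunks above: with CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is heap-allocated, so init_rootdomain() now has to allocate each mask and unwind on failure, and free_rootdomain() has to release them before kfree(). A minimal stand-alone sketch of that allocate/unwind pattern follows; the names three_masks, three_masks_init and three_masks_free are illustrative only, not part of the patch.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative stand-in for the span/online/rto_mask trio in root_domain. */
struct three_masks {
	cpumask_var_t a;
	cpumask_var_t b;
	cpumask_var_t c;
};

static int three_masks_init(struct three_masks *m)
{
	/* With CONFIG_CPUMASK_OFFSTACK=y these are real allocations that can
	 * fail; without it, alloc_cpumask_var() always succeeds. */
	if (!alloc_cpumask_var(&m->a, GFP_KERNEL))
		return -ENOMEM;
	if (!alloc_cpumask_var(&m->b, GFP_KERNEL))
		goto free_a;
	if (!alloc_cpumask_var(&m->c, GFP_KERNEL))
		goto free_b;

	/* alloc_cpumask_var() does not zero the mask, so clear explicitly. */
	cpumask_clear(m->a);
	cpumask_clear(m->b);
	cpumask_clear(m->c);
	return 0;

free_b:
	free_cpumask_var(m->b);
free_a:
	free_cpumask_var(m->a);
	return -ENOMEM;
}

static void three_masks_free(struct three_masks *m)
{
	/* Release in reverse order of allocation, as free_rootdomain() does. */
	free_cpumask_var(m->c);
	free_cpumask_var(m->b);
	free_cpumask_var(m->a);
}
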
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 4cd813abc23a..820fc422c6df 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -15,7 +15,7 @@ static inline void rt_set_overload(struct rq *rq)
 	if (!rq->online)
 		return;
 
-	cpu_set(rq->cpu, rq->rd->rto_mask);
+	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
 	/*
 	 * Make sure the mask is visible before we set
 	 * the overload count. That is checked to determine
@@ -34,7 +34,7 @@ static inline void rt_clear_overload(struct rq *rq)
 
 	/* the order here really doesn't matter */
 	atomic_dec(&rq->rd->rto_count);
-	cpu_clear(rq->cpu, rq->rd->rto_mask);
+	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 }
 
 static void update_rt_migration(struct rq *rq)
@@ -139,14 +139,14 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se)
 }
 
 #ifdef CONFIG_SMP
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
 	return cpu_rq(smp_processor_id())->rd->span;
 }
 #else
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
-	return cpu_online_map;
+	return cpu_online_mask;
 }
 #endif
 
@@ -212,9 +212,9 @@ static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 	return rt_rq->rt_throttled;
 }
 
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
-	return cpu_online_map;
+	return cpu_online_mask;
 }
 
 static inline
@@ -241,11 +241,11 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 	int i, weight, more = 0;
 	u64 rt_period;
 
-	weight = cpus_weight(rd->span);
+	weight = cpumask_weight(rd->span);
 
 	spin_lock(&rt_b->rt_runtime_lock);
 	rt_period = ktime_to_ns(rt_b->rt_period);
-	for_each_cpu_mask_nr(i, rd->span) {
+	for_each_cpu(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
@@ -324,7 +324,7 @@ static void __disable_runtime(struct rq *rq)
 	/*
 	 * Greedy reclaim, take back as much as we can.
 	 */
-	for_each_cpu_mask(i, rd->span) {
+	for_each_cpu(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
@@ -429,13 +429,13 @@ static inline int balance_runtime(struct rt_rq *rt_rq)
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
 	int i, idle = 1;
-	cpumask_t span;
+	const struct cpumask *span;
 
 	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
 		return 1;
 
 	span = sched_rt_period_mask();
-	for_each_cpu_mask(i, span) {
+	for_each_cpu(i, span) {
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 		struct rq *rq = rq_of_rt_rq(rt_rq);
@@ -1181,7 +1181,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 	next = pick_next_task_rt(this_rq);
 
-	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
+	for_each_cpu(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
 