author    Gregory Haskins <ghaskins@novell.com>    2008-01-25 15:08:26 -0500
committer Ingo Molnar <mingo@elte.hu>              2008-01-25 15:08:26 -0500
commit    dc938520d2bf343b239795cfa24e4f44649358dc
tree      576a88bdc234cf663e649c058392478cf24e1f62 /kernel/sched.c
parent    f85d6c7168887e6659f4d00fa5f34fa23dbde1bb
sched: dynamically update the root-domain span/online maps
The baseline code statically builds the span maps when the domain is
formed. Previous attempts at dynamically updating the maps caused a
suspend-to-ram regression, which should now be fixed.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
CC: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
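The shape of the change is easier to see in isolation. Below is a minimal
userspace sketch of the new bookkeeping; it is not kernel code: cpumask_t is
reduced to a plain unsigned long bitmask, the runqueue to its CPU id, and the
rq lock and sched-class join/leave hooks are omitted. It shows the pattern
the patch adopts: each runqueue sets and clears its own bit in the root
domain's span/online maps as it attaches and detaches, instead of the maps
being copied once at domain-build time.

#include <stdio.h>
#include <stdlib.h>

struct root_domain {
	int refcount;
	unsigned long span;	/* CPUs attached to this root domain */
	unsigned long online;	/* attached CPUs that are actually online */
};

struct rq {
	int cpu;
	struct root_domain *rd;
};

/* stand-in for the kernel's cpu_online_map; assume CPUs 0 and 1 online */
static unsigned long cpu_online_map = 0x3UL;

static void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	if (rq->rd) {
		struct root_domain *old_rd = rq->rd;

		/* detach: drop this CPU's bit from the old domain's maps */
		old_rd->span &= ~(1UL << rq->cpu);
		old_rd->online &= ~(1UL << rq->cpu);

		/* dropping the last reference frees the domain
		 * (kfree() in the kernel; never reached for def_rd here) */
		if (--old_rd->refcount == 0)
			free(old_rd);
	}

	rd->refcount++;
	rq->rd = rd;

	/* attach: set our span bit; set online only if the CPU is online */
	rd->span |= 1UL << rq->cpu;
	if (cpu_online_map & (1UL << rq->cpu))
		rd->online |= 1UL << rq->cpu;
}

int main(void)
{
	/* refcount starts at 1 so the default domain is never freed,
	 * mirroring init_defrootdomain() in the patch below */
	struct root_domain def_rd = { .refcount = 1 };
	struct rq rq0 = { .cpu = 0 }, rq1 = { .cpu = 1 };

	rq_attach_root(&rq0, &def_rd);
	rq_attach_root(&rq1, &def_rd);
	printf("span=%#lx online=%#lx\n", def_rd.span, def_rd.online);
	return 0;
}

Run as written this prints span=0x3 online=0x3. The design consequence
visible in the patch: init_rootdomain() now starts every domain with empty
maps, so def_root_domain.span is populated one CPU at a time by the
rq_attach_root() calls in sched_init(), rather than being preloaded with
CPU_MASK_ALL.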
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  31
1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5834c7fb79a5..02d468844a91 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -359,8 +359,6 @@ struct rt_rq {
  * exclusive cpuset is created, we also create and attach a new root-domain
  * object.
  *
- * By default the system creates a single root-domain with all cpus as
- * members (mimicking the global state we have today).
  */
 struct root_domain {
 	atomic_t refcount;
@@ -375,6 +373,10 @@ struct root_domain {
 	atomic_t rto_count;
 };
 
+/*
+ * By default the system creates a single root-domain with all cpus as
+ * members (mimicking the global state we have today).
+ */
 static struct root_domain def_root_domain;
 
 #endif
@@ -5859,6 +5861,9 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 				class->leave_domain(rq);
 		}
 
+		cpu_clear(rq->cpu, old_rd->span);
+		cpu_clear(rq->cpu, old_rd->online);
+
 		if (atomic_dec_and_test(&old_rd->refcount))
 			kfree(old_rd);
 	}
@@ -5866,6 +5871,10 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	atomic_inc(&rd->refcount);
 	rq->rd = rd;
 
+	cpu_set(rq->cpu, rd->span);
+	if (cpu_isset(rq->cpu, cpu_online_map))
+		cpu_set(rq->cpu, rd->online);
+
 	for (class = sched_class_highest; class; class = class->next) {
 		if (class->join_domain)
 			class->join_domain(rq);
@@ -5874,23 +5883,21 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
 
-static void init_rootdomain(struct root_domain *rd, const cpumask_t *map)
+static void init_rootdomain(struct root_domain *rd)
 {
 	memset(rd, 0, sizeof(*rd));
 
-	rd->span = *map;
-	cpus_and(rd->online, rd->span, cpu_online_map);
+	cpus_clear(rd->span);
+	cpus_clear(rd->online);
 }
 
 static void init_defrootdomain(void)
 {
-	cpumask_t cpus = CPU_MASK_ALL;
-
-	init_rootdomain(&def_root_domain, &cpus);
+	init_rootdomain(&def_root_domain);
 	atomic_set(&def_root_domain.refcount, 1);
 }
 
-static struct root_domain *alloc_rootdomain(const cpumask_t *map)
+static struct root_domain *alloc_rootdomain(void)
 {
 	struct root_domain *rd;
 
@@ -5898,7 +5905,7 @@ static struct root_domain *alloc_rootdomain(const cpumask_t *map)
 	if (!rd)
 		return NULL;
 
-	init_rootdomain(rd, map);
+	init_rootdomain(rd);
 
 	return rd;
 }
@@ -6319,7 +6326,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 	sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
 #endif
 
-	rd = alloc_rootdomain(cpu_map);
+	rd = alloc_rootdomain();
 	if (!rd) {
 		printk(KERN_WARNING "Cannot alloc root domain\n");
 		return -ENOMEM;
@@ -6894,7 +6901,6 @@ void __init sched_init(void)
 #ifdef CONFIG_SMP
 		rq->sd = NULL;
 		rq->rd = NULL;
-		rq_attach_root(rq, &def_root_domain);
 		rq->active_balance = 0;
 		rq->next_balance = jiffies;
 		rq->push_cpu = 0;
@@ -6903,6 +6909,7 @@ void __init sched_init(void)
 		INIT_LIST_HEAD(&rq->migration_queue);
 		rq->rt.highest_prio = MAX_RT_PRIO;
 		rq->rt.overloaded = 0;
+		rq_attach_root(rq, &def_root_domain);
 #endif
 		atomic_set(&rq->nr_iowait, 0);
 
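One subtlety in the sched_init() hunks: rq_attach_root() is not simply
re-added, it moves below the initialization of rq->rt.highest_prio and
rq->rt.overloaded. Attaching a runqueue runs each sched class's
join_domain() hook, and the rt hook can consult that rt state, so the
fields presumably need valid values before the attach happens. A hedged
sketch of the ordering constraint follows; rt_join_domain() and
init_one_rq() are hypothetical stand-ins, since the real hook's body is
not part of this diff.

struct rt_rq { int highest_prio; int overloaded; };
struct rq { int cpu; struct rt_rq rt; };

/* hypothetical stand-in for the rt class's join_domain hook */
static void rt_join_domain(struct rq *rq)
{
	/* a join hook may read rt state, e.g. the overload flag */
	if (rq->rt.overloaded)
		;	/* would mark this CPU overloaded domain-wide */
}

static void init_one_rq(struct rq *rq)
{
	rq->rt.highest_prio = 100;	/* MAX_RT_PRIO, as in the hunk above */
	rq->rt.overloaded = 0;
	rt_join_domain(rq);		/* attach last: rt state is now valid */
}

int main(void)
{
	struct rq rq0 = { .cpu = 0 };
	init_one_rq(&rq0);
	return 0;
}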