author     Nick Piggin <nickpiggin@yahoo.com.au>        2005-06-25 17:57:24 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-06-25 19:24:43 -0400
commit     41c7ce9ad9a859871dffbe7dbc8b1f9571724e3c (patch)
tree       e6046310efc8b0c3ec71922eb86ea2d3da11b2f7 /kernel
parent     4866cde064afbb6c2a488c265e696879de616daa (diff)
[PATCH] sched: null domains
Fix the last 2 places that directly access a runqueue's sched-domain and
assume it cannot be NULL.

That allows the use of NULL for domain, instead of a dummy domain, to
signify no balancing is to happen.  No functional changes.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/sched.c    36
1 file changed, 21 insertions(+), 15 deletions(-)
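The heart of the change is replacing a zero-initialized sentinel object with a NULL pointer plus explicit checks at the few sites that still dereference the domain pointer directly. The following is a minimal userspace sketch of that pattern, not kernel code: the names (struct domain, FLAG_SHARE_POWER, find_share_power) are hypothetical, and it assumes a for_each_domain-style walk that simply terminates on a NULL link, which is what lets a NULL attachment mean "no balancing".

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical stand-in for a scheduling domain. */
    struct domain {
            unsigned int flags;
            struct domain *parent;
    };

    #define FLAG_SHARE_POWER 0x1

    /*
     * Walk up the hierarchy looking for a power-sharing level.
     * A NULL starting point falls straight through, mirroring how the
     * patched functions bail out when no domain is attached.
     */
    static struct domain *find_share_power(struct domain *d)
    {
            struct domain *found = NULL;

            for (; d; d = d->parent)
                    if (d->flags & FLAG_SHARE_POWER)
                            found = d;
            return found;
    }

    int main(void)
    {
            struct domain top = { 0, NULL };
            struct domain smt = { FLAG_SHARE_POWER, &top };

            /* A real hierarchy yields the SMT-level domain... */
            printf("attached: %p\n", (void *)find_share_power(&smt));
            /* ...while a NULL domain (isolated or early-boot CPU) yields nothing. */
            printf("isolated: %p\n", (void *)find_share_power(NULL));
            return 0;
    }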
diff --git a/kernel/sched.c b/kernel/sched.c
index b1410577f9a8..77c07c2928b9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2579,11 +2579,15 @@ out:
 #ifdef CONFIG_SCHED_SMT
 static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
 {
-	struct sched_domain *sd = this_rq->sd;
+	struct sched_domain *tmp, *sd = NULL;
 	cpumask_t sibling_map;
 	int i;
 
-	if (!(sd->flags & SD_SHARE_CPUPOWER))
+	for_each_domain(this_cpu, tmp)
+		if (tmp->flags & SD_SHARE_CPUPOWER)
+			sd = tmp;
+
+	if (!sd)
 		return;
 
 	/*
@@ -2624,13 +2628,17 @@ static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
 
 static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
 {
-	struct sched_domain *sd = this_rq->sd;
+	struct sched_domain *tmp, *sd = NULL;
 	cpumask_t sibling_map;
 	prio_array_t *array;
 	int ret = 0, i;
 	task_t *p;
 
-	if (!(sd->flags & SD_SHARE_CPUPOWER))
+	for_each_domain(this_cpu, tmp)
+		if (tmp->flags & SD_SHARE_CPUPOWER)
+			sd = tmp;
+
+	if (!sd)
 		return 0;
 
 	/*
@@ -4617,6 +4625,11 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 {
 	int level = 0;
 
+	if (!sd) {
+		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
+		return;
+	}
+
 	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
 
 	do {
@@ -4874,7 +4887,7 @@ static void __devinit arch_init_sched_domains(void)
 	cpus_and(cpu_default_map, cpu_default_map, cpu_online_map);
 
 	/*
-	 * Set up domains. Isolated domains just stay on the dummy domain.
+	 * Set up domains. Isolated domains just stay on the NULL domain.
 	 */
 	for_each_cpu_mask(i, cpu_default_map) {
 		int group;
@@ -4987,18 +5000,11 @@ static void __devinit arch_destroy_sched_domains(void)
 
 #endif /* ARCH_HAS_SCHED_DOMAIN */
 
-/*
- * Initial dummy domain for early boot and for hotplug cpu. Being static,
- * it is initialized to zero, so all balancing flags are cleared which is
- * what we want.
- */
-static struct sched_domain sched_domain_dummy;
-
 #ifdef CONFIG_HOTPLUG_CPU
 /*
  * Force a reinitialization of the sched domains hierarchy. The domains
  * and groups cannot be updated in place without racing with the balancing
- * code, so we temporarily attach all running cpus to a "dummy" domain
+ * code, so we temporarily attach all running cpus to the NULL domain
  * which will prevent rebalancing while the sched domains are recalculated.
  */
 static int update_sched_domains(struct notifier_block *nfb,
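The removed comment leans on the C guarantee that objects with static storage duration are zero-initialized, which is why sched_domain_dummy worked as a "no balancing" sentinel without an explicit initializer. A tiny standalone sketch of that property (the struct and field names below are made up for illustration):

    #include <assert.h>

    /* Stand-in with flag-like fields; not the real sched_domain layout. */
    struct fake_domain {
            unsigned long flags;
            unsigned int busy_factor;
    };

    /* Static storage duration => zero-initialized, so every flag reads as 0. */
    static struct fake_domain dummy_like;

    int main(void)
    {
            assert(dummy_like.flags == 0);
            assert(dummy_like.busy_factor == 0);
            return 0;
    }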
@@ -5010,7 +5016,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 	case CPU_UP_PREPARE:
 	case CPU_DOWN_PREPARE:
 		for_each_online_cpu(i)
-			cpu_attach_domain(&sched_domain_dummy, i);
+			cpu_attach_domain(NULL, i);
 		arch_destroy_sched_domains();
 		return NOTIFY_OK;
 
@@ -5072,7 +5078,7 @@ void __init sched_init(void)
 		rq->best_expired_prio = MAX_PRIO;
 
 #ifdef CONFIG_SMP
-		rq->sd = &sched_domain_dummy;
+		rq->sd = NULL;
 		for (j = 1; j < 3; j++)
 			rq->cpu_load[j] = 0;
 		rq->active_balance = 0;