author     Suresh Siddha <suresh.b.siddha@intel.com>    2005-06-25 17:57:25 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-06-25 19:24:43 -0400
commit     245af2c7870bd5940f7bfad19a0a03b32751fbc5
tree       7c54e2b290a6b1a9fd15fa99f194c7ed5e9f0a11  /kernel
parent     41c7ce9ad9a859871dffbe7dbc8b1f9571724e3c
[PATCH] sched: remove degenerate domains
Remove degenerate scheduler domains during sched-domain init.

For example, on x86_64 we always have NUMA configured in. On Intel EM64T
systems the topmost sched domain will then be the NUMA domain, with only one
sched_group in it.
With fork/exec balancing (Nick's recent fixes in the -mm tree), we always end
up making wrong decisions because of this topmost domain: since it contains
only one group, find_idlest_group always returns NULL. We end up fully loading
one HT package first and then letting active load balancing kick in to
correct it.
In general, this patch also makes sense even without Nick's recent fixes in -mm.
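[Aside, not part of the patch: a minimal standalone C sketch of the
single-group condition described above. The toy types `group` and `domain`
are invented stand-ins for sched_group and sched_domain; in the kernel, a
domain's groups form a circular singly-linked list, so a domain with exactly
one group has a group that is its own successor.]

#include <stdio.h>

struct group { struct group *next; };     /* toy stand-in for sched_group */
struct domain { struct group *groups; };  /* toy stand-in for sched_domain */

static int has_single_group(const struct domain *sd)
{
	/* one element in a circular list: it is its own successor */
	return sd->groups == sd->groups->next;
}

int main(void)
{
	struct group g = { .next = &g };        /* circular list of one group */
	struct domain numa = { .groups = &g };

	/* A topmost NUMA domain like this gives find_idlest_group nothing
	 * to compare against, which is why such domains are pruned. */
	printf("degenerate: %d\n", has_single_group(&numa));
	return 0;
}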
From: Nick Piggin <nickpiggin@yahoo.com.au>
Modified by Nick Piggin to account for more than just sched_groups when
scanning for degenerate domains, and to allow a runqueue's sd to go NULL
rather than keeping a single degenerate domain around (this happens when you
run with maxcpus=1).
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c  |  64
1 file changed, 64 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 77c07c2928b9..e75b301b5340 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4712,6 +4712,57 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 #define sched_domain_debug(sd, cpu) {}
 #endif
 
+static int __devinit sd_degenerate(struct sched_domain *sd)
+{
+	if (cpus_weight(sd->span) == 1)
+		return 1;
+
+	/* Following flags need at least 2 groups */
+	if (sd->flags & (SD_LOAD_BALANCE |
+			 SD_BALANCE_NEWIDLE |
+			 SD_BALANCE_FORK |
+			 SD_BALANCE_EXEC)) {
+		if (sd->groups != sd->groups->next)
+			return 0;
+	}
+
+	/* Following flags don't use groups */
+	if (sd->flags & (SD_WAKE_IDLE |
+			 SD_WAKE_AFFINE |
+			 SD_WAKE_BALANCE))
+		return 0;
+
+	return 1;
+}
+
+static int __devinit sd_parent_degenerate(struct sched_domain *sd,
+			struct sched_domain *parent)
+{
+	unsigned long cflags = sd->flags, pflags = parent->flags;
+
+	if (sd_degenerate(parent))
+		return 1;
+
+	if (!cpus_equal(sd->span, parent->span))
+		return 0;
+
+	/* Does parent contain flags not in child? */
+	/* WAKE_BALANCE is a subset of WAKE_AFFINE */
+	if (cflags & SD_WAKE_AFFINE)
+		pflags &= ~SD_WAKE_BALANCE;
+	/* Flags needing groups don't count if only 1 group in parent */
+	if (parent->groups == parent->groups->next) {
+		pflags &= ~(SD_LOAD_BALANCE |
+				SD_BALANCE_NEWIDLE |
+				SD_BALANCE_FORK |
+				SD_BALANCE_EXEC);
+	}
+	if (~cflags & pflags)
+		return 0;
+
+	return 1;
+}
+
 /*
  * Attach the domain 'sd' to 'cpu' as its base domain.  Callers must
  * hold the hotplug lock.
@@ -4722,6 +4773,19 @@ void __devinit cpu_attach_domain(struct sched_domain *sd, int cpu)
 	unsigned long flags;
 	runqueue_t *rq = cpu_rq(cpu);
 	int local = 1;
+	struct sched_domain *tmp;
+
+	/* Remove the sched domains which do not contribute to scheduling. */
+	for (tmp = sd; tmp; tmp = tmp->parent) {
+		struct sched_domain *parent = tmp->parent;
+		if (!parent)
+			break;
+		if (sd_parent_degenerate(tmp, parent))
+			tmp->parent = parent->parent;
+	}
+
+	if (sd && sd_degenerate(sd))
+		sd = sd->parent;
 
 	sched_domain_debug(sd, cpu);
 
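[Aside, not part of the patch: the parent-splice pattern that
cpu_attach_domain() uses above, demonstrated on a toy linked list. The
`domain` type and its `degenerate` field are invented for illustration; the
field stands in for the sd_parent_degenerate() check. Walking the parent
chain, each degenerate parent is unlinked by pointing the child past it, and
a degenerate base domain is dropped in favour of its parent, which may leave
the chain NULL (the maxcpus=1 case mentioned in the changelog).]

#include <stdio.h>

struct domain {
	const char *name;
	struct domain *parent;
	int degenerate;           /* toy stand-in for sd_parent_degenerate() */
};

static struct domain *prune(struct domain *sd)
{
	struct domain *tmp;

	/* Unlink degenerate parents from the chain. */
	for (tmp = sd; tmp; tmp = tmp->parent) {
		struct domain *parent = tmp->parent;
		if (!parent)
			break;
		if (parent->degenerate)
			tmp->parent = parent->parent;
	}
	/* The base domain itself may be degenerate (e.g. maxcpus=1). */
	if (sd && sd->degenerate)
		sd = sd->parent;
	return sd;                /* may be NULL: no useful domains remain */
}

int main(void)
{
	struct domain numa = { "NUMA", NULL, 1 };  /* one-group top level */
	struct domain phys = { "PHYS", &numa, 0 };
	struct domain smt  = { "SMT",  &phys, 0 };

	for (struct domain *d = prune(&smt); d; d = d->parent)
		printf("%s\n", d->name);  /* prints SMT, PHYS: NUMA pruned */
	return 0;
}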