author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-07-15 04:35:52 -0400
committer Ingo Molnar <mingo@elte.hu>                2011-07-20 12:32:41 -0400
commit    e3589f6c81e4764d32a25d2a2a0afe54fa344f5c (patch)
tree      414bf6bdbad3f04f629fa2a72254ea85acf723f4 /include/linux/sched.h
parent    9c3f75cbd144014bea6af866a154cc2e73ab2287 (diff)
sched: Allow for overlapping sched_domain spans
Allow for sched_domain spans that overlap by giving such domains their
own sched_group list instead of sharing the sched_groups amongst
each other.
This is needed for machines with more than 16 nodes, because
sched_domain_node_span() will generate a node mask from the
16 nearest nodes without regard to whether these masks overlap.
Currently, sched_domains have a sched_group that maps to their child
sched_domain span, and since there is no overlap we share the
sched_group between the sched_domains of the various CPUs. If, however,
there is overlap, we would need to link the sched_group list in
different ways for each CPU, and hence sharing isn't possible.
In order to solve this, allocate private sched_groups for each CPU's
sched_domain but have the sched_groups share a sched_group_power
structure such that we can uniquely track the power.
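The sketch below is a minimal, self-contained user-space C11 illustration of that
ownership model: several per-CPU group objects each take a reference on one shared,
refcounted power structure, and the last group to drop its reference frees it. It is
an assumption-laden toy, not the kernel implementation; the names toy_group,
toy_group_power, toy_group_alloc and toy_group_free are invented for the example and
only mirror the role of sched_group/sched_group_power.

/*
 * Toy model: per-CPU group lists that share one refcounted "power" object,
 * so the capacity value is tracked exactly once per span.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_group_power {
	atomic_int ref;		/* number of groups holding a reference */
	unsigned int power;	/* shared capacity value */
};

struct toy_group {
	struct toy_group *next;		/* per-CPU group list (unused here) */
	struct toy_group_power *sgp;	/* shared, refcounted power */
};

static struct toy_group *toy_group_alloc(struct toy_group_power *sgp)
{
	struct toy_group *sg = calloc(1, sizeof(*sg));

	sg->sgp = sgp;
	atomic_fetch_add(&sgp->ref, 1);	/* take a reference on the shared power */
	return sg;
}

static void toy_group_free(struct toy_group *sg)
{
	/* the last group to drop its reference frees the shared power */
	if (atomic_fetch_sub(&sg->sgp->ref, 1) == 1)
		free(sg->sgp);
	free(sg);
}

int main(void)
{
	struct toy_group_power *sgp = calloc(1, sizeof(*sgp));
	struct toy_group *a, *b;

	sgp->power = 1024;	/* e.g. one full CPU's worth of capacity */

	/* two per-CPU groups covering the same span share one power struct */
	a = toy_group_alloc(sgp);
	b = toy_group_alloc(sgp);

	printf("shared power=%u ref=%d\n", a->sgp->power, atomic_load(&sgp->ref));

	toy_group_free(a);
	toy_group_free(b);	/* last reference: sgp is freed here */
	return 0;
}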
Reported-and-tested-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-08bxqw9wis3qti9u5inifh3y@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--   include/linux/sched.h | 2 ++
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2e5b3c8e2d3e..bde99d5358dc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -844,6 +844,7 @@ enum cpu_idle_type {
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
 #define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
+#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
 
 enum powersavings_balance_level {
 	POWERSAVINGS_BALANCE_NONE = 0,	/* No power saving load balance */
@@ -894,6 +895,7 @@ static inline int sd_power_saving_flags(void)
 }
 
 struct sched_group_power {
+	atomic_t ref;
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
 	 * single CPU.