author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-04-07 08:09:50 -0400
committer	Ingo Molnar <mingo@elte.hu>		2011-04-11 06:58:19 -0400
commit		dce840a08702bd13a9a186e07e63d1ef82256b5e (patch)
tree		168bb98aed7f5761ebe31aa92c34959e9d0f238a /include/linux/sched.h
parent		a9c9a9b6bff27ac9c746344a9c1a19bf3327002c (diff)
sched: Dynamically allocate sched_domain/sched_group data-structures
Instead of relying on static allocations for the sched_domain and
sched_group trees, dynamically allocate and RCU free them.
Allocating this dynamically also allows for some build_sched_groups()
simplification since we can now (like with other simplifications) rely
on the sched_domain tree instead of hard-coded knowledge.
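A rough sketch of the resulting lifetime rules (helper names here are
illustrative, not the patch's exact code): a newly built tree is
published with rcu_assign_pointer(), and a replaced tree is only freed
after a grace period, so concurrent readers never walk freed memory.

/* Illustrative sketch -- not the patch's exact code. */
static void example_free_domain(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct sched_domain, rcu));
}

static void example_attach(struct rq *rq, struct sched_domain *new_sd)
{
	struct sched_domain *old_sd = rq->sd;

	rcu_assign_pointer(rq->sd, new_sd);	/* readers see old or new */
	if (old_sd)
		call_rcu(&old_sd->rcu, example_free_domain); /* defer free */
}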
One tricky thing to note is that detach_destroy_domains() needs to hold
rcu_read_lock() over the entire tear-down; taking it per-CPU is not
sufficient, since that can lead to partial sched_group existence (this
could possibly be solved by doing the tear-down backwards, but holding
the lock throughout is much more robust). See the sketch below.
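The shape this implies is roughly the following (a sketch;
cpu_attach_domain() and def_root_domain exist in kernel/sched.c, but the
exact body here is illustrative):

static void detach_destroy_domains(const struct cpumask *cpu_map)
{
	int i;

	rcu_read_lock();	/* one section across ALL cpus... */
	for_each_cpu(i, cpu_map)
		cpu_attach_domain(NULL, &def_root_domain, i);
	rcu_read_unlock();	/* ...so no partial group rings are seen */
}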
A consequence of the above is that we can no longer print the
sched_domain debug stuff from cpu_attach_domain(), since that might now
run with preemption disabled (due to classic RCU etc.) while
sched_domain_debug() does some GFP_KERNEL allocations.
Another thing to note is that we now fully rely on normal RCU, not
RCU-sched; with the new and exciting RCU flavours we have grown over the
years, BH doesn't necessarily hold off RCU-sched grace periods (-rt is
known to break this). That would in fact already cause us grief, since
we do sched_domain/sched_group iterations from softirq context, as
sketched below.
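Concretely, any domain walk done from softirq context (load balancing,
for instance) must now sit inside an explicit RCU read-side section; a
sketch using the existing for_each_domain() iterator from kernel/sched.c:

static void example_domain_walk(int cpu)
{
	struct sched_domain *sd;

	rcu_read_lock();	/* BH-off alone is not enough on -rt */
	for_each_domain(cpu, sd) {
		/* ... inspect sd and its sched_group ring ... */
	}
	rcu_read_unlock();
}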
This patch is somewhat larger than I would like it to be, but I didn't
find any means of shrinking/splitting this.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.245307941@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	5
1 file changed, 5 insertions, 0 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4ec2c027e92c..020b79d6c486 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -868,6 +868,7 @@ static inline int sd_power_saving_flags(void)
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
+	atomic_t ref;
 
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
@@ -973,6 +974,10 @@ struct sched_domain {
 #ifdef CONFIG_SCHED_DEBUG
 	char *name;
 #endif
+	union {
+		void *private;		/* used during construction */
+		struct rcu_head rcu;	/* used during destruction */
+	};
 
 	unsigned int span_weight;
 	/*
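To illustrate how the two new fields are meant to fit together (again a
sketch under assumptions, extending the one above; the real refcount
handling is more involved): during construction, private can carry
per-domain build state; once the domain is live, the same storage is
reused as the rcu_head that frees it, and the group's ref count lets
several domains share one sched_group ring, with the last put freeing it.

/* Illustrative sketch of the new fields' intended use. */
static void example_free_domain_full(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	/* real code must walk the whole circular group list */
	if (sd->groups && atomic_dec_and_test(&sd->groups->ref))
		kfree(sd->groups);
	kfree(sd);
}

static void example_destroy(struct sched_domain *sd)
{
	/* 'private' is no longer needed; reuse the storage as rcu_head */
	call_rcu(&sd->rcu, example_free_domain_full);
}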