diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2012-04-24 18:30:36 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2012-05-09 09:00:53 -0400 |
commit | 0ce90475dcdbe90affc218e9688c8401e468e84d (patch) | |
tree | 30771f6a6791af0214c5e7d57958f6395f3b97f5 /include/linux/sched.h | |
parent | c22402a2f76e88b04b7a8b6c0597ad9ba6fd71de (diff) |
sched/fair: Add some serialization to the sched_domain load-balance walk
Since the sched_domain walk is completely unserialized (!SD_SERIALIZE),
it is possible that multiple CPUs in the group get elected to perform
the load balance at the next domain level. Avoid this by adding some
serialization.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-vqh9ai6s0ewmeakjz80w4qz6@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 1 |
1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h index 4a559bf0622f..3cbfb55bde25 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -927,6 +927,7 @@ struct sched_group_power { | |||
927 | struct sched_group { | 927 | struct sched_group { |
928 | struct sched_group *next; /* Must be a circular list */ | 928 | struct sched_group *next; /* Must be a circular list */ |
929 | atomic_t ref; | 929 | atomic_t ref; |
930 | int balance_cpu; | ||
930 | 931 | ||
931 | unsigned int group_weight; | 932 | unsigned int group_weight; |
932 | struct sched_group_power *sgp; | 933 | struct sched_group_power *sgp; |