diff options
author | Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com> | 2008-01-25 15:08:00 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-25 15:08:00 -0500 |
commit | a183561567b5446d3362b4839bd4f744f4b2af1e (patch) | |
tree | 7bfa46fd0bf4a96e96500732d188f1ef4b04454d | |
parent | 58e2d4ca581167c2a079f4ee02be2f0bc52e8729 (diff) |
sched: introduce a mutex and corresponding API to serialize access to doms_curarray
doms_cur[] array represents various scheduling domains which are
mutually exclusive. Currently cpusets code can modify this array (by
calling partition_sched_domains()) as a result of user modifying
sched_load_balance flag for various cpusets.
This patch introduces a mutex and corresponding API (only when
CONFIG_FAIR_GROUP_SCHED is defined) which allows a reader to safely read
the doms_cur[] array without worrying about concurrent modifications to the
array.
The fair group scheduler code (introduced in next patch of this series)
makes use of this mutex to walk through the doms_cur[] array while rebalancing
shares of task groups across cpus.
Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | kernel/sched.c | 19 |
1 file changed, 19 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index c915f3e6e593..d9585f15043f 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -185,6 +185,9 @@ static struct cfs_rq *init_cfs_rq_p[NR_CPUS]; | |||
185 | */ | 185 | */ |
186 | static DEFINE_MUTEX(task_group_mutex); | 186 | static DEFINE_MUTEX(task_group_mutex); |
187 | 187 | ||
188 | /* doms_cur_mutex serializes access to doms_cur[] array */ | ||
189 | static DEFINE_MUTEX(doms_cur_mutex); | ||
190 | |||
188 | /* Default task group. | 191 | /* Default task group. |
189 | * Every task in system belong to this group at bootup. | 192 | * Every task in system belong to this group at bootup. |
190 | */ | 193 | */ |
@@ -234,11 +237,23 @@ static inline void unlock_task_group_list(void) | |||
234 | mutex_unlock(&task_group_mutex); | 237 | mutex_unlock(&task_group_mutex); |
235 | } | 238 | } |
236 | 239 | ||
240 | static inline void lock_doms_cur(void) | ||
241 | { | ||
242 | mutex_lock(&doms_cur_mutex); | ||
243 | } | ||
244 | |||
245 | static inline void unlock_doms_cur(void) | ||
246 | { | ||
247 | mutex_unlock(&doms_cur_mutex); | ||
248 | } | ||
249 | |||
237 | #else | 250 | #else |
238 | 251 | ||
239 | static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { } | 252 | static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { } |
240 | static inline void lock_task_group_list(void) { } | 253 | static inline void lock_task_group_list(void) { } |
241 | static inline void unlock_task_group_list(void) { } | 254 | static inline void unlock_task_group_list(void) { } |
255 | static inline void lock_doms_cur(void) { } | ||
256 | static inline void unlock_doms_cur(void) { } | ||
242 | 257 | ||
243 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | 258 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
244 | 259 | ||
@@ -6543,6 +6558,8 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new) | |||
6543 | { | 6558 | { |
6544 | int i, j; | 6559 | int i, j; |
6545 | 6560 | ||
6561 | lock_doms_cur(); | ||
6562 | |||
6546 | /* always unregister in case we don't destroy any domains */ | 6563 | /* always unregister in case we don't destroy any domains */ |
6547 | unregister_sched_domain_sysctl(); | 6564 | unregister_sched_domain_sysctl(); |
6548 | 6565 | ||
@@ -6583,6 +6600,8 @@ match2: | |||
6583 | ndoms_cur = ndoms_new; | 6600 | ndoms_cur = ndoms_new; |
6584 | 6601 | ||
6585 | register_sched_domain_sysctl(); | 6602 | register_sched_domain_sysctl(); |
6603 | |||
6604 | unlock_doms_cur(); | ||
6586 | } | 6605 | } |
6587 | 6606 | ||
6588 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 6607 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) |