author	Daniel J Blueman <daniel.blueman@gmail.com>	2010-06-01 09:06:13 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2010-06-23 09:50:44 -0400
commit	f3b577dec1f2ce32d2db6d2ca6badff7002512af
tree	c9d5d84b2d3e4ae1251f69932a526367a2bac7fa	/kernel/sched_fair.c
parent	7e27d6e778cd87b6f2415515d7127eba53fe5d02
rcu: apply RCU protection to wake_affine()
The task_group() function returns a pointer that must be protected by
either RCU, the ->alloc_lock, or the cgroup lock (see the
rcu_dereference_check() in task_subsys_state(), which is invoked by
task_group()).  The wake_affine() function currently does none of these,
which means that a concurrent update would be within its rights to free
the structure returned by task_group().  Because wake_affine() uses this
structure only to compute load-balancing heuristics, there is no reason
to acquire either of the two locks.

Therefore, this commit introduces an RCU read-side critical section that
starts before the first call to task_group() and ends after the last use
of the "tg" pointer returned from task_group().  Thanks to Li Zefan for
pointing out the need to extend the RCU read-side critical section from
that proposed by the original patch.

Signed-off-by: Daniel J Blueman <daniel.blueman@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
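To illustrate the pattern the commit applies, here is a minimal userspace
sketch of an RCU read-side critical section protecting a pointer lookup,
using liburcu rather than the in-kernel API (build with gcc demo.c -lurcu;
the group_stats structure and function names are hypothetical, not kernel
symbols).  As in wake_affine(), every use of the fetched pointer stays
between rcu_read_lock() and rcu_read_unlock(), so a concurrent updater
cannot free the structure out from under the reader:

/* Hypothetical demo of the RCU read-side pattern; not kernel code. */
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>

struct group_stats {
	unsigned long weight;
};

static struct group_stats *stats;	/* published pointer, RCU-protected */

static unsigned long read_weight(void)
{
	struct group_stats *gs;
	unsigned long w;

	rcu_read_lock();		/* reclaimers must now wait for us */
	gs = rcu_dereference(stats);	/* ordered load of the shared pointer */
	w = gs->weight;			/* all uses of gs stay inside the section */
	rcu_read_unlock();		/* after this, gs may be freed */
	return w;
}

int main(void)
{
	struct group_stats *old, *new;

	rcu_register_thread();		/* liburcu readers must register */

	old = malloc(sizeof(*old));
	old->weight = 1024;
	rcu_assign_pointer(stats, old);
	printf("weight = %lu\n", read_weight());

	/* Writer side: publish a replacement, wait a grace period, free. */
	new = malloc(sizeof(*new));
	new->weight = 2048;
	rcu_assign_pointer(stats, new);
	synchronize_rcu();		/* all pre-existing readers are done */
	free(old);
	printf("weight = %lu\n", read_weight());

	rcu_unregister_thread();
	free(new);
	return 0;
}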
Diffstat (limited to 'kernel/sched_fair.c')
 kernel/sched_fair.c | 2 ++
 1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index eed35eded602..a878b5332daa 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1240,6 +1240,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * effect of the currently running task from the load
 	 * of the current CPU:
 	 */
+	rcu_read_lock();
 	if (sync) {
 		tg = task_group(current);
 		weight = current->se.load.weight;
@@ -1275,6 +1276,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 		balanced = this_eff_load <= prev_eff_load;
 	} else
 		balanced = true;
+	rcu_read_unlock();
 
 	/*
 	 * If the currently running task will sleep within
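For reference, the locking rule the commit message cites is encoded in
task_subsys_state() itself.  In kernels of this era it looked roughly like
the sketch below (paraphrased from memory; consult the tree at this commit
for the exact text).  The second argument to rcu_dereference_check() is a
lockdep expression listing the conditions under which the dereference is
legal: exactly the "RCU, ->alloc_lock, or cgroup lock" rule above, which
wake_affine() now satisfies via rcu_read_lock():

/* Paraphrased sketch of task_subsys_state(), circa 2.6.35. */
static inline struct cgroup_subsys_state *
task_subsys_state(struct task_struct *task, int subsys_id)
{
	return rcu_dereference_check(task->cgroups->subsys[subsys_id],
				     rcu_read_lock_held() ||
				     lockdep_is_held(&task->alloc_lock) ||
				     cgroup_lock_is_held());
}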