aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/cpuset.c
diff options
context:
space:
mode:
authorLi Zefan <lizefan@huawei.com>2014-03-03 17:28:36 -0500
committerTejun Heo <tj@kernel.org>2014-03-03 17:28:36 -0500
commitb8dadcb58d542ecbf1d3dae5fefcd3fd8cb26539 (patch)
treefc8075d0506b22f79213d97cf4e19408b622454d /kernel/cpuset.c
parenta60bed296ac67b9e2765646dec8e36e3b4d7c395 (diff)
cpuset: use rcu_read_lock() to protect task_cs()
We no longer use task_lock() to protect tsk->cgroups.

Reported-by: Fengguang Wu <fengguang.wu@intel.com>
Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--kernel/cpuset.c24
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index d8bec21d7a11..8d5324583aa4 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2239,10 +2239,10 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
2239 struct cpuset *cpus_cs; 2239 struct cpuset *cpus_cs;
2240 2240
2241 mutex_lock(&callback_mutex); 2241 mutex_lock(&callback_mutex);
2242 task_lock(tsk); 2242 rcu_read_lock();
2243 cpus_cs = effective_cpumask_cpuset(task_cs(tsk)); 2243 cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
2244 guarantee_online_cpus(cpus_cs, pmask); 2244 guarantee_online_cpus(cpus_cs, pmask);
2245 task_unlock(tsk); 2245 rcu_read_unlock();
2246 mutex_unlock(&callback_mutex); 2246 mutex_unlock(&callback_mutex);
2247} 2247}
2248 2248
@@ -2295,10 +2295,10 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
2295 nodemask_t mask; 2295 nodemask_t mask;
2296 2296
2297 mutex_lock(&callback_mutex); 2297 mutex_lock(&callback_mutex);
2298 task_lock(tsk); 2298 rcu_read_lock();
2299 mems_cs = effective_nodemask_cpuset(task_cs(tsk)); 2299 mems_cs = effective_nodemask_cpuset(task_cs(tsk));
2300 guarantee_online_mems(mems_cs, &mask); 2300 guarantee_online_mems(mems_cs, &mask);
2301 task_unlock(tsk); 2301 rcu_read_unlock();
2302 mutex_unlock(&callback_mutex); 2302 mutex_unlock(&callback_mutex);
2303 2303
2304 return mask; 2304 return mask;
@@ -2414,9 +2414,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
2414 /* Not hardwall and node outside mems_allowed: scan up cpusets */ 2414 /* Not hardwall and node outside mems_allowed: scan up cpusets */
2415 mutex_lock(&callback_mutex); 2415 mutex_lock(&callback_mutex);
2416 2416
2417 task_lock(current); 2417 rcu_read_lock();
2418 cs = nearest_hardwall_ancestor(task_cs(current)); 2418 cs = nearest_hardwall_ancestor(task_cs(current));
2419 task_unlock(current); 2419 rcu_read_unlock();
2420 2420
2421 allowed = node_isset(node, cs->mems_allowed); 2421 allowed = node_isset(node, cs->mems_allowed);
2422 mutex_unlock(&callback_mutex); 2422 mutex_unlock(&callback_mutex);
@@ -2543,24 +2543,26 @@ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
2543 * @task: pointer to task_struct of some task. 2543 * @task: pointer to task_struct of some task.
2544 * 2544 *
2545 * Description: Prints @task's name, cpuset name, and cached copy of its 2545 * Description: Prints @task's name, cpuset name, and cached copy of its
2546 * mems_allowed to the kernel log. Must hold task_lock(task) to allow 2546 * mems_allowed to the kernel log.
2547 * dereferencing task_cs(task).
2548 */ 2547 */
2549void cpuset_print_task_mems_allowed(struct task_struct *tsk) 2548void cpuset_print_task_mems_allowed(struct task_struct *tsk)
2550{ 2549{
2551 /* Statically allocated to prevent using excess stack. */ 2550 /* Statically allocated to prevent using excess stack. */
2552 static char cpuset_nodelist[CPUSET_NODELIST_LEN]; 2551 static char cpuset_nodelist[CPUSET_NODELIST_LEN];
2553 static DEFINE_SPINLOCK(cpuset_buffer_lock); 2552 static DEFINE_SPINLOCK(cpuset_buffer_lock);
2554 struct cgroup *cgrp = task_cs(tsk)->css.cgroup; 2553 struct cgroup *cgrp;
2555 2554
2556 spin_lock(&cpuset_buffer_lock); 2555 spin_lock(&cpuset_buffer_lock);
2556 rcu_read_lock();
2557 2557
2558 cgrp = task_cs(tsk)->css.cgroup;
2558 nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN, 2559 nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
2559 tsk->mems_allowed); 2560 tsk->mems_allowed);
2560 printk(KERN_INFO "%s cpuset=", tsk->comm); 2561 printk(KERN_INFO "%s cpuset=", tsk->comm);
2561 pr_cont_cgroup_name(cgrp); 2562 pr_cont_cgroup_name(cgrp);
2562 pr_cont(" mems_allowed=%s\n", cpuset_nodelist); 2563 pr_cont(" mems_allowed=%s\n", cpuset_nodelist);
2563 2564
2565 rcu_read_unlock();
2564 spin_unlock(&cpuset_buffer_lock); 2566 spin_unlock(&cpuset_buffer_lock);
2565} 2567}
2566 2568
@@ -2592,9 +2594,9 @@ int cpuset_memory_pressure_enabled __read_mostly;
2592 2594
2593void __cpuset_memory_pressure_bump(void) 2595void __cpuset_memory_pressure_bump(void)
2594{ 2596{
2595 task_lock(current); 2597 rcu_read_lock();
2596 fmeter_markevent(&task_cs(current)->fmeter); 2598 fmeter_markevent(&task_cs(current)->fmeter);
2597 task_unlock(current); 2599 rcu_read_unlock();
2598} 2600}
2599 2601
2600#ifdef CONFIG_PROC_PID_CPUSET 2602#ifdef CONFIG_PROC_PID_CPUSET