author     Thomas Gleixner <tglx@linutronix.de>    2009-12-09 05:15:01 -0500
committer  Ingo Molnar <mingo@elte.hu>             2009-12-14 11:11:35 -0500
commit     23f5d142519621b16cf2b378cf8adf4dcf01a616
tree       c7907cb635ea1ad6cab71598d687b752c17adf37 /kernel
parent     5fe85be081edf0ac92d83f9c39e0ab5c1371eb82
sched: Use rcu in sched_get/set_affinity()
tasklist_lock is held read locked to protect the find_task_by_vpid() call
and to prevent the task from going away. sched_setaffinity() takes a
reference on the task_struct and drops tasklist_lock right away. The
access to the cpus_allowed mask is protected by rq->lock. rcu_read_lock()
provides the same protection here.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20091209100706.789059966@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
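For reference, the lookup pattern the patch switches to looks roughly like
the sketch below. This is a minimal illustration, not the exact sched.c
code: the helper name get_task_for_affinity() is made up here, while
rcu_read_lock()/rcu_read_unlock(), find_task_by_vpid(), get_task_struct()
and put_task_struct() are the real kernel primitives involved.

/*
 * Sketch only: RCU pins the task_struct during the lookup, and an
 * explicit reference keeps it alive after the read-side critical
 * section ends. In sched.c this is open-coded in sched_setaffinity()
 * and sched_getaffinity() via find_process_by_pid().
 */
#include <linux/sched.h>
#include <linux/rcupdate.h>

static struct task_struct *get_task_for_affinity(pid_t pid) /* hypothetical helper */
{
        struct task_struct *p;

        rcu_read_lock();
        p = pid ? find_task_by_vpid(pid) : current;
        if (p)
                get_task_struct(p);     /* prevent p going away */
        rcu_read_unlock();

        return p;       /* caller drops the reference with put_task_struct(p) */
}

The caller can then work on the task's cpus_allowed mask under rq->lock,
as the commit message notes, without holding tasklist_lock at all; that is
exactly the locking the hunks below implement.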
Diffstat (limited to 'kernel')
 kernel/sched.c | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1782beed2fa7..79893123325c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6516,22 +6516,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	int retval;
 
 	get_online_cpus();
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 
 	p = find_process_by_pid(pid);
 	if (!p) {
-		read_unlock(&tasklist_lock);
+		rcu_read_unlock();
 		put_online_cpus();
 		return -ESRCH;
 	}
 
-	/*
-	 * It is not safe to call set_cpus_allowed with the
-	 * tasklist_lock held. We will bump the task_struct's
-	 * usage count and then drop tasklist_lock.
-	 */
+	/* Prevent p going away */
 	get_task_struct(p);
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 
 	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
 		retval = -ENOMEM;
@@ -6617,7 +6613,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 	int retval;
 
 	get_online_cpus();
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 
 	retval = -ESRCH;
 	p = find_process_by_pid(pid);
@@ -6633,7 +6629,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 	task_rq_unlock(rq, &flags);
 
 out_unlock:
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	put_online_cpus();
 
 	return retval;