author	Ralf Baechle <ralf@linux-mips.org>	2010-05-28 22:19:57 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2010-07-05 12:17:32 -0400
commit	17c04139fd2aeaef30fda380bb91b32de7b41a8f (patch)
tree	4f0a1534d7423ff95cc0c6ab41120ee2ccec14ce /arch/mips
parent	1d84267480ce8cf9943b79b70da86ddb3f95e3dd (diff)
MIPS: MT: Fix FPU affinity.
The fragile MT sys_sched_setaffinity wrapper needs its regular dose of
fixes.

Nose-poked-at-pile-o-crap-by: Julia Lawall <julia@diku.dk>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips')
-rw-r--r--	arch/mips/kernel/mips-mt-fpaff.c	87
1 file changed, 61 insertions(+), 26 deletions(-)
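
For context, a minimal userspace sketch (not part of this patch) of the call path being fixed: on a MIPS MT kernel with FPU affinity support (CONFIG_MIPS_MT_FPAFF), sched_setaffinity(2) is serviced by the mipsmt_ wrapper patched below; the EPERM case is what the check_same_owner() test introduced here guards.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);	/* ask to run on CPU 0 only */

	/* pid 0 == calling process; targeting another user's pid
	 * without CAP_SYS_NICE fails with EPERM */
	if (sched_setaffinity(0, sizeof(mask), &mask) == -1) {
		perror("sched_setaffinity");
		return 1;
	}
	return 0;
}
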
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index f5981c499109..2340f11dc29c 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -3,6 +3,7 @@
  * Copyright (C) 2005 Mips Technologies, Inc
  */
 #include <linux/cpu.h>
+#include <linux/cpuset.h>
 #include <linux/cpumask.h>
 #include <linux/delay.h>
 #include <linux/kernel.h>
@@ -39,6 +40,21 @@ static inline struct task_struct *find_process_by_pid(pid_t pid)
 	return pid ? find_task_by_vpid(pid) : current;
 }
 
+/*
+ * check the target process has a UID that matches the current process's
+ */
+static bool check_same_owner(struct task_struct *p)
+{
+	const struct cred *cred = current_cred(), *pcred;
+	bool match;
+
+	rcu_read_lock();
+	pcred = __task_cred(p);
+	match = (cred->euid == pcred->euid ||
+		 cred->euid == pcred->uid);
+	rcu_read_unlock();
+	return match;
+}
 
 /*
  * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
@@ -46,12 +62,10 @@ static inline struct task_struct *find_process_by_pid(pid_t pid)
 asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
 				      unsigned long __user *user_mask_ptr)
 {
-	cpumask_t new_mask;
-	cpumask_t effective_mask;
-	int retval;
-	struct task_struct *p;
+	cpumask_var_t cpus_allowed, new_mask, effective_mask;
 	struct thread_info *ti;
-	uid_t euid;
+	struct task_struct *p;
+	int retval;
 
 	if (len < sizeof(new_mask))
 		return -EINVAL;
@@ -60,53 +74,74 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
 		return -EFAULT;
 
 	get_online_cpus();
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 
 	p = find_process_by_pid(pid);
 	if (!p) {
-		read_unlock(&tasklist_lock);
+		rcu_read_unlock();
 		put_online_cpus();
 		return -ESRCH;
 	}
 
-	/*
-	 * It is not safe to call set_cpus_allowed with the
-	 * tasklist_lock held.  We will bump the task_struct's
-	 * usage count and drop tasklist_lock before invoking
-	 * set_cpus_allowed.
-	 */
+	/* Prevent p going away */
 	get_task_struct(p);
+	rcu_read_unlock();
 
-	euid = current_euid();
+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto out_put_task;
+	}
+	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto out_free_cpus_allowed;
+	}
+	if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto out_free_new_mask;
+	}
 	retval = -EPERM;
-	if (euid != p->cred->euid && euid != p->cred->uid &&
-	    !capable(CAP_SYS_NICE)) {
-		read_unlock(&tasklist_lock);
+	if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
 		goto out_unlock;
-	}
 
 	retval = security_task_setscheduler(p, 0, NULL);
 	if (retval)
 		goto out_unlock;
 
 	/* Record new user-specified CPU set for future reference */
-	p->thread.user_cpus_allowed = new_mask;
-
-	/* Unlock the task list */
-	read_unlock(&tasklist_lock);
+	cpumask_copy(&p->thread.user_cpus_allowed, new_mask);
 
+ again:
 	/* Compute new global allowed CPU set if necessary */
 	ti = task_thread_info(p);
 	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
-	    cpus_intersects(new_mask, mt_fpu_cpumask)) {
-		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
-		retval = set_cpus_allowed_ptr(p, &effective_mask);
+	    cpus_intersects(*new_mask, mt_fpu_cpumask)) {
+		cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
+		retval = set_cpus_allowed_ptr(p, effective_mask);
 	} else {
+		cpumask_copy(effective_mask, new_mask);
 		clear_ti_thread_flag(ti, TIF_FPUBOUND);
-		retval = set_cpus_allowed_ptr(p, &new_mask);
+		retval = set_cpus_allowed_ptr(p, new_mask);
 	}
 
+	if (!retval) {
+		cpuset_cpus_allowed(p, cpus_allowed);
+		if (!cpumask_subset(effective_mask, cpus_allowed)) {
+			/*
+			 * We must have raced with a concurrent cpuset
+			 * update. Just reset the cpus_allowed to the
+			 * cpuset's cpus_allowed
+			 */
+			cpumask_copy(new_mask, cpus_allowed);
+			goto again;
+		}
+	}
 out_unlock:
+	free_cpumask_var(effective_mask);
+out_free_new_mask:
+	free_cpumask_var(new_mask);
+out_free_cpus_allowed:
+	free_cpumask_var(cpus_allowed);
+out_put_task:
 	put_task_struct(p);
 	put_online_cpus();
 	return retval;
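
Design note: the conversion from on-stack cpumask_t variables to cpumask_var_t keeps the wrapper's stack usage bounded when NR_CPUS is large, at the cost of the allocate/free discipline seen in the goto chain above. A condensed sketch of that discipline follows; apply_mask and its parameters are made up for illustration, but the cpumask helpers are the ones the patch uses.

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/sched.h>

/* Hypothetical helper, not part of the patch: restrict p to the
 * intersection of a requested mask and a limiting mask. */
static int apply_mask(struct task_struct *p, const struct cpumask *wanted,
		      const struct cpumask *limit)
{
	cpumask_var_t effective;
	int retval;

	/* Heap-allocated only when CONFIG_CPUMASK_OFFSTACK=y;
	 * otherwise this is an on-stack array and cannot fail. */
	if (!alloc_cpumask_var(&effective, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(effective, wanted, limit);
	retval = set_cpus_allowed_ptr(p, effective);

	free_cpumask_var(effective);	/* always unwind */
	return retval;
}
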