Diffstat (limited to 'kernel/compat.c')
-rw-r--r--  kernel/compat.c  53
1 file changed, 14 insertions(+), 39 deletions(-)
diff --git a/kernel/compat.c b/kernel/compat.c
index 143990e48cb9..8eafe3eb50d9 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -23,6 +23,7 @@
 #include <linux/timex.h>
 #include <linux/migrate.h>
 #include <linux/posix-timers.h>
+#include <linux/times.h>
 
 #include <asm/uaccess.h>
 
@@ -208,49 +209,23 @@ asmlinkage long compat_sys_setitimer(int which,
 	return 0;
 }
 
+static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
+{
+	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
+}
+
 asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
 {
-	/*
-	 * In the SMP world we might just be unlucky and have one of
-	 * the times increment as we use it. Since the value is an
-	 * atomically safe type this is just fine. Conceptually its
-	 * as if the syscall took an instant longer to occur.
-	 */
 	if (tbuf) {
+		struct tms tms;
 		struct compat_tms tmp;
-		struct task_struct *tsk = current;
-		struct task_struct *t;
-		cputime_t utime, stime, cutime, cstime;
-
-		read_lock(&tasklist_lock);
-		utime = tsk->signal->utime;
-		stime = tsk->signal->stime;
-		t = tsk;
-		do {
-			utime = cputime_add(utime, t->utime);
-			stime = cputime_add(stime, t->stime);
-			t = next_thread(t);
-		} while (t != tsk);
-
-		/*
-		 * While we have tasklist_lock read-locked, no dying thread
-		 * can be updating current->signal->[us]time. Instead,
-		 * we got their counts included in the live thread loop.
-		 * However, another thread can come in right now and
-		 * do a wait call that updates current->signal->c[us]time.
-		 * To make sure we always see that pair updated atomically,
-		 * we take the siglock around fetching them.
-		 */
-		spin_lock_irq(&tsk->sighand->siglock);
-		cutime = tsk->signal->cutime;
-		cstime = tsk->signal->cstime;
-		spin_unlock_irq(&tsk->sighand->siglock);
-		read_unlock(&tasklist_lock);
-
-		tmp.tms_utime = compat_jiffies_to_clock_t(cputime_to_jiffies(utime));
-		tmp.tms_stime = compat_jiffies_to_clock_t(cputime_to_jiffies(stime));
-		tmp.tms_cutime = compat_jiffies_to_clock_t(cputime_to_jiffies(cutime));
-		tmp.tms_cstime = compat_jiffies_to_clock_t(cputime_to_jiffies(cstime));
+
+		do_sys_times(&tms);
+		/* Convert our struct tms to the compat version. */
+		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
+		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
+		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
+		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
 		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
 			return -EFAULT;
 	}
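
For context, the rewritten compat_sys_times() above pushes all of the accounting into a shared do_sys_times() helper and keeps only the compat_clock_t narrowing on the compat side (the new <linux/times.h> include presumably pulls in struct tms). Below is a minimal sketch of what such a helper could look like, reconstructed from the logic this patch removes; the body and the cputime_to_clock_t() conversion at the end are assumptions for illustration, not necessarily the exact kernel/sys.c implementation this commit pairs with.

/*
 * Illustrative sketch of a do_sys_times() helper, rebuilt from the
 * logic removed above; the real kernel/sys.c version may differ.
 */
void do_sys_times(struct tms *tms)
{
	struct task_struct *tsk = current, *t;
	cputime_t utime, stime, cutime, cstime;

	read_lock(&tasklist_lock);
	/* Start from the times already accumulated for dead threads... */
	utime = tsk->signal->utime;
	stime = tsk->signal->stime;
	/* ...and add the live threads of this thread group. */
	t = tsk;
	do {
		utime = cputime_add(utime, t->utime);
		stime = cputime_add(stime, t->stime);
		t = next_thread(t);
	} while (t != tsk);
	/*
	 * c[us]time are updated as a pair under siglock by the wait
	 * paths, so fetch them under the same lock to see a consistent pair.
	 */
	spin_lock_irq(&tsk->sighand->siglock);
	cutime = tsk->signal->cutime;
	cstime = tsk->signal->cstime;
	spin_unlock_irq(&tsk->sighand->siglock);
	read_unlock(&tasklist_lock);

	/* Hand back native clock_t values; callers do any compat conversion. */
	tms->tms_utime = cputime_to_clock_t(utime);
	tms->tms_stime = cputime_to_clock_t(stime);
	tms->tms_cutime = cputime_to_clock_t(cutime);
	tms->tms_cstime = cputime_to_clock_t(cstime);
}

With a helper of this shape, the native sys_times() and this compat wrapper share one accounting path, and the compat-specific work reduces to clock_t_to_compat_clock_t(), which is just compat_jiffies_to_clock_t() composed with clock_t_to_jiffies().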