Diffstat (limited to 'fs/proc/array.c')
-rw-r--r--  fs/proc/array.c | 74
1 file changed, 4 insertions(+), 70 deletions(-)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 0d6eb33597c6..bb9f4b05703d 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -86,11 +86,6 @@
 #include <asm/processor.h>
 #include "internal.h"
 
-/* Gcc optimizes away "strlen(x)" for constant x */
-#define ADDBUF(buffer, string) \
-do { memcpy(buffer, string, strlen(string)); \
-     buffer += strlen(string); } while (0)
-
 static inline void task_name(struct seq_file *m, struct task_struct *p)
 {
 	int i;
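
Note: the removed ADDBUF macro built /proc output by appending strings into a raw character buffer; with this file's output now going through the seq_file interface (seq_printf()/seq_puts()), the macro is presumably left without users, so the hunk above is dead-code removal. A minimal userspace sketch, not part of the patch, of what the macro did:

#include <stdio.h>
#include <string.h>

/* Same shape as the removed kernel macro: copy a string into a raw
 * buffer and advance the write cursor past what was copied. */
#define ADDBUF(buffer, string) \
do { memcpy(buffer, string, strlen(string)); \
     buffer += strlen(string); } while (0)

int main(void)
{
        char buf[64], *p = buf;

        ADDBUF(p, "State:\t");          /* hand-rolled append...            */
        ADDBUF(p, "R (running)\n");     /* ...which seq_puts() now replaces */
        *p = '\0';
        fputs(buf, stdout);
        return 0;
}
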
@@ -261,7 +256,6 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
 	sigemptyset(&ignored);
 	sigemptyset(&caught);
 
-	rcu_read_lock();
 	if (lock_task_sighand(p, &flags)) {
 		pending = p->pending.signal;
 		shpending = p->signal->shared_pending.signal;
@@ -272,7 +266,6 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
 		qlim = p->signal->rlim[RLIMIT_SIGPENDING].rlim_cur;
 		unlock_task_sighand(p, &flags);
 	}
-	rcu_read_unlock();
 
 	seq_printf(m, "Threads:\t%d\n", num_threads);
 	seq_printf(m, "SigQ:\t%lu/%lu\n", qsize, qlim);
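
Note: the explicit rcu_read_lock()/rcu_read_unlock() pair around lock_task_sighand() is dropped in the two hunks above because it adds nothing: lock_task_sighand() enters its own RCU read-side critical section while it validates p->sighand before taking siglock, and the /proc code already holds a reference on the task. A simplified sketch of that helper, from memory rather than verbatim kernel source, to show why the outer lock was redundant:

struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
                                         unsigned long *flags)
{
        struct sighand_struct *sighand;

        rcu_read_lock();                /* protects the ->sighand load */
        for (;;) {
                sighand = rcu_dereference(tsk->sighand);
                if (sighand == NULL)
                        break;          /* task is exiting */
                spin_lock_irqsave(&sighand->siglock, *flags);
                if (likely(sighand == tsk->sighand))
                        break;          /* locked the right sighand */
                spin_unlock_irqrestore(&sighand->siglock, *flags);
        }
        rcu_read_unlock();

        return sighand;
}
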
@@ -337,65 +330,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 	return 0;
 }
 
-/*
- * Use precise platform statistics if available:
- */
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-static cputime_t task_utime(struct task_struct *p)
-{
-	return p->utime;
-}
-
-static cputime_t task_stime(struct task_struct *p)
-{
-	return p->stime;
-}
-#else
-static cputime_t task_utime(struct task_struct *p)
-{
-	clock_t utime = cputime_to_clock_t(p->utime),
-		total = utime + cputime_to_clock_t(p->stime);
-	u64 temp;
-
-	/*
-	 * Use CFS's precise accounting:
-	 */
-	temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
-
-	if (total) {
-		temp *= utime;
-		do_div(temp, total);
-	}
-	utime = (clock_t)temp;
-
-	p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
-	return p->prev_utime;
-}
-
-static cputime_t task_stime(struct task_struct *p)
-{
-	clock_t stime;
-
-	/*
-	 * Use CFS's precise accounting. (we subtract utime from
-	 * the total, to make sure the total observed by userspace
-	 * grows monotonically - apps rely on that):
-	 */
-	stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
-			cputime_to_clock_t(task_utime(p));
-
-	if (stime >= 0)
-		p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
-
-	return p->prev_stime;
-}
-#endif
-
-static cputime_t task_gtime(struct task_struct *p)
-{
-	return p->gtime;
-}
-
 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 			struct pid *pid, struct task_struct *task, int whole)
 {
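
Note: deleting task_utime()/task_stime()/task_gtime() here does not lose functionality; the new side of the final hunk below still calls task_gtime(), so these helpers are presumably provided from common code outside this file by the rest of the series. What the non-CONFIG_VIRT_CPU_ACCOUNTING task_utime() did was distribute the scheduler's precise runtime (p->se.sum_exec_runtime) across user and system time in the ratio reported by the tick-based counters, with task_stime() taking the remainder so the total seen by userspace keeps growing monotonically. A standalone userspace illustration of that arithmetic, with made-up sample values:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        /* Made-up sample values, all expressed in clock ticks. */
        uint64_t utime_ticks = 300;     /* tick-sampled user time           */
        uint64_t stime_ticks = 100;     /* tick-sampled system time         */
        uint64_t precise     = 420;     /* se.sum_exec_runtime, converted   */
                                        /* from nanoseconds to clock ticks  */
        uint64_t total = utime_ticks + stime_ticks;

        /* Same arithmetic as the removed task_utime():
         * precise * utime / (utime + stime), falling back to the precise
         * value when no ticks were sampled at all. */
        uint64_t scaled_utime = total ? precise * utime_ticks / total : precise;

        /* The removed task_stime() reported the remainder, so that
         * utime + stime equals the precise CFS total. */
        uint64_t scaled_stime = precise - scaled_utime;

        printf("scaled utime=%" PRIu64 " stime=%" PRIu64 "\n",
               scaled_utime, scaled_stime);
        return 0;
}
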
@@ -454,20 +388,20 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
 		/* add up live thread stats at the group level */
 		if (whole) {
+			struct task_cputime cputime;
 			struct task_struct *t = task;
 			do {
 				min_flt += t->min_flt;
 				maj_flt += t->maj_flt;
-				utime = cputime_add(utime, task_utime(t));
-				stime = cputime_add(stime, task_stime(t));
 				gtime = cputime_add(gtime, task_gtime(t));
 				t = next_thread(t);
 			} while (t != task);
 
 			min_flt += sig->min_flt;
 			maj_flt += sig->maj_flt;
-			utime = cputime_add(utime, sig->utime);
-			stime = cputime_add(stime, sig->stime);
+			thread_group_cputime(task, &cputime);
+			utime = cputime.utime;
+			stime = cputime.stime;
 			gtime = cputime_add(gtime, sig->gtime);
 		}
 
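
Note: instead of summing utime/stime thread by thread and then adding the signal struct's accumulated totals, the group totals are now fetched in one call to thread_group_cputime(), which fills a struct task_cputime; that is why the two cputime_add() lines disappear from the loop. Guest time (gtime) keeps its per-thread accumulation because it is not covered by that structure. For orientation, the rough shape of the structure being filled, as introduced elsewhere in this series (field list from memory, details may differ):

struct task_cputime {
        cputime_t               utime;                  /* group user time     */
        cputime_t               stime;                  /* group system time   */
        unsigned long long      sum_exec_runtime;       /* group sched runtime */
};
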