Diffstat (limited to 'fs/proc/array.c'):
 fs/proc/array.c | 132 +++++++----------------
 1 file changed, 28 insertions(+), 104 deletions(-)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 822c2d506518..e51f2ec2c5e5 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -68,7 +68,6 @@
 #include <linux/hugetlb.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
-#include <linux/slab.h>
 #include <linux/smp.h>
 #include <linux/signal.h>
 #include <linux/highmem.h>
@@ -134,13 +133,16 @@ static inline void task_name(struct seq_file *m, struct task_struct *p)
  * simple bit tests.
  */
 static const char *task_state_array[] = {
 	"R (running)",		/*   0 */
 	"S (sleeping)",		/*   1 */
 	"D (disk sleep)",	/*   2 */
 	"T (stopped)",		/*   4 */
-	"T (tracing stop)",	/*   8 */
+	"t (tracing stop)",	/*   8 */
 	"Z (zombie)",		/*  16 */
-	"X (dead)"		/*  32 */
+	"X (dead)",		/*  32 */
+	"x (dead)",		/*  64 */
+	"K (wakekill)",		/* 128 */
+	"W (waking)",		/* 256 */
 };
 
 static inline const char *get_task_state(struct task_struct *tsk)
@@ -148,6 +150,8 @@ static inline const char *get_task_state(struct task_struct *tsk)
 	unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
 	const char **p = &task_state_array[0];
 
+	BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array));
+
 	while (state) {
 		p++;
 		state >>= 1;
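
For reference, get_task_state() maps the state word to an array index by bit position: each right shift advances the pointer one entry, so the loop ends on the entry for the most significant set bit, and state 0 stays at "R (running)". That is also what the new BUILD_BUG_ON checks: with TASK_STATE_MAX at 512 here, 1 + ilog2(512) = 10 must match the ten table entries. A minimal userspace sketch of the same walk; state_name() is an illustration of the technique, not kernel code:

	#include <stdio.h>

	static const char *task_state_array[] = {
		"R (running)",		/*   0 */
		"S (sleeping)",		/*   1 */
		"D (disk sleep)",	/*   2 */
		"T (stopped)",		/*   4 */
		"t (tracing stop)",	/*   8 */
		"Z (zombie)",		/*  16 */
		"X (dead)",		/*  32 */
		"x (dead)",		/*  64 */
		"K (wakekill)",		/* 128 */
		"W (waking)",		/* 256 */
	};

	static const char *state_name(unsigned int state)
	{
		const char **p = &task_state_array[0];

		/* One shift per entry: the loop ends on the slot for
		 * the most significant set bit of 'state'. */
		while (state) {
			p++;
			state >>= 1;
		}
		return *p;
	}

	int main(void)
	{
		printf("%s\n", state_name(8));	/* prints "t (tracing stop)" */
		return 0;
	}
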
@@ -265,8 +269,10 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
 		blocked = p->blocked;
 		collect_sigign_sigcatch(p, &ignored, &caught);
 		num_threads = atomic_read(&p->signal->count);
+		rcu_read_lock();  /* FIXME: is this correct? */
 		qsize = atomic_read(&__task_cred(p)->user->sigpending);
-		qlim = p->signal->rlim[RLIMIT_SIGPENDING].rlim_cur;
+		rcu_read_unlock();
+		qlim = task_rlimit(p, RLIMIT_SIGPENDING);
 		unlock_task_sighand(p, &flags);
 	}
 
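
Two things happen in that hunk: __task_cred() must be called under an RCU read lock (hence the FIXME'd rcu_read_lock() pair), and the open-coded rlimit lookup becomes the new task_rlimit() helper. As a sketch of what the helper amounts to (the definition lives in include/linux/sched.h in this series):

	/* Rough sketch of the task_rlimit() helper this patch starts
	 * using; it reads the soft limit via the task's signal struct. */
	static inline unsigned long task_rlimit(const struct task_struct *tsk,
						unsigned int limit)
	{
		return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
	}
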
@@ -322,93 +328,15 @@ static inline void task_context_switch_counts(struct seq_file *m,
 			p->nivcsw);
 }
 
-#ifdef CONFIG_MMU
-
-struct stack_stats {
-	struct vm_area_struct *vma;
-	unsigned long	startpage;
-	unsigned long	usage;
-};
-
-static int stack_usage_pte_range(pmd_t *pmd, unsigned long addr,
-				unsigned long end, struct mm_walk *walk)
-{
-	struct stack_stats *ss = walk->private;
-	struct vm_area_struct *vma = ss->vma;
-	pte_t *pte, ptent;
-	spinlock_t *ptl;
-	int ret = 0;
-
-	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	for (; addr != end; pte++, addr += PAGE_SIZE) {
-		ptent = *pte;
-
-#ifdef CONFIG_STACK_GROWSUP
-		if (pte_present(ptent) || is_swap_pte(ptent))
-			ss->usage = addr - ss->startpage + PAGE_SIZE;
-#else
-		if (pte_present(ptent) || is_swap_pte(ptent)) {
-			ss->usage = ss->startpage - addr + PAGE_SIZE;
-			pte++;
-			ret = 1;
-			break;
-		}
-#endif
-	}
-	pte_unmap_unlock(pte - 1, ptl);
-	cond_resched();
-	return ret;
-}
-
-static inline unsigned long get_stack_usage_in_bytes(struct vm_area_struct *vma,
-				struct task_struct *task)
-{
-	struct stack_stats ss;
-	struct mm_walk stack_walk = {
-		.pmd_entry = stack_usage_pte_range,
-		.mm = vma->vm_mm,
-		.private = &ss,
-	};
-
-	if (!vma->vm_mm || is_vm_hugetlb_page(vma))
-		return 0;
-
-	ss.vma = vma;
-	ss.startpage = task->stack_start & PAGE_MASK;
-	ss.usage = 0;
-
-#ifdef CONFIG_STACK_GROWSUP
-	walk_page_range(KSTK_ESP(task) & PAGE_MASK, vma->vm_end,
-		&stack_walk);
-#else
-	walk_page_range(vma->vm_start, (KSTK_ESP(task) & PAGE_MASK) + PAGE_SIZE,
-		&stack_walk);
-#endif
-	return ss.usage;
-}
-
-static inline void task_show_stack_usage(struct seq_file *m,
-						struct task_struct *task)
-{
-	struct vm_area_struct *vma;
-	struct mm_struct *mm = get_task_mm(task);
-
-	if (mm) {
-		down_read(&mm->mmap_sem);
-		vma = find_vma(mm, task->stack_start);
-		if (vma)
-			seq_printf(m, "Stack usage:\t%lu kB\n",
-				get_stack_usage_in_bytes(vma, task) >> 10);
-
-		up_read(&mm->mmap_sem);
-		mmput(mm);
-	}
-}
-#else
-static void task_show_stack_usage(struct seq_file *m, struct task_struct *task)
+static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
 {
+	seq_printf(m, "Cpus_allowed:\t");
+	seq_cpumask(m, &task->cpus_allowed);
+	seq_printf(m, "\n");
+	seq_printf(m, "Cpus_allowed_list:\t");
+	seq_cpumask_list(m, &task->cpus_allowed);
+	seq_printf(m, "\n");
 }
-#endif		/* CONFIG_MMU */
 
 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 			struct pid *pid, struct task_struct *task)
@@ -424,12 +352,12 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 	}
 	task_sig(m, task);
 	task_cap(m, task);
+	task_cpus_allowed(m, task);
 	cpuset_task_status_allowed(m, task);
 #if defined(CONFIG_S390)
 	task_show_regs(m, task);
 #endif
 	task_context_switch_counts(m, task);
-	task_show_stack_usage(m, task);
 	return 0;
 }
 
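
With task_cpus_allowed() called unconditionally from proc_pid_status(), the affinity mask now appears in /proc/<pid>/status even without CONFIG_CPUSETS: seq_cpumask() emits the hex mask and seq_cpumask_list() the human-readable range form. On an 8-CPU machine with an unrestricted task, the new lines would look something like:

	Cpus_allowed:	ff
	Cpus_allowed_list:	0-7
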
@@ -491,24 +419,21 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 		cutime = sig->cutime;
 		cstime = sig->cstime;
 		cgtime = sig->cgtime;
-		rsslim = sig->rlim[RLIMIT_RSS].rlim_cur;
+		rsslim = ACCESS_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
 
 		/* add up live thread stats at the group level */
 		if (whole) {
-			struct task_cputime cputime;
 			struct task_struct *t = task;
 			do {
 				min_flt += t->min_flt;
 				maj_flt += t->maj_flt;
-				gtime = cputime_add(gtime, task_gtime(t));
+				gtime = cputime_add(gtime, t->gtime);
 				t = next_thread(t);
 			} while (t != task);
 
 			min_flt += sig->min_flt;
 			maj_flt += sig->maj_flt;
-			thread_group_cputime(task, &cputime);
-			utime = cputime.utime;
-			stime = cputime.stime;
+			thread_group_times(task, &utime, &stime);
 			gtime = cputime_add(gtime, sig->gtime);
 		}
 
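
The ACCESS_ONCE() on rsslim forces the compiler to load rlim_cur exactly once, so a concurrent setrlimit() cannot leave the reader with a re-fetched, inconsistent value. The macro is the usual volatile cast from include/linux/compiler.h of this era:

	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
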
@@ -524,9 +449,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 	if (!whole) {
 		min_flt = task->min_flt;
 		maj_flt = task->maj_flt;
-		utime = task_utime(task);
-		stime = task_stime(task);
-		gtime = task_gtime(task);
+		task_times(task, &utime, &stime);
+		gtime = task->gtime;
 	}
 
 	/* scale priority and nice values from timeslices to -20..20 */
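
The cputime changes swap the old task_utime()/task_stime()/task_gtime() accessors for the 2.6.33-era task_times() and thread_group_times() helpers, which fill utime and stime in a single call so the pair is sampled consistently instead of being scaled independently. The resulting call pattern, sketched under that reading (cputime_t and both helpers come from the scheduler code):

	/* Sketch of the new call pattern in do_task_stat(). */
	cputime_t utime, stime, gtime;

	if (whole) {
		/* whole thread group: one call covers all threads */
		thread_group_times(task, &utime, &stime);
	} else {
		/* single task */
		task_times(task, &utime, &stime);
		gtime = task->gtime;
	}
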