Diffstat (limited to 'fs/proc/array.c')
-rw-r--r--	fs/proc/array.c	125
1 file changed, 24 insertions(+), 101 deletions(-)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 822c2d506518..13b5d0708175 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -134,13 +134,16 @@ static inline void task_name(struct seq_file *m, struct task_struct *p)
  * simple bit tests.
  */
 static const char *task_state_array[] = {
 	"R (running)",		/* 0 */
 	"S (sleeping)",		/* 1 */
 	"D (disk sleep)",	/* 2 */
 	"T (stopped)",		/* 4 */
-	"T (tracing stop)",	/* 8 */
+	"t (tracing stop)",	/* 8 */
 	"Z (zombie)",		/* 16 */
-	"X (dead)"		/* 32 */
+	"X (dead)",		/* 32 */
+	"x (dead)",		/* 64 */
+	"K (wakekill)",		/* 128 */
+	"W (waking)",		/* 256 */
 };
 
 static inline const char *get_task_state(struct task_struct *tsk)
@@ -148,6 +151,8 @@ static inline const char *get_task_state(struct task_struct *tsk)
 	unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
 	const char **p = &task_state_array[0];
 
+	BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array));
+
 	while (state) {
 		p++;
 		state >>= 1;
@@ -322,93 +327,15 @@ static inline void task_context_switch_counts(struct seq_file *m,
 			p->nivcsw);
 }
 
-#ifdef CONFIG_MMU
-
-struct stack_stats {
-	struct vm_area_struct *vma;
-	unsigned long startpage;
-	unsigned long usage;
-};
-
-static int stack_usage_pte_range(pmd_t *pmd, unsigned long addr,
-		unsigned long end, struct mm_walk *walk)
-{
-	struct stack_stats *ss = walk->private;
-	struct vm_area_struct *vma = ss->vma;
-	pte_t *pte, ptent;
-	spinlock_t *ptl;
-	int ret = 0;
-
-	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	for (; addr != end; pte++, addr += PAGE_SIZE) {
-		ptent = *pte;
-
-#ifdef CONFIG_STACK_GROWSUP
-		if (pte_present(ptent) || is_swap_pte(ptent))
-			ss->usage = addr - ss->startpage + PAGE_SIZE;
-#else
-		if (pte_present(ptent) || is_swap_pte(ptent)) {
-			ss->usage = ss->startpage - addr + PAGE_SIZE;
-			pte++;
-			ret = 1;
-			break;
-		}
-#endif
-	}
-	pte_unmap_unlock(pte - 1, ptl);
-	cond_resched();
-	return ret;
-}
-
-static inline unsigned long get_stack_usage_in_bytes(struct vm_area_struct *vma,
-		struct task_struct *task)
-{
-	struct stack_stats ss;
-	struct mm_walk stack_walk = {
-		.pmd_entry = stack_usage_pte_range,
-		.mm = vma->vm_mm,
-		.private = &ss,
-	};
-
-	if (!vma->vm_mm || is_vm_hugetlb_page(vma))
-		return 0;
-
-	ss.vma = vma;
-	ss.startpage = task->stack_start & PAGE_MASK;
-	ss.usage = 0;
-
-#ifdef CONFIG_STACK_GROWSUP
-	walk_page_range(KSTK_ESP(task) & PAGE_MASK, vma->vm_end,
-		&stack_walk);
-#else
-	walk_page_range(vma->vm_start, (KSTK_ESP(task) & PAGE_MASK) + PAGE_SIZE,
-		&stack_walk);
-#endif
-	return ss.usage;
-}
-
-static inline void task_show_stack_usage(struct seq_file *m,
-		struct task_struct *task)
-{
-	struct vm_area_struct *vma;
-	struct mm_struct *mm = get_task_mm(task);
-
-	if (mm) {
-		down_read(&mm->mmap_sem);
-		vma = find_vma(mm, task->stack_start);
-		if (vma)
-			seq_printf(m, "Stack usage:\t%lu kB\n",
-				get_stack_usage_in_bytes(vma, task) >> 10);
-
-		up_read(&mm->mmap_sem);
-		mmput(mm);
-	}
-}
-#else
-static void task_show_stack_usage(struct seq_file *m, struct task_struct *task)
+static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
 {
+	seq_printf(m, "Cpus_allowed:\t");
+	seq_cpumask(m, &task->cpus_allowed);
+	seq_printf(m, "\n");
+	seq_printf(m, "Cpus_allowed_list:\t");
+	seq_cpumask_list(m, &task->cpus_allowed);
+	seq_printf(m, "\n");
 }
-#endif /* CONFIG_MMU */
 
 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 		struct pid *pid, struct task_struct *task)
@@ -424,12 +351,12 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 	}
 	task_sig(m, task);
 	task_cap(m, task);
+	task_cpus_allowed(m, task);
 	cpuset_task_status_allowed(m, task);
 #if defined(CONFIG_S390)
 	task_show_regs(m, task);
 #endif
 	task_context_switch_counts(m, task);
-	task_show_stack_usage(m, task);
 	return 0;
 }
 
@@ -495,20 +422,17 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
 	/* add up live thread stats at the group level */
 	if (whole) {
-		struct task_cputime cputime;
 		struct task_struct *t = task;
 		do {
 			min_flt += t->min_flt;
 			maj_flt += t->maj_flt;
-			gtime = cputime_add(gtime, task_gtime(t));
+			gtime = cputime_add(gtime, t->gtime);
 			t = next_thread(t);
 		} while (t != task);
 
 		min_flt += sig->min_flt;
 		maj_flt += sig->maj_flt;
-		thread_group_cputime(task, &cputime);
-		utime = cputime.utime;
-		stime = cputime.stime;
+		thread_group_times(task, &utime, &stime);
 		gtime = cputime_add(gtime, sig->gtime);
 	}
 
@@ -524,9 +448,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 	if (!whole) {
 		min_flt = task->min_flt;
 		maj_flt = task->maj_flt;
-		utime = task_utime(task);
-		stime = task_stime(task);
-		gtime = task_gtime(task);
+		task_times(task, &utime, &stime);
+		gtime = task->gtime;
 	}
 
 	/* scale priority and nice values from timeslices to -20..20 */
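
For illustration only (not part of the patch): a minimal user-space C sketch of the lookup the patched get_task_state() performs. The string table and the shift loop are copied from the hunks above; the integer state values passed in main() are hypothetical inputs chosen to match the /* ... */ annotations, since the kernel's TASK_* macros are not visible here. The BUILD_BUG_ON added by the patch enforces the invariant the sketch relies on: the array holds 1 + ilog2(TASK_STATE_MAX) entries, i.e. one per possible state bit plus the zero entry for a running task.

#include <stdio.h>

/* Copy of the patched task_state_array[]: index 0 means "no bit set",
 * and every later index names the state whose bit value is 1 << (index - 1). */
static const char *task_state_array[] = {
	"R (running)",		/* 0 */
	"S (sleeping)",		/* 1 */
	"D (disk sleep)",	/* 2 */
	"T (stopped)",		/* 4 */
	"t (tracing stop)",	/* 8 */
	"Z (zombie)",		/* 16 */
	"X (dead)",		/* 32 */
	"x (dead)",		/* 64 */
	"K (wakekill)",		/* 128 */
	"W (waking)",		/* 256 */
};

/* Same walk as get_task_state(): shift the state word right until it is
 * zero, advancing one entry per shift, so the number of shifts (one past
 * the highest set bit) selects the entry; with a single bit set, that is
 * exactly the entry annotated with that bit's value. */
static const char *state_name(unsigned int state)
{
	const char **p = &task_state_array[0];

	while (state) {
		p++;
		state >>= 1;
	}
	return *p;
}

int main(void)
{
	printf("%s\n", state_name(0));	/* R (running) */
	printf("%s\n", state_name(4));	/* T (stopped) */
	printf("%s\n", state_name(64));	/* x (dead) */
	return 0;
}

Compiling and running the sketch prints R (running), T (stopped), and x (dead), which is the mapping the patched array entries encode.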