cgit diff view
path: fs/proc/array.c

Diffstat (limited to 'fs/proc/array.c'):
 fs/proc/array.c | 129 +++++++-----------------------------------------------
 1 file changed, 27 insertions(+), 102 deletions(-)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 07f77a7945c3..18e20feee251 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -134,13 +134,16 @@ static inline void task_name(struct seq_file *m, struct task_struct *p)
  * simple bit tests.
  */
 static const char *task_state_array[] = {
 	"R (running)",		/*  0 */
 	"S (sleeping)",		/*  1 */
 	"D (disk sleep)",	/*  2 */
 	"T (stopped)",		/*  4 */
-	"T (tracing stop)",	/*  8 */
+	"t (tracing stop)",	/*  8 */
 	"Z (zombie)",		/* 16 */
-	"X (dead)"		/* 32 */
+	"X (dead)",		/* 32 */
+	"x (dead)",		/* 64 */
+	"K (wakekill)",		/* 128 */
+	"W (waking)",		/* 256 */
 };
 
 static inline const char *get_task_state(struct task_struct *tsk)
@@ -148,6 +151,8 @@ static inline const char *get_task_state(struct task_struct *tsk)
 	unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
 	const char **p = &task_state_array[0];
 
+	BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array));
+
 	while (state) {
 		p++;
 		state >>= 1;
@@ -265,7 +270,9 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
 		blocked = p->blocked;
 		collect_sigign_sigcatch(p, &ignored, &caught);
 		num_threads = atomic_read(&p->signal->count);
+		rcu_read_lock();  /* FIXME: is this correct? */
 		qsize = atomic_read(&__task_cred(p)->user->sigpending);
+		rcu_read_unlock();
 		qlim = p->signal->rlim[RLIMIT_SIGPENDING].rlim_cur;
 		unlock_task_sighand(p, &flags);
 	}
@@ -322,93 +329,15 @@ static inline void task_context_switch_counts(struct seq_file *m,
 			p->nivcsw);
 }
 
-#ifdef CONFIG_MMU
-
-struct stack_stats {
-	struct vm_area_struct *vma;
-	unsigned long	startpage;
-	unsigned long	usage;
-};
-
-static int stack_usage_pte_range(pmd_t *pmd, unsigned long addr,
-				unsigned long end, struct mm_walk *walk)
+static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
 {
-	struct stack_stats *ss = walk->private;
-	struct vm_area_struct *vma = ss->vma;
-	pte_t *pte, ptent;
-	spinlock_t *ptl;
-	int ret = 0;
-
-	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	for (; addr != end; pte++, addr += PAGE_SIZE) {
-		ptent = *pte;
-
-#ifdef CONFIG_STACK_GROWSUP
-		if (pte_present(ptent) || is_swap_pte(ptent))
-			ss->usage = addr - ss->startpage + PAGE_SIZE;
-#else
-		if (pte_present(ptent) || is_swap_pte(ptent)) {
-			ss->usage = ss->startpage - addr + PAGE_SIZE;
-			pte++;
-			ret = 1;
-			break;
-		}
-#endif
-	}
-	pte_unmap_unlock(pte - 1, ptl);
-	cond_resched();
-	return ret;
-}
-
-static inline unsigned long get_stack_usage_in_bytes(struct vm_area_struct *vma,
-				struct task_struct *task)
-{
-	struct stack_stats ss;
-	struct mm_walk stack_walk = {
-		.pmd_entry = stack_usage_pte_range,
-		.mm = vma->vm_mm,
-		.private = &ss,
-	};
-
-	if (!vma->vm_mm || is_vm_hugetlb_page(vma))
-		return 0;
-
-	ss.vma = vma;
-	ss.startpage = task->stack_start & PAGE_MASK;
-	ss.usage = 0;
-
-#ifdef CONFIG_STACK_GROWSUP
-	walk_page_range(KSTK_ESP(task) & PAGE_MASK, vma->vm_end,
-		&stack_walk);
-#else
-	walk_page_range(vma->vm_start, (KSTK_ESP(task) & PAGE_MASK) + PAGE_SIZE,
-		&stack_walk);
-#endif
-	return ss.usage;
-}
-
-static inline void task_show_stack_usage(struct seq_file *m,
-						struct task_struct *task)
-{
-	struct vm_area_struct	*vma;
-	struct mm_struct	*mm = get_task_mm(task);
-
-	if (mm) {
-		down_read(&mm->mmap_sem);
-		vma = find_vma(mm, task->stack_start);
-		if (vma)
-			seq_printf(m, "Stack usage:\t%lu kB\n",
-				get_stack_usage_in_bytes(vma, task) >> 10);
-
-		up_read(&mm->mmap_sem);
-		mmput(mm);
-	}
-}
-#else
-static void task_show_stack_usage(struct seq_file *m, struct task_struct *task)
-{
+	seq_printf(m, "Cpus_allowed:\t");
+	seq_cpumask(m, &task->cpus_allowed);
+	seq_printf(m, "\n");
+	seq_printf(m, "Cpus_allowed_list:\t");
+	seq_cpumask_list(m, &task->cpus_allowed);
+	seq_printf(m, "\n");
 }
-#endif /* CONFIG_MMU */
 
 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 		struct pid *pid, struct task_struct *task)
@@ -424,12 +353,12 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 	}
 	task_sig(m, task);
 	task_cap(m, task);
+	task_cpus_allowed(m, task);
 	cpuset_task_status_allowed(m, task);
 #if defined(CONFIG_S390)
 	task_show_regs(m, task);
 #endif
 	task_context_switch_counts(m, task);
-	task_show_stack_usage(m, task);
 	return 0;
 }
 
@@ -495,20 +424,17 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
 	/* add up live thread stats at the group level */
 	if (whole) {
-		struct task_cputime cputime;
 		struct task_struct *t = task;
 		do {
 			min_flt += t->min_flt;
 			maj_flt += t->maj_flt;
-			gtime = cputime_add(gtime, task_gtime(t));
+			gtime = cputime_add(gtime, t->gtime);
 			t = next_thread(t);
 		} while (t != task);
 
 		min_flt += sig->min_flt;
 		maj_flt += sig->maj_flt;
-		thread_group_cputime(task, &cputime);
-		utime = cputime.utime;
-		stime = cputime.stime;
+		thread_group_times(task, &utime, &stime);
 		gtime = cputime_add(gtime, sig->gtime);
 	}
 
@@ -524,9 +450,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 	if (!whole) {
 		min_flt = task->min_flt;
 		maj_flt = task->maj_flt;
-		utime = task_utime(task);
-		stime = task_stime(task);
-		gtime = task_gtime(task);
+		task_times(task, &utime, &stime);
+		gtime = task->gtime;
 	}
 
 	/* scale priority and nice values from timeslices to -20..20 */
@@ -571,7 +496,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 		rsslim,
 		mm ? mm->start_code : 0,
 		mm ? mm->end_code : 0,
-		(permitted) ? task->stack_start : 0,
+		(permitted && mm) ? task->stack_start : 0,
 		esp,
 		eip,
 		/* The signal information here is obsolete.