Diffstat (limited to 'fs/proc/array.c')
-rw-r--r--	fs/proc/array.c	114
1 file changed, 16 insertions(+), 98 deletions(-)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 4badde179b18..aa8637b81028 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -134,13 +134,16 @@ static inline void task_name(struct seq_file *m, struct task_struct *p)
  * simple bit tests.
  */
 static const char *task_state_array[] = {
 	"R (running)",		/*   0 */
 	"S (sleeping)",		/*   1 */
 	"D (disk sleep)",	/*   2 */
 	"T (stopped)",		/*   4 */
-	"T (tracing stop)",	/*   8 */
+	"t (tracing stop)",	/*   8 */
 	"Z (zombie)",		/*  16 */
-	"X (dead)"		/*  32 */
+	"X (dead)",		/*  32 */
+	"x (dead)",		/*  64 */
+	"K (wakekill)",		/* 128 */
+	"W (waking)",		/* 256 */
 };
 
 static inline const char *get_task_state(struct task_struct *tsk)
@@ -148,6 +151,8 @@ static inline const char *get_task_state(struct task_struct *tsk)
 	unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
 	const char **p = &task_state_array[0];
 
+	BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array));
+
 	while (state) {
 		p++;
 		state >>= 1;
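The BUILD_BUG_ON added here ties the string table to the scheduler's state bits: the reported state is a one-hot mask, so 1 + ilog2(TASK_STATE_MAX) is the number of representable states (with TASK_STATE_MAX at 512, as in this era's sched.h, that is 10, matching the expanded array). The lookup itself just shifts the mask until it is empty. A minimal standalone model of that lookup, written as a hypothetical userspace program rather than kernel code:

#include <stdio.h>

/* Same table as in the hunk above; each entry's index is log2 of its state
 * bit, with index 0 reserved for TASK_RUNNING, which has no bit set. */
static const char *task_state_array[] = {
	"R (running)",		/*   0 */
	"S (sleeping)",		/*   1 */
	"D (disk sleep)",	/*   2 */
	"T (stopped)",		/*   4 */
	"t (tracing stop)",	/*   8 */
	"Z (zombie)",		/*  16 */
	"X (dead)",		/*  32 */
	"x (dead)",		/*  64 */
	"K (wakekill)",		/* 128 */
	"W (waking)",		/* 256 */
};

/* Mirrors the loop in get_task_state(): shift the one-hot mask right,
 * advancing the pointer once per shift, until the mask is empty. */
static const char *state_name(unsigned int state)
{
	const char **p = &task_state_array[0];

	while (state) {
		p++;
		state >>= 1;
	}
	return *p;
}

int main(void)
{
	printf("%s\n", state_name(0));		/* R (running) */
	printf("%s\n", state_name(8));		/* t (tracing stop) */
	printf("%s\n", state_name(256));	/* W (waking) */
	return 0;
}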
@@ -265,8 +270,10 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
 		blocked = p->blocked;
 		collect_sigign_sigcatch(p, &ignored, &caught);
 		num_threads = atomic_read(&p->signal->count);
+		rcu_read_lock();  /* FIXME: is this correct? */
 		qsize = atomic_read(&__task_cred(p)->user->sigpending);
-		qlim = p->signal->rlim[RLIMIT_SIGPENDING].rlim_cur;
+		rcu_read_unlock();
+		qlim = task_rlimit(p, RLIMIT_SIGPENDING);
 		unlock_task_sighand(p, &flags);
 	}
 
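Two separate fixes land in this hunk. First, __task_cred() returns RCU-protected credentials, so the dereference is wrapped in an RCU read-side critical section (the FIXME records the author's doubt about whether the held siglock already suffices). Second, the open-coded rlimit read is replaced by the task_rlimit() accessor, which in this era's include/linux/sched.h has roughly the following shape (a reference sketch, not a verbatim copy):

/* Reads the current (soft) limit for a task through its signal struct;
 * ACCESS_ONCE() keeps the compiler from reloading a value that another
 * thread may change concurrently via setrlimit(). */
static inline unsigned long task_rlimit(const struct task_struct *tsk,
					unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
}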
@@ -322,94 +329,6 @@ static inline void task_context_switch_counts(struct seq_file *m,
 			p->nivcsw);
 }
 
-#ifdef CONFIG_MMU
-
-struct stack_stats {
-	struct vm_area_struct *vma;
-	unsigned long	startpage;
-	unsigned long	usage;
-};
-
-static int stack_usage_pte_range(pmd_t *pmd, unsigned long addr,
-				unsigned long end, struct mm_walk *walk)
-{
-	struct stack_stats *ss = walk->private;
-	struct vm_area_struct *vma = ss->vma;
-	pte_t *pte, ptent;
-	spinlock_t *ptl;
-	int ret = 0;
-
-	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	for (; addr != end; pte++, addr += PAGE_SIZE) {
-		ptent = *pte;
-
-#ifdef CONFIG_STACK_GROWSUP
-		if (pte_present(ptent) || is_swap_pte(ptent))
-			ss->usage = addr - ss->startpage + PAGE_SIZE;
-#else
-		if (pte_present(ptent) || is_swap_pte(ptent)) {
-			ss->usage = ss->startpage - addr + PAGE_SIZE;
-			pte++;
-			ret = 1;
-			break;
-		}
-#endif
-	}
-	pte_unmap_unlock(pte - 1, ptl);
-	cond_resched();
-	return ret;
-}
-
-static inline unsigned long get_stack_usage_in_bytes(struct vm_area_struct *vma,
-				struct task_struct *task)
-{
-	struct stack_stats ss;
-	struct mm_walk stack_walk = {
-		.pmd_entry = stack_usage_pte_range,
-		.mm = vma->vm_mm,
-		.private = &ss,
-	};
-
-	if (!vma->vm_mm || is_vm_hugetlb_page(vma))
-		return 0;
-
-	ss.vma = vma;
-	ss.startpage = task->stack_start & PAGE_MASK;
-	ss.usage = 0;
-
-#ifdef CONFIG_STACK_GROWSUP
-	walk_page_range(KSTK_ESP(task) & PAGE_MASK, vma->vm_end,
-		&stack_walk);
-#else
-	walk_page_range(vma->vm_start, (KSTK_ESP(task) & PAGE_MASK) + PAGE_SIZE,
-		&stack_walk);
-#endif
-	return ss.usage;
-}
-
-static inline void task_show_stack_usage(struct seq_file *m,
-						struct task_struct *task)
-{
-	struct vm_area_struct	*vma;
-	struct mm_struct	*mm = get_task_mm(task);
-
-	if (mm) {
-		down_read(&mm->mmap_sem);
-		vma = find_vma(mm, task->stack_start);
-		if (vma)
-			seq_printf(m, "Stack usage:\t%lu kB\n",
-				get_stack_usage_in_bytes(vma, task) >> 10);
-
-		up_read(&mm->mmap_sem);
-		mmput(mm);
-	}
-}
-#else
-static void task_show_stack_usage(struct seq_file *m, struct task_struct *task)
-{
-}
-#endif		/* CONFIG_MMU */
-
 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
 {
 	seq_printf(m, "Cpus_allowed:\t");
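The block removed above walked the stack VMA's page tables to emit a "Stack usage:" line in /proc/<pid>/status. It was built on the generic page-table walker: walk_page_range() drives the callbacks registered in a struct mm_walk across a virtual address range. Below is a stripped-down sketch of that callback pattern, counting resident pages; the names (count_pte_range, struct page_count) are hypothetical, and only the shape mirrors the deleted helper:

struct page_count {
	unsigned long nr_present;	/* hypothetical accumulator */
};

static int count_pte_range(pmd_t *pmd, unsigned long addr,
			   unsigned long end, struct mm_walk *walk)
{
	struct page_count *pc = walk->private;
	spinlock_t *ptl;
	pte_t *pte;

	/* Take the PTE-page lock, step one PTE per PAGE_SIZE of address
	 * range, and count the entries that are present in memory. */
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (pte_present(*pte))
			pc->nr_present++;
	pte_unmap_unlock(pte - 1, ptl);
	return 0;	/* keep walking the rest of the range */
}

/* Usage: fill in .pmd_entry, .mm and .private of a struct mm_walk, then
 * call walk_page_range(vma->vm_start, vma->vm_end, &walk). */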
@@ -440,7 +359,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 	task_show_regs(m, task);
 #endif
 	task_context_switch_counts(m, task);
-	task_show_stack_usage(m, task);
 	return 0;
 }
 
@@ -502,7 +420,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 		cutime = sig->cutime;
 		cstime = sig->cstime;
 		cgtime = sig->cgtime;
-		rsslim = sig->rlim[RLIMIT_RSS].rlim_cur;
+		rsslim = ACCESS_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
 
 		/* add up live thread stats at the group level */
 		if (whole) {
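The rsslim value is read while another thread may change sig->rlim concurrently through setrlimit(), so ACCESS_ONCE() forces the compiler to load it exactly once instead of possibly re-reading it later. For reference, its definition in this era's include/linux/compiler.h (shown as a sketch):

/* Casting through a volatile lvalue forces a single access at this point
 * and stops the compiler from caching or re-reading the value. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))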