author    KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>  2010-01-08 17:42:56 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2010-01-11 12:34:06 -0500
commit    1306d603fcf1f6682f8575d1ff23631a24184b21 (patch)
tree      8e2278cfabf874cb16bf03bd182ae75b87055741 /fs/proc
parent    f146aabfe921006b98dfa4a78506763aedfd3206 (diff)
proc: partially revert "procfs: provide stack information for threads"
Commit d899bf7b ("procfs: provide stack information for threads") added stack usage information to /proc/{pid}/status. But it caused a large performance regression: /proc/{pid}/status is also read by the ps command, and ps is one of the most important userspace components. Computing the stack usage requires both taking mmap_sem and walking the page tables, which are heavy operations. With many processes running, ps performance looks like this:

[before d899bf7b]
% perf stat ps >/dev/null

 Performance counter stats for 'ps':

    4090.435806  task-clock-msecs   #      0.032 CPUs
            229  context-switches   #      0.000 M/sec
              0  CPU-migrations     #      0.000 M/sec
            234  page-faults        #      0.000 M/sec
     8587565207  cycles             #   2099.425 M/sec
     9866662403  instructions       #      1.149 IPC
     3789415411  cache-references   #    926.409 M/sec
       30419509  cache-misses       #      7.437 M/sec

  128.859521955  seconds time elapsed

[after d899bf7b]
% perf stat ps > /dev/null

 Performance counter stats for 'ps':

    4305.081146  task-clock-msecs   #      0.028 CPUs
            480  context-switches   #      0.000 M/sec
              2  CPU-migrations     #      0.000 M/sec
            237  page-faults        #      0.000 M/sec
     9021211334  cycles             #   2095.480 M/sec
    10605887536  instructions       #      1.176 IPC
     3612650999  cache-references   #    839.160 M/sec
       23917502  cache-misses       #      5.556 M/sec

  152.277819582  seconds time elapsed

Elapsed time grows from about 128.9 to 152.3 seconds, roughly an 18% regression. Thus, this patch reverts it. Fortunately, /proc/{pid}/task/{tid}/smaps provides almost the same information, so that can be used instead.

Commit d899bf7b introduced two features:

 1) Add a "[thread stack: xxxx]" annotation to /proc/{pid}/task/{tid}/maps.
 2) Add a "Stack usage:" field to /proc/{pid}/status.

I only revert (2), because I haven't seen (1) cause a regression.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Stefani Seibold <stefani@seibold.net>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
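Since the commit message points to /proc/{pid}/task/{tid}/smaps as the replacement data source, here is a minimal userspace sketch (not part of this commit; the helper name stack_usage_kb and the parsing heuristics are illustrative assumptions) that approximates the removed "Stack usage:" value by summing the Rss: and Swap: lines of the stack VMA in smaps. The reverted kernel code instead measured the touched span from stack_start down to the deepest present or swapped PTE, so the two figures can differ.

/*
 * Rough approximation of per-thread stack usage from
 * /proc/<pid>/task/<tid>/smaps, now that the "Stack usage:" field is
 * gone from /proc/<pid>/status.
 *
 * The stack VMA is identified by a "[stack" substring in its smaps
 * header line (the main thread's "[stack]", or the "[thread stack: ...]"
 * annotation that feature (1) keeps); its Rss: and Swap: fields are
 * summed as the usage estimate.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

static long stack_usage_kb(pid_t pid, pid_t tid)
{
        char path[64], line[256];
        unsigned long lo, hi;
        long kb = 0, val;
        int in_stack = 0;
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%d/task/%d/smaps",
                 (int)pid, (int)tid);
        f = fopen(path, "r");
        if (!f)
                return -1;

        while (fgets(line, sizeof(line), f)) {
                /* VMA header lines start with "<start>-<end> ..." */
                if (sscanf(line, "%lx-%lx", &lo, &hi) == 2) {
                        in_stack = strstr(line, "[stack") != NULL;
                        continue;
                }
                /* Field lines of the stack VMA: count resident + swapped */
                if (in_stack && (sscanf(line, "Rss: %ld kB", &val) == 1 ||
                                 sscanf(line, "Swap: %ld kB", &val) == 1))
                        kb += val;
        }
        fclose(f);
        return kb;
}

int main(int argc, char **argv)
{
        pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : getpid();
        pid_t tid = argc > 2 ? (pid_t)atoi(argv[2]) : pid;
        long kb = stack_usage_kb(pid, tid);

        if (kb < 0) {
                perror("smaps");
                return 1;
        }
        printf("Stack usage:\t%ld kB\n", kb);
        return 0;
}

Build with "cc -o stack_usage stack_usage.c" and run "./stack_usage <pid> <tid>"; with no arguments it reports the calling process's own main-thread stack.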
Diffstat (limited to 'fs/proc')
-rw-r--r--  fs/proc/array.c  89
1 file changed, 0 insertions(+), 89 deletions(-)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index f560325c444f..13b5d0708175 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -327,94 +327,6 @@ static inline void task_context_switch_counts(struct seq_file *m,
                         p->nivcsw);
 }
 
-#ifdef CONFIG_MMU
-
-struct stack_stats {
-        struct vm_area_struct *vma;
-        unsigned long startpage;
-        unsigned long usage;
-};
-
-static int stack_usage_pte_range(pmd_t *pmd, unsigned long addr,
-                unsigned long end, struct mm_walk *walk)
-{
-        struct stack_stats *ss = walk->private;
-        struct vm_area_struct *vma = ss->vma;
-        pte_t *pte, ptent;
-        spinlock_t *ptl;
-        int ret = 0;
-
-        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-        for (; addr != end; pte++, addr += PAGE_SIZE) {
-                ptent = *pte;
-
-#ifdef CONFIG_STACK_GROWSUP
-                if (pte_present(ptent) || is_swap_pte(ptent))
-                        ss->usage = addr - ss->startpage + PAGE_SIZE;
-#else
-                if (pte_present(ptent) || is_swap_pte(ptent)) {
-                        ss->usage = ss->startpage - addr + PAGE_SIZE;
-                        pte++;
-                        ret = 1;
-                        break;
-                }
-#endif
-        }
-        pte_unmap_unlock(pte - 1, ptl);
-        cond_resched();
-        return ret;
-}
-
-static inline unsigned long get_stack_usage_in_bytes(struct vm_area_struct *vma,
-                struct task_struct *task)
-{
-        struct stack_stats ss;
-        struct mm_walk stack_walk = {
-                .pmd_entry = stack_usage_pte_range,
-                .mm = vma->vm_mm,
-                .private = &ss,
-        };
-
-        if (!vma->vm_mm || is_vm_hugetlb_page(vma))
-                return 0;
-
-        ss.vma = vma;
-        ss.startpage = task->stack_start & PAGE_MASK;
-        ss.usage = 0;
-
-#ifdef CONFIG_STACK_GROWSUP
-        walk_page_range(KSTK_ESP(task) & PAGE_MASK, vma->vm_end,
-                        &stack_walk);
-#else
-        walk_page_range(vma->vm_start, (KSTK_ESP(task) & PAGE_MASK) + PAGE_SIZE,
-                        &stack_walk);
-#endif
-        return ss.usage;
-}
-
-static inline void task_show_stack_usage(struct seq_file *m,
-                                         struct task_struct *task)
-{
-        struct vm_area_struct *vma;
-        struct mm_struct *mm = get_task_mm(task);
-
-        if (mm) {
-                down_read(&mm->mmap_sem);
-                vma = find_vma(mm, task->stack_start);
-                if (vma)
-                        seq_printf(m, "Stack usage:\t%lu kB\n",
-                                   get_stack_usage_in_bytes(vma, task) >> 10);
-
-                up_read(&mm->mmap_sem);
-                mmput(mm);
-        }
-}
-#else
-static void task_show_stack_usage(struct seq_file *m, struct task_struct *task)
-{
-}
-#endif        /* CONFIG_MMU */
-
 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
 {
         seq_printf(m, "Cpus_allowed:\t");
@@ -445,7 +357,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
         task_show_regs(m, task);
 #endif
         task_context_switch_counts(m, task);
-        task_show_stack_usage(m, task);
         return 0;
 }
 