author		Stephen Wilson <wilsons@start.ca>	2011-05-24 20:12:47 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-25 11:39:34 -0400
commit		f69ff943df6972aae96c10733b6847fa094d8a59
tree		b0812b5e0b1376f193a9db088ebd8856deabed00 /mm/mempolicy.c
parent		13057efb0a0063eb8042d99093ec88a52c4f1593
mm: proc: move show_numa_map() to fs/proc/task_mmu.c
Moving show_numa_map() from mempolicy.c to task_mmu.c solves several
issues.
- Having the show() operation "miles away" from the corresponding
seq_file iteration operations is a maintenance burden.
- The need to export ad hoc info like struct proc_maps_private is
eliminated.
- The implementation of show_numa_map() can be improved in a simple
manner by cooperating with the other seq_file operations (start,
stop, etc.) -- something that would be messy to do without this
change (see the sketch below).
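
As context for the last point, here is a minimal sketch of the seq_file
wiring that show_numa_map() joins on the fs/proc/task_mmu.c side.  The
m_start/m_next/m_stop names are assumed to be the existing vma iteration
helpers in that file (the ones already driving /proc/<pid>/maps); this is
the generic pattern, not the literal code added by this patch:

	#include <linux/seq_file.h>

	/*
	 * Sketch only: the seq_file iterator calls start/next/stop to
	 * walk the vmas and manage locking, and show() to print one
	 * record per step -- so show_numa_map() can share that state
	 * instead of making private arrangements with mempolicy.c.
	 */
	static const struct seq_operations proc_pid_numa_maps_op = {
		.start	= m_start,		/* find first vma, take locks */
		.next	= m_next,		/* advance to the next vma */
		.stop	= m_stop,		/* release locks and state */
		.show	= show_numa_map,	/* emit one line per vma */
	};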
Signed-off-by: Stephen Wilson <wilsons@start.ca>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--	mm/mempolicy.c	183
1 files changed, 0 insertions, 183 deletions
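
The code removed below is built on the kernel's generic page-table
walker: callbacks registered in a struct mm_walk are invoked for each
pmd (and each hugetlb entry) in a range, with caller state passed
through walk->private.  A condensed sketch of that pattern follows
(2.6.39-era API; the accumulator type and callback names are
illustrative, not kernel code):

	#include <linux/mm.h>

	/* Illustrative accumulator, handed to callbacks via walk->private. */
	struct pte_count {
		unsigned long pages;
	};

	/* Invoked once per pmd; responsible for the ptes in [addr, end). */
	static int count_present(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
	{
		struct pte_count *pc = walk->private;
		spinlock_t *ptl;
		pte_t *pte, *orig_pte;

		orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		do {
			if (pte_present(*pte))
				pc->pages++;
		} while (pte++, addr += PAGE_SIZE, addr != end);
		pte_unmap_unlock(orig_pte, ptl);
		return 0;	/* nonzero would abort the walk */
	}

	/* Walk one vma and count its resident pages. */
	static unsigned long count_vma_pages(struct vm_area_struct *vma)
	{
		struct pte_count pc = { 0 };
		struct mm_walk walk = {
			.pmd_entry = count_present,
			.mm = vma->vm_mm,
			.private = &pc,
		};

		walk_page_range(vma->vm_start, vma->vm_end, &walk);
		return pc.pages;
	}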
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3b6e4057fab8..e7fb9d25c54e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2525,186 +2525,3 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
 	}
 	return p - buffer;
 }
-
-struct numa_maps {
-	struct vm_area_struct *vma;
-	unsigned long pages;
-	unsigned long anon;
-	unsigned long active;
-	unsigned long writeback;
-	unsigned long mapcount_max;
-	unsigned long dirty;
-	unsigned long swapcache;
-	unsigned long node[MAX_NUMNODES];
-};
-
-static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty)
-{
-	int count = page_mapcount(page);
-
-	md->pages++;
-	if (pte_dirty || PageDirty(page))
-		md->dirty++;
-
-	if (PageSwapCache(page))
-		md->swapcache++;
-
-	if (PageActive(page) || PageUnevictable(page))
-		md->active++;
-
-	if (PageWriteback(page))
-		md->writeback++;
-
-	if (PageAnon(page))
-		md->anon++;
-
-	if (count > md->mapcount_max)
-		md->mapcount_max = count;
-
-	md->node[page_to_nid(page)]++;
-}
-
-static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
-		unsigned long end, struct mm_walk *walk)
-{
-	struct numa_maps *md;
-	spinlock_t *ptl;
-	pte_t *orig_pte;
-	pte_t *pte;
-
-	md = walk->private;
-	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-	do {
-		struct page *page;
-		int nid;
-
-		if (!pte_present(*pte))
-			continue;
-
-		page = vm_normal_page(md->vma, addr, *pte);
-		if (!page)
-			continue;
-
-		if (PageReserved(page))
-			continue;
-
-		nid = page_to_nid(page);
-		if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
-			continue;
-
-		gather_stats(page, md, pte_dirty(*pte));
-
-	} while (pte++, addr += PAGE_SIZE, addr != end);
-	pte_unmap_unlock(orig_pte, ptl);
-	return 0;
-}
-
-#ifdef CONFIG_HUGETLB_PAGE
-static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
-		unsigned long addr, unsigned long end, struct mm_walk *walk)
-{
-	struct numa_maps *md;
-	struct page *page;
-
-	if (pte_none(*pte))
-		return 0;
-
-	page = pte_page(*pte);
-	if (!page)
-		return 0;
-
-	md = walk->private;
-	gather_stats(page, md, pte_dirty(*pte));
-	return 0;
-}
-
-#else
-static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
-		unsigned long addr, unsigned long end, struct mm_walk *walk)
-{
-	return 0;
-}
-#endif
-
-/*
- * Display pages allocated per node and memory policy via /proc.
- */
-int show_numa_map(struct seq_file *m, void *v)
-{
-	struct proc_maps_private *priv = m->private;
-	struct vm_area_struct *vma = v;
-	struct numa_maps *md;
-	struct file *file = vma->vm_file;
-	struct mm_struct *mm = vma->vm_mm;
-	struct mm_walk walk = {};
-	struct mempolicy *pol;
-	int n;
-	char buffer[50];
-
-	if (!mm)
-		return 0;
-
-	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
-	if (!md)
-		return 0;
-
-	md->vma = vma;
-
-	walk.hugetlb_entry = gather_hugetbl_stats;
-	walk.pmd_entry = gather_pte_stats;
-	walk.private = md;
-	walk.mm = mm;
-
-	pol = get_vma_policy(priv->task, vma, vma->vm_start);
-	mpol_to_str(buffer, sizeof(buffer), pol, 0);
-	mpol_cond_put(pol);
-
-	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
-
-	if (file) {
-		seq_printf(m, " file=");
-		seq_path(m, &file->f_path, "\n\t= ");
-	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
-		seq_printf(m, " heap");
-	} else if (vma->vm_start <= mm->start_stack &&
-			vma->vm_end >= mm->start_stack) {
-		seq_printf(m, " stack");
-	}
-
-	walk_page_range(vma->vm_start, vma->vm_end, &walk);
-
-	if (!md->pages)
-		goto out;
-
-	if (md->anon)
-		seq_printf(m," anon=%lu",md->anon);
-
-	if (md->dirty)
-		seq_printf(m," dirty=%lu",md->dirty);
-
-	if (md->pages != md->anon && md->pages != md->dirty)
-		seq_printf(m, " mapped=%lu", md->pages);
-
-	if (md->mapcount_max > 1)
-		seq_printf(m, " mapmax=%lu", md->mapcount_max);
-
-	if (md->swapcache)
-		seq_printf(m," swapcache=%lu", md->swapcache);
-
-	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
-		seq_printf(m," active=%lu", md->active);
-
-	if (md->writeback)
-		seq_printf(m," writeback=%lu", md->writeback);
-
-	for_each_node_state(n, N_HIGH_MEMORY)
-		if (md->node[n])
-			seq_printf(m, " N%d=%lu", n, md->node[n]);
-out:
-	seq_putc(m, '\n');
-	kfree(md);
-
-	if (m->count < m->size)
-		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
-	return 0;
-}
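
For reference, each invocation of show_numa_map() emits one line of
/proc/<pid>/numa_maps.  Going by the seq_printf() calls above, the
output resembles the following (addresses, paths, and counts are made
up for illustration):

	00400000 default file=/bin/cat mapped=9 mapmax=2 N0=9
	01b69000 default heap anon=4 dirty=4 active=3 N0=4
	7fff6277a000 default stack anon=3 dirty=3 N0=3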