diff options
author | Stephen Wilson <wilsons@start.ca> | 2011-05-24 20:12:42 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-05-25 11:39:32 -0400 |
commit | 29ea2f6982f1edc4302729116f2246dd7b45471d (patch) | |
tree | b40d85bf30c25537bd269407748abbdb3644a54f /mm/mempolicy.c | |
parent | d98f6cb67fb5b9376d4957d7ba9f32eac35c2e08 (diff) |
mm: use walk_page_range() instead of custom page table walking code
Converting show_numa_map() to use the generic routine decouples the
function from mempolicy.c, allowing it to be moved out of the mm subsystem
and into fs/proc.
Also, include KSM pages in /proc/pid/numa_maps statistics. The pagewalk
logic implemented by check_pte_range() failed to account for such pages as
they were not applicable to the page migration case.
Signed-off-by: Stephen Wilson <wilsons@start.ca>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r-- | mm/mempolicy.c | 75 |
1 file changed, 68 insertions(+), 7 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 5bfb03ef3cb0..945e85de2d4c 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -2531,6 +2531,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context) | |||
2531 | } | 2531 | } |
2532 | 2532 | ||
2533 | struct numa_maps { | 2533 | struct numa_maps { |
2534 | struct vm_area_struct *vma; | ||
2534 | unsigned long pages; | 2535 | unsigned long pages; |
2535 | unsigned long anon; | 2536 | unsigned long anon; |
2536 | unsigned long active; | 2537 | unsigned long active; |
@@ -2568,6 +2569,41 @@ static void gather_stats(struct page *page, void *private, int pte_dirty) | |||
2568 | md->node[page_to_nid(page)]++; | 2569 | md->node[page_to_nid(page)]++; |
2569 | } | 2570 | } |
2570 | 2571 | ||
2572 | static int gather_pte_stats(pmd_t *pmd, unsigned long addr, | ||
2573 | unsigned long end, struct mm_walk *walk) | ||
2574 | { | ||
2575 | struct numa_maps *md; | ||
2576 | spinlock_t *ptl; | ||
2577 | pte_t *orig_pte; | ||
2578 | pte_t *pte; | ||
2579 | |||
2580 | md = walk->private; | ||
2581 | orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); | ||
2582 | do { | ||
2583 | struct page *page; | ||
2584 | int nid; | ||
2585 | |||
2586 | if (!pte_present(*pte)) | ||
2587 | continue; | ||
2588 | |||
2589 | page = vm_normal_page(md->vma, addr, *pte); | ||
2590 | if (!page) | ||
2591 | continue; | ||
2592 | |||
2593 | if (PageReserved(page)) | ||
2594 | continue; | ||
2595 | |||
2596 | nid = page_to_nid(page); | ||
2597 | if (!node_isset(nid, node_states[N_HIGH_MEMORY])) | ||
2598 | continue; | ||
2599 | |||
2600 | gather_stats(page, md, pte_dirty(*pte)); | ||
2601 | |||
2602 | } while (pte++, addr += PAGE_SIZE, addr != end); | ||
2603 | pte_unmap_unlock(orig_pte, ptl); | ||
2604 | return 0; | ||
2605 | } | ||
2606 | |||
2571 | #ifdef CONFIG_HUGETLB_PAGE | 2607 | #ifdef CONFIG_HUGETLB_PAGE |
2572 | static void check_huge_range(struct vm_area_struct *vma, | 2608 | static void check_huge_range(struct vm_area_struct *vma, |
2573 | unsigned long start, unsigned long end, | 2609 | unsigned long start, unsigned long end, |
@@ -2597,12 +2633,35 @@ static void check_huge_range(struct vm_area_struct *vma, | |||
2597 | gather_stats(page, md, pte_dirty(*ptep)); | 2633 | gather_stats(page, md, pte_dirty(*ptep)); |
2598 | } | 2634 | } |
2599 | } | 2635 | } |
2636 | |||
2637 | static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask, | ||
2638 | unsigned long addr, unsigned long end, struct mm_walk *walk) | ||
2639 | { | ||
2640 | struct page *page; | ||
2641 | |||
2642 | if (pte_none(*pte)) | ||
2643 | return 0; | ||
2644 | |||
2645 | page = pte_page(*pte); | ||
2646 | if (!page) | ||
2647 | return 0; | ||
2648 | |||
2649 | gather_stats(page, walk->private, pte_dirty(*pte)); | ||
2650 | return 0; | ||
2651 | } | ||
2652 | |||
2600 | #else | 2653 | #else |
2601 | static inline void check_huge_range(struct vm_area_struct *vma, | 2654 | static inline void check_huge_range(struct vm_area_struct *vma, |
2602 | unsigned long start, unsigned long end, | 2655 | unsigned long start, unsigned long end, |
2603 | struct numa_maps *md) | 2656 | struct numa_maps *md) |
2604 | { | 2657 | { |
2605 | } | 2658 | } |
2659 | |||
2660 | static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask, | ||
2661 | unsigned long addr, unsigned long end, struct mm_walk *walk) | ||
2662 | { | ||
2663 | return 0; | ||
2664 | } | ||
2606 | #endif | 2665 | #endif |
2607 | 2666 | ||
2608 | /* | 2667 | /* |
@@ -2615,6 +2674,7 @@ int show_numa_map(struct seq_file *m, void *v) | |||
2615 | struct numa_maps *md; | 2674 | struct numa_maps *md; |
2616 | struct file *file = vma->vm_file; | 2675 | struct file *file = vma->vm_file; |
2617 | struct mm_struct *mm = vma->vm_mm; | 2676 | struct mm_struct *mm = vma->vm_mm; |
2677 | struct mm_walk walk = {}; | ||
2618 | struct mempolicy *pol; | 2678 | struct mempolicy *pol; |
2619 | int n; | 2679 | int n; |
2620 | char buffer[50]; | 2680 | char buffer[50]; |
@@ -2626,6 +2686,13 @@ int show_numa_map(struct seq_file *m, void *v) | |||
2626 | if (!md) | 2686 | if (!md) |
2627 | return 0; | 2687 | return 0; |
2628 | 2688 | ||
2689 | md->vma = vma; | ||
2690 | |||
2691 | walk.hugetlb_entry = gather_hugetbl_stats; | ||
2692 | walk.pmd_entry = gather_pte_stats; | ||
2693 | walk.private = md; | ||
2694 | walk.mm = mm; | ||
2695 | |||
2629 | pol = get_vma_policy(priv->task, vma, vma->vm_start); | 2696 | pol = get_vma_policy(priv->task, vma, vma->vm_start); |
2630 | mpol_to_str(buffer, sizeof(buffer), pol, 0); | 2697 | mpol_to_str(buffer, sizeof(buffer), pol, 0); |
2631 | mpol_cond_put(pol); | 2698 | mpol_cond_put(pol); |
@@ -2642,13 +2709,7 @@ int show_numa_map(struct seq_file *m, void *v) | |||
2642 | seq_printf(m, " stack"); | 2709 | seq_printf(m, " stack"); |
2643 | } | 2710 | } |
2644 | 2711 | ||
2645 | if (is_vm_hugetlb_page(vma)) { | 2712 | walk_page_range(vma->vm_start, vma->vm_end, &walk); |
2646 | check_huge_range(vma, vma->vm_start, vma->vm_end, md); | ||
2647 | seq_printf(m, " huge"); | ||
2648 | } else { | ||
2649 | check_pgd_range(vma, vma->vm_start, vma->vm_end, | ||
2650 | &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md); | ||
2651 | } | ||
2652 | 2713 | ||
2653 | if (!md->pages) | 2714 | if (!md->pages) |
2654 | goto out; | 2715 | goto out; |