author     Stephen Wilson <wilsons@start.ca>                2011-05-24 20:12:47 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-05-25 11:39:34 -0400
commit     f69ff943df6972aae96c10733b6847fa094d8a59 (patch)
tree       b0812b5e0b1376f193a9db088ebd8856deabed00 /fs/proc
parent     13057efb0a0063eb8042d99093ec88a52c4f1593 (diff)
mm: proc: move show_numa_map() to fs/proc/task_mmu.c
Moving show_numa_map() from mempolicy.c to task_mmu.c solves several
issues.
- Having the show() operation "miles away" from the corresponding
seq_file iteration operations is a maintenance burden.
- The need to export ad hoc info like struct proc_maps_private is
eliminated.
- The implementation of show_numa_map() can be improved in a simple
  manner by cooperating with the other seq_file operations (start,
  stop, etc.) -- something that would be messy to do without this
  change. (See the sketch of that pattern just below.)
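For readers unfamiliar with the seq_file contract the last point refers to, here is a minimal sketch of the pattern. The demo_* names and the fake records are invented for illustration; only struct seq_operations and the callback signatures come from <linux/seq_file.h>. The point of the move is that ->show() now sits next to ->start()/->next()/->stop() and can share state with them:

#include <linux/seq_file.h>

#define NR_RECORDS 3	/* fake dataset: three records, keyed by position */

/* start(): take any locks needed, return the first element or NULL */
static void *demo_start(struct seq_file *m, loff_t *pos)
{
	return (*pos < NR_RECORDS) ? pos : NULL;
}

/* next(): advance the cursor, return the next element or NULL */
static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return (*pos < NR_RECORDS) ? pos : NULL;
}

/* stop(): undo whatever start() acquired */
static void demo_stop(struct seq_file *m, void *v)
{
}

/* show(): format one element; shared state can live in m->private */
static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "record %lld\n", (long long)*(loff_t *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_start,
	.next  = demo_next,
	.stop  = demo_stop,
	.show  = demo_show,
};

In task_mmu.c the equivalents are m_start()/m_next()/m_stop() walking the task's VMA list, with show_numa_map() as the ->show() callback.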
Signed-off-by: Stephen Wilson <wilsons@start.ca>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/proc')
-rw-r--r--  fs/proc/task_mmu.c  184
1 file changed, 182 insertions(+), 2 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 318d8654989b..2ed53d18b2ef 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -858,8 +858,188 @@ const struct file_operations proc_pagemap_operations = {
 #endif /* CONFIG_PROC_PAGE_MONITOR */
 
 #ifdef CONFIG_NUMA
-extern int show_numa_map(struct seq_file *m, void *v);
 
+struct numa_maps {
+	struct vm_area_struct *vma;
+	unsigned long pages;
+	unsigned long anon;
+	unsigned long active;
+	unsigned long writeback;
+	unsigned long mapcount_max;
+	unsigned long dirty;
+	unsigned long swapcache;
+	unsigned long node[MAX_NUMNODES];
+};
+
+static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty)
+{
+	int count = page_mapcount(page);
+
+	md->pages++;
+	if (pte_dirty || PageDirty(page))
+		md->dirty++;
+
+	if (PageSwapCache(page))
+		md->swapcache++;
+
+	if (PageActive(page) || PageUnevictable(page))
+		md->active++;
+
+	if (PageWriteback(page))
+		md->writeback++;
+
+	if (PageAnon(page))
+		md->anon++;
+
+	if (count > md->mapcount_max)
+		md->mapcount_max = count;
+
+	md->node[page_to_nid(page)]++;
+}
+
+static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
+		unsigned long end, struct mm_walk *walk)
+{
+	struct numa_maps *md;
+	spinlock_t *ptl;
+	pte_t *orig_pte;
+	pte_t *pte;
+
+	md = walk->private;
+	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	do {
+		struct page *page;
+		int nid;
+
+		if (!pte_present(*pte))
+			continue;
+
+		page = vm_normal_page(md->vma, addr, *pte);
+		if (!page)
+			continue;
+
+		if (PageReserved(page))
+			continue;
+
+		nid = page_to_nid(page);
+		if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
+			continue;
+
+		gather_stats(page, md, pte_dirty(*pte));
+
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	pte_unmap_unlock(orig_pte, ptl);
+	return 0;
+}
+#ifdef CONFIG_HUGETLB_PAGE
+static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
+		unsigned long addr, unsigned long end, struct mm_walk *walk)
+{
+	struct numa_maps *md;
+	struct page *page;
+
+	if (pte_none(*pte))
+		return 0;
+
+	page = pte_page(*pte);
+	if (!page)
+		return 0;
+
+	md = walk->private;
+	gather_stats(page, md, pte_dirty(*pte));
+	return 0;
+}
+
+#else
+static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
+		unsigned long addr, unsigned long end, struct mm_walk *walk)
+{
+	return 0;
+}
+#endif
+
+/*
+ * Display pages allocated per node and memory policy via /proc.
+ */
+static int show_numa_map(struct seq_file *m, void *v)
+{
+	struct proc_maps_private *priv = m->private;
+	struct vm_area_struct *vma = v;
+	struct numa_maps *md;
+	struct file *file = vma->vm_file;
+	struct mm_struct *mm = vma->vm_mm;
+	struct mm_walk walk = {};
+	struct mempolicy *pol;
+	int n;
+	char buffer[50];
+
+	if (!mm)
+		return 0;
+
+	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
+	if (!md)
+		return 0;
+
+	md->vma = vma;
+
+	walk.hugetlb_entry = gather_hugetbl_stats;
+	walk.pmd_entry = gather_pte_stats;
+	walk.private = md;
+	walk.mm = mm;
+
+	pol = get_vma_policy(priv->task, vma, vma->vm_start);
+	mpol_to_str(buffer, sizeof(buffer), pol, 0);
+	mpol_cond_put(pol);
+
+	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
+
+	if (file) {
+		seq_printf(m, " file=");
+		seq_path(m, &file->f_path, "\n\t= ");
+	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
+		seq_printf(m, " heap");
+	} else if (vma->vm_start <= mm->start_stack &&
+			vma->vm_end >= mm->start_stack) {
+		seq_printf(m, " stack");
+	}
+
+	walk_page_range(vma->vm_start, vma->vm_end, &walk);
+
+	if (!md->pages)
+		goto out;
+
+	if (md->anon)
+		seq_printf(m, " anon=%lu", md->anon);
+
+	if (md->dirty)
+		seq_printf(m, " dirty=%lu", md->dirty);
+
+	if (md->pages != md->anon && md->pages != md->dirty)
+		seq_printf(m, " mapped=%lu", md->pages);
+
+	if (md->mapcount_max > 1)
+		seq_printf(m, " mapmax=%lu", md->mapcount_max);
+
+	if (md->swapcache)
+		seq_printf(m, " swapcache=%lu", md->swapcache);
+
+	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
+		seq_printf(m, " active=%lu", md->active);
+
+	if (md->writeback)
+		seq_printf(m, " writeback=%lu", md->writeback);
+
+	for_each_node_state(n, N_HIGH_MEMORY)
+		if (md->node[n])
+			seq_printf(m, " N%d=%lu", n, md->node[n]);
+out:
+	seq_putc(m, '\n');
+	kfree(md);
+
+	if (m->count < m->size)
+		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
+	return 0;
+}
 static const struct seq_operations proc_pid_numa_maps_op = {
 	.start  = m_start,
 	.next   = m_next,
@@ -878,4 +1058,4 @@ const struct file_operations proc_numa_maps_operations = {
 	.llseek		= seq_lseek,
 	.release	= seq_release_private,
 };
-#endif
+#endif /* CONFIG_NUMA */
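For reference, the /proc/<pid>/numa_maps line format implied by the seq_printf() calls in show_numa_map() above: the VMA start address and the mempolicy string come first, then an optional file=/heap/stack tag, then only the counters that are non-zero, and finally one N<node>=<pages> field per node that holds pages. A hypothetical line for an anonymous heap VMA on a two-node machine (all values invented for illustration) would look like:

02aa0000 default heap anon=212 dirty=212 active=108 N0=112 N1=100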