Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--	fs/proc/task_mmu.c	218
1 file changed, 209 insertions(+), 9 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 2e7addfd9803..db15935fa757 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -211,10 +211,10 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct file *file = vma->vm_file;
-	int flags = vma->vm_flags;
+	vm_flags_t flags = vma->vm_flags;
 	unsigned long ino = 0;
 	unsigned long long pgoff = 0;
-	unsigned long start;
+	unsigned long start, end;
 	dev_t dev = 0;
 	int len;
 
@@ -227,13 +227,15 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 
 	/* We don't show the stack guard page in /proc/maps */
 	start = vma->vm_start;
-	if (vma->vm_flags & VM_GROWSDOWN)
-		if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
-			start += PAGE_SIZE;
+	if (stack_guard_page_start(vma, start))
+		start += PAGE_SIZE;
+	end = vma->vm_end;
+	if (stack_guard_page_end(vma, end))
+		end -= PAGE_SIZE;
 
 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
 			start,
-			vma->vm_end,
+			end,
 			flags & VM_READ ? 'r' : '-',
 			flags & VM_WRITE ? 'w' : '-',
 			flags & VM_EXEC ? 'x' : '-',
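
Note: the stack_guard_page_start()/stack_guard_page_end() helpers the hunk above switches to are defined in include/linux/mm.h, not in this file. A minimal sketch of what they encapsulate, assuming they simply fold the old open-coded VM_GROWSDOWN test (plus a symmetric VM_GROWSUP test for the end of the mapping) behind one name -- the bodies below are an assumption, only the names come from this diff:

	/*
	 * Sketch only -- not part of this patch. The real helpers live in
	 * include/linux/mm.h; these bodies are reconstructed from the
	 * open-coded check this hunk removes.
	 */
	static inline int stack_guard_page_start(struct vm_area_struct *vma,
						 unsigned long addr)
	{
		/* First page of a grows-down stack VMA acts as the guard
		 * page, unless the previous VMA is a stack segment that
		 * abuts it directly. */
		return (vma->vm_flags & VM_GROWSDOWN) &&
			vma->vm_start == addr &&
			!vma_stack_continue(vma->vm_prev, addr);
	}

	static inline int stack_guard_page_end(struct vm_area_struct *vma,
					       unsigned long addr)
	{
		/* Symmetric case: last page of a grows-up stack VMA. */
		return (vma->vm_flags & VM_GROWSUP) && vma->vm_end == addr;
	}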
@@ -856,7 +858,192 @@ const struct file_operations proc_pagemap_operations = {
 #endif /* CONFIG_PROC_PAGE_MONITOR */
 
 #ifdef CONFIG_NUMA
-extern int show_numa_map(struct seq_file *m, void *v);
+
+struct numa_maps {
+	struct vm_area_struct *vma;
+	unsigned long pages;
+	unsigned long anon;
+	unsigned long active;
+	unsigned long writeback;
+	unsigned long mapcount_max;
+	unsigned long dirty;
+	unsigned long swapcache;
+	unsigned long node[MAX_NUMNODES];
+};
+
+struct numa_maps_private {
+	struct proc_maps_private proc_maps;
+	struct numa_maps md;
+};
+
+static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty)
+{
+	int count = page_mapcount(page);
+
+	md->pages++;
+	if (pte_dirty || PageDirty(page))
+		md->dirty++;
+
+	if (PageSwapCache(page))
+		md->swapcache++;
+
+	if (PageActive(page) || PageUnevictable(page))
+		md->active++;
+
+	if (PageWriteback(page))
+		md->writeback++;
+
+	if (PageAnon(page))
+		md->anon++;
+
+	if (count > md->mapcount_max)
+		md->mapcount_max = count;
+
+	md->node[page_to_nid(page)]++;
+}
+
+static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
+		unsigned long end, struct mm_walk *walk)
+{
+	struct numa_maps *md;
+	spinlock_t *ptl;
+	pte_t *orig_pte;
+	pte_t *pte;
+
+	md = walk->private;
+	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	do {
+		struct page *page;
+		int nid;
+
+		if (!pte_present(*pte))
+			continue;
+
+		page = vm_normal_page(md->vma, addr, *pte);
+		if (!page)
+			continue;
+
+		if (PageReserved(page))
+			continue;
+
+		nid = page_to_nid(page);
+		if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
+			continue;
+
+		gather_stats(page, md, pte_dirty(*pte));
+
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	pte_unmap_unlock(orig_pte, ptl);
+	return 0;
+}
+#ifdef CONFIG_HUGETLB_PAGE
+static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
+		unsigned long addr, unsigned long end, struct mm_walk *walk)
+{
+	struct numa_maps *md;
+	struct page *page;
+
+	if (pte_none(*pte))
+		return 0;
+
+	page = pte_page(*pte);
+	if (!page)
+		return 0;
+
+	md = walk->private;
+	gather_stats(page, md, pte_dirty(*pte));
+	return 0;
+}
+
+#else
+static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
+		unsigned long addr, unsigned long end, struct mm_walk *walk)
+{
+	return 0;
+}
+#endif
+
+/*
+ * Display pages allocated per node and memory policy via /proc.
+ */
+static int show_numa_map(struct seq_file *m, void *v)
+{
+	struct numa_maps_private *numa_priv = m->private;
+	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
+	struct vm_area_struct *vma = v;
+	struct numa_maps *md = &numa_priv->md;
+	struct file *file = vma->vm_file;
+	struct mm_struct *mm = vma->vm_mm;
+	struct mm_walk walk = {};
+	struct mempolicy *pol;
+	int n;
+	char buffer[50];
+
+	if (!mm)
+		return 0;
+
+	/* Ensure we start with an empty set of numa_maps statistics. */
+	memset(md, 0, sizeof(*md));
+
+	md->vma = vma;
+
+	walk.hugetlb_entry = gather_hugetbl_stats;
+	walk.pmd_entry = gather_pte_stats;
+	walk.private = md;
+	walk.mm = mm;
+
+	pol = get_vma_policy(proc_priv->task, vma, vma->vm_start);
+	mpol_to_str(buffer, sizeof(buffer), pol, 0);
+	mpol_cond_put(pol);
+
+	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
+
+	if (file) {
+		seq_printf(m, " file=");
+		seq_path(m, &file->f_path, "\n\t= ");
+	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
+		seq_printf(m, " heap");
+	} else if (vma->vm_start <= mm->start_stack &&
+			vma->vm_end >= mm->start_stack) {
+		seq_printf(m, " stack");
+	}
+
+	walk_page_range(vma->vm_start, vma->vm_end, &walk);
+
+	if (!md->pages)
+		goto out;
+
+	if (md->anon)
+		seq_printf(m, " anon=%lu", md->anon);
+
+	if (md->dirty)
+		seq_printf(m, " dirty=%lu", md->dirty);
+
+	if (md->pages != md->anon && md->pages != md->dirty)
+		seq_printf(m, " mapped=%lu", md->pages);
+
+	if (md->mapcount_max > 1)
+		seq_printf(m, " mapmax=%lu", md->mapcount_max);
+
+	if (md->swapcache)
+		seq_printf(m, " swapcache=%lu", md->swapcache);
+
+	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
+		seq_printf(m, " active=%lu", md->active);
+
+	if (md->writeback)
+		seq_printf(m, " writeback=%lu", md->writeback);
+
+	for_each_node_state(n, N_HIGH_MEMORY)
+		if (md->node[n])
+			seq_printf(m, " N%d=%lu", n, md->node[n]);
+out:
+	seq_putc(m, '\n');
+
+	if (m->count < m->size)
+		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
+	return 0;
+}
 
 static const struct seq_operations proc_pid_numa_maps_op = {
 	.start = m_start,
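
Note: show_numa_map() above emits one line per VMA: the start address ("%08lx"), the mempolicy string, an optional file=/heap/stack tag, then only the counters that are non-zero. An illustrative example of the resulting /proc/<pid>/numa_maps output (made-up values, not captured from a real system):

	00400000 default file=/bin/cat mapped=2 mapmax=3 N0=2
	00600000 default file=/bin/cat anon=1 dirty=1 N0=1
	0060a000 default heap anon=3 dirty=3 N0=3
	7fff8b600000 default stack anon=2 dirty=2 N0=2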
@@ -867,7 +1054,20 @@ static const struct seq_operations proc_pid_numa_maps_op = {
 
 static int numa_maps_open(struct inode *inode, struct file *file)
 {
-	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
+	struct numa_maps_private *priv;
+	int ret = -ENOMEM;
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (priv) {
+		priv->proc_maps.pid = proc_pid(inode);
+		ret = seq_open(file, &proc_pid_numa_maps_op);
+		if (!ret) {
+			struct seq_file *m = file->private_data;
+			m->private = priv;
+		} else {
+			kfree(priv);
+		}
+	}
+	return ret;
 }
 
 const struct file_operations proc_numa_maps_operations = {
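
Note: numa_maps_open() hands the kzalloc()ed numa_maps_private to the seq_file as m->private, so the existing .release = seq_release_private below pairs with it and kfree()s the allocation on close. A throwaway userspace reader for the file this patch generates (illustrative only; /proc/self is used just as a convenient pid):

	/* Illustrative reader for /proc/<pid>/numa_maps; not part of the patch. */
	#include <stdio.h>

	int main(void)
	{
		char line[512];
		FILE *f = fopen("/proc/self/numa_maps", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* Each fgets() returns one VMA line produced by show_numa_map(). */
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}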
@@ -876,4 +1076,4 @@ const struct file_operations proc_numa_maps_operations = {
 	.llseek = seq_lseek,
 	.release = seq_release_private,
 };
-#endif
+#endif /* CONFIG_NUMA */