diff options
author | Konstantin Khlebnikov <khlebnikov@yandex-team.ru> | 2015-09-08 18:00:04 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-09-08 18:35:28 -0400 |
commit | 356515e7b64c2629f686109d426baaf868cdf7e8 (patch) | |
tree | 369d85857cd3eb403ebb0d07125b5e38e333c3f8 /fs | |
parent | deb945441b9408d6cd15751f5232eeca9f50a5a1 (diff) |
pagemap: rework hugetlb and thp report
This patch moves pmd dissection out of the reporting loop: huge pages are
reported as a bunch of normal pages with contiguous PFNs.
Add the missing "FILE" bit for hugetlb VMAs.
Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Mark Williamson <mwilliamson@undo-software.com>
Tested-by: Mark Williamson <mwilliamson@undo-software.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/proc/task_mmu.c | 100 |
1 files changed, 44 insertions, 56 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 41c0a0a500f7..98ba9ea96b19 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -1040,33 +1040,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, | |||
1040 | return make_pme(frame, flags); | 1040 | return make_pme(frame, flags); |
1041 | } | 1041 | } |
1042 | 1042 | ||
1043 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 1043 | static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, |
1044 | static pagemap_entry_t thp_pmd_to_pagemap_entry(struct pagemapread *pm, | ||
1045 | pmd_t pmd, int offset, u64 flags) | ||
1046 | { | ||
1047 | u64 frame = 0; | ||
1048 | |||
1049 | /* | ||
1050 | * Currently pmd for thp is always present because thp can not be | ||
1051 | * swapped-out, migrated, or HWPOISONed (split in such cases instead.) | ||
1052 | * This if-check is just to prepare for future implementation. | ||
1053 | */ | ||
1054 | if (pmd_present(pmd)) { | ||
1055 | frame = pmd_pfn(pmd) + offset; | ||
1056 | flags |= PM_PRESENT; | ||
1057 | } | ||
1058 | |||
1059 | return make_pme(frame, flags); | ||
1060 | } | ||
1061 | #else | ||
1062 | static pagemap_entry_t thp_pmd_to_pagemap_entry(struct pagemapread *pm, | ||
1063 | pmd_t pmd, int offset, u64 flags) | ||
1064 | { | ||
1065 | return make_pme(0, 0); | ||
1066 | } | ||
1067 | #endif | ||
1068 | |||
1069 | static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | ||
1070 | struct mm_walk *walk) | 1044 | struct mm_walk *walk) |
1071 | { | 1045 | { |
1072 | struct vm_area_struct *vma = walk->vma; | 1046 | struct vm_area_struct *vma = walk->vma; |
@@ -1075,35 +1049,48 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | |||
1075 | pte_t *pte, *orig_pte; | 1049 | pte_t *pte, *orig_pte; |
1076 | int err = 0; | 1050 | int err = 0; |
1077 | 1051 | ||
1078 | if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { | 1052 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
1079 | u64 flags = 0; | 1053 | if (pmd_trans_huge_lock(pmdp, vma, &ptl) == 1) { |
1054 | u64 flags = 0, frame = 0; | ||
1055 | pmd_t pmd = *pmdp; | ||
1080 | 1056 | ||
1081 | if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd)) | 1057 | if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd)) |
1082 | flags |= PM_SOFT_DIRTY; | 1058 | flags |= PM_SOFT_DIRTY; |
1083 | 1059 | ||
1060 | /* | ||
1061 | * Currently pmd for thp is always present because thp | ||
1062 | * can not be swapped-out, migrated, or HWPOISONed | ||
1063 | * (split in such cases instead.) | ||
1064 | * This if-check is just to prepare for future implementation. | ||
1065 | */ | ||
1066 | if (pmd_present(pmd)) { | ||
1067 | flags |= PM_PRESENT; | ||
1068 | frame = pmd_pfn(pmd) + | ||
1069 | ((addr & ~PMD_MASK) >> PAGE_SHIFT); | ||
1070 | } | ||
1071 | |||
1084 | for (; addr != end; addr += PAGE_SIZE) { | 1072 | for (; addr != end; addr += PAGE_SIZE) { |
1085 | unsigned long offset; | 1073 | pagemap_entry_t pme = make_pme(frame, flags); |
1086 | pagemap_entry_t pme; | ||
1087 | 1074 | ||
1088 | offset = (addr & ~PAGEMAP_WALK_MASK) >> | ||
1089 | PAGE_SHIFT; | ||
1090 | pme = thp_pmd_to_pagemap_entry(pm, *pmd, offset, flags); | ||
1091 | err = add_to_pagemap(addr, &pme, pm); | 1075 | err = add_to_pagemap(addr, &pme, pm); |
1092 | if (err) | 1076 | if (err) |
1093 | break; | 1077 | break; |
1078 | if (flags & PM_PRESENT) | ||
1079 | frame++; | ||
1094 | } | 1080 | } |
1095 | spin_unlock(ptl); | 1081 | spin_unlock(ptl); |
1096 | return err; | 1082 | return err; |
1097 | } | 1083 | } |
1098 | 1084 | ||
1099 | if (pmd_trans_unstable(pmd)) | 1085 | if (pmd_trans_unstable(pmdp)) |
1100 | return 0; | 1086 | return 0; |
1087 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | ||
1101 | 1088 | ||
1102 | /* | 1089 | /* |
1103 | * We can assume that @vma always points to a valid one and @end never | 1090 | * We can assume that @vma always points to a valid one and @end never |
1104 | * goes beyond vma->vm_end. | 1091 | * goes beyond vma->vm_end. |
1105 | */ | 1092 | */ |
1106 | orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); | 1093 | orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl); |
1107 | for (; addr < end; pte++, addr += PAGE_SIZE) { | 1094 | for (; addr < end; pte++, addr += PAGE_SIZE) { |
1108 | pagemap_entry_t pme; | 1095 | pagemap_entry_t pme; |
1109 | 1096 | ||
@@ -1120,39 +1107,40 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | |||
1120 | } | 1107 | } |
1121 | 1108 | ||
1122 | #ifdef CONFIG_HUGETLB_PAGE | 1109 | #ifdef CONFIG_HUGETLB_PAGE |
1123 | static pagemap_entry_t huge_pte_to_pagemap_entry(struct pagemapread *pm, | ||
1124 | pte_t pte, int offset, u64 flags) | ||
1125 | { | ||
1126 | u64 frame = 0; | ||
1127 | |||
1128 | if (pte_present(pte)) { | ||
1129 | frame = pte_pfn(pte) + offset; | ||
1130 | flags |= PM_PRESENT; | ||
1131 | } | ||
1132 | |||
1133 | return make_pme(frame, flags); | ||
1134 | } | ||
1135 | |||
1136 | /* This function walks within one hugetlb entry in the single call */ | 1110 | /* This function walks within one hugetlb entry in the single call */ |
1137 | static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, | 1111 | static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, |
1138 | unsigned long addr, unsigned long end, | 1112 | unsigned long addr, unsigned long end, |
1139 | struct mm_walk *walk) | 1113 | struct mm_walk *walk) |
1140 | { | 1114 | { |
1141 | struct pagemapread *pm = walk->private; | 1115 | struct pagemapread *pm = walk->private; |
1142 | struct vm_area_struct *vma = walk->vma; | 1116 | struct vm_area_struct *vma = walk->vma; |
1117 | u64 flags = 0, frame = 0; | ||
1143 | int err = 0; | 1118 | int err = 0; |
1144 | u64 flags = 0; | 1119 | pte_t pte; |
1145 | pagemap_entry_t pme; | ||
1146 | 1120 | ||
1147 | if (vma->vm_flags & VM_SOFTDIRTY) | 1121 | if (vma->vm_flags & VM_SOFTDIRTY) |
1148 | flags |= PM_SOFT_DIRTY; | 1122 | flags |= PM_SOFT_DIRTY; |
1149 | 1123 | ||
1124 | pte = huge_ptep_get(ptep); | ||
1125 | if (pte_present(pte)) { | ||
1126 | struct page *page = pte_page(pte); | ||
1127 | |||
1128 | if (!PageAnon(page)) | ||
1129 | flags |= PM_FILE; | ||
1130 | |||
1131 | flags |= PM_PRESENT; | ||
1132 | frame = pte_pfn(pte) + | ||
1133 | ((addr & ~hmask) >> PAGE_SHIFT); | ||
1134 | } | ||
1135 | |||
1150 | for (; addr != end; addr += PAGE_SIZE) { | 1136 | for (; addr != end; addr += PAGE_SIZE) { |
1151 | int offset = (addr & ~hmask) >> PAGE_SHIFT; | 1137 | pagemap_entry_t pme = make_pme(frame, flags); |
1152 | pme = huge_pte_to_pagemap_entry(pm, *pte, offset, flags); | 1138 | |
1153 | err = add_to_pagemap(addr, &pme, pm); | 1139 | err = add_to_pagemap(addr, &pme, pm); |
1154 | if (err) | 1140 | if (err) |
1155 | return err; | 1141 | return err; |
1142 | if (flags & PM_PRESENT) | ||
1143 | frame++; | ||
1156 | } | 1144 | } |
1157 | 1145 | ||
1158 | cond_resched(); | 1146 | cond_resched(); |
@@ -1216,7 +1204,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, | |||
1216 | if (!pm.buffer) | 1204 | if (!pm.buffer) |
1217 | goto out_mm; | 1205 | goto out_mm; |
1218 | 1206 | ||
1219 | pagemap_walk.pmd_entry = pagemap_pte_range; | 1207 | pagemap_walk.pmd_entry = pagemap_pmd_range; |
1220 | pagemap_walk.pte_hole = pagemap_pte_hole; | 1208 | pagemap_walk.pte_hole = pagemap_pte_hole; |
1221 | #ifdef CONFIG_HUGETLB_PAGE | 1209 | #ifdef CONFIG_HUGETLB_PAGE |
1222 | pagemap_walk.hugetlb_entry = pagemap_hugetlb_range; | 1210 | pagemap_walk.hugetlb_entry = pagemap_hugetlb_range; |