author	David Rientjes <rientjes@google.com>	2013-09-12 18:14:06 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-12 18:38:03 -0400
commit	17766dde364813568e4f876517c72bab70838646 (patch)
tree	22da9c5e44b3b55145986603ad7804fd5bacfaec /mm
parent	c02925540ca7019465a43c00f8a3c0186ddace2b (diff)
mm, thp: count thp_fault_fallback anytime thp fault fails
Currently, thp_fault_fallback in vmstat only gets incremented if a hugepage allocation fails. If current's memcg hits its limit or the page fault handler returns an error, it is incorrectly accounted as a successful thp_fault_alloc.

Count thp_fault_fallback anytime the page fault handler falls back to using regular pages and only count thp_fault_alloc when a hugepage has actually been faulted.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
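The logic of the fix is easy to state: bump THP_FAULT_FALLBACK at every point where the handler gives up on a hugepage, and bump THP_FAULT_ALLOC only after the last failure point has been passed, so the two events are mutually exclusive for a given fault. A minimal userspace sketch of that pattern follows; the failure predicates and counter variables are illustrative stand-ins, not the kernel's API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the vmstat counters this patch corrects. */
static unsigned long thp_fault_alloc;
static unsigned long thp_fault_fallback;

/*
 * Hypothetical failure predicates modelling the three ways the THP
 * fault can fail: hugepage allocation, memcg charge, and the final
 * fault setup. They are illustrative only.
 */
static bool hugepage_alloc_fails(void) { return false; }
static bool memcg_charge_fails(void)   { return true; }  /* e.g. memcg at limit */
static bool fault_setup_fails(void)    { return false; }

/*
 * The pattern after the patch: every path that falls back to regular
 * pages bumps the fallback counter where the failure happens, and the
 * success counter is bumped only once no path can fail any more.
 */
static int fault_thp(void)
{
	if (hugepage_alloc_fails()) {
		thp_fault_fallback++;
		return -1;		/* caller retries with regular pages */
	}
	if (memcg_charge_fails()) {
		thp_fault_fallback++;	/* previously miscounted as an alloc */
		return -1;
	}
	if (fault_setup_fails()) {
		thp_fault_fallback++;
		return -1;
	}
	thp_fault_alloc++;		/* a hugepage was actually faulted in */
	return 0;
}

int main(void)
{
	fault_thp();
	printf("thp_fault_alloc=%lu thp_fault_fallback=%lu\n",
	       thp_fault_alloc, thp_fault_fallback);
	return 0;			/* prints alloc=0 fallback=1 here */
}

Counting the success only after the last failure point, rather than optimistically up front, is what the diff below implements in do_huge_pmd_anonymous_page() and the write-protect fault path.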
Diffstat (limited to 'mm')
-rw-r--r--	mm/huge_memory.c	10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 243f4cc75777..f60c4ebaa30c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -820,17 +820,19 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
-	count_vm_event(THP_FAULT_ALLOC);
 	if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
 		put_page(page);
+		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
 	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
 		mem_cgroup_uncharge_page(page);
 		put_page(page);
+		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
 
+	count_vm_event(THP_FAULT_ALLOC);
 	return 0;
 }
 
@@ -1143,7 +1145,6 @@ alloc:
 		new_page = NULL;
 
 	if (unlikely(!new_page)) {
-		count_vm_event(THP_FAULT_FALLBACK);
 		if (is_huge_zero_pmd(orig_pmd)) {
 			ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
 					address, pmd, orig_pmd, haddr);
@@ -1154,9 +1155,9 @@ alloc:
 			split_huge_page(page);
 			put_page(page);
 		}
+		count_vm_event(THP_FAULT_FALLBACK);
 		goto out;
 	}
-	count_vm_event(THP_FAULT_ALLOC);
 
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		put_page(new_page);
@@ -1164,10 +1165,13 @@ alloc:
 			split_huge_page(page);
 			put_page(page);
 		}
+		count_vm_event(THP_FAULT_FALLBACK);
 		ret |= VM_FAULT_OOM;
 		goto out;
 	}
 
+	count_vm_event(THP_FAULT_ALLOC);
+
 	if (is_huge_zero_pmd(orig_pmd))
 		clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
 	else
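With this in place, for the fault paths touched above, every fault that attempts a hugepage increments exactly one of the two events, so the thp_fault_alloc and thp_fault_fallback counters exposed in /proc/vmstat (e.g. via `grep thp_fault /proc/vmstat`) can be compared directly to judge how often THP faults actually succeed.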