path: root/mm/huge_memory.c
author	Andi Kleen <ak@linux.intel.com>	2011-04-14 18:22:06 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-04-14 19:06:55 -0400
commit	81ab4201fb7d91d6b0cd9ad5b4b16776e4bed145 (patch)
tree	92f57a83fa43ac8f742ddb227a397028ab022afc /mm/huge_memory.c
parent	c897401bac2b099dd2ff673a9afe7193723d253c (diff)
mm: add VM counters for transparent hugepages
I found it difficult to make sense of transparent huge pages without
having any counters for its actions.  Add some counters to vmstat for
allocation of transparent hugepages and fallback to smaller pages.

Optional patch, but useful for development and understanding the system.

Contains improvements from Andrea Arcangeli and Johannes Weiner

[akpm@linux-foundation.org: coding-style fixes]
[hannes@cmpxchg.org: fix vmstat_text[] entries]
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c	25
1 files changed, 21 insertions, 4 deletions
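The diffstat above is limited to mm/huge_memory.c, so only the new count_vm_event() call sites appear in the hunks below; the counters themselves are declared on the vmstat side of the patch. As a rough, hypothetical sketch of what that side of such a change usually looks like (the event names are taken from the calls in this patch; the file placement, config guard, and exact strings are assumptions, not a quote of the author's patch):

/* include/linux/vmstat.h -- hypothetical excerpt.  The THP events are
 * assumed to sit near the end of enum vm_event_item, guarded by the same
 * CONFIG_TRANSPARENT_HUGEPAGE option that builds mm/huge_memory.c. */
enum vm_event_item {
	/* ... existing events ... */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	THP_FAULT_ALLOC,
	THP_FAULT_FALLBACK,
	THP_COLLAPSE_ALLOC,
	THP_COLLAPSE_ALLOC_FAILED,
	THP_SPLIT,
#endif
	NR_VM_EVENT_ITEMS
};

/* mm/vmstat.c -- hypothetical excerpt.  Matching vmstat_text[] strings
 * (the entries the hannes@cmpxchg.org fix-up above refers to) are what
 * make the counters show up as lines in /proc/vmstat. */
static const char * const vmstat_text[] = {
	/* ... existing entries ... */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"thp_fault_alloc",
	"thp_fault_fallback",
	"thp_collapse_alloc",
	"thp_collapse_alloc_failed",
	"thp_split",
#endif
};

With entries like these in place, the counters can be read at run time from /proc/vmstat; for example, a thp_fault_fallback line would count huge-page faults that fell back to smaller pages, which is the "useful for development and understanding the system" part of the changelog.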
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0a619e0e2e0b..1722683bde23 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -680,8 +680,11 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			return VM_FAULT_OOM;
 		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
 					  vma, haddr, numa_node_id(), 0);
-		if (unlikely(!page))
+		if (unlikely(!page)) {
+			count_vm_event(THP_FAULT_FALLBACK);
 			goto out;
+		}
+		count_vm_event(THP_FAULT_ALLOC);
 		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
 			put_page(page);
 			goto out;
@@ -909,11 +912,13 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		new_page = NULL;
 
 	if (unlikely(!new_page)) {
+		count_vm_event(THP_FAULT_FALLBACK);
 		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
 						   pmd, orig_pmd, page, haddr);
 		put_page(page);
 		goto out;
 	}
+	count_vm_event(THP_FAULT_ALLOC);
 
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		put_page(new_page);
@@ -1390,6 +1395,7 @@ int split_huge_page(struct page *page)
 
 	BUG_ON(!PageSwapBacked(page));
 	__split_huge_page(page, anon_vma);
+	count_vm_event(THP_SPLIT);
 
 	BUG_ON(PageCompound(page));
 out_unlock:
@@ -1784,9 +1790,11 @@ static void collapse_huge_page(struct mm_struct *mm,
 				      node, __GFP_OTHER_NODE);
 	if (unlikely(!new_page)) {
 		up_read(&mm->mmap_sem);
+		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
 		return;
 	}
+	count_vm_event(THP_COLLAPSE_ALLOC);
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		up_read(&mm->mmap_sem);
 		put_page(new_page);
@@ -2151,8 +2159,11 @@ static void khugepaged_do_scan(struct page **hpage)
 #ifndef CONFIG_NUMA
 		if (!*hpage) {
 			*hpage = alloc_hugepage(khugepaged_defrag());
-			if (unlikely(!*hpage))
+			if (unlikely(!*hpage)) {
+				count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 				break;
+			}
+			count_vm_event(THP_COLLAPSE_ALLOC);
 		}
 #else
 		if (IS_ERR(*hpage))
@@ -2192,8 +2203,11 @@ static struct page *khugepaged_alloc_hugepage(void)
 
 	do {
 		hpage = alloc_hugepage(khugepaged_defrag());
-		if (!hpage)
+		if (!hpage) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 			khugepaged_alloc_sleep();
+		} else
+			count_vm_event(THP_COLLAPSE_ALLOC);
 	} while (unlikely(!hpage) &&
 		 likely(khugepaged_enabled()));
 	return hpage;
@@ -2210,8 +2224,11 @@ static void khugepaged_loop(void)
 	while (likely(khugepaged_enabled())) {
 #ifndef CONFIG_NUMA
 		hpage = khugepaged_alloc_hugepage();
-		if (unlikely(!hpage))
+		if (unlikely(!hpage)) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 			break;
+		}
+		count_vm_event(THP_COLLAPSE_ALLOC);
 #else
 		if (IS_ERR(hpage)) {
 			khugepaged_alloc_sleep();