-rw-r--r--   include/linux/vmstat.h |  7
-rw-r--r--   mm/huge_memory.c       | 25
-rw-r--r--   mm/vmstat.c            |  9
3 files changed, 37 insertions, 4 deletions
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 461c0119664f..2b3831b58aa4 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -58,6 +58,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
 		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
 		UNEVICTABLE_MLOCKFREED,
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		THP_FAULT_ALLOC,
+		THP_FAULT_FALLBACK,
+		THP_COLLAPSE_ALLOC,
+		THP_COLLAPSE_ALLOC_FAILED,
+		THP_SPLIT,
+#endif
 		NR_VM_EVENT_ITEMS
 };
 
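Aside for readers outside the tree: each enum entry above is an index into the kernel's vm_event counter array, sized by NR_VM_EVENT_ITEMS, and count_vm_event() bumps the slot for the current CPU. A minimal userspace sketch of that enum-indexed counter idea, with hypothetical names and a single global array rather than the kernel's per-CPU state:

#include <stdio.h>

/* Toy model of the vm_event scheme: the enum names the events,
 * the final entry sizes the counter array. */
enum thp_event { THP_FAULT_ALLOC, THP_FAULT_FALLBACK, THP_SPLIT, NR_THP_EVENTS };

static unsigned long events[NR_THP_EVENTS];	/* kernel keeps these per-CPU */

static void count_event(enum thp_event e)
{
	events[e]++;	/* count_vm_event() does a per-CPU increment instead */
}

int main(void)
{
	count_event(THP_FAULT_ALLOC);
	count_event(THP_FAULT_ALLOC);
	count_event(THP_SPLIT);
	printf("thp_fault_alloc %lu\n", events[THP_FAULT_ALLOC]);
	printf("thp_split %lu\n", events[THP_SPLIT]);
	return 0;
}

Keeping the counters per-CPU in the real kernel avoids cache-line bouncing on hot paths; the values are only summed when userspace reads them back.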
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0a619e0e2e0b..1722683bde23 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -680,8 +680,11 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			return VM_FAULT_OOM;
 		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
 					  vma, haddr, numa_node_id(), 0);
-		if (unlikely(!page))
+		if (unlikely(!page)) {
+			count_vm_event(THP_FAULT_FALLBACK);
 			goto out;
+		}
+		count_vm_event(THP_FAULT_ALLOC);
 		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
 			put_page(page);
 			goto out;
@@ -909,11 +912,13 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		new_page = NULL;
 
 	if (unlikely(!new_page)) {
+		count_vm_event(THP_FAULT_FALLBACK);
 		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
 						   pmd, orig_pmd, page, haddr);
 		put_page(page);
 		goto out;
 	}
+	count_vm_event(THP_FAULT_ALLOC);
 
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		put_page(new_page);
@@ -1390,6 +1395,7 @@ int split_huge_page(struct page *page)
 
 	BUG_ON(!PageSwapBacked(page));
 	__split_huge_page(page, anon_vma);
+	count_vm_event(THP_SPLIT);
 
 	BUG_ON(PageCompound(page));
 out_unlock:
@@ -1784,9 +1790,11 @@ static void collapse_huge_page(struct mm_struct *mm,
 				      node, __GFP_OTHER_NODE);
 	if (unlikely(!new_page)) {
 		up_read(&mm->mmap_sem);
+		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
 		return;
 	}
+	count_vm_event(THP_COLLAPSE_ALLOC);
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		up_read(&mm->mmap_sem);
 		put_page(new_page);
@@ -2151,8 +2159,11 @@ static void khugepaged_do_scan(struct page **hpage)
 #ifndef CONFIG_NUMA
 		if (!*hpage) {
 			*hpage = alloc_hugepage(khugepaged_defrag());
-			if (unlikely(!*hpage))
+			if (unlikely(!*hpage)) {
+				count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 				break;
+			}
+			count_vm_event(THP_COLLAPSE_ALLOC);
 		}
 #else
 		if (IS_ERR(*hpage))
@@ -2192,8 +2203,11 @@ static struct page *khugepaged_alloc_hugepage(void)
 
 	do {
 		hpage = alloc_hugepage(khugepaged_defrag());
-		if (!hpage)
+		if (!hpage) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 			khugepaged_alloc_sleep();
+		} else
+			count_vm_event(THP_COLLAPSE_ALLOC);
 	} while (unlikely(!hpage) &&
 		 likely(khugepaged_enabled()));
 	return hpage;
@@ -2210,8 +2224,11 @@ static void khugepaged_loop(void)
 	while (likely(khugepaged_enabled())) {
 #ifndef CONFIG_NUMA
 		hpage = khugepaged_alloc_hugepage();
-		if (unlikely(!hpage))
+		if (unlikely(!hpage)) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 			break;
+		}
+		count_vm_event(THP_COLLAPSE_ALLOC);
 #else
 		if (IS_ERR(hpage)) {
 			khugepaged_alloc_sleep();
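Every hunk in this file follows the same accounting pattern: attempt the huge-page allocation, count THP_FAULT_FALLBACK or THP_COLLAPSE_ALLOC_FAILED on the failure path, and count THP_FAULT_ALLOC or THP_COLLAPSE_ALLOC on success. A self-contained toy version of the pattern, where alloc_hugepage_maybe() is a hypothetical stand-in for alloc_hugepage_vma()/alloc_hugepage():

#include <stdio.h>
#include <stdlib.h>

static unsigned long thp_fault_alloc, thp_fault_fallback;

/* Hypothetical stand-in for the kernel allocators: huge-page
 * allocation can fail under fragmentation, modeled here randomly. */
static void *alloc_hugepage_maybe(void)
{
	return (rand() % 4) ? malloc(2 * 1024 * 1024) : NULL;
}

int main(void)
{
	for (int i = 0; i < 100; i++) {
		void *page = alloc_hugepage_maybe();
		if (!page) {
			thp_fault_fallback++;	/* caller falls back to 4K pages */
			continue;
		}
		thp_fault_alloc++;		/* huge page successfully allocated */
		free(page);
	}
	printf("thp_fault_alloc %lu\nthp_fault_fallback %lu\n",
	       thp_fault_alloc, thp_fault_fallback);
	return 0;
}

Counting both outcomes is what makes the ratio useful: a high fallback-to-alloc ratio points at fragmentation or defrag settings rather than THP being disabled.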
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8cb0f0a703e5..897ea9e88238 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -948,7 +948,16 @@ static const char * const vmstat_text[] = {
948 "unevictable_pgs_cleared", 948 "unevictable_pgs_cleared",
949 "unevictable_pgs_stranded", 949 "unevictable_pgs_stranded",
950 "unevictable_pgs_mlockfreed", 950 "unevictable_pgs_mlockfreed",
951
952#ifdef CONFIG_TRANSPARENT_HUGEPAGE
953 "thp_fault_alloc",
954 "thp_fault_fallback",
955 "thp_collapse_alloc",
956 "thp_collapse_alloc_failed",
957 "thp_split",
951#endif 958#endif
959
960#endif /* CONFIG_VM_EVENTS_COUNTERS */
952}; 961};
953 962
954static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, 963static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
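With the patch applied, the strings added above appear as new lines in /proc/vmstat on kernels built with CONFIG_TRANSPARENT_HUGEPAGE. A minimal sketch of a reader that filters them out (standard C, nothing kernel-specific):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	/* Print only the transparent-hugepage counters added by this patch. */
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "thp_", 4) == 0)
			fputs(line, stdout);	/* e.g. "thp_fault_alloc 42" */
	fclose(f);
	return 0;
}

The same output is available interactively with grep thp_ /proc/vmstat; the names come straight from the vmstat_text[] entries above, so they must stay in the same order as the enum in vmstat.h.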