Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c	69
1 file changed, 46 insertions(+), 23 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 113e35c47502..470dcda10add 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -244,24 +244,28 @@ static ssize_t single_flag_show(struct kobject *kobj,
 				struct kobj_attribute *attr, char *buf,
 				enum transparent_hugepage_flag flag)
 {
-	if (test_bit(flag, &transparent_hugepage_flags))
-		return sprintf(buf, "[yes] no\n");
-	else
-		return sprintf(buf, "yes [no]\n");
+	return sprintf(buf, "%d\n",
+		       !!test_bit(flag, &transparent_hugepage_flags));
 }
+
 static ssize_t single_flag_store(struct kobject *kobj,
 				 struct kobj_attribute *attr,
 				 const char *buf, size_t count,
 				 enum transparent_hugepage_flag flag)
 {
-	if (!memcmp("yes", buf,
-		    min(sizeof("yes")-1, count))) {
+	unsigned long value;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &value);
+	if (ret < 0)
+		return ret;
+	if (value > 1)
+		return -EINVAL;
+
+	if (value)
 		set_bit(flag, &transparent_hugepage_flags);
-	} else if (!memcmp("no", buf,
-		   min(sizeof("no")-1, count))) {
+	else
 		clear_bit(flag, &transparent_hugepage_flags);
-	} else
-		return -EINVAL;
 
 	return count;
 }
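Note: the hunk above converts the single-flag sysfs files from the bracketed "[yes] no" format to a plain boolean, shown as "0"/"1" and parsed with kstrtoul() on store. A minimal userspace sketch of the new round-trip, assuming khugepaged's defrag flag is one of the files backed by these helpers (the path below is illustrative, not taken from this diff):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Assumed single-flag file; the actual files depend on the kernel. */
	const char *path =
		"/sys/kernel/mm/transparent_hugepage/khugepaged/defrag";
	FILE *f = fopen(path, "r+");
	int value;

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%d", &value) == 1)	/* new show format: "0\n" or "1\n" */
		printf("defrag flag: %d\n", value);
	rewind(f);
	fputs("1\n", f);	/* store now rejects anything but 0 or 1 with -EINVAL */
	fclose(f);
	return 0;
}

Note that the old memcmp() against min(sizeof("yes")-1, count) accepted any input beginning with "yes" or "no"; the integer parse is strict.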
@@ -643,23 +647,24 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 	return ret;
 }
 
-static inline gfp_t alloc_hugepage_gfpmask(int defrag)
+static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 {
-	return GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT);
+	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
 }
 
 static inline struct page *alloc_hugepage_vma(int defrag,
 					      struct vm_area_struct *vma,
-					      unsigned long haddr, int nd)
+					      unsigned long haddr, int nd,
+					      gfp_t extra_gfp)
 {
-	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
+	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
 			       HPAGE_PMD_ORDER, vma, haddr, nd);
 }
 
 #ifndef CONFIG_NUMA
 static inline struct page *alloc_hugepage(int defrag)
 {
-	return alloc_pages(alloc_hugepage_gfpmask(defrag),
+	return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
 			   HPAGE_PMD_ORDER);
 }
 #endif
@@ -678,9 +683,12 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (unlikely(khugepaged_enter(vma)))
 			return VM_FAULT_OOM;
 		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-					  vma, haddr, numa_node_id());
-		if (unlikely(!page))
+					  vma, haddr, numa_node_id(), 0);
+		if (unlikely(!page)) {
+			count_vm_event(THP_FAULT_FALLBACK);
 			goto out;
+		}
+		count_vm_event(THP_FAULT_ALLOC);
 		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
 			put_page(page);
 			goto out;
@@ -799,7 +807,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 	}
 
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
-		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE,
+		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
+					       __GFP_OTHER_NODE,
 					       vma, address, page_to_nid(page));
 		if (unlikely(!pages[i] ||
 			     mem_cgroup_newpage_charge(pages[i], mm,
@@ -902,16 +911,18 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (transparent_hugepage_enabled(vma) &&
 	    !transparent_hugepage_debug_cow())
 		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-					      vma, haddr, numa_node_id());
+					      vma, haddr, numa_node_id(), 0);
 	else
 		new_page = NULL;
 
 	if (unlikely(!new_page)) {
+		count_vm_event(THP_FAULT_FALLBACK);
 		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
 						   pmd, orig_pmd, page, haddr);
 		put_page(page);
 		goto out;
 	}
+	count_vm_event(THP_FAULT_ALLOC);
 
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		put_page(new_page);
@@ -1388,6 +1399,7 @@ int split_huge_page(struct page *page)
 
 	BUG_ON(!PageSwapBacked(page));
 	__split_huge_page(page, anon_vma);
+	count_vm_event(THP_SPLIT);
 
 	BUG_ON(PageCompound(page));
 out_unlock:
@@ -1779,12 +1791,14 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * scalability.
 	 */
 	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
-				      node);
+				      node, __GFP_OTHER_NODE);
 	if (unlikely(!new_page)) {
 		up_read(&mm->mmap_sem);
+		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
 		return;
 	}
+	count_vm_event(THP_COLLAPSE_ALLOC);
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		up_read(&mm->mmap_sem);
 		put_page(new_page);
@@ -2149,8 +2163,11 @@ static void khugepaged_do_scan(struct page **hpage)
 #ifndef CONFIG_NUMA
 		if (!*hpage) {
 			*hpage = alloc_hugepage(khugepaged_defrag());
-			if (unlikely(!*hpage))
+			if (unlikely(!*hpage)) {
+				count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 				break;
+			}
+			count_vm_event(THP_COLLAPSE_ALLOC);
 		}
 #else
 		if (IS_ERR(*hpage))
@@ -2190,8 +2207,11 @@ static struct page *khugepaged_alloc_hugepage(void)
 
 	do {
 		hpage = alloc_hugepage(khugepaged_defrag());
-		if (!hpage)
+		if (!hpage) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 			khugepaged_alloc_sleep();
+		} else
+			count_vm_event(THP_COLLAPSE_ALLOC);
 	} while (unlikely(!hpage) &&
 		 likely(khugepaged_enabled()));
 	return hpage;
@@ -2208,8 +2228,11 @@ static void khugepaged_loop(void)
 	while (likely(khugepaged_enabled())) {
 #ifndef CONFIG_NUMA
 		hpage = khugepaged_alloc_hugepage();
-		if (unlikely(!hpage))
+		if (unlikely(!hpage)) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 			break;
+		}
+		count_vm_event(THP_COLLAPSE_ALLOC);
 #else
 		if (IS_ERR(hpage)) {
 			khugepaged_alloc_sleep();
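Note: count_vm_event() increments per-CPU vm_event counters that the kernel exports via /proc/vmstat. Assuming the THP_* events introduced here appear under the usual lowercase names (thp_fault_alloc, thp_fault_fallback, thp_collapse_alloc, thp_collapse_alloc_failed, thp_split; inferred, not shown in this diff), a trivial reader can watch them:

#include <stdio.h>
#include <string.h>

/* Print every THP counter exposed in /proc/vmstat. */
int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char name[64];
	unsigned long long val;

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fscanf(f, "%63s %llu", name, &val) == 2) {
		if (!strncmp(name, "thp_", 4))
			printf("%-28s %llu\n", name, val);
	}
	fclose(f);
	return 0;
}

Comparing thp_fault_alloc against thp_fault_fallback gives the huge-page fault success rate that this instrumentation is meant to expose.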