Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--   mm/huge_memory.c   49
1 file changed, 35 insertions(+), 14 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0a619e0e2e0b..470dcda10add 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -244,24 +244,28 @@ static ssize_t single_flag_show(struct kobject *kobj,
 				 struct kobj_attribute *attr, char *buf,
 				 enum transparent_hugepage_flag flag)
 {
-	if (test_bit(flag, &transparent_hugepage_flags))
-		return sprintf(buf, "[yes] no\n");
-	else
-		return sprintf(buf, "yes [no]\n");
+	return sprintf(buf, "%d\n",
+		       !!test_bit(flag, &transparent_hugepage_flags));
 }
+
 static ssize_t single_flag_store(struct kobject *kobj,
 				 struct kobj_attribute *attr,
 				 const char *buf, size_t count,
 				 enum transparent_hugepage_flag flag)
 {
-	if (!memcmp("yes", buf,
-		    min(sizeof("yes")-1, count))) {
+	unsigned long value;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &value);
+	if (ret < 0)
+		return ret;
+	if (value > 1)
+		return -EINVAL;
+
+	if (value)
 		set_bit(flag, &transparent_hugepage_flags);
-	} else if (!memcmp("no", buf,
-		    min(sizeof("no")-1, count))) {
+	else
 		clear_bit(flag, &transparent_hugepage_flags);
-	} else
-		return -EINVAL;
 
 	return count;
 }
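For context on the new store format: after this hunk, the boolean THP sysfs attributes served by single_flag_show()/single_flag_store() report and accept "0"/"1" (writes go through kstrtoul() and anything other than 0 or 1 is rejected with -EINVAL) instead of the old "[yes] no" strings. A minimal userspace sketch follows; the attribute path used here is an assumption (khugepaged/defrag is one attribute backed by these helpers on kernels of this vintage), not something shown in this diff.

/*
 * Sketch only. ATTR is assumed to be one of the boolean THP sysfs files
 * handled by single_flag_show()/single_flag_store(); adjust for your kernel.
 */
#include <stdio.h>

#define ATTR "/sys/kernel/mm/transparent_hugepage/khugepaged/defrag"

int main(void)
{
	char buf[8] = "";
	FILE *f;

	/* Enable the flag using the new numeric format ("1" instead of "yes"). */
	f = fopen(ATTR, "w");
	if (!f || fputs("1\n", f) == EOF) {
		perror(ATTR);
		return 1;
	}
	fclose(f);

	/* Read it back: single_flag_show() now prints "1\n" or "0\n". */
	f = fopen(ATTR, "r");
	if (!f || !fgets(buf, sizeof(buf), f)) {
		perror(ATTR);
		return 1;
	}
	fclose(f);

	printf("%s = %s", ATTR, buf);
	return 0;
}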
@@ -680,8 +684,11 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			return VM_FAULT_OOM;
 		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
 					  vma, haddr, numa_node_id(), 0);
-		if (unlikely(!page))
+		if (unlikely(!page)) {
+			count_vm_event(THP_FAULT_FALLBACK);
 			goto out;
+		}
+		count_vm_event(THP_FAULT_ALLOC);
 		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
 			put_page(page);
 			goto out;
@@ -909,11 +916,13 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		new_page = NULL;
 
 	if (unlikely(!new_page)) {
+		count_vm_event(THP_FAULT_FALLBACK);
 		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
 						   pmd, orig_pmd, page, haddr);
 		put_page(page);
 		goto out;
 	}
+	count_vm_event(THP_FAULT_ALLOC);
 
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		put_page(new_page);
@@ -1390,6 +1399,7 @@ int split_huge_page(struct page *page)
 
 	BUG_ON(!PageSwapBacked(page));
 	__split_huge_page(page, anon_vma);
+	count_vm_event(THP_SPLIT);
 
 	BUG_ON(PageCompound(page));
 out_unlock:
@@ -1784,9 +1794,11 @@ static void collapse_huge_page(struct mm_struct *mm,
 				      node, __GFP_OTHER_NODE);
 	if (unlikely(!new_page)) {
 		up_read(&mm->mmap_sem);
+		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
 		return;
 	}
+	count_vm_event(THP_COLLAPSE_ALLOC);
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		up_read(&mm->mmap_sem);
 		put_page(new_page);
@@ -2151,8 +2163,11 @@ static void khugepaged_do_scan(struct page **hpage)
 #ifndef CONFIG_NUMA
 		if (!*hpage) {
 			*hpage = alloc_hugepage(khugepaged_defrag());
-			if (unlikely(!*hpage))
+			if (unlikely(!*hpage)) {
+				count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 				break;
+			}
+			count_vm_event(THP_COLLAPSE_ALLOC);
 		}
 #else
 		if (IS_ERR(*hpage))
@@ -2192,8 +2207,11 @@ static struct page *khugepaged_alloc_hugepage(void)
 
 	do {
 		hpage = alloc_hugepage(khugepaged_defrag());
-		if (!hpage)
+		if (!hpage) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 			khugepaged_alloc_sleep();
+		} else
+			count_vm_event(THP_COLLAPSE_ALLOC);
 	} while (unlikely(!hpage) &&
 		 likely(khugepaged_enabled()));
 	return hpage;
@@ -2210,8 +2228,11 @@ static void khugepaged_loop(void)
 	while (likely(khugepaged_enabled())) {
 #ifndef CONFIG_NUMA
 		hpage = khugepaged_alloc_hugepage();
-		if (unlikely(!hpage)) {
+		if (unlikely(!hpage)) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 			break;
+		}
+		count_vm_event(THP_COLLAPSE_ALLOC);
 #else
 		if (IS_ERR(hpage)) {
 			khugepaged_alloc_sleep();
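The events counted above (THP_FAULT_ALLOC, THP_FAULT_FALLBACK, THP_COLLAPSE_ALLOC, THP_COLLAPSE_ALLOC_FAILED, THP_SPLIT) are not defined in this file; presumably the rest of the same commit adds them to enum vm_event_item in include/linux/vmstat.h and names them in mm/vmstat.c, which this view does not show because the diff is limited to mm/huge_memory.c. Assuming that, the counters surface as thp_* lines in /proc/vmstat, and a small sketch for reading them looks like this:

/*
 * Hedged sketch: prints the thp_* counters (e.g. thp_fault_alloc,
 * thp_collapse_alloc_failed) from /proc/vmstat, assuming the counterpart
 * vmstat changes of this commit are present in the running kernel.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char line[128];

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}

	/* Print every counter whose name starts with "thp_". */
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "thp_", 4))
			fputs(line, stdout);

	fclose(f);
	return 0;
}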
