author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2012-12-12 16:51:12 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>		2012-12-12 20:38:32 -0500
commit		79da5407eeadc740fbf4b45d6df7d7f8e6adaf2c (patch)
tree		a9f1ca92b2711bb84a4707c904fcada24614d60e /mm/huge_memory.c
parent		d8a8e1f0da3d29d7268b3300c96a059d63901b76 (diff)
thp: introduce sysfs knob to disable huge zero page
By default the kernel tries to use the huge zero page on read page
faults. The huge zero page can be disabled by writing 0 to the new
sysfs knob, or enabled again by writing 1 (the attribute is added to
hugepage_attr[], so the file lives directly under transparent_hugepage/,
not under the khugepaged/ subdirectory):

echo 0 >/sys/kernel/mm/transparent_hugepage/use_zero_page
echo 1 >/sys/kernel/mm/transparent_hugepage/use_zero_page
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@linux.intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c	21
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9a5d45dfad44..72835cea0b0f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -39,7 +39,8 @@ unsigned long transparent_hugepage_flags __read_mostly =
 	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
 #endif
 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
-	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
+	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
+	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
 
 /* default scan 8*512 pte (or vmas) every 30 second */
 static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
@@ -357,6 +358,20 @@ static ssize_t defrag_store(struct kobject *kobj,
 static struct kobj_attribute defrag_attr =
 	__ATTR(defrag, 0644, defrag_show, defrag_store);
 
+static ssize_t use_zero_page_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return single_flag_show(kobj, attr, buf,
+				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
+}
+static ssize_t use_zero_page_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	return single_flag_store(kobj, attr, buf, count,
+				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
+}
+static struct kobj_attribute use_zero_page_attr =
+	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
 #ifdef CONFIG_DEBUG_VM
 static ssize_t debug_cow_show(struct kobject *kobj,
 				struct kobj_attribute *attr, char *buf)
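
The new handlers delegate to the single_flag_show()/single_flag_store()
helpers that already back the other boolean knobs in mm/huge_memory.c;
they are not part of this diff. As a rough paraphrase of those helpers
for reference (exact error handling may differ by kernel version):

/* Paraphrase of existing helpers in mm/huge_memory.c (not in this diff). */
static ssize_t single_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	/* Report the flag as "1\n" or "0\n". */
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

static ssize_t single_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	/* Accept only "0" or "1"; anything else is rejected. */
	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}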
@@ -378,6 +393,7 @@ static struct kobj_attribute debug_cow_attr =
 static struct attribute *hugepage_attr[] = {
 	&enabled_attr.attr,
 	&defrag_attr.attr,
+	&use_zero_page_attr.attr,
 #ifdef CONFIG_DEBUG_VM
 	&debug_cow_attr.attr,
 #endif
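
hugepage_attr[] is the attribute array for the transparent_hugepage
directory itself, which is why the new file appears at
/sys/kernel/mm/transparent_hugepage/use_zero_page. A sketch of the
existing registration code elsewhere in mm/huge_memory.c (not part of
this diff; exact init flow varies by version):

/* Sketch: wiring hugepage_attr[] into sysfs. */
static struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

/* During init, roughly: */
hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group);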
@@ -779,7 +795,8 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 	if (unlikely(khugepaged_enter(vma)))
 		return VM_FAULT_OOM;
-	if (!(flags & FAULT_FLAG_WRITE)) {
+	if (!(flags & FAULT_FLAG_WRITE) &&
+			transparent_hugepage_use_zero_page()) {
 		pgtable_t pgtable;
 		unsigned long zero_pfn;
 		pgtable = pte_alloc_one(mm, haddr);
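
The transparent_hugepage_use_zero_page() test added to the fault path
is defined in include/linux/huge_mm.h, outside this diff (which is
limited to mm/huge_memory.c). It is roughly:

/* Sketch of the helper in include/linux/huge_mm.h (not in this diff):
 * tests the new bit in transparent_hugepage_flags. */
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

Because the store handler only flips a bit in
transparent_hugepage_flags, toggling the knob affects subsequent read
faults; mappings that already received the huge zero page are left
untouched.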