| author | Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | 2015-02-11 18:27:12 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-02-11 20:06:04 -0500 |
| commit | 077fcf116c8c2bd7ee9487b645aa3b50368db7e1 (patch) | |
| tree | 29e2513e00bcc29395a19c696a6d14f52e3c5b1d /mm/huge_memory.c | |
| parent | 24e2716f63e613cf15d3beba3faa0711bcacc427 (diff) | |
mm/thp: allocate transparent hugepages on local node
This makes sure that we try to allocate hugepages from the local node if
allowed by the mempolicy. If we can't, we fall back to small page allocation
based on the mempolicy. This is based on the observation that allocating
pages on the local node is more beneficial than allocating hugepages on a
remote node.

With this patch applied we may see transparent huge page allocation
failures if the current node doesn't have enough free hugepages. Before
this patch, such failures resulted in the allocation being retried on the
other nodes in the NUMA node mask.
[akpm@linux-foundation.org: fix comment, add CONFIG_TRANSPARENT_HUGEPAGE dependency]
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r-- | mm/huge_memory.c | 24 |
1 file changed, 9 insertions(+), 15 deletions(-)
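The hunks below drop the file-local alloc_hugepage_vma() helper and instead build the gfp mask with alloc_hugepage_gfpmask() at each call site, passing it together with HPAGE_PMD_ORDER to a replacement alloc_hugepage_vma() that lives outside this file. That replacement is not part of the mm/huge_memory.c diffstat; as a rough sketch of the shape the new call sites assume (an illustration, not code taken from this diff), it pins the request to the faulting CPU's node:

/*
 * Illustrative sketch only -- not part of this per-file diff.  The call
 * sites below expect a helper with this shape, presumably provided
 * elsewhere in the series (e.g. include/linux/gfp.h); the trailing
 * "hugepage" hint passed to alloc_pages_vma() is an assumption about
 * that plumbing.
 */
#define alloc_hugepage_vma(gfp_mask, vma, addr, order)			\
	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)

With mempolicy aware of the hugepage hint, the allocator can restrict the huge page attempt to the local node and leave cross-node placement to the small-page fallback described in the changelog.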
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 889713180980..0531ea7dd7cf 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -761,15 +761,6 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
 }
 
-static inline struct page *alloc_hugepage_vma(int defrag,
-					      struct vm_area_struct *vma,
-					      unsigned long haddr, int nd,
-					      gfp_t extra_gfp)
-{
-	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
-			       HPAGE_PMD_ORDER, vma, haddr, nd);
-}
-
 /* Caller must hold page table lock. */
 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
@@ -790,6 +781,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			       unsigned long address, pmd_t *pmd,
 			       unsigned int flags)
 {
+	gfp_t gfp;
 	struct page *page;
 	unsigned long haddr = address & HPAGE_PMD_MASK;
 
@@ -824,8 +816,8 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 		return 0;
 	}
-	page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-			vma, haddr, numa_node_id(), 0);
+	gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
+	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
 	if (unlikely(!page)) {
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
@@ -1113,10 +1105,12 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spin_unlock(ptl);
 alloc:
 	if (transparent_hugepage_enabled(vma) &&
-	    !transparent_hugepage_debug_cow())
-		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-					      vma, haddr, numa_node_id(), 0);
-	else
+	    !transparent_hugepage_debug_cow()) {
+		gfp_t gfp;
+
+		gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
+		new_page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+	} else
 		new_page = NULL;
 
 	if (unlikely(!new_page)) {
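For context on the "fall back to small page allocation" behaviour in the changelog (not shown in the diff above): when the local-node THP attempt fails, do_huge_pmd_anonymous_page() returns VM_FAULT_FALLBACK and the generic fault code retries the fault with base pages, which are then placed according to the task's mempolicy. A condensed, hedged sketch of that caller-side handling in __handle_mm_fault():

	/*
	 * Condensed sketch -- illustrative, not taken from this diff.
	 * If the THP attempt reports fallback, continue to the PTE path
	 * and take a base page via the normal mempolicy instead.
	 */
	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
		int ret = do_huge_pmd_anonymous_page(mm, vma, address,
						     pmd, flags);
		if (!(ret & VM_FAULT_FALLBACK))
			return ret;
	}
	/* ... fall through to handle_pte_fault() for a small page ... */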