author     Andi Kleen <ak@linux.intel.com>                  2011-03-22 19:33:13 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-03-22 20:44:05 -0400
commit     cc5d462f7777c06c5cf0b55d736be325cda747b3 (patch)
tree       3b0fc1539e85c0357ab0ae8a718b69b39377ede5 /mm/huge_memory.c
parent     78afd5612deb8268bafc8b6507d72341d5ed9aac (diff)
mm: use __GFP_OTHER_NODE for transparent huge pages
Pass __GFP_OTHER_NODE for the transparent hugepage NUMA allocations done by the
khugepaged daemon. This way the low-level accounting of local versus
remote pages works correctly.
Contains improvements from Andrea Arcangeli
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
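To make the "local versus remote" accounting concrete: without a hint, the NUMA statistics judge an allocation against the node the allocating thread happens to be running on, which is misleading when khugepaged allocates a huge page on behalf of a task whose memory lives on another node. The following is a minimal, self-contained C sketch of that idea only; the flag constant and the account_alloc() helper are made up for illustration, and the kernel's real bookkeeping lives in zone_statistics() in mm/vmstat.c, which this does not reproduce.

#include <stdio.h>

/* Illustrative stand-in -- not the kernel's definition. */
#define FAKE_GFP_OTHER_NODE 0x1u   /* models __GFP_OTHER_NODE */

struct numa_stats {
	unsigned long local_node;  /* allocation credited as local  */
	unsigned long other_node;  /* allocation credited as remote */
};

/*
 * Simplified model of the local/remote bookkeeping: without the flag,
 * "local" is judged against the CPU doing the allocation; with the flag,
 * it is judged against the node the page is allocated on behalf of
 * (e.g. the node khugepaged is collapsing pages for).
 */
static void account_alloc(struct numa_stats *stats, int page_node,
			  int running_node, int target_node, unsigned int flags)
{
	int reference = (flags & FAKE_GFP_OTHER_NODE) ? target_node
						      : running_node;

	if (page_node == reference)
		stats->local_node++;
	else
		stats->other_node++;
}

int main(void)
{
	struct numa_stats stats = { 0, 0 };

	/* khugepaged runs on node 0, collapsing memory that lives on node 1. */
	account_alloc(&stats, 1, 0, 1, 0);                    /* credited as remote */
	account_alloc(&stats, 1, 0, 1, FAKE_GFP_OTHER_NODE);  /* credited as local  */

	printf("local=%lu other=%lu\n", stats.local_node, stats.other_node);
	return 0;
}

Run as written, the second call is credited as local because the flag tells the accounting to compare against the target node rather than the node the daemon happens to run on.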
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--   mm/huge_memory.c   20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 113e35c47502..0a619e0e2e0b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -643,23 +643,24 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 	return ret;
 }
 
-static inline gfp_t alloc_hugepage_gfpmask(int defrag)
+static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 {
-	return GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT);
+	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
 }
 
 static inline struct page *alloc_hugepage_vma(int defrag,
 					      struct vm_area_struct *vma,
-					      unsigned long haddr, int nd)
+					      unsigned long haddr, int nd,
+					      gfp_t extra_gfp)
 {
-	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
+	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
 			       HPAGE_PMD_ORDER, vma, haddr, nd);
 }
 
 #ifndef CONFIG_NUMA
 static inline struct page *alloc_hugepage(int defrag)
 {
-	return alloc_pages(alloc_hugepage_gfpmask(defrag),
+	return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
 			   HPAGE_PMD_ORDER);
 }
 #endif
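The hunk above threads a new extra_gfp argument through the huge-page allocation helpers. As a rough illustration of what the reworked alloc_hugepage_gfpmask() computes, here is a self-contained sketch with made-up flag values; the real GFP_TRANSHUGE, __GFP_WAIT and __GFP_OTHER_NODE definitions live in include/linux/gfp.h and are not reproduced here.

#include <stdio.h>

typedef unsigned int gfp_t;

/* Made-up bit values purely for illustration. */
#define FAKE___GFP_WAIT        0x10u
#define FAKE___GFP_OTHER_NODE  0x800u
#define FAKE_GFP_TRANSHUGE     (0x7u | FAKE___GFP_WAIT)

/* Mirrors the new helper: drop __GFP_WAIT when defrag is disabled,
 * then OR in whatever extra flags the caller asked for. */
static gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
	return (FAKE_GFP_TRANSHUGE & ~(defrag ? 0 : FAKE___GFP_WAIT)) | extra_gfp;
}

int main(void)
{
	/* Page-fault paths keep their old mask by passing 0 ... */
	printf("fault path: %#x\n", alloc_hugepage_gfpmask(1, 0));
	/* ... while khugepaged tags its allocations as being made on
	 * behalf of another node. */
	printf("khugepaged: %#x\n",
	       alloc_hugepage_gfpmask(1, FAKE___GFP_OTHER_NODE));
	return 0;
}

Fault-path callers keep their behaviour unchanged by passing 0; only the khugepaged paths OR in __GFP_OTHER_NODE, as the later hunks show.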
@@ -678,7 +679,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(khugepaged_enter(vma)))
 		return VM_FAULT_OOM;
 	page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-				  vma, haddr, numa_node_id());
+				  vma, haddr, numa_node_id(), 0);
 	if (unlikely(!page))
 		goto out;
 	if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
@@ -799,7 +800,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 	}
 
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
-		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE,
+		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
+					       __GFP_OTHER_NODE,
 					       vma, address, page_to_nid(page));
 		if (unlikely(!pages[i] ||
 			     mem_cgroup_newpage_charge(pages[i], mm,
@@ -902,7 +904,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (transparent_hugepage_enabled(vma) &&
 	    !transparent_hugepage_debug_cow())
 		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-					      vma, haddr, numa_node_id());
+					      vma, haddr, numa_node_id(), 0);
 	else
 		new_page = NULL;
 
@@ -1779,7 +1781,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * scalability.
 	 */
 	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
-				      node);
+				      node, __GFP_OTHER_NODE);
 	if (unlikely(!new_page)) {
 		up_read(&mm->mmap_sem);
 		*hpage = ERR_PTR(-ENOMEM);
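This last hunk is the khugepaged path the commit message refers to: collapse_huge_page() allocates the new huge page on the node of the memory being collapsed, which may not be the node the daemon itself runs on, so it is the one caller that passes __GFP_OTHER_NODE. With the allocations tagged this way, the per-node NUMA counters that the accounting feeds stay meaningful. One way to inspect them is the per-node numastat file in sysfs; the small sketch below simply dumps it for node 0 (the path and its availability are assumed from a typical NUMA-enabled Linux system, not something introduced by this patch).

#include <stdio.h>

/* Dump the per-node NUMA allocation counters (numa_hit, numa_miss,
 * local_node, other_node, ...) that the __GFP_OTHER_NODE accounting
 * feeds into.  Assumes a NUMA kernel with sysfs mounted. */
int main(void)
{
	const char *path = "/sys/devices/system/node/node0/numastat";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}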