Diffstat (limited to 'mm/huge_memory.c')

 mm/huge_memory.c | 34 ++++++++++++++++++++++++----------
 1 file changed, 24 insertions(+), 10 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 3e29781ee762..113e35c47502 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -650,10 +650,10 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag)
 
 static inline struct page *alloc_hugepage_vma(int defrag,
                                               struct vm_area_struct *vma,
-                                              unsigned long haddr)
+                                              unsigned long haddr, int nd)
 {
         return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
-                               HPAGE_PMD_ORDER, vma, haddr);
+                               HPAGE_PMD_ORDER, vma, haddr, nd);
 }
 
 #ifndef CONFIG_NUMA
@@ -678,7 +678,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 if (unlikely(khugepaged_enter(vma)))
                         return VM_FAULT_OOM;
                 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-                                          vma, haddr);
+                                          vma, haddr, numa_node_id());
                 if (unlikely(!page))
                         goto out;
                 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
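With the new `nd` argument, the anonymous-fault path above allocates the huge page on the node returned by numa_node_id(), i.e. the node of the CPU taking the fault (do_huge_pmd_wp_page below makes the same change for the COW copy). A minimal userspace sketch of the same "allocate on the current CPU's node" idea, using libnuma rather than any kernel API (hypothetical demo code, not part of this patch; compile with -lnuma):

#define _GNU_SOURCE
#include <numa.h>
#include <sched.h>
#include <stdio.h>

int main(void)
{
        if (numa_available() < 0) {
                fprintf(stderr, "NUMA not available\n");
                return 1;
        }
        /* Node of the CPU we are running on: the userspace
         * analogue of the kernel's numa_node_id(). */
        int node = numa_node_of_cpu(sched_getcpu());

        /* Allocate 2 MiB on that node, mirroring how the patched
         * fault path places the huge page locally. */
        size_t sz = 2UL << 20;
        void *buf = numa_alloc_onnode(sz, node);
        if (!buf)
                return 1;
        printf("allocated %zu bytes on node %d\n", sz, node);
        numa_free(buf, sz);
        return 0;
}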
@@ -799,8 +799,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
         }
 
         for (i = 0; i < HPAGE_PMD_NR; i++) {
-                pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
-                                          vma, address);
+                pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE,
+                                               vma, address, page_to_nid(page));
                 if (unlikely(!pages[i] ||
                              mem_cgroup_newpage_charge(pages[i], mm,
                                                        GFP_KERNEL))) {
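Here the COW fallback splits the huge page into HPAGE_PMD_NR small pages, and alloc_page_vma_node() with page_to_nid(page) keeps each replacement page on the node where the original huge page lived, rather than wherever the faulting CPU happens to run. The same "query the node backing existing memory, then allocate on it" pattern can be sketched from userspace with get_mempolicy(MPOL_F_NODE | MPOL_F_ADDR) (hypothetical demo, -lnuma):

#include <numa.h>
#include <numaif.h>
#include <stdio.h>
#include <string.h>

/* Return the NUMA node backing addr (the page must be faulted
 * in): a userspace analogue of the kernel's page_to_nid(). */
static int node_of(void *addr)
{
        int node = -1;
        if (get_mempolicy(&node, NULL, 0, addr,
                          MPOL_F_NODE | MPOL_F_ADDR) < 0)
                return -1;
        return node;
}

int main(void)
{
        if (numa_available() < 0)
                return 1;
        size_t sz = 1 << 20;
        char *orig = numa_alloc_local(sz);
        if (!orig)
                return 1;
        memset(orig, 0, sz);            /* fault the pages in */

        /* Place the replacement buffer on the same node. */
        int node = node_of(orig);
        char *copy = numa_alloc_onnode(sz, node);
        if (copy)
                memset(copy, 0, sz);
        printf("orig on node %d, copy on node %d\n",
               node, copy ? node_of(copy) : -1);
        return 0;
}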
@@ -902,7 +902,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
         if (transparent_hugepage_enabled(vma) &&
             !transparent_hugepage_debug_cow())
                 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-                                              vma, haddr);
+                                              vma, haddr, numa_node_id());
         else
                 new_page = NULL;
 
@@ -1745,7 +1745,8 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 static void collapse_huge_page(struct mm_struct *mm,
                                unsigned long address,
                                struct page **hpage,
-                               struct vm_area_struct *vma)
+                               struct vm_area_struct *vma,
+                               int node)
 {
         pgd_t *pgd;
         pud_t *pud;
@@ -1761,6 +1762,10 @@ static void collapse_huge_page(struct mm_struct *mm,
 #ifndef CONFIG_NUMA
         VM_BUG_ON(!*hpage);
         new_page = *hpage;
+        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+                up_read(&mm->mmap_sem);
+                return;
+        }
 #else
         VM_BUG_ON(*hpage);
         /*
@@ -1773,18 +1778,19 @@ static void collapse_huge_page(struct mm_struct *mm,
          * mmap_sem in read mode is good idea also to allow greater
          * scalability.
          */
-        new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address);
+        new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
+                                      node);
         if (unlikely(!new_page)) {
                 up_read(&mm->mmap_sem);
                 *hpage = ERR_PTR(-ENOMEM);
                 return;
         }
-#endif
         if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                 up_read(&mm->mmap_sem);
                 put_page(new_page);
                 return;
         }
+#endif
 
         /* after allocating the hugepage upgrade to mmap_sem write mode */
         up_read(&mm->mmap_sem);
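Read together, the two hunks above move the memcg charge so each configuration gets a correct error path: with !CONFIG_NUMA the preallocated *hpage still belongs to khugepaged, so a failed charge must bail out without put_page(), while the NUMA case frees the page it just allocated. Pieced together from the hunks, the post-patch flow in collapse_huge_page() is approximately:

#ifndef CONFIG_NUMA
        VM_BUG_ON(!*hpage);
        new_page = *hpage;
        /* Preallocated page: on charge failure, leave it in *hpage. */
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                up_read(&mm->mmap_sem);
                return;
        }
#else
        VM_BUG_ON(*hpage);
        new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
                                      node);
        if (unlikely(!new_page)) {
                up_read(&mm->mmap_sem);
                *hpage = ERR_PTR(-ENOMEM);
                return;
        }
        /* We own this page: on charge failure, free it. */
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                up_read(&mm->mmap_sem);
                put_page(new_page);
                return;
        }
#endif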
@@ -1919,6 +1925,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
         struct page *page;
         unsigned long _address;
         spinlock_t *ptl;
+        int node = -1;
 
         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
@@ -1949,6 +1956,13 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                 page = vm_normal_page(vma, _address, pteval);
                 if (unlikely(!page))
                         goto out_unmap;
+                /*
+                 * Choose the node of the first page. This could
+                 * be more sophisticated and look at more pages,
+                 * but isn't for now.
+                 */
+                if (node == -1)
+                        node = page_to_nid(page);
                 VM_BUG_ON(PageCompound(page));
                 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
                         goto out_unmap;
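The comment above leaves room for a smarter heuristic. One option it hints at, purely illustrative and not part of this patch, is a majority vote over all scanned PTEs instead of taking the first page's node; a self-contained C model of that policy:

#include <stdio.h>

#define MAX_NUMNODES    64      /* assumption for this sketch */
#define HPAGE_PMD_NR    512

/* Pick the node backing the most small pages in the range,
 * given each present page's node id (-1 if not present). */
static int most_common_node(const int nid[], int n)
{
        int count[MAX_NUMNODES] = { 0 };
        int best = -1;

        for (int i = 0; i < n; i++) {
                if (nid[i] < 0)
                        continue;
                count[nid[i]]++;
                if (best < 0 || count[nid[i]] > count[best])
                        best = nid[i];
        }
        return best;            /* -1 if no pages were present */
}

int main(void)
{
        int nid[HPAGE_PMD_NR];
        /* 100 pages on node 0, the remaining 412 on node 1. */
        for (int i = 0; i < HPAGE_PMD_NR; i++)
                nid[i] = (i < 100) ? 0 : 1;
        printf("collapse target node: %d\n",
               most_common_node(nid, HPAGE_PMD_NR));
        return 0;
}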
@@ -1965,7 +1979,7 @@ out_unmap:
         pte_unmap_unlock(pte, ptl);
         if (ret)
                 /* collapse_huge_page will return with the mmap_sem released */
-                collapse_huge_page(mm, address, hpage, vma);
+                collapse_huge_page(mm, address, hpage, vma, node);
 out:
         return ret;
 }
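For reference, the signature changes this patch makes within mm/huge_memory.c, reconstructed from the hunks above (the updated alloc_pages_vma() and alloc_page_vma_node() prototypes live outside this file):

/* Before */
static inline struct page *alloc_hugepage_vma(int defrag,
                                              struct vm_area_struct *vma,
                                              unsigned long haddr);
static void collapse_huge_page(struct mm_struct *mm, unsigned long address,
                               struct page **hpage,
                               struct vm_area_struct *vma);

/* After: callers pass an explicit NUMA node */
static inline struct page *alloc_hugepage_vma(int defrag,
                                              struct vm_area_struct *vma,
                                              unsigned long haddr, int nd);
static void collapse_huge_page(struct mm_struct *mm, unsigned long address,
                               struct page **hpage,
                               struct vm_area_struct *vma, int node);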