path: root/mm/mempolicy.c
author	Andrea Arcangeli <aarcange@redhat.com>	2011-01-13 18:47:05 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 20:32:45 -0500
commit	0bbbc0b33d141f78a0d9218a54a47f50621220d3 (patch)
tree	3ef3363c189ac536926119731eb86dcf989f4adb /mm/mempolicy.c
parent	d39d33c332c611094f84cee39715866f4cbf79e2 (diff)
thp: add numa awareness to hugepage allocations
It's mostly a matter of replacing alloc_pages with alloc_pages_vma after introducing alloc_pages_vma. khugepaged needs special handling, as the allocation has to happen inside collapse_huge_page where the vma is known, and an error has to be returned to the outer loop so it can sleep alloc_sleep_millisecs in case of failure. But it retains the more efficient logic of handling allocation failures in khugepaged when CONFIG_NUMA=n.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
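To make the khugepaged handling described above concrete, here is a simplified sketch, assuming the khugepaged structure in mm/huge_memory.c (collapse_huge_page, a *hpage result slot, and the khugepaged_alloc_sleep_millisecs tunable); the bodies are illustrative, not the actual patch:

#include <linux/mm.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/huge_mm.h>

/*
 * Illustrative only: in the NUMA-aware case the huge page is allocated
 * inside collapse_huge_page(), where the vma (and hence its mempolicy)
 * is known, and a failure is reported back through *hpage so the outer
 * scan loop can sleep before retrying.
 */
static void collapse_huge_page(struct mm_struct *mm, unsigned long address,
			       struct page **hpage,
			       struct vm_area_struct *vma)
{
	struct page *new_page;

	/* Allocate on the node suggested by the vma's mempolicy. */
	new_page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE | __GFP_COMP,
				   HPAGE_PMD_ORDER, vma, address);
	if (unlikely(!new_page)) {
		/* Tell the outer loop to back off and sleep. */
		*hpage = ERR_PTR(-ENOMEM);
		return;
	}
	/* ... proceed with the collapse using new_page ... */
}

static void khugepaged_do_scan(struct page **hpage)
{
	/* ... scan mm's, calling collapse_huge_page() ... */
	if (IS_ERR(*hpage))
		/* Allocation failed: sleep alloc_sleep_millisecs before retrying. */
		msleep(khugepaged_alloc_sleep_millisecs);
}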
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--	mm/mempolicy.c	13
1 file changed, 8 insertions, 5 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 83b7df309fc4..368fc9d23610 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1796,7 +1796,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
 }
 
 /**
- * alloc_page_vma - Allocate a page for a VMA.
+ * alloc_pages_vma - Allocate a page for a VMA.
  *
  * @gfp:
  *      %GFP_USER    user allocation.
@@ -1805,6 +1805,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
  *      %GFP_FS      allocation should not call back into a file system.
  *      %GFP_ATOMIC  don't sleep.
  *
+ * @order: Order of the GFP allocation.
  * @vma:  Pointer to VMA or NULL if not available.
  * @addr: Virtual Address of the allocation. Must be inside the VMA.
  *
@@ -1818,7 +1819,8 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
  * Should be called with the mm_sem of the vma hold.
  */
 struct page *
-alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
+alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
+		unsigned long addr)
 {
 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 	struct zonelist *zl;
@@ -1830,7 +1832,7 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 
 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
 		mpol_cond_put(pol);
-		page = alloc_page_interleave(gfp, 0, nid);
+		page = alloc_page_interleave(gfp, order, nid);
 		put_mems_allowed();
 		return page;
 	}
@@ -1839,7 +1841,7 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 		/*
 		 * slow path: ref counted shared policy
 		 */
-		struct page *page = __alloc_pages_nodemask(gfp, 0,
+		struct page *page = __alloc_pages_nodemask(gfp, order,
 				zl, policy_nodemask(gfp, pol));
 		__mpol_put(pol);
 		put_mems_allowed();
@@ -1848,7 +1850,8 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 	/*
 	 * fast path: default or task policy
 	 */
-	page = __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
+	page = __alloc_pages_nodemask(gfp, order, zl,
+			policy_nodemask(gfp, pol));
 	put_mems_allowed();
 	return page;
 }
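For callers, the existing single-page interface can remain a thin order-0 wrapper around the new function, while transparent-hugepage call sites pass a huge-page order. The sketch below is illustrative: the wrapper mirrors the shape of the alloc_page_vma macro in include/linux/gfp.h, the GFP flags are example choices, and thp_alloc_example is a hypothetical caller.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/huge_mm.h>

/* Order-0 convenience wrapper for existing single-page callers. */
#define alloc_page_vma(gfp_mask, vma, addr)		\
	alloc_pages_vma(gfp_mask, 0, vma, addr)

/*
 * A THP-sized allocation that honours the vma's NUMA policy: the order
 * argument lets the policy-aware path allocate a compound page on the
 * node chosen for this vma/address.
 */
static struct page *thp_alloc_example(struct vm_area_struct *vma,
				      unsigned long haddr)
{
	return alloc_pages_vma(GFP_HIGHUSER_MOVABLE | __GFP_COMP,
			       HPAGE_PMD_ORDER, vma, haddr);
}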