path: root/include/linux/gfp.h
author		Andrea Arcangeli <aarcange@redhat.com>	2011-01-13 18:47:05 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 20:32:45 -0500
commit		0bbbc0b33d141f78a0d9218a54a47f50621220d3 (patch)
tree		3ef3363c189ac536926119731eb86dcf989f4adb /include/linux/gfp.h
parent		d39d33c332c611094f84cee39715866f4cbf79e2 (diff)
thp: add numa awareness to hugepage allocations
It's mostly a matter of replacing alloc_pages with alloc_pages_vma after introducing alloc_pages_vma. khugepaged needs special handling as the allocation has to happen inside collapse_huge_page where the vma is known and an error has to be returned to the outer loop to sleep alloc_sleep_millisecs in case of failure. But it retains the more efficient logic of handling allocation failures in khugepaged in case of CONFIG_NUMA=n.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
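For illustration only, a minimal sketch of what the replacement described above looks like at a hugepage allocation site. The helper name and the use of HPAGE_PMD_ORDER are assumptions made for the example; only the alloc_pages_vma() signature is the one this patch declares in gfp.h.

#include <linux/mm.h>		/* struct vm_area_struct, gfp_t */
#include <linux/gfp.h>		/* alloc_pages_vma() added by this patch */
#include <linux/huge_mm.h>	/* HPAGE_PMD_ORDER */

/*
 * Sketch, not the actual mm/huge_memory.c code: before this patch a
 * hugepage allocation site could only use the NUMA-unaware interface,
 *
 *	page = alloc_pages(gfp, HPAGE_PMD_ORDER);
 *
 * while alloc_pages_vma() lets the caller pass the vma and faulting
 * address down so the vma's mempolicy can be honoured.
 */
static struct page *alloc_hugepage_example(gfp_t gfp,
					   struct vm_area_struct *vma,
					   unsigned long haddr)
{
	return alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr);
}

Under CONFIG_NUMA=n the fallback macro in the diff below expands such a call straight back to alloc_pages(gfp_mask, order), so non-NUMA builds keep the old behaviour with no extra cost.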
Diffstat (limited to 'include/linux/gfp.h')
-rw-r--r--	include/linux/gfp.h	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index d95082cc6f4a..a3b148a91874 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -331,14 +331,17 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
 	return alloc_pages_current(gfp_mask, order);
 }
-extern struct page *alloc_page_vma(gfp_t gfp_mask,
+extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
 			struct vm_area_struct *vma, unsigned long addr);
 #else
 #define alloc_pages(gfp_mask, order) \
 		alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
+#define alloc_pages_vma(gfp_mask, order, vma, addr)	\
+	alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
+#define alloc_page_vma(gfp_mask, vma, addr)		\
+	alloc_pages_vma(gfp_mask, 0, vma, addr)
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
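The khugepaged special case mentioned in the commit message can be pictured roughly as follows; everything except the alloc_pages_vma() call is a simplified placeholder (the real logic lives in collapse_huge_page() in mm/huge_memory.c), shown only to make the error-to-outer-loop flow concrete.

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>

/*
 * Rough sketch of the CONFIG_NUMA=y flow described in the commit message:
 * the hugepage is allocated inside collapse_huge_page(), where the vma and
 * address are known, and a failure is returned to the caller so the scan
 * loop can sleep alloc_sleep_millisecs before retrying.
 */
static int collapse_huge_page_sketch(struct vm_area_struct *vma,
				     unsigned long address, gfp_t gfp)
{
	struct page *new_page;

	new_page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, address);
	if (!new_page)
		return -ENOMEM;	/* outer loop sleeps, then retries */

	/* ... collapse the small pages into new_page and map it ... */
	return 0;
}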