author     Jan Beulich <JBeulich@novell.com>                 2009-12-14 20:58:39 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2009-12-15 11:53:13 -0500
commit     976d6dfbb0175d136fc098854bbce0c028a3924b (patch)
tree       1ae6511b70d1272fffa1124d8ba69fc6a2d90422 /mm
parent     bad44b5be84cf3bb1ff900bec02ee61e1993328c (diff)
vmalloc(): adjust gfp mask passed on nested vmalloc() invocation
- avoid wasting more precious resources (DMA or DMA32 pools), when
  being called through vmalloc_32{,_user}()
- explicitly allow using high memory here even if the outer allocation
  request doesn't allow it

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Acked-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
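In effect, the nested allocation for the page-pointer array now keeps only the reclaim-controlling bits of the caller's gfp mask (via GFP_RECLAIM_MASK), adds __GFP_ZERO, and, when the array itself has to be vmalloc()ed, __GFP_HIGHMEM. Below is a minimal standalone C sketch of that masking; the flag values are illustrative placeholders, not the real constants from <linux/gfp.h>.

#include <stdio.h>

/* Illustrative bit values only; the real constants live in <linux/gfp.h>. */
#define __GFP_DMA32        0x01u   /* zone modifier: precious DMA32 pool   */
#define __GFP_HIGHMEM      0x02u   /* zone modifier: may use high memory   */
#define __GFP_IO           0x04u   /* reclaim-related flag (kept)          */
#define __GFP_FS           0x08u   /* reclaim-related flag (kept)          */
#define __GFP_ZERO         0x10u   /* zero the allocation                  */

/* Stand-in for the kernel's GFP_RECLAIM_MASK: the flags that control how
 * hard the allocator may try, but not *where* it allocates from. */
#define GFP_RECLAIM_MASK   (__GFP_IO | __GFP_FS)

int main(void)
{
	/* What vmalloc_32() would hand down: DMA32 zone + normal reclaim. */
	unsigned int gfp_mask = __GFP_DMA32 | __GFP_IO | __GFP_FS;

	/* The patch: drop zone modifiers, keep reclaim behaviour, zero pages. */
	unsigned int nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;

	/* The page-pointer array itself may additionally live in high memory. */
	unsigned int array_gfp = nested_gfp | __GFP_HIGHMEM;

	printf("caller mask: %#x\n", gfp_mask);
	printf("nested mask: %#x (DMA32 bit dropped)\n", nested_gfp);
	printf("array  mask: %#x (highmem allowed)\n", array_gfp);
	return 0;
}

Running it shows the DMA32 bit disappearing from the nested masks while the reclaim bits survive, which is exactly what the two hunks below do.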
Diffstat (limited to 'mm')
-rw-r--r--  mm/vmalloc.c | 7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 9b08d790df6f..37e69295f250 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1411,6 +1411,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 {
 	struct page **pages;
 	unsigned int nr_pages, array_size, i;
+	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
 
 	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
 	array_size = (nr_pages * sizeof(struct page *));
@@ -1418,13 +1419,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO,
+		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
 				PAGE_KERNEL, node, caller);
 		area->flags |= VM_VPAGES;
 	} else {
-		pages = kmalloc_node(array_size,
-				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
-				node);
+		pages = kmalloc_node(array_size, nested_gfp, node);
 	}
 	area->pages = pages;
 	area->caller = caller;
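For context on why the zone modifiers matter: vmalloc_32{,_user}() hand a 32-bit-addressable gfp mask down to this function, built roughly as in the sketch below (based on mm/vmalloc.c of the same era; the exact definition may differ by kernel version). Without the masking above, the page-pointer array and any nested metadata allocations would also be carved out of the scarce DMA/DMA32 pools.

/* Sketch of how vmalloc_32{,_user}() pick their gfp mask (exact form may
 * differ by kernel version): on 64-bit configurations the mask carries a
 * DMA/DMA32 zone modifier, which is precisely the bit the nested
 * allocation above must not inherit. */
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32	GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32	GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32	GFP_KERNEL
#endif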