author      Johannes Weiner <hannes@cmpxchg.org>          2011-05-24 20:11:43 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org> 2011-05-25 11:39:11 -0400
commit      248ac0e1943ad1796393d281b096184719eb3f97 (patch)
tree        5188b0957a0c1250d8424f24fe5517ca8c417f93 /mm/vmalloc.c
parent      82d4b5779a75887750748609f3415f01c1bb9f81 (diff)
mm/vmalloc: remove guard page from between vmap blocks
The vmap allocator is used to, among other things, allocate per-cpu vmap blocks, where each vmap block is naturally aligned to its own size. Obviously, leaving a guard page after each vmap area forbids packing vmap blocks efficiently and can make the kernel run out of possible vmap blocks long before overall vmap space is exhausted.

The new interface to map a user-supplied page array into linear vmalloc space (vm_map_ram) insists on allocating from a vmap block (instead of falling back to a custom area) when the area size is below a certain threshold. With heavy users of this interface (e.g. XFS) and limited vmalloc space on 32-bit, vmap block exhaustion is a real problem.

Remove the guard page from the core vmap allocator. vmalloc and the old vmap interface enforce a guard page on their own at a higher level.

Note that without this patch, we had accidental guard pages after those vm_map_ram areas that happened to be at the end of a vmap block, but not between every area. This patch removes this accidental guard page only. If we want guard pages after every vm_map_ram area, this should be done separately, and just like with vmalloc and the old interface, on a different level, not in the core allocator.

Mel pointed out: "If necessary, the guard page could be reintroduced as a debugging-only option (CONFIG_DEBUG_PAGEALLOC?). Otherwise it seems reasonable."

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Dave Chinner <david@fromorbit.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Hugh Dickins <hughd@google.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
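To make the packing argument concrete, here is a minimal userspace sketch (not kernel code) of the alignment arithmetic the patch changes. The 64-page block size and the addresses are illustrative assumptions, and ALIGN here simply mirrors a power-of-two round-up:

/* Illustrative sketch only; constants are assumptions, not the kernel's
 * actual vmap block configuration. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))	/* power-of-two round-up */

int main(void)
{
	/* Assume a vmap block covers 64 pages and is aligned to its own size. */
	unsigned long block_size = 64 * PAGE_SIZE;
	unsigned long prev_end = 3 * block_size;	/* end of the previous block */

	/* Old behaviour: the guard page pushes the next naturally aligned
	 * block a whole block-sized slot further up. */
	unsigned long with_guard = ALIGN(prev_end + PAGE_SIZE, block_size);

	/* New behaviour: blocks can be packed back to back. */
	unsigned long without_guard = ALIGN(prev_end, block_size);

	printf("next block with guard page:    %#lx\n", with_guard);	/* 0x100000 */
	printf("next block without guard page: %#lx\n", without_guard);	/* 0xc0000  */
	return 0;
}

In this example a single 4 KiB guard page costs an entire 256 KiB block-sized slot, which is why packing of naturally aligned vmap blocks breaks down long before vmap space itself is exhausted.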
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	6
1 file changed, 3 insertions, 3 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 5d6030235d7a..4581ddcdda50 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -375,7 +375,7 @@ nocache:
 	/* find starting point for our search */
 	if (free_vmap_cache) {
 		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
-		addr = ALIGN(first->va_end + PAGE_SIZE, align);
+		addr = ALIGN(first->va_end, align);
 		if (addr < vstart)
 			goto nocache;
 		if (addr + size - 1 < addr)
@@ -406,10 +406,10 @@ nocache:
 	}
 
 	/* from the starting point, walk areas until a suitable hole is found */
-	while (addr + size >= first->va_start && addr + size <= vend) {
+	while (addr + size > first->va_start && addr + size <= vend) {
 		if (addr + cached_hole_size < first->va_start)
 			cached_hole_size = first->va_start - addr;
-		addr = ALIGN(first->va_end + PAGE_SIZE, align);
+		addr = ALIGN(first->va_end, align);
 		if (addr + size - 1 < addr)
 			goto overflow;
 
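The second hunk also tightens the overlap test from >= to >. A hedged reading: with the guard page gone, a candidate range that ends exactly where the next vmap_area begins no longer conflicts, so only genuine overlap keeps the walk going. Illustrative (non-kernel) sketch with made-up values:

#include <stdbool.h>
#include <stdio.h>

/* Candidate range is [addr, addr + size); the next existing area starts
 * at va_start.  All values below are invented for illustration. */
static bool overlaps_old(unsigned long addr, unsigned long size,
			 unsigned long va_start)
{
	/* With an implicit guard page after every area, a candidate that
	 * merely abuts va_start still counted as a conflict. */
	return addr + size >= va_start;
}

static bool overlaps_new(unsigned long addr, unsigned long size,
			 unsigned long va_start)
{
	/* Without guard pages, back-to-back areas are fine. */
	return addr + size > va_start;
}

int main(void)
{
	unsigned long addr = 0x1000, size = 0x1000, va_start = 0x2000;

	printf("old test flags abutment as conflict: %d\n",
	       overlaps_old(addr, size, va_start));	/* prints 1 */
	printf("new test flags abutment as conflict: %d\n",
	       overlaps_new(addr, size, va_start));	/* prints 0 */
	return 0;
}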