author	Glauber Costa <glommer@redhat.com>	2008-11-19 18:36:33 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-11-19 21:49:58 -0500
commit	0ae15132a4f5c758a6ffcde74495641dc3f62ba1 (patch)
tree	c81f885105a038aa694c700e4aec37377f640881 /mm/vmalloc.c
parent	496850e5f5a372029ceb2b35c811770a9bb073b6 (diff)
mm: vmalloc search restart fix
vmalloc currently restarts its search for a free area when it cannot find one, because some areas are lazily freed and may have become available in the meantime. However, the current implementation restarts the search from the last failing address, which is pretty much by definition at the end of the address space. So, we fail.

The proposal of this patch is to restart the search from the beginning of the requested vstart address instead. This fixes the regression in running KVM virtual machines for me, described in http://lkml.org/lkml/2008/10/28/349, caused by commit db64fe02258f1507e13fe5212a989922323685ce.

Signed-off-by: Glauber Costa <glommer@redhat.com>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
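To illustrate why the placement of the address computation matters, here is a minimal, self-contained userspace sketch of the retry pattern the patch fixes. It is not kernel code: alloc_range(), find_hole(), reclaim_lazy(), and the VSTART/VEND constants are hypothetical stand-ins for alloc_vmap_area(), the rb-tree hole search, purge_vmap_area_lazy(), and the requested [vstart, vend) range.

	#include <stdio.h>

	#define VSTART 0x1000UL
	#define VEND   0x9000UL

	static int reclaimed;

	/* Stand-in for the rb-tree walk: pretend the range is exhausted
	 * until lazily-freed areas have been reclaimed. */
	static unsigned long find_hole(unsigned long addr, unsigned long size)
	{
		if (!reclaimed)
			return VEND;            /* search runs off the end */
		return addr + size <= VEND ? addr : VEND;
	}

	/* Stand-in for purge_vmap_area_lazy(). */
	static void reclaim_lazy(void)
	{
		reclaimed = 1;
	}

	static unsigned long alloc_range(unsigned long size)
	{
		unsigned long addr;
		int purged = 0;

	retry:
		/* The fix: recompute addr from the start of the requested
		 * range on every pass.  Before the patch, addr was computed
		 * once before the retry label, so a retry resumed at the
		 * failing address near the end of the space and failed again. */
		addr = VSTART;

		addr = find_hole(addr, size);
		if (addr + size > VEND) {
			if (!purged) {
				reclaim_lazy();  /* free lazily-freed areas */
				purged = 1;
				goto retry;      /* rescan the whole range */
			}
			return 0;                /* genuinely no space left */
		}
		return addr;
	}

	int main(void)
	{
		printf("got 0x%lx\n", alloc_range(0x1000));
		return 0;
	}

With the old placement, the second pass started exactly where the first one gave up, so purging lazily-freed areas could never help; moving the ALIGN(vstart, align) computation under the retry label makes each pass rescan the whole range.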
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 04f5e320e744..30f826d484f0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -324,14 +324,14 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 
 	BUG_ON(size & ~PAGE_MASK);
 
-	addr = ALIGN(vstart, align);
-
 	va = kmalloc_node(sizeof(struct vmap_area),
 			gfp_mask & GFP_RECLAIM_MASK, node);
 	if (unlikely(!va))
 		return ERR_PTR(-ENOMEM);
 
 retry:
+	addr = ALIGN(vstart, align);
+
 	spin_lock(&vmap_area_lock);
 	/* XXX: could have a last_hole cache */
 	n = vmap_area_root.rb_node;