author	Nick Piggin <npiggin@suse.de>	2008-11-19 18:36:33 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-11-19 21:49:58 -0500
commit	496850e5f5a372029ceb2b35c811770a9bb073b6 (patch)
tree	c6aa155a0dcf81e967a85dfe2b8142d3b8fa9063 /mm
parent	f011c2dae6cffc50ef67d9bd937b488ba5db8913 (diff)
mm: vmalloc failure flush fix
An initial vmalloc failure should start off a synchronous flush of lazy areas, in case somebody is already in the middle of flushing them, which could otherwise cause us to return an allocation failure even though there is plenty of KVA free.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
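To make the synchronous vs. opportunistic distinction concrete, here is a minimal userspace model, not kernel code: the serialization of purging is approximated with a pthread mutex, purge() stands in for the now-synchronous purge_vmap_area_lazy() and try_purge() for the new try_purge_vmap_area_lazy(); all names and the do_purge() body are illustrative assumptions, not the real implementation.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the lock that serializes purging of lazy areas. */
static pthread_mutex_t purge_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the actual flush of outstanding lazy areas. */
static void do_purge(void)
{
	printf("flushing lazy areas\n");
}

/*
 * Opportunistic purge: if somebody is already purging, just return.
 * Good enough for the "too many lazy pages" hint path.
 */
static void try_purge(void)
{
	if (pthread_mutex_trylock(&purge_lock) != 0)
		return;			/* somebody else is on it */
	do_purge();
	pthread_mutex_unlock(&purge_lock);
}

/*
 * Synchronous purge: wait for any purge already in progress, then purge.
 * On an allocation failure the caller needs the flush to have completed
 * before retrying, otherwise it can report failure while KVA is free.
 */
static void purge(void)
{
	pthread_mutex_lock(&purge_lock);
	do_purge();
	pthread_mutex_unlock(&purge_lock);
}

int main(void)
{
	try_purge();		/* hint path: best effort */
	purge();		/* failure path: guaranteed flush */
	return 0;
}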
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmalloc.c	15
1 files changed, 13 insertions, 2 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 46aab4dbf618..04f5e320e744 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -522,13 +522,24 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 }
 
 /*
+ * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
+ * is already purging.
+ */
+static void try_purge_vmap_area_lazy(void)
+{
+	unsigned long start = ULONG_MAX, end = 0;
+
+	__purge_vmap_area_lazy(&start, &end, 0, 0);
+}
+
+/*
  * Kick off a purge of the outstanding lazy areas.
  */
 static void purge_vmap_area_lazy(void)
 {
 	unsigned long start = ULONG_MAX, end = 0;
 
-	__purge_vmap_area_lazy(&start, &end, 0, 0);
+	__purge_vmap_area_lazy(&start, &end, 1, 0);
 }
 
 /*
@@ -539,7 +550,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 	va->flags |= VM_LAZY_FREE;
 	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
 	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
-		purge_vmap_area_lazy();
+		try_purge_vmap_area_lazy();
 }
 
 static struct vmap_area *find_vmap_area(unsigned long addr)
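For context only (this path is not part of the diff above): the vmalloc allocation path retries after purging when it runs out of address space, and the patch makes that purge wait for any flush already in progress before the retry. Below is a simplified, self-contained model of that retry logic; the names, the single-retry structure, and the boolean state are assumptions for illustration, not the verbatim kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Model state: allocation fails until a purge has reclaimed space. */
static bool space_available;

/* Model of a synchronous purge: by the time it returns, space is back. */
static void purge_sync(void)
{
	space_available = true;
}

/*
 * Model of the allocation failure path: if the first attempt fails, flush
 * the lazy areas, wait for the flush to finish, then retry before giving up.
 */
static int alloc_area(void)
{
	if (space_available)
		return 0;
	purge_sync();
	if (space_available)
		return 0;
	return -1;	/* genuinely out of address space */
}

int main(void)
{
	printf("alloc_area() -> %d\n", alloc_area());
	return 0;
}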