author	Hong zhi guo <honkiko@gmail.com>	2012-07-31 19:41:35 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-31 21:42:39 -0400
commit	92ca922f0a19145f2dcc99d84fe656fa55b52c2e (patch)
tree	3d7e616ce15d79d44c1c21f16958978a211fc738 /mm/vmalloc.c
parent	c2cddf991974a00aa7b40a21e829bc034b8199b6 (diff)
vmalloc: walk vmap_areas by sorted list instead of rb_next()
There's a walk by repeating rb_next() to find a suitable hole.  It can
simply be replaced by a walk over the sorted vmap_area_list, which is
both simpler and more efficient.

Mutation of the list and the tree only happens in pairs, within
__insert_vmap_area() and __free_vmap_area(), under protection of
vmap_area_lock.  The patched code also runs under vmap_area_lock, so the
list walk is safe and consistent with the tree walk.

Tested on SMP by repeating batches of vmalloc and vfree of random sizes
and rounds for hours.

Signed-off-by: Hong Zhiguo <honkiko@gmail.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
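To illustrate why the sorted list suffices, below is a minimal,
self-contained userspace sketch of the hole search.  It is not the
kernel's alloc_vmap_area(): struct area, find_hole() and the sample
addresses are illustrative stand-ins, and alignment and the cached-hole
logic are omitted.

/*
 * Simplified sketch of the hole search over a list of areas kept
 * sorted by start address (stand-in for vmap_area_list).
 */
#include <stdio.h>

struct area {
	unsigned long start;	/* first byte of the area */
	unsigned long end;	/* one past the last byte */
	struct area *next;	/* next area in address order */
};

/*
 * Walk the sorted list and return the lowest address >= vstart where a
 * hole of 'size' bytes fits, or 0 if none fits before vend.  Bounds
 * checking against vend is only done at the end, for brevity.
 */
static unsigned long find_hole(struct area *head, unsigned long vstart,
			       unsigned long vend, unsigned long size)
{
	unsigned long addr = vstart;
	struct area *a;

	for (a = head; a; a = a->next) {
		if (a->end <= addr)
			continue;	/* area lies entirely below addr */
		if (addr + size <= a->start)
			return addr;	/* hole before this area fits */
		addr = a->end;		/* skip past this area and retry */
	}
	return (addr + size <= vend) ? addr : 0;
}

int main(void)
{
	/* Two areas: [0x1000,0x2000) and [0x3000,0x6000), sorted by start. */
	struct area a2 = { 0x3000, 0x6000, NULL };
	struct area a1 = { 0x1000, 0x2000, &a2 };

	printf("hole for 0x800  at 0x%lx\n",
	       find_hole(&a1, 0x1000, 0x10000, 0x800));	/* expect 0x2000 */
	printf("hole for 0x2000 at 0x%lx\n",
	       find_hole(&a1, 0x1000, 0x10000, 0x2000));	/* expect 0x6000 */
	return 0;
}

Because the areas are kept in address order, each step to the next area
is a single pointer dereference, whereas rb_next() may have to climb
back up the tree to find the in-order successor.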
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e03f4c7307a5..7e25ee3ce6e5 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -413,11 +413,11 @@ nocache:
 		if (addr + size - 1 < addr)
 			goto overflow;
 
-		n = rb_next(&first->rb_node);
-		if (n)
-			first = rb_entry(n, struct vmap_area, rb_node);
-		else
+		if (list_is_last(&first->list, &vmap_area_list))
 			goto found;
+
+		first = list_entry(first->list.next,
+				struct vmap_area, list);
 	}
 
 found:
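For context, the list_entry() used in the new code recovers the
structure that embeds a given list node (it is defined in terms of
container_of).  The sketch below is a simplified userspace illustration
of that mechanism, assuming a cut-down vmap_area_demo structure and
omitting the kernel's extra type checking; it is not the kernel's
actual definition.

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's list_head and list_entry(). */
struct list_head {
	struct list_head *next, *prev;
};

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vmap_area_demo {
	unsigned long va_start;
	unsigned long va_end;
	struct list_head list;	/* node embedded in the sorted list */
};

int main(void)
{
	struct vmap_area_demo va = { 0x1000, 0x2000, { NULL, NULL } };
	struct list_head *node = &va.list;

	/* Recover the containing structure from the embedded node. */
	struct vmap_area_demo *back =
		list_entry(node, struct vmap_area_demo, list);

	printf("va_start = 0x%lx\n", back->va_start);	/* prints 0x1000 */
	return 0;
}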