aboutsummaryrefslogtreecommitdiffstats
path: root/mm/vmalloc.c
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2008-02-05 01:28:34 -0500
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2008-02-05 12:44:14 -0500
commitbf53d6f8fa467397a16de2a2500312ae26528d34 (patch)
tree5fe5fb6e7e4835d358dcc67e7b38f1aa00b9e525 /mm/vmalloc.c
parent9e2779fa281cfda13ac060753d674bbcaa23367e (diff)
vmalloc: clean up page array indexing
The page array is repeatedly indexed both in vunmap and vmalloc_area_node(). Add a temporary variable to make it easier to read (and easier to patch later). Signed-off-by: Christoph Lameter <clameter@sgi.com> Cc: Nick Piggin <nickpiggin@yahoo.com.au> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--mm/vmalloc.c16
1 file changed, 11 insertions, 5 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 21abac2c3941..83625b6fcc36 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -384,8 +384,10 @@ static void __vunmap(const void *addr, int deallocate_pages)
384 int i; 384 int i;
385 385
386 for (i = 0; i < area->nr_pages; i++) { 386 for (i = 0; i < area->nr_pages; i++) {
387 BUG_ON(!area->pages[i]); 387 struct page *page = area->pages[i];
388 __free_page(area->pages[i]); 388
389 BUG_ON(!page);
390 __free_page(page);
389 } 391 }
390 392
391 if (area->flags & VM_VPAGES) 393 if (area->flags & VM_VPAGES)
@@ -489,15 +491,19 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
489 } 491 }
490 492
491 for (i = 0; i < area->nr_pages; i++) { 493 for (i = 0; i < area->nr_pages; i++) {
494 struct page *page;
495
492 if (node < 0) 496 if (node < 0)
493 area->pages[i] = alloc_page(gfp_mask); 497 page = alloc_page(gfp_mask);
494 else 498 else
495 area->pages[i] = alloc_pages_node(node, gfp_mask, 0); 499 page = alloc_pages_node(node, gfp_mask, 0);
496 if (unlikely(!area->pages[i])) { 500
501 if (unlikely(!page)) {
497 /* Successfully allocated i pages, free them in __vunmap() */ 502 /* Successfully allocated i pages, free them in __vunmap() */
498 area->nr_pages = i; 503 area->nr_pages = i;
499 goto fail; 504 goto fail;
500 } 505 }
506 area->pages[i] = page;
501 } 507 }
502 508
503 if (map_vm_area(area, prot, &pages)) 509 if (map_vm_area(area, prot, &pages))