Diffstat (limited to 'mm/vmalloc.c')
 mm/vmalloc.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f095843fc243..fcadd3e25c0c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2409,7 +2409,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
 	array_size = (nr_pages * sizeof(struct page *));
 
-	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
 		pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
@@ -2417,13 +2416,16 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	} else {
 		pages = kmalloc_node(array_size, nested_gfp, node);
 	}
-	area->pages = pages;
-	if (!area->pages) {
+
+	if (!pages) {
 		remove_vm_area(area->addr);
 		kfree(area);
 		return NULL;
 	}
 
+	area->pages = pages;
+	area->nr_pages = nr_pages;
+
 	for (i = 0; i < area->nr_pages; i++) {
 		struct page *page;
 
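The hunks above defer publishing the page-pointer array and its count into the vm_struct until after the allocation has been checked, so the failure path never acts on a half-initialised area. Below is a minimal userspace sketch of that ordering, not kernel code; the struct demo_area and demo_alloc_pages() names are hypothetical and exist only to illustrate the pattern.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the vm_struct fields touched by the patch. */
struct demo_area {
	void **pages;
	unsigned int nr_pages;
};

static void **demo_alloc_pages(struct demo_area *area, unsigned int nr_pages)
{
	/* Allocate into a local first, mirroring the local 'pages' in the patch. */
	void **pages = calloc(nr_pages, sizeof(*pages));

	if (!pages) {
		/*
		 * Failure path: area->pages and area->nr_pages were never
		 * set, so freeing the area (as the patch does with kfree())
		 * cannot leave stale state behind.
		 */
		free(area);
		return NULL;
	}

	/* Publish into the struct only once the allocation is known good. */
	area->pages = pages;
	area->nr_pages = nr_pages;
	return pages;
}

int main(void)
{
	struct demo_area *area = calloc(1, sizeof(*area));

	if (!area)
		return 1;
	if (!demo_alloc_pages(area, 8))
		return 1;

	printf("nr_pages = %u\n", area->nr_pages);
	free(area->pages);
	free(area);
	return 0;
}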