author		Jan Kiszka <jan.kiszka@web.de>		2006-07-14 03:23:56 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-07-15 00:53:51 -0400
commit		8757d5fa6b75e8ea906baf0309d49b980e7f9bc9
tree		ec3c1a505b5254133cd453c922beb8db226d03e6
parent		e322fedf0c59938716cdfbafbe364a170919aa1a
[PATCH] mm: fix oom roll-back of __vmalloc_area_node
__vunmap() must not rely on area->nr_pages when picking the release method
for area->pages.  It may be too small if __vmalloc_area_node() failed early
due to lack of memory.  Instead, use a flag in struct vm_struct to
differentiate.
Signed-off-by: Jan Kiszka <jan.kiszka@web.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	include/linux/vmalloc.h	| 1 +
-rw-r--r--	mm/vmalloc.c		| 7 ++++---
2 files changed, 5 insertions(+), 3 deletions(-)
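For context, the sketch below condenses the pre-patch logic that this change
corrects.  It is a paraphrase of the 2.6.17-era mm/vmalloc.c, not verbatim
kernel source (the NUMA and error-path details are trimmed): the page-pointer
array is itself vmalloc'ed when it exceeds one page, and the OOM roll-back
shrinks area->nr_pages, which then misleads the size-based heuristic in
__vunmap().

	/* Condensed, paraphrased sketch of the pre-patch code paths. */

	/* In __vmalloc_area_node(): pick an allocator for the pointer array. */
	if (array_size > PAGE_SIZE)
		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
	else
		pages = kmalloc_node(array_size, gfp_mask & ~__GFP_HIGHMEM, node);
	area->pages = pages;

	for (i = 0; i < area->nr_pages; i++) {
		area->pages[i] = alloc_page(gfp_mask);
		if (unlikely(!area->pages[i])) {
			/* OOM roll-back: remember how many pages really
			 * exist so __vunmap() frees only those ... */
			area->nr_pages = i;
			goto fail;	/* fail path calls vfree(area->addr) */
		}
	}

	/* In __vunmap(): the old heuristic re-derives the allocation method
	 * from nr_pages.  After the roll-back above, nr_pages can be small
	 * even though area->pages was vmalloc'ed, so kfree() may be called
	 * on a vmalloc'ed pointer. */
	if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
		vfree(area->pages);
	else
		kfree(area->pages);

Recording the allocator choice explicitly in a VM_VPAGES flag, as the patch
below does, makes __vunmap() immune to any later adjustment of nr_pages.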
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index f6024ab4eff..71b6363caaa 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -11,6 +11,7 @@ struct vm_area_struct;
 #define VM_ALLOC	0x00000002	/* vmalloc() */
 #define VM_MAP		0x00000004	/* vmap()ed pages */
 #define VM_USERMAP	0x00000008	/* suitable for remap_vmalloc_range */
+#define VM_VPAGES	0x00000010	/* buffer for pages was vmalloc'ed */
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 7b450798b45..266162d2ba2 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -340,7 +340,7 @@ void __vunmap(void *addr, int deallocate_pages)
 			__free_page(area->pages[i]);
 		}
 
-		if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
+		if (area->flags & VM_VPAGES)
 			vfree(area->pages);
 		else
 			kfree(area->pages);
@@ -427,9 +427,10 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
-	if (array_size > PAGE_SIZE)
+	if (array_size > PAGE_SIZE) {
 		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
-	else
+		area->flags |= VM_VPAGES;
+	} else
 		pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
 	area->pages = pages;
 	if (!area->pages) {