author     Ingo Molnar <mingo@elte.hu>  2009-01-16 11:46:22 -0500
committer  Ingo Molnar <mingo@elte.hu>  2009-01-16 11:46:22 -0500
commit     5a2dd72abdae75ea2960145e0549635ce4e0be96 (patch)
tree       44dba0119c75679a17215200f92ab23bdde9efc2 /mm/vmalloc.c
parent     efdc64f0c792ea744bcc9203f35b908e66d42f41 (diff)
parent     7cb36b6ccdca03bd87e8faca7fd920643dd1aec7 (diff)

Merge branch 'linus' into irq/genirq
Diffstat (limited to 'mm/vmalloc.c')

 -rw-r--r--  mm/vmalloc.c | 57
 1 file changed, 49 insertions(+), 8 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1ddb77ba3995..75f49d312e8c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -23,6 +23,7 @@
 #include <linux/rbtree.h>
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
+#include <linux/bootmem.h>
 
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
@@ -151,11 +152,12 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
  *
  * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
  */
-static int vmap_page_range(unsigned long addr, unsigned long end,
+static int vmap_page_range(unsigned long start, unsigned long end,
 			   pgprot_t prot, struct page **pages)
 {
 	pgd_t *pgd;
 	unsigned long next;
+	unsigned long addr = start;
 	int err = 0;
 	int nr = 0;
 
@@ -167,7 +169,7 @@ static int vmap_page_range(unsigned long addr, unsigned long end,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
-	flush_cache_vmap(addr, end);
+	flush_cache_vmap(start, end);
 
 	if (unlikely(err))
 		return err;
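
The two hunks above are one fix: the pgd walk advances addr until it compares equal to end, so the old flush_cache_vmap(addr, end) at loop exit described an empty range and flushed nothing. Keeping the original address in start restores the intended whole-range flush. A minimal, hypothetical userspace sketch of the loop-exit behaviour (PAGE_SIZE, flush_range and the loop body are illustrative stand-ins, not kernel code):

/* Hypothetical demo: why flushing [addr, end) after the walk is a no-op. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static void flush_range(unsigned long s, unsigned long e)
{
	printf("flush [%#lx, %#lx): %lu bytes\n", s, e, e - s);
}

int main(void)
{
	unsigned long start = 0x1000, end = 0x5000;
	unsigned long addr = start, next;

	do {
		next = addr + PAGE_SIZE;	/* stand-in for pgd_addr_end() */
		/* ... map this chunk ... */
	} while (addr = next, addr != end);

	flush_range(addr, end);		/* old code: addr == end, empty range */
	flush_range(start, end);	/* fixed code: covers the mapped range */
	return 0;
}

Compiled and run, the first call reports a zero-byte range and the second the full 16 KiB that was walked.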
@@ -380,8 +382,9 @@ found:
 		goto retry;
 	}
 	if (printk_ratelimit())
-		printk(KERN_WARNING "vmap allocation failed: "
-			"use vmalloc=<size> to increase size.\n");
+		printk(KERN_WARNING
+			"vmap allocation for size %lu failed: "
+			"use vmalloc=<size> to increase size.\n", size);
 	return ERR_PTR(-EBUSY);
 }
 
@@ -431,6 +434,27 @@ static void unmap_vmap_area(struct vmap_area *va)
 	vunmap_page_range(va->va_start, va->va_end);
 }
 
+static void vmap_debug_free_range(unsigned long start, unsigned long end)
+{
+	/*
+	 * Unmap page tables and force a TLB flush immediately if
+	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
+	 * bugs similarly to those in linear kernel virtual address
+	 * space after a page has been freed.
+	 *
+	 * All the lazy freeing logic is still retained, in order to
+	 * minimise intrusiveness of this debugging feature.
+	 *
+	 * This is going to be *slow* (linear kernel virtual address
+	 * debugging doesn't do a broadcast TLB flush so it is a lot
+	 * faster).
+	 */
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	vunmap_page_range(start, end);
+	flush_tlb_kernel_range(start, end);
+#endif
+}
+
 /*
  * lazy_max_pages is the maximum amount of virtual address space we gather up
  * before attempting to purge with a TLB flush.
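
A design choice worth noting in the new helper: the #ifdef sits inside the function body, so callers compile unconditionally and the function shrinks to an empty body when CONFIG_DEBUG_PAGEALLOC is off. The debugging value comes from eagerness: the mapping is destroyed and the TLB flushed at free time, so a stale access faults immediately instead of quietly succeeding against a lazily retained mapping. As a loose userspace analogy (hypothetical, not kernel code), eager unmapping is what makes a dangling reference observable:

/* Hypothetical analogy: mincore() sees a range while it is mapped (0)
 * and reports failure (-1, errno ENOMEM) once it is eagerly unmapped,
 * much as a stale vmap access faults after vmap_debug_free_range(). */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	unsigned char vec;
	void *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	printf("mapped:   mincore=%d\n", mincore(p, psz, &vec)); /* 0 */
	munmap(p, psz);				/* eager teardown */
	printf("unmapped: mincore=%d\n", mincore(p, psz, &vec)); /* -1 */
	return 0;
}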
@@ -911,6 +935,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 	BUG_ON(addr & (PAGE_SIZE-1));
 
 	debug_check_no_locks_freed(mem, size);
+	vmap_debug_free_range(addr, addr+size);
 
 	if (likely(count <= VMAP_MAX_ALLOC))
 		vb_free(mem, size);
@@ -959,6 +984,8 @@ EXPORT_SYMBOL(vm_map_ram);
 
 void __init vmalloc_init(void)
 {
+	struct vmap_area *va;
+	struct vm_struct *tmp;
 	int i;
 
 	for_each_possible_cpu(i) {
@@ -971,6 +998,14 @@ void __init vmalloc_init(void)
 		vbq->nr_dirty = 0;
 	}
 
+	/* Import existing vmlist entries. */
+	for (tmp = vmlist; tmp; tmp = tmp->next) {
+		va = alloc_bootmem(sizeof(struct vmap_area));
+		va->flags = tmp->flags | VM_VM_AREA;
+		va->va_start = (unsigned long)tmp->addr;
+		va->va_end = va->va_start + tmp->size;
+		__insert_vmap_area(va);
+	}
 	vmap_initialized = true;
 }
 
@@ -1127,6 +1162,8 @@ struct vm_struct *remove_vm_area(const void *addr)
 	if (va && va->flags & VM_VM_AREA) {
 		struct vm_struct *vm = va->private;
 		struct vm_struct *tmp, **p;
+
+		vmap_debug_free_range(va->va_start, va->va_end);
 		free_unmap_vmap_area(va);
 		vm->size -= PAGE_SIZE;
 
@@ -1374,7 +1411,8 @@ void *vmalloc_user(unsigned long size)
 	struct vm_struct *area;
 	void *ret;
 
-	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+	ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+			     PAGE_KERNEL, -1, __builtin_return_address(0));
 	if (ret) {
 		area = find_vm_area(ret);
 		area->flags |= VM_USERMAP;
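
This hunk and the three that follow share one pattern: the __vmalloc() convenience wrapper is replaced by a direct __vmalloc_node() call passing -1 (no preferred NUMA node) and __builtin_return_address(0). Evaluating the builtin here matters because it yields the return address of the frame it appears in; done inside vmalloc_user() itself, the allocation is attributed to whoever called vmalloc_user(), not to an intermediate wrapper. A hypothetical userspace sketch of that mechanism (GCC/Clang builtin; all other names are illustrative):

/* Hypothetical demo of caller attribution via __builtin_return_address(0). */
#include <stdio.h>

static void *alloc_node(unsigned long size, void *caller)
{
	(void)size;
	printf("allocation attributed to %p\n", caller);	/* stand-in for __vmalloc_node */
	return NULL;
}

static void *alloc_user(unsigned long size)
{
	/* Evaluated in alloc_user's frame: resolves to alloc_user's caller. */
	return alloc_node(size, __builtin_return_address(0));
}

int main(void)
{
	alloc_user(64);		/* prints an address inside main() */
	return 0;
}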
@@ -1419,7 +1457,8 @@ EXPORT_SYMBOL(vmalloc_node);
 
 void *vmalloc_exec(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
+	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+			      -1, __builtin_return_address(0));
 }
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
@@ -1439,7 +1478,8 @@ void *vmalloc_exec(unsigned long size)
  */
 void *vmalloc_32(unsigned long size)
 {
-	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
+	return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
+			      -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32);
 
@@ -1455,7 +1495,8 @@ void *vmalloc_32_user(unsigned long size)
 	struct vm_struct *area;
 	void *ret;
 
-	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
+	ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
+			     -1, __builtin_return_address(0));
 	if (ret) {
 		area = find_vm_area(ret);
 		area->flags |= VM_USERMAP;