Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c | 50
1 file changed, 40 insertions(+), 10 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 7465f22fec0c..c5db9a7264d9 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -14,6 +14,7 @@
 #include <linux/highmem.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/interrupt.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -381,8 +382,9 @@ found:
 		goto retry;
 	}
 	if (printk_ratelimit())
-		printk(KERN_WARNING "vmap allocation failed: "
-			"use vmalloc=<size> to increase size.\n");
+		printk(KERN_WARNING
+			"vmap allocation for size %lu failed: "
+			"use vmalloc=<size> to increase size.\n", size);
 	return ERR_PTR(-EBUSY);
 }
 
@@ -432,6 +434,27 @@ static void unmap_vmap_area(struct vmap_area *va)
 	vunmap_page_range(va->va_start, va->va_end);
 }
 
+static void vmap_debug_free_range(unsigned long start, unsigned long end)
+{
+	/*
+	 * Unmap page tables and force a TLB flush immediately if
+	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
+	 * bugs similarly to those in linear kernel virtual address
+	 * space after a page has been freed.
+	 *
+	 * All the lazy freeing logic is still retained, in order to
+	 * minimise intrusiveness of this debugging feature.
+	 *
+	 * This is going to be *slow* (linear kernel virtual address
+	 * debugging doesn't do a broadcast TLB flush so it is a lot
+	 * faster).
+	 */
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	vunmap_page_range(start, end);
+	flush_tlb_kernel_range(start, end);
+#endif
+}
+
 /*
  * lazy_max_pages is the maximum amount of virtual address space we gather up
  * before attempting to purge with a TLB flush.
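The vmap_debug_free_range() hook added above makes a use-after-free through
a stale vmap mapping fault immediately when CONFIG_DEBUG_PAGEALLOC is
enabled, rather than silently reading through a lazily-retained mapping. A
minimal sketch of the bug class it catches (hypothetical demo code, not
part of the patch; assumes the vm_map_ram()/vm_unmap_ram() API of this
kernel era):

	/* Hypothetical demo, not from the patch. 'pages' holds four
	 * already-allocated struct page pointers. */
	static void vmap_uaf_demo(struct page **pages)
	{
		int *p = vm_map_ram(pages, 4, -1, PAGE_KERNEL);

		vm_unmap_ram(p, 4);	/* now calls vmap_debug_free_range() */

		/*
		 * Use after free: with CONFIG_DEBUG_PAGEALLOC=y the page
		 * tables were unmapped and the TLB flushed above, so this
		 * dereference faults at once. Without the option the lazy
		 * mapping may still be live and the bug goes unnoticed.
		 */
		printk(KERN_INFO "stale read: %d\n", *p);
	}
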
@@ -472,7 +495,7 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
 static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 					int sync, int force_flush)
 {
-	static DEFINE_SPINLOCK(purge_lock);
+	static DEFINE_MUTEX(purge_lock);
 	LIST_HEAD(valist);
 	struct vmap_area *va;
 	int nr = 0;
@@ -483,10 +506,10 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	 * the case that isn't actually used at the moment anyway.
 	 */
 	if (!sync && !force_flush) {
-		if (!spin_trylock(&purge_lock))
+		if (!mutex_trylock(&purge_lock))
 			return;
 	} else
-		spin_lock(&purge_lock);
+		mutex_lock(&purge_lock);
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(va, &vmap_area_list, list) {
@@ -518,7 +541,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 		__free_vmap_area(va);
 		spin_unlock(&vmap_area_lock);
 	}
-	spin_unlock(&purge_lock);
+	mutex_unlock(&purge_lock);
 }
 
 /*
@@ -912,6 +935,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 	BUG_ON(addr & (PAGE_SIZE-1));
 
 	debug_check_no_locks_freed(mem, size);
+	vmap_debug_free_range(addr, addr+size);
 
 	if (likely(count <= VMAP_MAX_ALLOC))
 		vb_free(mem, size);
@@ -1128,6 +1152,8 @@ struct vm_struct *remove_vm_area(const void *addr)
 	if (va && va->flags & VM_VM_AREA) {
 		struct vm_struct *vm = va->private;
 		struct vm_struct *tmp, **p;
+
+		vmap_debug_free_range(va->va_start, va->va_end);
 		free_unmap_vmap_area(va);
 		vm->size -= PAGE_SIZE;
 
@@ -1375,7 +1401,8 @@ void *vmalloc_user(unsigned long size)
 	struct vm_struct *area;
 	void *ret;
 
-	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+	ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+			     PAGE_KERNEL, -1, __builtin_return_address(0));
 	if (ret) {
 		area = find_vm_area(ret);
 		area->flags |= VM_USERMAP;
@@ -1420,7 +1447,8 @@ EXPORT_SYMBOL(vmalloc_node);
 
 void *vmalloc_exec(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
+	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+			      -1, __builtin_return_address(0));
 }
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
@@ -1440,7 +1468,8 @@ void *vmalloc_exec(unsigned long size)
  */
 void *vmalloc_32(unsigned long size)
 {
-	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
+	return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
+			      -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32);
 
@@ -1456,7 +1485,8 @@ void *vmalloc_32_user(unsigned long size)
 	struct vm_struct *area;
 	void *ret;
 
-	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
+	ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
+			     -1, __builtin_return_address(0));
 	if (ret) {
 		area = find_vm_area(ret);
 		area->flags |= VM_USERMAP;
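
A note on the __vmalloc() -> __vmalloc_node() conversions in the last four
hunks: passing __builtin_return_address(0) records the caller of the
exported wrapper (vmalloc_user(), vmalloc_exec(), vmalloc_32(),
vmalloc_32_user()) in the vm_struct, so /proc/vmallocinfo attributes the
area to the real call site instead of to the wrapper itself; the -1 node
argument keeps the previous "no NUMA node preference" behaviour. A sketch
of the same pattern for a future wrapper (vmalloc_foo() is hypothetical,
assuming the __vmalloc_node() signature used above):

	/* Hypothetical wrapper, not part of the patch. */
	void *vmalloc_foo(unsigned long size)
	{
		/*
		 * __builtin_return_address(0) is the address we return to,
		 * i.e. vmalloc_foo()'s caller, which is what shows up in
		 * /proc/vmallocinfo. -1 means no node preference.
		 */
		return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM,
				      PAGE_KERNEL, -1,
				      __builtin_return_address(0));
	}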