author     Nick Piggin <npiggin@suse.de>  2009-01-06 17:39:20 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-01-06 18:59:01 -0500
commit     cd52858c73f9f7df859a08fb08496ca39b9b3d8d (patch)
tree       c8c3d8e641484618f44dcf3b7d55ba4d42c90750 /mm/vmalloc.c
parent     e97a630eb0f5b8b380fd67504de6cedebb489003 (diff)
mm: vmalloc make lazy unmapping configurable
Lazy unmapping in the vmalloc code has now opened the possibility for use
after free bugs to go undetected.  We can catch those by forcing an unmap
and flush (which is going to be slow, but that's what happens).

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
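Not part of the commit: a minimal, hypothetical sketch of the kind of use
after free this change makes immediately visible, assuming a caller of
vm_map_ram()/vm_unmap_ram() (the demo function name and its page setup are
invented for illustration):

#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Hypothetical demo (not in the patch): touch a vm_map_ram() mapping
 * after freeing it.  Without this patch the unmap is lazy, so the
 * stale access may silently succeed via a still-live TLB entry; with
 * CONFIG_DEBUG_PAGEALLOC, vm_unmap_ram() now calls
 * vmap_debug_free_range(), which unmaps and flushes immediately, so
 * the access faults on the spot.
 */
static unsigned long vmap_uaf_demo(struct page **pages, unsigned int count)
{
	/* node -1 = any NUMA node; PAGE_KERNEL = normal kernel mapping */
	void *mem = vm_map_ram(pages, count, -1, PAGE_KERNEL);

	if (!mem)
		return 0;
	vm_unmap_ram(mem, count);
	/* BUG: use after free -- oopses at once with the debug path */
	return *(unsigned long *)mem;
}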
Diffstat (limited to 'mm/vmalloc.c')
 mm/vmalloc.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 78689cba178f..c5db9a7264d9 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -434,6 +434,27 @@ static void unmap_vmap_area(struct vmap_area *va)
 	vunmap_page_range(va->va_start, va->va_end);
 }
 
+static void vmap_debug_free_range(unsigned long start, unsigned long end)
+{
+	/*
+	 * Unmap page tables and force a TLB flush immediately if
+	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
+	 * bugs similarly to those in linear kernel virtual address
+	 * space after a page has been freed.
+	 *
+	 * All the lazy freeing logic is still retained, in order to
+	 * minimise intrusiveness of this debugging feature.
+	 *
+	 * This is going to be *slow* (linear kernel virtual address
+	 * debugging doesn't do a broadcast TLB flush so it is a lot
+	 * faster).
+	 */
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	vunmap_page_range(start, end);
+	flush_tlb_kernel_range(start, end);
+#endif
+}
+
 /*
  * lazy_max_pages is the maximum amount of virtual address space we gather up
  * before attempting to purge with a TLB flush.
@@ -914,6 +935,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 	BUG_ON(addr & (PAGE_SIZE-1));
 
 	debug_check_no_locks_freed(mem, size);
+	vmap_debug_free_range(addr, addr+size);
 
 	if (likely(count <= VMAP_MAX_ALLOC))
 		vb_free(mem, size);
@@ -1130,6 +1152,8 @@ struct vm_struct *remove_vm_area(const void *addr)
 	if (va && va->flags & VM_VM_AREA) {
 		struct vm_struct *vm = va->private;
 		struct vm_struct *tmp, **p;
+
+		vmap_debug_free_range(va->va_start, va->va_end);
 		free_unmap_vmap_area(va);
 		vm->size -= PAGE_SIZE;
 
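For reference (also not part of the commit): the eager unmap-and-flush path
added above compiles to nothing unless the kernel is built with page
allocator debugging enabled, e.g. in .config:

CONFIG_DEBUG_PAGEALLOC=y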