Diffstat (limited to 'mm/vmalloc.c')
 mm/vmalloc.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 68 insertions(+), 9 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1ddb77ba3995..520a75980269 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -23,6 +23,7 @@
 #include <linux/rbtree.h>
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
+#include <linux/bootmem.h>
 
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
@@ -151,11 +152,12 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
  *
  * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
  */
-static int vmap_page_range(unsigned long addr, unsigned long end,
+static int vmap_page_range(unsigned long start, unsigned long end,
 				pgprot_t prot, struct page **pages)
 {
 	pgd_t *pgd;
 	unsigned long next;
+	unsigned long addr = start;
 	int err = 0;
 	int nr = 0;
 
@@ -167,7 +169,7 @@ static int vmap_page_range(unsigned long addr, unsigned long end,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
-	flush_cache_vmap(addr, end);
+	flush_cache_vmap(start, end);
 
 	if (unlikely(err))
 		return err;
@@ -321,6 +323,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	unsigned long addr;
 	int purged = 0;
 
+	BUG_ON(!size);
 	BUG_ON(size & ~PAGE_MASK);
 
 	va = kmalloc_node(sizeof(struct vmap_area),
@@ -332,6 +335,9 @@ retry:
 	addr = ALIGN(vstart, align);
 
 	spin_lock(&vmap_area_lock);
+	if (addr + size - 1 < addr)
+		goto overflow;
+
 	/* XXX: could have a last_hole cache */
 	n = vmap_area_root.rb_node;
 	if (n) {
@@ -363,6 +369,8 @@ retry:
 
 	while (addr + size > first->va_start && addr + size <= vend) {
 		addr = ALIGN(first->va_end + PAGE_SIZE, align);
+		if (addr + size - 1 < addr)
+			goto overflow;
 
 		n = rb_next(&first->rb_node);
 		if (n)
@@ -373,6 +381,7 @@ retry:
 	}
 found:
 	if (addr + size > vend) {
+overflow:
 		spin_unlock(&vmap_area_lock);
 		if (!purged) {
 			purge_vmap_area_lazy();
@@ -380,8 +389,9 @@ found:
 			goto retry;
 		}
 		if (printk_ratelimit())
-			printk(KERN_WARNING "vmap allocation failed: "
-				"use vmalloc=<size> to increase size.\n");
+			printk(KERN_WARNING
+				"vmap allocation for size %lu failed: "
+				"use vmalloc=<size> to increase size.\n", size);
 		return ERR_PTR(-EBUSY);
 	}
 
@@ -431,6 +441,27 @@ static void unmap_vmap_area(struct vmap_area *va)
 	vunmap_page_range(va->va_start, va->va_end);
 }
 
+static void vmap_debug_free_range(unsigned long start, unsigned long end)
+{
+	/*
+	 * Unmap page tables and force a TLB flush immediately if
+	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
+	 * bugs similarly to those in linear kernel virtual address
+	 * space after a page has been freed.
+	 *
+	 * All the lazy freeing logic is still retained, in order to
+	 * minimise intrusiveness of this debugging feature.
+	 *
+	 * This is going to be *slow* (linear kernel virtual address
+	 * debugging doesn't do a broadcast TLB flush so it is a lot
+	 * faster).
+	 */
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	vunmap_page_range(start, end);
+	flush_tlb_kernel_range(start, end);
+#endif
+}
+
 /*
  * lazy_max_pages is the maximum amount of virtual address space we gather up
  * before attempting to purge with a TLB flush.
@@ -474,6 +505,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	static DEFINE_SPINLOCK(purge_lock);
 	LIST_HEAD(valist);
 	struct vmap_area *va;
+	struct vmap_area *n_va;
 	int nr = 0;
 
 	/*
@@ -513,7 +545,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 
 	if (nr) {
 		spin_lock(&vmap_area_lock);
-		list_for_each_entry(va, &valist, purge_list)
+		list_for_each_entry_safe(va, n_va, &valist, purge_list)
 			__free_vmap_area(va);
 		spin_unlock(&vmap_area_lock);
 	}
@@ -911,6 +943,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 	BUG_ON(addr & (PAGE_SIZE-1));
 
 	debug_check_no_locks_freed(mem, size);
+	vmap_debug_free_range(addr, addr+size);
 
 	if (likely(count <= VMAP_MAX_ALLOC))
 		vb_free(mem, size);
@@ -959,6 +992,8 @@ EXPORT_SYMBOL(vm_map_ram);
 
 void __init vmalloc_init(void)
 {
+	struct vmap_area *va;
+	struct vm_struct *tmp;
 	int i;
 
 	for_each_possible_cpu(i) {
@@ -971,12 +1006,22 @@ void __init vmalloc_init(void)
 		vbq->nr_dirty = 0;
 	}
 
+	/* Import existing vmlist entries. */
+	for (tmp = vmlist; tmp; tmp = tmp->next) {
+		va = alloc_bootmem(sizeof(struct vmap_area));
+		va->flags = tmp->flags | VM_VM_AREA;
+		va->va_start = (unsigned long)tmp->addr;
+		va->va_end = va->va_start + tmp->size;
+		__insert_vmap_area(va);
+	}
 	vmap_initialized = true;
 }
 
 void unmap_kernel_range(unsigned long addr, unsigned long size)
 {
 	unsigned long end = addr + size;
+
+	flush_cache_vunmap(addr, end);
 	vunmap_page_range(addr, end);
 	flush_tlb_kernel_range(addr, end);
 }
@@ -1071,6 +1116,14 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
 
+struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
+				unsigned long start, unsigned long end,
+				void *caller)
+{
+	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+				  caller);
+}
+
 /**
  * get_vm_area - reserve a contiguous kernel virtual area
  * @size: size of the area
@@ -1127,6 +1180,8 @@ struct vm_struct *remove_vm_area(const void *addr)
 	if (va && va->flags & VM_VM_AREA) {
 		struct vm_struct *vm = va->private;
 		struct vm_struct *tmp, **p;
+
+		vmap_debug_free_range(va->va_start, va->va_end);
 		free_unmap_vmap_area(va);
 		vm->size -= PAGE_SIZE;
 
@@ -1374,7 +1429,8 @@ void *vmalloc_user(unsigned long size)
 	struct vm_struct *area;
 	void *ret;
 
-	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+	ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+			     PAGE_KERNEL, -1, __builtin_return_address(0));
 	if (ret) {
 		area = find_vm_area(ret);
 		area->flags |= VM_USERMAP;
@@ -1419,7 +1475,8 @@ EXPORT_SYMBOL(vmalloc_node);
 
 void *vmalloc_exec(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
+	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+			      -1, __builtin_return_address(0));
 }
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
@@ -1439,7 +1496,8 @@ void *vmalloc_exec(unsigned long size)
  */
 void *vmalloc_32(unsigned long size)
 {
-	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
+	return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
+			      -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32);
 
@@ -1455,7 +1513,8 @@ void *vmalloc_32_user(unsigned long size)
 	struct vm_struct *area;
 	void *ret;
 
-	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
+	ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
+			     -1, __builtin_return_address(0));
 	if (ret) {
 		area = find_vm_area(ret);
 		area->flags |= VM_USERMAP;
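
A note on the new overflow guards in alloc_vmap_area(): the check "addr + size - 1 < addr" relies on well-defined unsigned wraparound, so it is true exactly when the candidate range would wrap past the top of the address space (size is known to be non-zero thanks to the new BUG_ON(!size)). A minimal standalone sketch of the same check, with illustrative names that are not part of the kernel:

#include <stdint.h>
#include <stdio.h>

/*
 * Return 1 if [addr, addr + size) wraps past the top of the address
 * space. Unsigned arithmetic wraps modulo 2^N, so addr + size - 1
 * compares less than addr exactly when the end of the range wrapped.
 * Assumes size > 0, as alloc_vmap_area() enforces with BUG_ON(!size).
 */
static int range_wraps(uintptr_t addr, uintptr_t size)
{
	return addr + size - 1 < addr;
}

int main(void)
{
	printf("%d\n", range_wraps(0x1000, 0x2000));        /* 0: fits  */
	printf("%d\n", range_wraps(UINTPTR_MAX - 10, 100)); /* 1: wraps */
	return 0;
}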
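
Similarly, the switch to list_for_each_entry_safe() in __purge_vmap_area_lazy() is needed because __free_vmap_area() releases the entry being visited, so the iterator has to cache the next element (n_va) before each body runs. A rough userspace analogue of the same pattern, using a hypothetical node type rather than the kernel's list API:

#include <stdlib.h>

struct node {
	struct node *next;
};

/*
 * Free every node of a singly linked list. The next pointer is saved
 * before free(), which is the reason list_for_each_entry_safe() keeps
 * a second cursor across each iteration.
 */
static void free_all(struct node *head)
{
	while (head) {
		struct node *next = head->next;	/* cache before freeing */
		free(head);
		head = next;
	}
}

int main(void)
{
	struct node *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		if (!n)
			break;
		n->next = head;
		head = n;
	}
	free_all(head);
	return 0;
}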