Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	43
1 file changed, 37 insertions(+), 6 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c5db9a7264d9..11a929872ebd 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -14,7 +14,6 @@
 #include <linux/highmem.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
-#include <linux/mutex.h>
 #include <linux/interrupt.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -24,6 +23,7 @@
 #include <linux/rbtree.h>
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
+#include <linux/bootmem.h>
 
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
@@ -323,6 +323,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	unsigned long addr;
 	int purged = 0;
 
+	BUG_ON(!size);
 	BUG_ON(size & ~PAGE_MASK);
 
 	va = kmalloc_node(sizeof(struct vmap_area),
@@ -334,6 +335,9 @@ retry:
 	addr = ALIGN(vstart, align);
 
 	spin_lock(&vmap_area_lock);
+	if (addr + size - 1 < addr)
+		goto overflow;
+
 	/* XXX: could have a last_hole cache */
 	n = vmap_area_root.rb_node;
 	if (n) {
@@ -365,6 +369,8 @@ retry:
 
 	while (addr + size > first->va_start && addr + size <= vend) {
 		addr = ALIGN(first->va_end + PAGE_SIZE, align);
+		if (addr + size - 1 < addr)
+			goto overflow;
 
 		n = rb_next(&first->rb_node);
 		if (n)
@@ -375,6 +381,7 @@ retry:
 	}
 found:
 	if (addr + size > vend) {
+overflow:
 		spin_unlock(&vmap_area_lock);
 		if (!purged) {
 			purge_vmap_area_lazy();
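
The two overflow checks added above, together with the new BUG_ON(!size), guard alloc_vmap_area() against unsigned wrap-around: if addr + size wraps past the top of the address space, addr + size - 1 comes out smaller than addr, and the search would otherwise loop forever or hand back a bogus range. The same expression would also trip for size == 0, which is exactly the case the new BUG_ON(!size) rules out first. A minimal userland sketch of the test (names are illustrative, not the kernel's):

#include <stdio.h>
#include <limits.h>

/* For size > 0: true exactly when addr + size wraps past the
 * top of the unsigned address space. */
static int range_wraps(unsigned long addr, unsigned long size)
{
	return addr + size - 1 < addr;
}

int main(void)
{
	printf("%d\n", range_wraps(0x1000UL, 0x1000UL));            /* 0: fits  */
	printf("%d\n", range_wraps(ULONG_MAX - 0xfffUL, 0x2000UL)); /* 1: wraps */
	return 0;
}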
@@ -495,9 +502,10 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
 static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 					int sync, int force_flush)
 {
-	static DEFINE_MUTEX(purge_lock);
+	static DEFINE_SPINLOCK(purge_lock);
 	LIST_HEAD(valist);
 	struct vmap_area *va;
+	struct vmap_area *n_va;
 	int nr = 0;
 
 	/*
@@ -506,10 +514,10 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	 * the case that isn't actually used at the moment anyway.
 	 */
 	if (!sync && !force_flush) {
-		if (!mutex_trylock(&purge_lock))
+		if (!spin_trylock(&purge_lock))
 			return;
 	} else
-		mutex_lock(&purge_lock);
+		spin_lock(&purge_lock);
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(va, &vmap_area_list, list) {
@@ -537,11 +545,11 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 
 	if (nr) {
 		spin_lock(&vmap_area_lock);
-		list_for_each_entry(va, &valist, purge_list)
+		list_for_each_entry_safe(va, n_va, &valist, purge_list)
 			__free_vmap_area(va);
 		spin_unlock(&vmap_area_lock);
 	}
-	mutex_unlock(&purge_lock);
+	spin_unlock(&purge_lock);
 }
 
 /*
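
Two independent fixes land in __purge_vmap_area_lazy() above. First, purge_lock becomes a spinlock, presumably so the purge path stays safe in contexts that cannot take a mutex; the trylock early-out for the non-sync, non-flush case is preserved. Second, the freeing loop must use the _safe iterator because __free_vmap_area() destroys the entry being visited, so the next pointer has to be cached before the current node goes away. A minimal userland sketch of that pattern (names are illustrative):

#include <stdlib.h>

struct node {
	struct node *next;
};

/* The list_for_each_entry() vs. list_for_each_entry_safe() distinction
 * boils down to this: read 'next' before free() invalidates 'cur'. */
static void free_all(struct node *head)
{
	struct node *cur, *next;

	for (cur = head; cur; cur = next) {
		next = cur->next;	/* cache before freeing */
		free(cur);
	}
}

int main(void)
{
	struct node *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->next = head;
		head = n;
	}
	free_all(head);
	return 0;
}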
@@ -984,6 +992,8 @@ EXPORT_SYMBOL(vm_map_ram);
 
 void __init vmalloc_init(void)
 {
+	struct vmap_area *va;
+	struct vm_struct *tmp;
 	int i;
 
 	for_each_possible_cpu(i) {
@@ -996,12 +1006,22 @@ void __init vmalloc_init(void)
 		vbq->nr_dirty = 0;
 	}
 
+	/* Import existing vmlist entries. */
+	for (tmp = vmlist; tmp; tmp = tmp->next) {
+		va = alloc_bootmem(sizeof(struct vmap_area));
+		va->flags = tmp->flags | VM_VM_AREA;
+		va->va_start = (unsigned long)tmp->addr;
+		va->va_end = va->va_start + tmp->size;
+		__insert_vmap_area(va);
+	}
 	vmap_initialized = true;
 }
 
 void unmap_kernel_range(unsigned long addr, unsigned long size)
 {
 	unsigned long end = addr + size;
+
+	flush_cache_vunmap(addr, end);
 	vunmap_page_range(addr, end);
 	flush_tlb_kernel_range(addr, end);
 }
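
Two more changes share this hunk. The import loop makes areas created during early boot, which until now lived only on the old vmlist, visible to the new vmap_area tree, so later allocations cannot be handed overlapping ranges; alloc_bootmem() is presumably used because this path runs before the regular allocators are ready. The added flush_cache_vunmap() matters on architectures with virtually-indexed caches, where cache lines for the mapping must be dealt with before the page tables are torn down; elsewhere it compiles away to nothing, roughly along the lines of the usual no-op fallback (a sketch, not copied from any particular header):

#ifndef flush_cache_vunmap
#define flush_cache_vunmap(start, end)	do { } while (0)
#endif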
@@ -1096,6 +1116,14 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
 
+struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
+				unsigned long start, unsigned long end,
+				void *caller)
+{
+	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+				  caller);
+}
+
 /**
  * get_vm_area - reserve a contiguous kernel virtual area
  * @size: size of the area
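
The new __get_vm_area_caller() variant lets a wrapper forward its own caller's address into __get_vm_area_node(), so the resulting area is attributed to the real call site (e.g. in /proc/vmallocinfo) rather than to the wrapper itself. A hypothetical use, where my_map_helper() is illustrative and not part of the patch:

static void *my_map_helper(unsigned long size)
{
	struct vm_struct *area;

	/* Attribute the area to whoever called my_map_helper(). */
	area = __get_vm_area_caller(size, VM_IOREMAP,
				    VMALLOC_START, VMALLOC_END,
				    __builtin_return_address(0));
	return area ? area->addr : NULL;
}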
@@ -1239,6 +1267,7 @@ EXPORT_SYMBOL(vfree);
 void vunmap(const void *addr)
 {
 	BUG_ON(in_interrupt());
+	might_sleep();
 	__vunmap(addr, 0);
 }
 EXPORT_SYMBOL(vunmap);
@@ -1258,6 +1287,8 @@ void *vmap(struct page **pages, unsigned int count,
 {
 	struct vm_struct *area;
 
+	might_sleep();
+
 	if (count > num_physpages)
 		return NULL;
 
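
Finally, the might_sleep() annotations in vunmap() and vmap() document that both functions may block and are only valid in process context; with the kernel's sleep-in-atomic debugging enabled, an atomic-context caller now warns deterministically at the call site rather than misbehaving only when the slow path happens to be taken.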