aboutsummaryrefslogtreecommitdiffstats
path: root/mm/vmalloc.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c  20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c5db9a7264d9..75f49d312e8c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -14,7 +14,6 @@
14#include <linux/highmem.h> 14#include <linux/highmem.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/spinlock.h> 16#include <linux/spinlock.h>
17#include <linux/mutex.h>
18#include <linux/interrupt.h> 17#include <linux/interrupt.h>
19#include <linux/proc_fs.h> 18#include <linux/proc_fs.h>
20#include <linux/seq_file.h> 19#include <linux/seq_file.h>
@@ -24,6 +23,7 @@
24#include <linux/rbtree.h> 23#include <linux/rbtree.h>
25#include <linux/radix-tree.h> 24#include <linux/radix-tree.h>
26#include <linux/rcupdate.h> 25#include <linux/rcupdate.h>
26#include <linux/bootmem.h>
27 27
28#include <asm/atomic.h> 28#include <asm/atomic.h>
29#include <asm/uaccess.h> 29#include <asm/uaccess.h>
@@ -495,7 +495,7 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
495static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end, 495static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
496 int sync, int force_flush) 496 int sync, int force_flush)
497{ 497{
498 static DEFINE_MUTEX(purge_lock); 498 static DEFINE_SPINLOCK(purge_lock);
499 LIST_HEAD(valist); 499 LIST_HEAD(valist);
500 struct vmap_area *va; 500 struct vmap_area *va;
501 int nr = 0; 501 int nr = 0;
@@ -506,10 +506,10 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
506 * the case that isn't actually used at the moment anyway. 506 * the case that isn't actually used at the moment anyway.
507 */ 507 */
508 if (!sync && !force_flush) { 508 if (!sync && !force_flush) {
509 if (!mutex_trylock(&purge_lock)) 509 if (!spin_trylock(&purge_lock))
510 return; 510 return;
511 } else 511 } else
512 mutex_lock(&purge_lock); 512 spin_lock(&purge_lock);
513 513
514 rcu_read_lock(); 514 rcu_read_lock();
515 list_for_each_entry_rcu(va, &vmap_area_list, list) { 515 list_for_each_entry_rcu(va, &vmap_area_list, list) {
@@ -541,7 +541,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
541 __free_vmap_area(va); 541 __free_vmap_area(va);
542 spin_unlock(&vmap_area_lock); 542 spin_unlock(&vmap_area_lock);
543 } 543 }
544 mutex_unlock(&purge_lock); 544 spin_unlock(&purge_lock);
545} 545}
546 546
547/* 547/*
@@ -984,6 +984,8 @@ EXPORT_SYMBOL(vm_map_ram);
984 984
985void __init vmalloc_init(void) 985void __init vmalloc_init(void)
986{ 986{
987 struct vmap_area *va;
988 struct vm_struct *tmp;
987 int i; 989 int i;
988 990
989 for_each_possible_cpu(i) { 991 for_each_possible_cpu(i) {
@@ -996,6 +998,14 @@ void __init vmalloc_init(void)
996 vbq->nr_dirty = 0; 998 vbq->nr_dirty = 0;
997 } 999 }
998 1000
1001 /* Import existing vmlist entries. */
1002 for (tmp = vmlist; tmp; tmp = tmp->next) {
1003 va = alloc_bootmem(sizeof(struct vmap_area));
1004 va->flags = tmp->flags | VM_VM_AREA;
1005 va->va_start = (unsigned long)tmp->addr;
1006 va->va_end = va->va_start + tmp->size;
1007 __insert_vmap_area(va);
1008 }
999 vmap_initialized = true; 1009 vmap_initialized = true;
1000} 1010}
1001 1011