Diffstat (limited to 'mm/vmalloc.c')
 mm/vmalloc.c | 30 ++++++++++++++++++++++------
 1 file changed, 24 insertions(+), 6 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f1cc03bbf6ac..30f826d484f0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -178,7 +178,7 @@ static int vmap_page_range(unsigned long addr, unsigned long end,
 static inline int is_vmalloc_or_module_addr(const void *x)
 {
 	/*
-	 * x86-64 and sparc64 put modules in a special place,
+	 * ARM, x86-64 and sparc64 put modules in a special place,
 	 * and fall back on vmalloc() if that fails. Others
 	 * just put it in the vmalloc space.
 	 */
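For context on the one-word fix above: ARM also reserves a dedicated module area and falls back to vmalloc() when it fills, so it belongs in the list. The body of the function sits below this hunk; in kernels of this vintage it reads roughly as follows (a reconstruction for orientation, not part of the diff):

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	/* These architectures keep modules outside the vmalloc range. */
	unsigned long addr = (unsigned long)x;

	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);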
@@ -324,14 +324,14 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 
 	BUG_ON(size & ~PAGE_MASK);
 
-	addr = ALIGN(vstart, align);
-
 	va = kmalloc_node(sizeof(struct vmap_area),
 			gfp_mask & GFP_RECLAIM_MASK, node);
 	if (unlikely(!va))
 		return ERR_PTR(-ENOMEM);
 
 retry:
+	addr = ALIGN(vstart, align);
+
 	spin_lock(&vmap_area_lock);
 	/* XXX: could have a last_hole cache */
 	n = vmap_area_root.rb_node;
@@ -362,7 +362,7 @@ retry:
 			goto found;
 		}
 
-		while (addr + size >= first->va_start && addr + size <= vend) {
+		while (addr + size > first->va_start && addr + size <= vend) {
 			addr = ALIGN(first->va_end + PAGE_SIZE, align);
 
 			n = rb_next(&first->rb_node);
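Two behavioural fixes meet in alloc_vmap_area(). Recomputing addr under the retry label matters because the retry path, taken after the lazy areas are purged when the first pass finds no hole, must restart the search from vstart rather than from the stale addr the failed pass left behind. And tightening >= to > cures an off-by-one: these are half-open ranges, so a candidate that merely touches the start of the next area does not overlap it, and the old test threw away exactly-fitting holes. A standalone sketch of the interval test (hypothetical helper name, not kernel code):

#include <stdbool.h>

/*
 * Half-open ranges [a, a + a_len) and [b, b + b_len) overlap
 * iff each one starts strictly before the other ends.
 */
static bool ranges_overlap(unsigned long a, unsigned long a_len,
			   unsigned long b, unsigned long b_len)
{
	return a < b + b_len && b < a + a_len;
}

So a candidate with addr + size == first->va_start fits entirely below first, which is precisely the case the old '>=' needlessly skipped.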
@@ -522,13 +522,24 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 }
 
 /*
+ * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
+ * is already purging.
+ */
+static void try_purge_vmap_area_lazy(void)
+{
+	unsigned long start = ULONG_MAX, end = 0;
+
+	__purge_vmap_area_lazy(&start, &end, 0, 0);
+}
+
+/*
  * Kick off a purge of the outstanding lazy areas.
  */
 static void purge_vmap_area_lazy(void)
 {
 	unsigned long start = ULONG_MAX, end = 0;
 
-	__purge_vmap_area_lazy(&start, &end, 0, 0);
+	__purge_vmap_area_lazy(&start, &end, 1, 0);
 }
 
 /*
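The split gives callers a choice: purge_vmap_area_lazy() now insists on a purge (sync == 1), while the new try_purge_vmap_area_lazy() skips it when another CPU is already purging. The machinery lives in __purge_vmap_area_lazy(), outside this hunk; in this version of the file its entry gate looks roughly like the sketch below (reconstructed and abbreviated; the elided part walks the lazy list, flushes the TLB range and frees the areas):

static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);

	/*
	 * sync == 0: trylock and bail out if a purge is already in
	 * flight, so the caller never blocks behind another purger.
	 * sync == 1 (or force_flush): wait for the lock, so the lazy
	 * areas are guaranteed to be gone on return.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	/* ... purge and flush ... */

	spin_unlock(&purge_lock);
}

The hunk below applies the same idea to the free path, which runs often and must not stall behind a purge already in progress.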
@@ -539,7 +550,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 	va->flags |= VM_LAZY_FREE;
 	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
 	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
-		purge_vmap_area_lazy();
+		try_purge_vmap_area_lazy();
 }
 
 static struct vmap_area *find_vmap_area(unsigned long addr)
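For scale: freed areas sit on a lazy list and vmap_lazy_nr counts their pages; a purge is only kicked once that count crosses lazy_max_pages(). The helper is outside the hunk; in this version it scales with the number of online CPUs, roughly (a reconstructed sketch):

static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	/* Allow ~32MB worth of lazily-freed pages, scaled by log2(nr_cpus). */
	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}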
@@ -592,6 +603,8 @@ static void free_unmap_vmap_area_addr(unsigned long addr)
 
 #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
 
+static bool vmap_initialized __read_mostly = false;
+
 struct vmap_block_queue {
 	spinlock_t lock;
 	struct list_head free;
@@ -828,6 +841,9 @@ void vm_unmap_aliases(void)
 	int cpu;
 	int flush = 0;
 
+	if (unlikely(!vmap_initialized))
+		return;
+
 	for_each_possible_cpu(cpu) {
 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
 		struct vmap_block *vb;
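This guard exists because vm_unmap_aliases() can be reached before vmalloc_init() has populated the per-CPU vmap_block_queue structures (early page-attribute flushing is one such path), and walking them at that point would touch uninitialized state. The final hunk sets the flag once init completes. The shape is the usual late-init guard, sketched here with hypothetical names:

static bool subsys_ready __read_mostly;	/* false until init has run */

void subsys_flush(void)			/* may be called very early */
{
	if (unlikely(!subsys_ready))
		return;			/* nothing to flush yet */
	/* ... walk the per-CPU state that init created ... */
}

void __init subsys_init(void)
{
	/* ... set up the per-CPU state ... */
	subsys_ready = true;		/* flushing is safe from here on */
}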
@@ -942,6 +958,8 @@ void __init vmalloc_init(void)
 		INIT_LIST_HEAD(&vbq->dirty);
 		vbq->nr_dirty = 0;
 	}
+
+	vmap_initialized = true;
 }
 
 void unmap_kernel_range(unsigned long addr, unsigned long size)