Diffstat (limited to 'mm/vmalloc.c')
 mm/vmalloc.c | 41 ++++++++++++++++++++++++++++++++---------
 1 file changed, 32 insertions(+), 9 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ba6b0f5f7fac..f3f6e0758562 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -77,7 +77,6 @@ static void vunmap_page_range(unsigned long addr, unsigned long end)
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
-	flush_cache_vunmap(addr, end);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
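
Note: flush_cache_vunmap() has to run while the virtual mapping still
exists, but under lazy unmapping vunmap_page_range() may execute long
after the area was freed, over a range coalesced from many vmap areas.
The flush therefore moves out of vunmap_page_range() and into the call
sites that still know the exact range being released
(free_unmap_vmap_area() and vb_free(), later in this patch). A minimal
sketch of the required ordering on virtually-indexed caches:

	/* 1. flush while the mapping is still present ... */
	flush_cache_vunmap(start, end);
	/* 2. ... only afterwards tear down the page tables (possibly batched) */
	vunmap_page_range(start, end);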
@@ -324,14 +323,14 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 
 	BUG_ON(size & ~PAGE_MASK);
 
-	addr = ALIGN(vstart, align);
-
 	va = kmalloc_node(sizeof(struct vmap_area),
 			gfp_mask & GFP_RECLAIM_MASK, node);
 	if (unlikely(!va))
 		return ERR_PTR(-ENOMEM);
 
 retry:
+	addr = ALIGN(vstart, align);
+
 	spin_lock(&vmap_area_lock);
 	/* XXX: could have a last_hole cache */
 	n = vmap_area_root.rb_node;
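
Note: alloc_vmap_area() jumps back to retry: after purging lazily freed
areas, and at that point addr still holds whatever value the failed scan
ended on. Re-computing addr = ALIGN(vstart, align) under the retry: label
makes every attempt rescan from vstart, so address space reclaimed by the
purge below the old addr is actually found. In outline:

	retry:
		addr = ALIGN(vstart, align);	/* restart the scan from vstart */
		/* ... walk the rbtree looking for a hole of 'size' bytes ... */
		/* on failure: purge the lazy areas once, then goto retry */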
@@ -362,7 +361,7 @@ retry:
 		goto found;
 	}
 
-	while (addr + size >= first->va_start && addr + size <= vend) {
+	while (addr + size > first->va_start && addr + size <= vend) {
 		addr = ALIGN(first->va_end + PAGE_SIZE, align);
 
 		n = rb_next(&first->rb_node);
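
Note: the candidate range [addr, addr + size) is half-open, so a
candidate ending exactly at first->va_start does not overlap first at
all; with the old >= test such exactly-abutting holes were skipped
needlessly. For example, addr = 0x1000 with size = 0x1000 fits below an
area starting at 0x2000. A self-contained statement of the overlap rule
(illustration only, not kernel code):

	#include <stdbool.h>

	/* half-open [addr, addr + size) overlaps [va_start, va_end) iff: */
	static inline bool vmap_overlaps(unsigned long addr, unsigned long size,
					 unsigned long va_start, unsigned long va_end)
	{
		return addr + size > va_start && addr < va_end;	/* '>', not '>=' */
	}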
@@ -522,24 +521,45 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 }
 
 /*
+ * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
+ * is already purging.
+ */
+static void try_purge_vmap_area_lazy(void)
+{
+	unsigned long start = ULONG_MAX, end = 0;
+
+	__purge_vmap_area_lazy(&start, &end, 0, 0);
+}
+
+/*
  * Kick off a purge of the outstanding lazy areas.
  */
 static void purge_vmap_area_lazy(void)
 {
 	unsigned long start = ULONG_MAX, end = 0;
 
-	__purge_vmap_area_lazy(&start, &end, 0, 0);
+	__purge_vmap_area_lazy(&start, &end, 1, 0);
 }
 
 /*
- * Free and unmap a vmap area
+ * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
+ * called for the correct range previously.
  */
-static void free_unmap_vmap_area(struct vmap_area *va)
+static void free_unmap_vmap_area_noflush(struct vmap_area *va)
 {
 	va->flags |= VM_LAZY_FREE;
 	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
 	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
-		purge_vmap_area_lazy();
+		try_purge_vmap_area_lazy();
+}
+
+/*
+ * Free and unmap a vmap area
+ */
+static void free_unmap_vmap_area(struct vmap_area *va)
+{
+	flush_cache_vunmap(va->va_start, va->va_end);
+	free_unmap_vmap_area_noflush(va);
 }
 
 static struct vmap_area *find_vmap_area(unsigned long addr)
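
Note: the split yields two entry points with different flushing and
blocking behaviour. free_unmap_vmap_area() flushes the exact range
itself before deferring the unmap; free_unmap_vmap_area_noflush() is for
callers that have already issued flush_cache_vunmap() for the precise
range, and it only *tries* to purge. Assuming the third argument of
__purge_vmap_area_lazy() is a sync flag (1 = wait for a purge in
progress, 0 = back off if one is already running), the division of
labour looks like this (comment-only sketch, not part of the patch):

	/*
	 * free_unmap_vmap_area(va)           vb_free(addr, size)
	 *   flush_cache_vunmap(va range)       flush_cache_vunmap(exact range)
	 *   free_unmap_vmap_area_noflush(va)   ...eventually free_vmap_block(vb)
	 *     try_purge_vmap_area_lazy()         free_unmap_vmap_area_noflush(vb->va)
	 */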
@@ -723,7 +743,7 @@ static void free_vmap_block(struct vmap_block *vb)
 	spin_unlock(&vmap_block_tree_lock);
 	BUG_ON(tmp != vb);
 
-	free_unmap_vmap_area(vb->va);
+	free_unmap_vmap_area_noflush(vb->va);
 	call_rcu(&vb->rcu_head, rcu_free_vb);
 }
 
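
Note: free_vmap_block() can take the _noflush variant because every
sub-allocation in the block has already been flushed by vb_free() (next
hunk) by the time the block itself is torn down; flushing the whole
block again here would be redundant.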
@@ -785,6 +805,9 @@ static void vb_free(const void *addr, unsigned long size)
 
 	BUG_ON(size & ~PAGE_MASK);
 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+
+	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
+
 	order = get_order(size);
 
 	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
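
Note: vb_free() now flushes the cache for exactly the sub-range being
freed, up front, while the mapping still exists. The later, batched
page-table teardown of the whole block can then skip cache flushing
entirely, which is what makes the _noflush paths above safe.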