Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--   mm/vmalloc.c   55
1 file changed, 43 insertions, 12 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 036536945dd9..1ddb77ba3995 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -77,7 +77,6 @@ static void vunmap_page_range(unsigned long addr, unsigned long end)
 
         BUG_ON(addr >= end);
         pgd = pgd_offset_k(addr);
-        flush_cache_vunmap(addr, end);
         do {
                 next = pgd_addr_end(addr, end);
                 if (pgd_none_or_clear_bad(pgd))
@@ -178,7 +177,7 @@ static int vmap_page_range(unsigned long addr, unsigned long end,
 static inline int is_vmalloc_or_module_addr(const void *x)
 {
         /*
-         * x86-64 and sparc64 put modules in a special place,
+         * ARM, x86-64 and sparc64 put modules in a special place,
          * and fall back on vmalloc() if that fails. Others
          * just put it in the vmalloc space.
          */
@@ -324,14 +323,14 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 
         BUG_ON(size & ~PAGE_MASK);
 
-        addr = ALIGN(vstart, align);
-
         va = kmalloc_node(sizeof(struct vmap_area),
                         gfp_mask & GFP_RECLAIM_MASK, node);
         if (unlikely(!va))
                 return ERR_PTR(-ENOMEM);
 
 retry:
+        addr = ALIGN(vstart, align);
+
         spin_lock(&vmap_area_lock);
         /* XXX: could have a last_hole cache */
         n = vmap_area_root.rb_node;
@@ -362,7 +361,7 @@ retry:
                         goto found;
         }
 
-        while (addr + size >= first->va_start && addr + size <= vend) {
+        while (addr + size > first->va_start && addr + size <= vend) {
                 addr = ALIGN(first->va_end + PAGE_SIZE, align);
 
                 n = rb_next(&first->rb_node);
@@ -522,24 +521,45 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 }
 
 /*
+ * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
+ * is already purging.
+ */
+static void try_purge_vmap_area_lazy(void)
+{
+        unsigned long start = ULONG_MAX, end = 0;
+
+        __purge_vmap_area_lazy(&start, &end, 0, 0);
+}
+
+/*
  * Kick off a purge of the outstanding lazy areas.
  */
 static void purge_vmap_area_lazy(void)
 {
         unsigned long start = ULONG_MAX, end = 0;
 
-        __purge_vmap_area_lazy(&start, &end, 0, 0);
+        __purge_vmap_area_lazy(&start, &end, 1, 0);
 }
 
 /*
- * Free and unmap a vmap area
+ * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
+ * called for the correct range previously.
  */
-static void free_unmap_vmap_area(struct vmap_area *va)
+static void free_unmap_vmap_area_noflush(struct vmap_area *va)
 {
         va->flags |= VM_LAZY_FREE;
         atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
         if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
-                purge_vmap_area_lazy();
+                try_purge_vmap_area_lazy();
+}
+
+/*
+ * Free and unmap a vmap area
+ */
+static void free_unmap_vmap_area(struct vmap_area *va)
+{
+        flush_cache_vunmap(va->va_start, va->va_end);
+        free_unmap_vmap_area_noflush(va);
 }
 
 static struct vmap_area *find_vmap_area(unsigned long addr)
@@ -592,6 +612,8 @@ static void free_unmap_vmap_area_addr(unsigned long addr)
 
 #define VMAP_BLOCK_SIZE         (VMAP_BBMAP_BITS * PAGE_SIZE)
 
+static bool vmap_initialized __read_mostly = false;
+
 struct vmap_block_queue {
         spinlock_t lock;
         struct list_head free;
@@ -721,7 +743,7 @@ static void free_vmap_block(struct vmap_block *vb)
         spin_unlock(&vmap_block_tree_lock);
         BUG_ON(tmp != vb);
 
-        free_unmap_vmap_area(vb->va);
+        free_unmap_vmap_area_noflush(vb->va);
         call_rcu(&vb->rcu_head, rcu_free_vb);
 }
 
@@ -783,6 +805,9 @@ static void vb_free(const void *addr, unsigned long size)
 
         BUG_ON(size & ~PAGE_MASK);
         BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+
+        flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
+
         order = get_order(size);
 
         offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
@@ -828,6 +853,9 @@ void vm_unmap_aliases(void)
         int cpu;
         int flush = 0;
 
+        if (unlikely(!vmap_initialized))
+                return;
+
         for_each_possible_cpu(cpu) {
                 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
                 struct vmap_block *vb;
@@ -897,7 +925,8 @@ EXPORT_SYMBOL(vm_unmap_ram);
  * @count: number of pages
  * @node: prefer to allocate data structures on this node
  * @prot: memory protection to use. PAGE_KERNEL for regular RAM
- * @returns: a pointer to the address that has been mapped, or NULL on failure
+ *
+ * Returns: a pointer to the address that has been mapped, or %NULL on failure
  */
 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 {
@@ -941,6 +970,8 @@ void __init vmalloc_init(void)
                 INIT_LIST_HEAD(&vbq->dirty);
                 vbq->nr_dirty = 0;
         }
+
+        vmap_initialized = true;
 }
 
 void unmap_kernel_range(unsigned long addr, unsigned long size)
@@ -1686,7 +1717,7 @@ static int s_show(struct seq_file *m, void *p)
                         v->addr, v->addr + v->size, v->size);
 
         if (v->caller) {
-                char buff[2 * KSYM_NAME_LEN];
+                char buff[KSYM_SYMBOL_LEN];
 
                 seq_putc(m, ' ');
                 sprint_symbol(buff, (unsigned long)v->caller);