Diffstat (limited to 'mm/vmalloc.c')
 -rw-r--r--  mm/vmalloc.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 036536945dd9..ba6b0f5f7fac 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -178,7 +178,7 @@ static int vmap_page_range(unsigned long addr, unsigned long end,
 static inline int is_vmalloc_or_module_addr(const void *x)
 {
 	/*
-	 * x86-64 and sparc64 put modules in a special place,
+	 * ARM, x86-64 and sparc64 put modules in a special place,
 	 * and fall back on vmalloc() if that fails. Others
 	 * just put it in the vmalloc space.
 	 */
@@ -592,6 +592,8 @@ static void free_unmap_vmap_area_addr(unsigned long addr)
 
 #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
 
+static bool vmap_initialized __read_mostly = false;
+
 struct vmap_block_queue {
 	spinlock_t lock;
 	struct list_head free;
@@ -828,6 +830,9 @@ void vm_unmap_aliases(void)
 	int cpu;
 	int flush = 0;
 
+	if (unlikely(!vmap_initialized))
+		return;
+
 	for_each_possible_cpu(cpu) {
 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
 		struct vmap_block *vb;
@@ -897,7 +902,8 @@ EXPORT_SYMBOL(vm_unmap_ram);
  * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
- * @returns: a pointer to the address that has been mapped, or NULL on failure
+ *
+ * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 {
@@ -941,6 +947,8 @@ void __init vmalloc_init(void)
 		INIT_LIST_HEAD(&vbq->dirty);
 		vbq->nr_dirty = 0;
 	}
+
+	vmap_initialized = true;
 }
 
 void unmap_kernel_range(unsigned long addr, unsigned long size)
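Taken together, these hunks make vm_unmap_aliases() safe to call before vmalloc_init() has run: the new vmap_initialized flag starts out false, the flush path returns early while it is still false, and vmalloc_init() flips it to true once the per-CPU block queues are set up. Below is a minimal, stand-alone sketch of that init-flag guard idiom in plain user-space C; the names and the printed messages are illustrative only, not the kernel code itself.

/*
 * Sketch of the guard pattern added by the patch: a call that arrives
 * before the subsystem is initialised becomes a harmless no-op.
 * Illustrative names only; this is not the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>

static bool vmap_initialized = false;   /* mirrors the new __read_mostly flag */

static void vm_unmap_aliases_sketch(void)
{
	if (!vmap_initialized)          /* the early return added to vm_unmap_aliases() */
		return;
	puts("flushing per-CPU vmap blocks");
}

static void vmalloc_init_sketch(void)
{
	/* ... per-CPU vmap_block_queue setup would happen here ... */
	vmap_initialized = true;        /* only from this point on does the flush do work */
}

int main(void)
{
	vm_unmap_aliases_sketch();      /* before init: returns immediately */
	vmalloc_init_sketch();
	vm_unmap_aliases_sketch();      /* after init: performs the flush */
	return 0;
}

In the kernel the flag is additionally marked __read_mostly, which only affects data placement (grouping rarely written variables to reduce cache-line contention); the control flow is the early return shown above.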
