author	David Vrabel <david.vrabel@csr.com>	2008-11-19 09:48:07 -0500
committer	David Vrabel <david.vrabel@csr.com>	2008-11-19 09:48:07 -0500
commit	dba0a918722ee0f0ba3442575e4448c3ab622be4 (patch)
tree	fdb466cf09e7916135098d651b18924b2fe9ba5f /mm/vmalloc.c
parent	0996e6382482ce9014787693d3884e9468153a5c (diff)
parent	7f0f598a0069d1ab072375965a4b69137233169c (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-upstream
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 036536945dd9..ba6b0f5f7fac 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -178,7 +178,7 @@ static int vmap_page_range(unsigned long addr, unsigned long end,
 static inline int is_vmalloc_or_module_addr(const void *x)
 {
 	/*
-	 * x86-64 and sparc64 put modules in a special place,
+	 * ARM, x86-64 and sparc64 put modules in a special place,
 	 * and fall back on vmalloc() if that fails. Others
 	 * just put it in the vmalloc space.
 	 */
@@ -592,6 +592,8 @@ static void free_unmap_vmap_area_addr(unsigned long addr)
 
 #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
 
+static bool vmap_initialized __read_mostly = false;
+
 struct vmap_block_queue {
 	spinlock_t lock;
 	struct list_head free;
@@ -828,6 +830,9 @@ void vm_unmap_aliases(void)
 	int cpu;
 	int flush = 0;
 
+	if (unlikely(!vmap_initialized))
+		return;
+
 	for_each_possible_cpu(cpu) {
 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
 		struct vmap_block *vb;
@@ -897,7 +902,8 @@ EXPORT_SYMBOL(vm_unmap_ram);
  * @count: number of pages
  * @node: prefer to allocate data structures on this node
  * @prot: memory protection to use. PAGE_KERNEL for regular RAM
- * @returns: a pointer to the address that has been mapped, or NULL on failure
+ *
+ * Returns: a pointer to the address that has been mapped, or %NULL on failure
  */
 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 {
@@ -941,6 +947,8 @@ void __init vmalloc_init(void)
 		INIT_LIST_HEAD(&vbq->dirty);
 		vbq->nr_dirty = 0;
 	}
+
+	vmap_initialized = true;
 }
 
 void unmap_kernel_range(unsigned long addr, unsigned long size)
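For context, the substantive mm/vmalloc.c change merged here is the vmap_initialized guard: the new early return suggests vm_unmap_aliases() can be called before vmalloc_init() has set up the per-CPU vmap block queues, so it now does nothing until initialization has completed. Below is a minimal, self-contained sketch of that same initialized-flag pattern; it is not taken from the kernel, and the subsys_* and do_flush_work names are hypothetical stand-ins.

/*
 * Illustrative sketch only, not part of the patch above: a function that
 * may be reached very early simply returns until the subsystem's setup
 * routine has marked itself ready.
 */
#include <stdbool.h>
#include <stdio.h>

static bool subsys_initialized = false;

/* Placeholder for the real work done once per-CPU state exists. */
static void do_flush_work(void)
{
	printf("flushing lazily unmapped aliases\n");
}

/* Safe to call at any time, even before subsys_init(). */
static void subsys_flush(void)
{
	if (!subsys_initialized)
		return;		/* nothing set up yet, nothing to flush */

	do_flush_work();
}

static void subsys_init(void)
{
	/* ... set up queues, locks, etc. ... */
	subsys_initialized = true;
}

int main(void)
{
	subsys_flush();		/* early call: returns without touching state */
	subsys_init();
	subsys_flush();		/* now performs the real work */
	return 0;
}

The flag is written once at the end of setup and only read afterwards, which is why the kernel patch marks vmap_initialized as __read_mostly.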