aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid Rientjes <rientjes@google.com>2011-01-13 18:46:02 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2011-01-13 20:32:34 -0500
commitd0a21265dfb5fa8ae54e90d0fb6d1c215b10a28a (patch)
treea3bf2c96ad8e180f32a52e208667a40bb972275b
parentec3f64fc9c196a304c4b7db3e1ff56d640628509 (diff)
mm: unify module_alloc code for vmalloc
Four architectures (arm, mips, sparc, x86) use __vmalloc_area() for
module_init().  Much of the code is duplicated and can be generalized in a
globally accessible function, __vmalloc_node_range().

__vmalloc_node() now calls into __vmalloc_node_range() with a range of
[VMALLOC_START, VMALLOC_END) for functionally equivalent behavior.

Each architecture may then use __vmalloc_node_range() directly to remove the
duplication of code.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--arch/arm/kernel/module.c14
-rw-r--r--arch/mips/kernel/module.c14
-rw-r--r--arch/sparc/kernel/module.c14
-rw-r--r--arch/x86/kernel/module.c17
-rw-r--r--include/linux/vmalloc.h5
-rw-r--r--mm/vmalloc.c50
6 files changed, 46 insertions(+), 68 deletions(-)
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 0c1bb68ff4a8..2cfe8161b478 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -38,17 +38,9 @@
 #ifdef CONFIG_MMU
 void *module_alloc(unsigned long size)
 {
-	struct vm_struct *area;
-
-	size = PAGE_ALIGN(size);
-	if (!size)
-		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC);
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
+				__builtin_return_address(0));
 }
 #else /* CONFIG_MMU */
 void *module_alloc(unsigned long size)
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 6f51dda87fce..d87a72e9fac7 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -46,17 +46,9 @@ static DEFINE_SPINLOCK(dbe_lock);
 void *module_alloc(unsigned long size)
 {
 #ifdef MODULE_START
-	struct vm_struct *area;
-
-	size = PAGE_ALIGN(size);
-	if (!size)
-		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
+				GFP_KERNEL, PAGE_KERNEL, -1,
+				__builtin_return_address(0));
 #else
 	if (size == 0)
 		return NULL;
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index ee3c7dde8d9f..8d348c474a2f 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -23,17 +23,11 @@
 
 static void *module_map(unsigned long size)
 {
-	struct vm_struct *area;
-
-	size = PAGE_ALIGN(size);
-	if (!size || size > MODULES_LEN)
+	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				GFP_KERNEL, PAGE_KERNEL, -1,
+				__builtin_return_address(0));
 }
 
 static char *dot2underscore(char *name)
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 8f2956091735..ab23f1ad4bf1 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -37,20 +37,11 @@
 
 void *module_alloc(unsigned long size)
 {
-	struct vm_struct *area;
-
-	if (!size)
-		return NULL;
-	size = PAGE_ALIGN(size);
-	if (size > MODULES_LEN)
+	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
-			PAGE_KERNEL_EXEC);
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+				-1, __builtin_return_address(0));
 }
 
 /* Free memory returned from module_alloc */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index c7348b8d0a81..4ed6fcd6b726 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -59,8 +59,9 @@ extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
 extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
-extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
-				pgprot_t prot);
+extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+			unsigned long start, unsigned long end, gfp_t gfp_mask,
+			pgprot_t prot, int node, void *caller);
 extern void vfree(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f67546636322..284346ee0e91 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1530,25 +1530,12 @@ fail:
 	return NULL;
 }
 
-void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
-{
-	void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1,
-					__builtin_return_address(0));
-
-	/*
-	 * A ref_count = 3 is needed because the vm_struct and vmap_area
-	 * structures allocated in the __get_vm_area_node() function contain
-	 * references to the virtual address of the vmalloc'ed block.
-	 */
-	kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask);
-
-	return addr;
-}
-
 /**
- * __vmalloc_node - allocate virtually contiguous memory
+ * __vmalloc_node_range - allocate virtually contiguous memory
  * @size:	allocation size
  * @align:	desired alignment
+ * @start:	vm area range start
+ * @end:	vm area range end
  * @gfp_mask:	flags for the page level allocator
  * @prot:	protection mask for the allocated pages
  * @node:	node to use for allocation or -1
@@ -1558,9 +1545,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
-static void *__vmalloc_node(unsigned long size, unsigned long align,
-			    gfp_t gfp_mask, pgprot_t prot,
-			    int node, void *caller)
+void *__vmalloc_node_range(unsigned long size, unsigned long align,
+			unsigned long start, unsigned long end, gfp_t gfp_mask,
+			pgprot_t prot, int node, void *caller)
 {
 	struct vm_struct *area;
 	void *addr;
@@ -1570,8 +1557,8 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
 		return NULL;
 
-	area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
-				  VMALLOC_END, node, gfp_mask, caller);
+	area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
+				  gfp_mask, caller);
 
 	if (!area)
 		return NULL;
@@ -1588,6 +1575,27 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 	return addr;
 }
 
+/**
+ * __vmalloc_node - allocate virtually contiguous memory
+ * @size:	allocation size
+ * @align:	desired alignment
+ * @gfp_mask:	flags for the page level allocator
+ * @prot:	protection mask for the allocated pages
+ * @node:	node to use for allocation or -1
+ * @caller:	caller's return address
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator with @gfp_mask flags.  Map them into contiguous
+ * kernel virtual space, using a pagetable protection of @prot.
+ */
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+			    gfp_t gfp_mask, pgprot_t prot,
+			    int node, void *caller)
+{
+	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
+				    gfp_mask, prot, node, caller);
+}
+
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
 	return __vmalloc_node(size, 1, gfp_mask, prot, -1,