author:    David Rientjes <rientjes@google.com>  2011-01-13 18:46:02 -0500
committer: Linus Torvalds <torvalds@linux-foundation.org>  2011-01-13 20:32:34 -0500
commit:    d0a21265dfb5fa8ae54e90d0fb6d1c215b10a28a
tree:      a3bf2c96ad8e180f32a52e208667a40bb972275b  /mm/vmalloc.c
parent:    ec3f64fc9c196a304c4b7db3e1ff56d640628509
mm: unify module_alloc code for vmalloc
Four architectures (arm, mips, sparc, x86) use __vmalloc_area() for
module_alloc().  Much of the code is duplicated and can be generalized into
a globally accessible function, __vmalloc_node_range().

__vmalloc_node() now calls into __vmalloc_node_range() with a range of
[VMALLOC_START, VMALLOC_END) for functionally equivalent behavior.

Each architecture may then use __vmalloc_node_range() directly to remove
the duplication of code.
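As an illustration only (not part of this patch), a minimal sketch of what such
an architecture-side caller might look like after the conversion; MODULES_VADDR,
MODULES_END, and the gfp/prot flags below are assumptions standing in for
whatever bounds and flags each architecture actually passes:

/*
 * Hypothetical module_alloc() built on the new helper.  The range
 * MODULES_VADDR..MODULES_END and the GFP/protection flags are
 * placeholders, not taken from this commit.
 */
void *module_alloc(unsigned long size)
{
	if (!size)
		return NULL;
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
				    GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
				    -1, __builtin_return_address(0));
}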
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c  50
1 file changed, 29 insertions, 21 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f67546636322..284346ee0e91 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1530,25 +1530,12 @@ fail:
 	return NULL;
 }
 
-void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
-{
-	void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1,
-					__builtin_return_address(0));
-
-	/*
-	 * A ref_count = 3 is needed because the vm_struct and vmap_area
-	 * structures allocated in the __get_vm_area_node() function contain
-	 * references to the virtual address of the vmalloc'ed block.
-	 */
-	kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask);
-
-	return addr;
-}
-
 /**
- * __vmalloc_node - allocate virtually contiguous memory
+ * __vmalloc_node_range - allocate virtually contiguous memory
  * @size:	allocation size
  * @align:	desired alignment
+ * @start:	vm area range start
+ * @end:	vm area range end
  * @gfp_mask:	flags for the page level allocator
  * @prot:	protection mask for the allocated pages
  * @node:	node to use for allocation or -1
@@ -1558,9 +1545,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  * allocator with @gfp_mask flags.  Map them into contiguous
  * kernel virtual space, using a pagetable protection of @prot.
  */
-static void *__vmalloc_node(unsigned long size, unsigned long align,
-			    gfp_t gfp_mask, pgprot_t prot,
-			    int node, void *caller)
+void *__vmalloc_node_range(unsigned long size, unsigned long align,
+			unsigned long start, unsigned long end, gfp_t gfp_mask,
+			pgprot_t prot, int node, void *caller)
 {
 	struct vm_struct *area;
 	void *addr;
@@ -1570,8 +1557,8 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
 		return NULL;
 
-	area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
-				  VMALLOC_END, node, gfp_mask, caller);
+	area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
+				  gfp_mask, caller);
 
 	if (!area)
 		return NULL;
@@ -1588,6 +1575,27 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 	return addr;
 }
 
+/**
+ * __vmalloc_node - allocate virtually contiguous memory
+ * @size:	allocation size
+ * @align:	desired alignment
+ * @gfp_mask:	flags for the page level allocator
+ * @prot:	protection mask for the allocated pages
+ * @node:	node to use for allocation or -1
+ * @caller:	caller's return address
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator with @gfp_mask flags.  Map them into contiguous
+ * kernel virtual space, using a pagetable protection of @prot.
+ */
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+			    gfp_t gfp_mask, pgprot_t prot,
+			    int node, void *caller)
+{
+	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
+				    gfp_mask, prot, node, caller);
+}
+
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
 	return __vmalloc_node(size, 1, gfp_mask, prot, -1,