author	Dave Hansen <dave@linux.vnet.ibm.com>	2008-12-09 03:21:36 -0500
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-01-08 00:25:09 -0500
commit	893473df78b4407c9ab75cb55479409795953b01 (patch)
tree	f58da72162345985841badce4ede52cc98f69e02 /arch/powerpc/mm/numa.c
parent	0be210fd664b07531cb238bafb453a2a54c2a7a8 (diff)
powerpc/mm: Cleanup careful_allocation(): consolidate memset()
Both users of careful_allocation() immediately memset() the result. So, just do it in one place.

Also give careful_allocation() a 'z' prefix to bring it in line with kzmalloc() and friends.

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
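The cleanup follows the familiar zeroing-allocator pattern: instead of each caller pairing the allocation with its own memset(), the wrapper zeroes the buffer itself, the way kzalloc() wraps kmalloc(). Below is a minimal standalone C sketch of that idea with illustrative names only; it is not the kernel code, which allocates from bootmem/LMB rather than malloc().

/*
 * Standalone sketch (not kernel code) of the pattern this patch applies:
 * fold the memset() that every caller performed into the allocator itself.
 * Names are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Before: every caller allocated and then zeroed the result itself. */
static void *careful_allocation_demo(size_t size)
{
	return malloc(size);	/* stand-in for the bootmem/LMB allocation */
}

/* After: the 'z' variant zeroes in one place, so callers cannot forget. */
static void *careful_zallocation_demo(size_t size)
{
	void *ret = careful_allocation_demo(size);

	if (ret)
		memset(ret, 0, size);	/* the consolidated memset() */
	return ret;
}

int main(void)
{
	size_t size = 64;
	unsigned char *buf = careful_zallocation_demo(size);

	if (!buf)
		return 1;
	/* Every byte is already zeroed; no memset() needed at the call site. */
	printf("first byte after zallocation: %u\n", (unsigned int)buf[0]);
	free(buf);
	return 0;
}

The trade-off is a possibly redundant memset() when a caller would fully overwrite the buffer anyway; here both existing callers zeroed the result unconditionally, so nothing is lost by consolidating it.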
Diffstat (limited to 'arch/powerpc/mm/numa.c')
-rw-r--r--	arch/powerpc/mm/numa.c	11
1 file changed, 5 insertions, 6 deletions
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 9ec9939f9fb0..7393bd76d698 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -824,7 +824,7 @@ static void __init dump_numa_memory_topology(void)
  *
  * Returns the virtual address of the memory.
  */
-static void __init *careful_allocation(int nid, unsigned long size,
+static void __init *careful_zallocation(int nid, unsigned long size,
 				       unsigned long align,
 				       unsigned long end_pfn)
 {
@@ -864,6 +864,7 @@ static void __init *careful_allocation(int nid, unsigned long size,
 		dbg("alloc_bootmem %p %lx\n", ret, size);
 	}
 
+	memset(ret, 0, size);
 	return ret;
 }
 
@@ -971,10 +972,9 @@ void __init do_init_bootmem(void)
 		 * previous nodes' bootmem to be initialized and have
 		 * all reserved areas marked.
 		 */
-		NODE_DATA(nid) = careful_allocation(nid,
+		NODE_DATA(nid) = careful_zallocation(nid,
 					sizeof(struct pglist_data),
 					SMP_CACHE_BYTES, end_pfn);
-		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
 		dbg("node %d\n", nid);
 		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
@@ -990,10 +990,9 @@ void __init do_init_bootmem(void)
 		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
 
 		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-		bootmem_vaddr = careful_allocation(nid,
+		bootmem_vaddr = careful_zallocation(nid,
 					bootmap_pages << PAGE_SHIFT,
 					PAGE_SIZE, end_pfn);
-		memset(bootmem_vaddr, 0, bootmap_pages << PAGE_SHIFT);
 
 		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);
 
@@ -1004,7 +1003,7 @@ void __init do_init_bootmem(void)
 		free_bootmem_with_active_regions(nid, end_pfn);
 		/*
 		 * Be very careful about moving this around.  Future
-		 * calls to careful_allocation() depend on this getting
+		 * calls to careful_zallocation() depend on this getting
 		 * done correctly.
 		 */
 		mark_reserved_regions_for_nid(nid);