author		Stephen Rothwell <sfr@canb.auug.org.au>	2007-06-08 16:46:51 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-06-08 20:23:33 -0400
commit		193faea9280a809cc30e81d7e503e01b1d7b7042 (patch)
tree		d4b80d4795ac1e708b8a13d45d571383617b761c /mm/sparse.c
parent		4249e08e92647b406422553bfb16276b2bf849aa (diff)
Move three functions that are only needed for CONFIG_MEMORY_HOTPLUG
into the appropriate #ifdef.

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
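In effect, the patch hoists sparse_init() above the CONFIG_MEMORY_HOTPLUG guard and
pulls the guard up so that the hot-add-only section-memmap helpers sit inside it. A
condensed, illustrative skeleton of the resulting mm/sparse.c layout (function bodies
elided; only the helpers visible in this diff are named):

	/* Always built for SPARSEMEM: records a mem_map for every present section. */
	void __init sparse_init(void)
	{
		/* ...walks NR_MEM_SECTIONS and calls sparse_init_one_section()... */
	}

	#ifdef CONFIG_MEMORY_HOTPLUG
	/* Compiled only when sections can be added at runtime. */
	static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
	{
		/* ... */
	}

	static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
	{
		/* ... */
	}

	/* ...the rest of the hot-add code already guarded by this #ifdef... */
	#endif /* CONFIG_MEMORY_HOTPLUG */

Both helpers are static and, per the changelog, are needed only for memory hotplug, so
builds without CONFIG_MEMORY_HOTPLUG would otherwise carry them as unused definitions.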
Diffstat (limited to 'mm/sparse.c')
-rw-r--r--	mm/sparse.c	42
1 files changed, 21 insertions, 21 deletions
diff --git a/mm/sparse.c b/mm/sparse.c
index 545e4d3afcdf..e03b39f3540f 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -240,6 +240,27 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 	return NULL;
 }
 
+/*
+ * Allocate the accumulated non-linear sections, allocate a mem_map
+ * for each and record the physical to section mapping.
+ */
+void __init sparse_init(void)
+{
+	unsigned long pnum;
+	struct page *map;
+
+	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+		if (!valid_section_nr(pnum))
+			continue;
+
+		map = sparse_early_mem_map_alloc(pnum);
+		if (!map)
+			continue;
+		sparse_init_one_section(__nr_to_section(pnum), pnum, map);
+	}
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
 static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
 {
 	struct page *page, *ret;
@@ -280,27 +301,6 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 }
 
 /*
- * Allocate the accumulated non-linear sections, allocate a mem_map
- * for each and record the physical to section mapping.
- */
-void __init sparse_init(void)
-{
-	unsigned long pnum;
-	struct page *map;
-
-	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
-		if (!valid_section_nr(pnum))
-			continue;
-
-		map = sparse_early_mem_map_alloc(pnum);
-		if (!map)
-			continue;
-		sparse_init_one_section(__nr_to_section(pnum), pnum, map);
-	}
-}
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-/*
  * returns the number of sections whose mem_maps were properly
  * set. If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.