author    Wanpeng Li <liwanp@linux.vnet.ibm.com>          2013-09-11 17:22:38 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-09-11 18:58:01 -0400
commit    187320932dcece9c4b93f38f56d1f888bd5c325f (patch)
tree      c4a6217d56fff3fffa58941a1a151d259d8e4d97 /mm/sparse.c
parent    6e543d5780e36ff5ee56c44d7e2e30db3457a7ed (diff)
mm/sparse: introduce alloc_usemap_and_memmap
After commit 9bdac9142407 ("sparsemem: Put mem map for one node together."),
the vmemmap for one node is allocated together, and its logic is similar to
the memory allocation for pageblock flags.  This patch introduces
alloc_usemap_and_memmap() to extract the common logic of memory allocation
for pageblock flags and vmemmap.

Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Fengguang Wu <fengguang.wu@intel.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/sparse.c')
-rw-r--r--	mm/sparse.c	133
1 file changed, 57 insertions(+), 76 deletions(-)
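The refactoring described in the commit message boils down to one generic walker that scans all memory sections, groups consecutive present sections belonging to the same node, and invokes a caller-supplied callback once per node with an opaque data pointer. The following stand-alone C program is only an editor's sketch of that pattern, not kernel code: N_SECTIONS, is_present(), section_nid(), for_each_node_range() and report_range() are made-up stand-ins for NR_MEM_SECTIONS, present_section_nr(), sparse_early_nid(), alloc_usemap_and_memmap() and the per-node allocation callbacks.

/*
 * Editor's illustration only -- NOT kernel code.  A minimal userspace
 * sketch of the pattern the patch factors out: walk all sections, group
 * consecutive present sections that live on the same node, and hand each
 * per-node range to a caller-supplied callback together with an opaque
 * data pointer.  All names here are hypothetical stand-ins.
 */
#include <stdio.h>

#define N_SECTIONS 16UL

/* Hypothetical stand-ins for present_section_nr() / sparse_early_nid(). */
static int is_present(unsigned long pnum) { return pnum % 3 != 0; }
static int section_nid(unsigned long pnum) { return pnum < 8 ? 0 : 1; }

/* Generic walker: invokes alloc_func once per run of same-node sections. */
static void for_each_node_range(void (*alloc_func)(void *, unsigned long,
						   unsigned long,
						   unsigned long, int),
				void *data)
{
	unsigned long pnum, pnum_begin = 0, count;
	int nid_begin = 0;

	/* Find the first present section and remember its node. */
	for (pnum = 0; pnum < N_SECTIONS; pnum++) {
		if (!is_present(pnum))
			continue;
		nid_begin = section_nid(pnum);
		pnum_begin = pnum;
		break;
	}
	count = 1;
	/* Extend the current run until the node changes, then call back. */
	for (pnum = pnum_begin + 1; pnum < N_SECTIONS; pnum++) {
		int nid;

		if (!is_present(pnum))
			continue;
		nid = section_nid(pnum);
		if (nid == nid_begin) {
			count++;
			continue;
		}
		alloc_func(data, pnum_begin, pnum, count, nid_begin);
		nid_begin = nid;
		pnum_begin = pnum;
		count = 1;
	}
	/* Last run extends to the end of the section space. */
	alloc_func(data, pnum_begin, N_SECTIONS, count, nid_begin);
}

/* Example callback: just reports the per-node range it was handed. */
static void report_range(void *data, unsigned long begin, unsigned long end,
			 unsigned long count, int nid)
{
	printf("%s: node %d, sections [%lu, %lu), %lu present\n",
	       (const char *)data, nid, begin, end, count);
}

int main(void)
{
	char tag[] = "usemap demo";

	for_each_node_range(report_range, tag);
	return 0;
}

Passing the destination array through the opaque data argument is what lets a single walker drive both the usemap allocation and, under CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER, the mem_map allocation in sparse_init(), as the diff below shows.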
diff --git a/mm/sparse.c b/mm/sparse.c
index 308d50331bc3..4ac1d7ef548f 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -339,13 +339,14 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 
-static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
+static void __init sparse_early_usemaps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
 {
 	void *usemap;
 	unsigned long pnum;
+	unsigned long **usemap_map = (unsigned long **)data;
 	int size = usemap_size();
 
 	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
@@ -430,11 +431,12 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
+static void __init sparse_early_mem_maps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
 {
+	struct page **map_map = (struct page **)data;
 	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
					 map_count, nodeid);
 }
@@ -460,6 +462,55 @@ void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
 {
 }
 
+/**
+ * alloc_usemap_and_memmap - memory alloction for pageblock flags and vmemmap
+ * @map: usemap_map for pageblock flags or mmap_map for vmemmap
+ */
+static void __init alloc_usemap_and_memmap(void (*alloc_func)
+				(void *, unsigned long, unsigned long,
+				unsigned long, int), void *data)
+{
+	unsigned long pnum;
+	unsigned long map_count;
+	int nodeid_begin = 0;
+	unsigned long pnum_begin = 0;
+
+	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+		struct mem_section *ms;
+
+		if (!present_section_nr(pnum))
+			continue;
+		ms = __nr_to_section(pnum);
+		nodeid_begin = sparse_early_nid(ms);
+		pnum_begin = pnum;
+		break;
+	}
+	map_count = 1;
+	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
+		struct mem_section *ms;
+		int nodeid;
+
+		if (!present_section_nr(pnum))
+			continue;
+		ms = __nr_to_section(pnum);
+		nodeid = sparse_early_nid(ms);
+		if (nodeid == nodeid_begin) {
+			map_count++;
+			continue;
+		}
+		/* ok, we need to take cake of from pnum_begin to pnum - 1*/
+		alloc_func(data, pnum_begin, pnum,
+						map_count, nodeid_begin);
+		/* new start, update count etc*/
+		nodeid_begin = nodeid;
+		pnum_begin = pnum;
+		map_count = 1;
+	}
+	/* ok, last chunk */
+	alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
+						map_count, nodeid_begin);
+}
+
 /*
  * Allocate the accumulated non-linear sections, allocate a mem_map
  * for each and record the physical to section mapping.
@@ -471,11 +522,7 @@ void __init sparse_init(void)
 	unsigned long *usemap;
 	unsigned long **usemap_map;
 	int size;
-	int nodeid_begin = 0;
-	unsigned long pnum_begin = 0;
-	unsigned long usemap_count;
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-	unsigned long map_count;
 	int size2;
 	struct page **map_map;
 #endif
@@ -501,82 +548,16 @@ void __init sparse_init(void)
 	usemap_map = alloc_bootmem(size);
 	if (!usemap_map)
 		panic("can not allocate usemap_map\n");
-
-	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
-		struct mem_section *ms;
-
-		if (!present_section_nr(pnum))
-			continue;
-		ms = __nr_to_section(pnum);
-		nodeid_begin = sparse_early_nid(ms);
-		pnum_begin = pnum;
-		break;
-	}
-	usemap_count = 1;
-	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
-		struct mem_section *ms;
-		int nodeid;
-
-		if (!present_section_nr(pnum))
-			continue;
-		ms = __nr_to_section(pnum);
-		nodeid = sparse_early_nid(ms);
-		if (nodeid == nodeid_begin) {
-			usemap_count++;
-			continue;
-		}
-		/* ok, we need to take cake of from pnum_begin to pnum - 1*/
-		sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
-						usemap_count, nodeid_begin);
-		/* new start, update count etc*/
-		nodeid_begin = nodeid;
-		pnum_begin = pnum;
-		usemap_count = 1;
-	}
-	/* ok, last chunk */
-	sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
-					usemap_count, nodeid_begin);
+	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
+						(void *)usemap_map);
 
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
 	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
 	map_map = alloc_bootmem(size2);
 	if (!map_map)
 		panic("can not allocate map_map\n");
-
-	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
-		struct mem_section *ms;
-
-		if (!present_section_nr(pnum))
-			continue;
-		ms = __nr_to_section(pnum);
-		nodeid_begin = sparse_early_nid(ms);
-		pnum_begin = pnum;
-		break;
-	}
-	map_count = 1;
-	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
-		struct mem_section *ms;
-		int nodeid;
-
-		if (!present_section_nr(pnum))
-			continue;
-		ms = __nr_to_section(pnum);
-		nodeid = sparse_early_nid(ms);
-		if (nodeid == nodeid_begin) {
-			map_count++;
-			continue;
-		}
-		/* ok, we need to take cake of from pnum_begin to pnum - 1*/
-		sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
-						map_count, nodeid_begin);
-		/* new start, update count etc*/
-		nodeid_begin = nodeid;
-		pnum_begin = pnum;
-		map_count = 1;
-	}
-	/* ok, last chunk */
-	sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
-					map_count, nodeid_begin);
+	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
+						(void *)map_map);
 #endif
 
 	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {