author    Pavel Tatashin <pasha.tatashin@oracle.com>    2018-08-17 18:49:37 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-08-17 19:20:32 -0400
commit    2a3cb8baef71e4dad4a6ec17f5f0db9e05f46a01 (patch)
tree      37b814b70e116b69e9385568e98f44a1125a4e0d
parent    85c77f79139062901727cc3bd87a65212c8c0a32 (diff)
mm/sparse: delete old sparse_init and enable new one
Rename new_sparse_init() to sparse_init(), which enables it. Delete the old
sparse_init() and all the code that became obsolete with it.
[pasha.tatashin@oracle.com: remove unused sparse_mem_maps_populate_node()]
Link: http://lkml.kernel.org/r/20180716174447.14529-6-pasha.tatashin@oracle.com
Link: http://lkml.kernel.org/r/20180712203730.8703-6-pasha.tatashin@oracle.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Tested-by: Michael Ellerman <mpe@ellerman.id.au> [powerpc]
Tested-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Pasha Tatashin <Pavel.Tatashin@microsoft.com>
Cc: Abdul Haleem <abdhalee@linux.vnet.ibm.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Steven Sistare <steven.sistare@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/mm.h  |   6
-rw-r--r--  mm/Kconfig          |   4
-rw-r--r--  mm/sparse-vmemmap.c |  21
-rw-r--r--  mm/sparse.c         | 237
4 files changed, 1 insertion(+), 267 deletions(-)
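For orientation before reading the diff: after this patch, boot-time sparse initialization is handled entirely by the per-node path introduced in the parent commit (85c77f791390), which groups present sections by node and initializes each node in one pass. The sketch below shows the rough shape of the surviving sparse_init(); its tail is visible in the last hunk of this diff, but the full body and the exact sparse_init_nid() signature are assumptions taken from the earlier patch in this series, not quoted from the tree at this commit.

/*
 * Sketch only: group present sections by node and call
 * sparse_init_nid() once per node.  Details assumed from the
 * parent commit in this series; they may not match exactly.
 */
void __init sparse_init(void)
{
	unsigned long pnum_begin = first_present_section_nr();
	int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));
	unsigned long pnum_end, map_count = 1;

	/* see include/linux/mmzone.h 'struct mem_section' definition */
	BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}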
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 48040510df05..a3cae495f9ce 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2665,12 +2665,6 @@ extern int randomize_va_space;
 const char * arch_vma_name(struct vm_area_struct *vma);
 void print_vma_addr(char *prefix, unsigned long rip);
 
-void sparse_mem_maps_populate_node(struct page **map_map,
-				unsigned long pnum_begin,
-				unsigned long pnum_end,
-				unsigned long map_count,
-				int nodeid);
-
 void *sparse_buffer_alloc(unsigned long size);
 struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
 		struct vmem_altmap *altmap);
diff --git a/mm/Kconfig b/mm/Kconfig
index 08d8399bb93b..adfeae4decb4 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -118,10 +118,6 @@ config SPARSEMEM_EXTREME
 config SPARSEMEM_VMEMMAP_ENABLE
 	bool
 
-config SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-	def_bool y
-	depends on SPARSEMEM && X86_64
-
 config SPARSEMEM_VMEMMAP
 	bool "Sparse Memory virtual memmap"
 	depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index cd15f3d252c3..8301293331a2 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -261,24 +261,3 @@ struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
 
 	return map;
 }
-
-void __init sparse_mem_maps_populate_node(struct page **map_map,
-					  unsigned long pnum_begin,
-					  unsigned long pnum_end,
-					  unsigned long map_count, int nodeid)
-{
-	unsigned long pnum;
-	int nr_consumed_maps = 0;
-
-	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
-		if (!present_section_nr(pnum))
-			continue;
-
-		map_map[nr_consumed_maps] =
-			sparse_mem_map_populate(pnum, nodeid, NULL);
-		if (map_map[nr_consumed_maps++])
-			continue;
-		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
-		       __func__);
-	}
-}
diff --git a/mm/sparse.c b/mm/sparse.c
index 248d5d7bbf55..10b07eea9a6e 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -205,12 +205,6 @@ static inline unsigned long first_present_section_nr(void)
 	return next_present_section_nr(-1);
 }
 
-/*
- * Record how many memory sections are marked as present
- * during system bootup.
- */
-static int __initdata nr_present_sections;
-
 /* Record a memory area against a node. */
 void __init memory_present(int nid, unsigned long start, unsigned long end)
 {
@@ -240,7 +234,6 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
 			ms->section_mem_map = sparse_encode_early_nid(nid) |
 							SECTION_IS_ONLINE;
 			section_mark_present(ms);
-			nr_present_sections++;
 		}
 	}
 }
@@ -377,37 +370,8 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 
-static void __init sparse_early_usemaps_alloc_node(void *data,
-				 unsigned long pnum_begin,
-				 unsigned long pnum_end,
-				 unsigned long usemap_count, int nodeid)
-{
-	void *usemap;
-	unsigned long pnum;
-	unsigned long **usemap_map = (unsigned long **)data;
-	int size = usemap_size();
-	int nr_consumed_maps = 0;
-
-	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
-							  size * usemap_count);
-	if (!usemap) {
-		pr_warn("%s: allocation failed\n", __func__);
-		return;
-	}
-
-	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
-		if (!present_section_nr(pnum))
-			continue;
-		usemap_map[nr_consumed_maps] = usemap;
-		usemap += size;
-		check_usemap_section_nr(nodeid, usemap_map[nr_consumed_maps]);
-		nr_consumed_maps++;
-	}
-}
-
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 static unsigned long __init section_map_size(void)
-
 {
 	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
 }
@@ -432,25 +396,6 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
 					  BOOTMEM_ALLOC_ACCESSIBLE, nid);
 	return map;
 }
-void __init sparse_mem_maps_populate_node(struct page **map_map,
-					  unsigned long pnum_begin,
-					  unsigned long pnum_end,
-					  unsigned long map_count, int nodeid)
-{
-	unsigned long pnum;
-	int nr_consumed_maps = 0;
-
-	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
-		if (!present_section_nr(pnum))
-			continue;
-		map_map[nr_consumed_maps] =
-			sparse_mem_map_populate(pnum, nodeid, NULL);
-		if (map_map[nr_consumed_maps++])
-			continue;
-		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
-		       __func__);
-	}
-}
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
 static void *sparsemap_buf __meminitdata;
@@ -489,190 +434,10 @@ void * __meminit sparse_buffer_alloc(unsigned long size)
 	return ptr;
 }
 
-#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-static void __init sparse_early_mem_maps_alloc_node(void *data,
-				 unsigned long pnum_begin,
-				 unsigned long pnum_end,
-				 unsigned long map_count, int nodeid)
-{
-	struct page **map_map = (struct page **)data;
-
-	sparse_buffer_init(section_map_size() * map_count, nodeid);
-	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
-				      map_count, nodeid);
-	sparse_buffer_fini();
-}
-#else
-static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
-{
-	struct page *map;
-	struct mem_section *ms = __nr_to_section(pnum);
-	int nid = sparse_early_nid(ms);
-
-	map = sparse_mem_map_populate(pnum, nid, NULL);
-	if (map)
-		return map;
-
-	pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
-	       __func__);
-	return NULL;
-}
-#endif
-
 void __weak __meminit vmemmap_populate_print_last(void)
 {
 }
 
-/**
- * alloc_usemap_and_memmap - memory alloction for pageblock flags and vmemmap
- * @map: usemap_map for pageblock flags or mmap_map for vmemmap
- * @unit_size: size of map unit
- */
-static void __init alloc_usemap_and_memmap(void (*alloc_func)
-				(void *, unsigned long, unsigned long,
-				unsigned long, int), void *data,
-				int data_unit_size)
-{
-	unsigned long pnum;
-	unsigned long map_count;
-	int nodeid_begin = 0;
-	unsigned long pnum_begin = 0;
-
-	for_each_present_section_nr(0, pnum) {
-		struct mem_section *ms;
-
-		ms = __nr_to_section(pnum);
-		nodeid_begin = sparse_early_nid(ms);
-		pnum_begin = pnum;
-		break;
-	}
-	map_count = 1;
-	for_each_present_section_nr(pnum_begin + 1, pnum) {
-		struct mem_section *ms;
-		int nodeid;
-
-		ms = __nr_to_section(pnum);
-		nodeid = sparse_early_nid(ms);
-		if (nodeid == nodeid_begin) {
-			map_count++;
-			continue;
-		}
-		/* ok, we need to take cake of from pnum_begin to pnum - 1*/
-		alloc_func(data, pnum_begin, pnum,
-			   map_count, nodeid_begin);
-		/* new start, update count etc*/
-		nodeid_begin = nodeid;
-		pnum_begin = pnum;
-		data += map_count * data_unit_size;
-		map_count = 1;
-	}
-	/* ok, last chunk */
-	alloc_func(data, pnum_begin, __highest_present_section_nr+1,
-		   map_count, nodeid_begin);
-}
-
-/*
- * Allocate the accumulated non-linear sections, allocate a mem_map
- * for each and record the physical to section mapping.
- */
-void __init sparse_init(void)
-{
-	unsigned long pnum;
-	struct page *map;
-	unsigned long *usemap;
-	unsigned long **usemap_map;
-	int size;
-	int nr_consumed_maps = 0;
-#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-	int size2;
-	struct page **map_map;
-#endif
-
-	/* see include/linux/mmzone.h 'struct mem_section' definition */
-	BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));
-
-	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
-	set_pageblock_order();
-
-	/*
-	 * map is using big page (aka 2M in x86 64 bit)
-	 * usemap is less one page (aka 24 bytes)
-	 * so alloc 2M (with 2M align) and 24 bytes in turn will
-	 * make next 2M slip to one more 2M later.
-	 * then in big system, the memory will have a lot of holes...
-	 * here try to allocate 2M pages continuously.
-	 *
-	 * powerpc need to call sparse_init_one_section right after each
-	 * sparse_early_mem_map_alloc, so allocate usemap_map at first.
-	 */
-	size = sizeof(unsigned long *) * nr_present_sections;
-	usemap_map = memblock_virt_alloc(size, 0);
-	if (!usemap_map)
-		panic("can not allocate usemap_map\n");
-	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
-				(void *)usemap_map,
-				sizeof(usemap_map[0]));
-
-#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-	size2 = sizeof(struct page *) * nr_present_sections;
-	map_map = memblock_virt_alloc(size2, 0);
-	if (!map_map)
-		panic("can not allocate map_map\n");
-	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
-				(void *)map_map,
-				sizeof(map_map[0]));
-#endif
-
-	/*
-	 * The number of present sections stored in nr_present_sections
-	 * are kept the same since mem sections are marked as present in
-	 * memory_present(). In this for loop, we need check which sections
-	 * failed to allocate memmap or usemap, then clear its
-	 * ->section_mem_map accordingly. During this process, we need
-	 * increase 'nr_consumed_maps' whether its allocation of memmap
-	 * or usemap failed or not, so that after we handle the i-th
-	 * memory section, can get memmap and usemap of (i+1)-th section
-	 * correctly.
-	 */
-	for_each_present_section_nr(0, pnum) {
-		struct mem_section *ms;
-
-		if (nr_consumed_maps >= nr_present_sections) {
-			pr_err("nr_consumed_maps goes beyond nr_present_sections\n");
-			break;
-		}
-		ms = __nr_to_section(pnum);
-		usemap = usemap_map[nr_consumed_maps];
-		if (!usemap) {
-			ms->section_mem_map = 0;
-			nr_consumed_maps++;
-			continue;
-		}
-
-#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-		map = map_map[nr_consumed_maps];
-#else
-		map = sparse_early_mem_map_alloc(pnum);
-#endif
-		if (!map) {
-			ms->section_mem_map = 0;
-			nr_consumed_maps++;
-			continue;
-		}
-
-		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
-					usemap);
-		nr_consumed_maps++;
-	}
-
-	vmemmap_populate_print_last();
-
-#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-	memblock_free_early(__pa(map_map), size2);
-#endif
-	memblock_free_early(__pa(usemap_map), size);
-}
-
 /*
  * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
  * And number of present sections in this node is map_count.
@@ -726,7 +491,7 @@ failed:
  * Allocate the accumulated non-linear sections, allocate a mem_map
  * for each and record the physical to section mapping.
  */
-void __init new_sparse_init(void)
+void __init sparse_init(void)
 {
 	unsigned long pnum_begin = first_present_section_nr();
 	int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));