Diffstat (limited to 'arch/s390/kernel/setup.c')
-rw-r--r--  arch/s390/kernel/setup.c | 451
1 file changed, 160 insertions(+), 291 deletions(-)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 88d1ca81e2dd..1f5536c2fd02 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -78,10 +78,9 @@ EXPORT_SYMBOL(console_irq);
 unsigned long elf_hwcap = 0;
 char elf_platform[ELF_PLATFORM_SIZE];
 
-struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
-
 int __initdata memory_end_set;
 unsigned long __initdata memory_end;
+unsigned long __initdata max_physmem_end;
 
 unsigned long VMALLOC_START;
 EXPORT_SYMBOL(VMALLOC_START);
@@ -273,6 +272,7 @@ EXPORT_SYMBOL_GPL(pm_power_off);
 static int __init early_parse_mem(char *p)
 {
 	memory_end = memparse(p, &p);
+	memory_end &= PAGE_MASK;
 	memory_end_set = 1;
 	return 0;
 }
@@ -401,7 +401,8 @@ static struct resource __initdata *standard_resources[] = {
 static void __init setup_resources(void)
 {
 	struct resource *res, *std_res, *sub_res;
-	int i, j;
+	struct memblock_region *reg;
+	int j;
 
 	code_resource.start = (unsigned long) &_text;
 	code_resource.end = (unsigned long) &_etext - 1;
@@ -410,24 +411,13 @@ static void __init setup_resources(void)
 	bss_resource.start = (unsigned long) &__bss_start;
 	bss_resource.end = (unsigned long) &__bss_stop - 1;
 
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		if (!memory_chunk[i].size)
-			continue;
+	for_each_memblock(memory, reg) {
 		res = alloc_bootmem_low(sizeof(*res));
 		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
-		switch (memory_chunk[i].type) {
-		case CHUNK_READ_WRITE:
-			res->name = "System RAM";
-			break;
-		case CHUNK_READ_ONLY:
-			res->name = "System ROM";
-			res->flags |= IORESOURCE_READONLY;
-			break;
-		default:
-			res->name = "reserved";
-		}
-		res->start = memory_chunk[i].addr;
-		res->end = res->start + memory_chunk[i].size - 1;
+
+		res->name = "System RAM";
+		res->start = reg->base;
+		res->end = reg->base + reg->size - 1;
 		request_resource(&iomem_resource, res);
 
 		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
@@ -451,48 +441,11 @@ static void __init setup_resources(void)
 static void __init setup_memory_end(void)
 {
 	unsigned long vmax, vmalloc_size, tmp;
-	unsigned long real_memory_size = 0;
-	int i;
-
-
-#ifdef CONFIG_ZFCPDUMP
-	if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
-	    !OLDMEM_BASE && sclp_get_hsa_size()) {
-		memory_end = sclp_get_hsa_size();
-		memory_end_set = 1;
-	}
-#endif
-	memory_end &= PAGE_MASK;
-
-	/*
-	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
-	 * extra checks that HOLES_IN_ZONE would require.
-	 */
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		unsigned long start, end;
-		struct mem_chunk *chunk;
-		unsigned long align;
-
-		chunk = &memory_chunk[i];
-		if (!chunk->size)
-			continue;
-		align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
-		start = (chunk->addr + align - 1) & ~(align - 1);
-		end = (chunk->addr + chunk->size) & ~(align - 1);
-		if (start >= end)
-			memset(chunk, 0, sizeof(*chunk));
-		else {
-			chunk->addr = start;
-			chunk->size = end - start;
-		}
-		real_memory_size = max(real_memory_size,
-				       chunk->addr + chunk->size);
-	}
 
 	/* Choose kernel address space layout: 2, 3, or 4 levels. */
 #ifdef CONFIG_64BIT
 	vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
-	tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
+	tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
 	tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
 	if (tmp <= (1UL << 42))
 		vmax = 1UL << 42;	/* 3-level kernel page table */
@@ -520,21 +473,11 @@ static void __init setup_memory_end(void)
 	vmemmap = (struct page *) tmp;
 
 	/* Take care that memory_end is set and <= vmemmap */
-	memory_end = min(memory_end ?: real_memory_size, tmp);
-
-	/* Fixup memory chunk array to fit into 0..memory_end */
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		struct mem_chunk *chunk = &memory_chunk[i];
+	memory_end = min(memory_end ?: max_physmem_end, tmp);
+	max_pfn = max_low_pfn = PFN_DOWN(memory_end);
+	memblock_remove(memory_end, ULONG_MAX);
 
-		if (!chunk->size)
-			continue;
-		if (chunk->addr >= memory_end) {
-			memset(chunk, 0, sizeof(*chunk));
-			continue;
-		}
-		if (chunk->addr + chunk->size > memory_end)
-			chunk->size = memory_end - chunk->addr;
-	}
+	pr_notice("Max memory size: %luMB\n", memory_end >> 20);
 }
 
 static void __init setup_vmcoreinfo(void)
@@ -545,89 +488,6 @@ static void __init setup_vmcoreinfo(void)
 #ifdef CONFIG_CRASH_DUMP
 
 /*
- * Find suitable location for crashkernel memory
- */
-static unsigned long __init find_crash_base(unsigned long crash_size,
-					    char **msg)
-{
-	unsigned long crash_base;
-	struct mem_chunk *chunk;
-	int i;
-
-	if (memory_chunk[0].size < crash_size) {
-		*msg = "first memory chunk must be at least crashkernel size";
-		return 0;
-	}
-	if (OLDMEM_BASE && crash_size == OLDMEM_SIZE)
-		return OLDMEM_BASE;
-
-	for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
-		chunk = &memory_chunk[i];
-		if (chunk->size == 0)
-			continue;
-		if (chunk->type != CHUNK_READ_WRITE)
-			continue;
-		if (chunk->size < crash_size)
-			continue;
-		crash_base = (chunk->addr + chunk->size) - crash_size;
-		if (crash_base < crash_size)
-			continue;
-		if (crash_base < sclp_get_hsa_size())
-			continue;
-		if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE)
-			continue;
-		return crash_base;
-	}
-	*msg = "no suitable area found";
-	return 0;
-}
-
-/*
- * Check if crash_base and crash_size is valid
- */
-static int __init verify_crash_base(unsigned long crash_base,
-				    unsigned long crash_size,
-				    char **msg)
-{
-	struct mem_chunk *chunk;
-	int i;
-
-	/*
-	 * Because we do the swap to zero, we must have at least 'crash_size'
-	 * bytes free space before crash_base
-	 */
-	if (crash_size > crash_base) {
-		*msg = "crashkernel offset must be greater than size";
-		return -EINVAL;
-	}
-
-	/* First memory chunk must be at least crash_size */
-	if (memory_chunk[0].size < crash_size) {
-		*msg = "first memory chunk must be at least crashkernel size";
-		return -EINVAL;
-	}
-	/* Check if we fit into the respective memory chunk */
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		chunk = &memory_chunk[i];
-		if (chunk->size == 0)
-			continue;
-		if (crash_base < chunk->addr)
-			continue;
-		if (crash_base >= chunk->addr + chunk->size)
-			continue;
-		/* we have found the memory chunk */
-		if (crash_base + crash_size > chunk->addr + chunk->size) {
-			*msg = "selected memory chunk is too small for "
-			       "crashkernel memory";
-			return -EINVAL;
-		}
-		return 0;
-	}
-	*msg = "invalid memory range specified";
-	return -EINVAL;
-}
-
-/*
  * When kdump is enabled, we have to ensure that no memory from
  * the area [0 - crashkernel memory size] and
  * [crashk_res.start - crashk_res.end] is set offline.
@@ -653,23 +513,44 @@ static struct notifier_block kdump_mem_nb = {
 #endif
 
 /*
+ * Make sure that the area behind memory_end is protected
+ */
+static void reserve_memory_end(void)
+{
+#ifdef CONFIG_ZFCPDUMP
+	if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
+	    !OLDMEM_BASE && sclp_get_hsa_size()) {
+		memory_end = sclp_get_hsa_size();
+		memory_end &= PAGE_MASK;
+		memory_end_set = 1;
+	}
+#endif
+	if (!memory_end_set)
+		return;
+	memblock_reserve(memory_end, ULONG_MAX);
+}
+
+/*
  * Make sure that oldmem, where the dump is stored, is protected
  */
 static void reserve_oldmem(void)
 {
 #ifdef CONFIG_CRASH_DUMP
-	unsigned long real_size = 0;
-	int i;
-
-	if (!OLDMEM_BASE)
-		return;
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		struct mem_chunk *chunk = &memory_chunk[i];
+	if (OLDMEM_BASE)
+		/* Forget all memory above the running kdump system */
+		memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
+#endif
+}
 
-		real_size = max(real_size, chunk->addr + chunk->size);
-	}
-	create_mem_hole(memory_chunk, OLDMEM_BASE, OLDMEM_SIZE);
-	create_mem_hole(memory_chunk, OLDMEM_SIZE, real_size - OLDMEM_SIZE);
+/*
+ * Make sure that oldmem, where the dump is stored, is protected
+ */
+static void remove_oldmem(void)
+{
+#ifdef CONFIG_CRASH_DUMP
+	if (OLDMEM_BASE)
+		/* Forget all memory above the running kdump system */
+		memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
 #endif
 }
 
@@ -680,167 +561,132 @@ static void __init reserve_crashkernel(void)
 {
 #ifdef CONFIG_CRASH_DUMP
 	unsigned long long crash_base, crash_size;
-	char *msg = NULL;
+	phys_addr_t low, high;
 	int rc;
 
 	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
 			       &crash_base);
-	if (rc || crash_size == 0)
-		return;
+
 	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
 	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
-	if (register_memory_notifier(&kdump_mem_nb))
+	if (rc || crash_size == 0)
 		return;
-	if (!crash_base)
-		crash_base = find_crash_base(crash_size, &msg);
-	if (!crash_base) {
-		pr_info("crashkernel reservation failed: %s\n", msg);
-		unregister_memory_notifier(&kdump_mem_nb);
+
+	if (memblock.memory.regions[0].size < crash_size) {
+		pr_info("crashkernel reservation failed: %s\n",
+			"first memory chunk must be at least crashkernel size");
 		return;
 	}
-	if (verify_crash_base(crash_base, crash_size, &msg)) {
-		pr_info("crashkernel reservation failed: %s\n", msg);
-		unregister_memory_notifier(&kdump_mem_nb);
+
+	low = crash_base ?: OLDMEM_BASE;
+	high = low + crash_size;
+	if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) {
+		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
+		crash_base = low;
+	} else {
+		/* Find suitable area in free memory */
+		low = max_t(unsigned long, crash_size, sclp_get_hsa_size());
+		high = crash_base ? crash_base + crash_size : ULONG_MAX;
+
+		if (crash_base && crash_base < low) {
+			pr_info("crashkernel reservation failed: %s\n",
+				"crash_base too low");
+			return;
+		}
+		low = crash_base ?: low;
+		crash_base = memblock_find_in_range(low, high, crash_size,
+						    KEXEC_CRASH_MEM_ALIGN);
+	}
+
+	if (!crash_base) {
+		pr_info("crashkernel reservation failed: %s\n",
+			"no suitable area found");
 		return;
 	}
+
+	if (register_memory_notifier(&kdump_mem_nb))
+		return;
+
 	if (!OLDMEM_BASE && MACHINE_IS_VM)
 		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
 	crashk_res.start = crash_base;
 	crashk_res.end = crash_base + crash_size - 1;
 	insert_resource(&iomem_resource, &crashk_res);
-	create_mem_hole(memory_chunk, crash_base, crash_size);
+	memblock_remove(crash_base, crash_size);
 	pr_info("Reserving %lluMB of memory at %lluMB "
 		"for crashkernel (System RAM: %luMB)\n",
-		crash_size >> 20, crash_base >> 20, memory_end >> 20);
+		crash_size >> 20, crash_base >> 20,
+		(unsigned long)memblock.memory.total_size >> 20);
 	os_info_crashkernel_add(crash_base, crash_size);
 #endif
 }
 
-static void __init setup_memory(void)
+/*
+ * Reserve the initrd from being used by memblock
+ */
+static void __init reserve_initrd(void)
 {
-	unsigned long bootmap_size;
-	unsigned long start_pfn, end_pfn;
-	int i;
+#ifdef CONFIG_BLK_DEV_INITRD
+	initrd_start = INITRD_START;
+	initrd_end = initrd_start + INITRD_SIZE;
+	memblock_reserve(INITRD_START, INITRD_SIZE);
+#endif
+}
 
 	/*
-	 * partially used pages are not usable - thus
-	 * we are rounding upwards:
-	 */
+ * Check for initrd being in usable memory
+ */
+static void __init check_initrd(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (INITRD_START && INITRD_SIZE &&
+	    !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
+		pr_err("initrd does not fit memory.\n");
+		memblock_free(INITRD_START, INITRD_SIZE);
+		initrd_start = initrd_end = 0;
+	}
+#endif
+}
+
+/*
+ * Reserve all kernel text
+ */
+static void __init reserve_kernel(void)
+{
+	unsigned long start_pfn;
 	start_pfn = PFN_UP(__pa(&_end));
-	end_pfn = max_pfn = PFN_DOWN(memory_end);
 
-#ifdef CONFIG_BLK_DEV_INITRD
 	/*
-	 * Move the initrd in case the bitmap of the bootmem allocater
-	 * would overwrite it.
+	 * Reserve memory used for lowcore/command line/kernel image.
 	 */
+	memblock_reserve(0, (unsigned long)_ehead);
+	memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
+			 - (unsigned long)_stext);
+}
 
-	if (INITRD_START && INITRD_SIZE) {
-		unsigned long bmap_size;
-		unsigned long start;
-
-		bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
-		bmap_size = PFN_PHYS(bmap_size);
-
-		if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
-			start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
-
+static void __init reserve_elfcorehdr(void)
+{
 #ifdef CONFIG_CRASH_DUMP
-			if (OLDMEM_BASE) {
-				/* Move initrd behind kdump oldmem */
-				if (start + INITRD_SIZE > OLDMEM_BASE &&
-				    start < OLDMEM_BASE + OLDMEM_SIZE)
-					start = OLDMEM_BASE + OLDMEM_SIZE;
-			}
-#endif
-			if (start + INITRD_SIZE > memory_end) {
-				pr_err("initrd extends beyond end of "
-				       "memory (0x%08lx > 0x%08lx) "
-				       "disabling initrd\n",
-				       start + INITRD_SIZE, memory_end);
-				INITRD_START = INITRD_SIZE = 0;
-			} else {
-				pr_info("Moving initrd (0x%08lx -> "
-					"0x%08lx, size: %ld)\n",
-					INITRD_START, start, INITRD_SIZE);
-				memmove((void *) start, (void *) INITRD_START,
-					INITRD_SIZE);
-				INITRD_START = start;
-			}
-		}
-	}
+	if (is_kdump_kernel())
+		memblock_reserve(elfcorehdr_addr - OLDMEM_BASE,
+				 PAGE_ALIGN(elfcorehdr_size));
 #endif
+}
 
-	/*
-	 * Initialize the boot-time allocator
-	 */
-	bootmap_size = init_bootmem(start_pfn, end_pfn);
+static void __init setup_memory(void)
+{
+	struct memblock_region *reg;
 
 	/*
-	 * Register RAM areas with the bootmem allocator.
+	 * Init storage key for present memory
 	 */
-
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		unsigned long start_chunk, end_chunk, pfn;
-
-		if (!memory_chunk[i].size)
-			continue;
-		start_chunk = PFN_DOWN(memory_chunk[i].addr);
-		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
-		end_chunk = min(end_chunk, end_pfn);
-		if (start_chunk >= end_chunk)
-			continue;
-		memblock_add_node(PFN_PHYS(start_chunk),
-				  PFN_PHYS(end_chunk - start_chunk), 0);
-		pfn = max(start_chunk, start_pfn);
-		storage_key_init_range(PFN_PHYS(pfn), PFN_PHYS(end_chunk));
+	for_each_memblock(memory, reg) {
+		storage_key_init_range(reg->base, reg->base + reg->size);
 	}
-
 	psw_set_key(PAGE_DEFAULT_KEY);
 
-	free_bootmem_with_active_regions(0, max_pfn);
-
-	/*
-	 * Reserve memory used for lowcore/command line/kernel image.
-	 */
-	reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT);
-	reserve_bootmem((unsigned long)_stext,
-			PFN_PHYS(start_pfn) - (unsigned long)_stext,
-			BOOTMEM_DEFAULT);
-	/*
-	 * Reserve the bootmem bitmap itself as well. We do this in two
-	 * steps (first step was init_bootmem()) because this catches
-	 * the (very unlikely) case of us accidentally initializing the
-	 * bootmem allocator with an invalid RAM area.
-	 */
-	reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
-			BOOTMEM_DEFAULT);
-
-#ifdef CONFIG_CRASH_DUMP
-	if (crashk_res.start)
-		reserve_bootmem(crashk_res.start,
-				crashk_res.end - crashk_res.start + 1,
-				BOOTMEM_DEFAULT);
-	if (is_kdump_kernel())
-		reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE,
-				PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT);
-#endif
-#ifdef CONFIG_BLK_DEV_INITRD
-	if (INITRD_START && INITRD_SIZE) {
-		if (INITRD_START + INITRD_SIZE <= memory_end) {
-			reserve_bootmem(INITRD_START, INITRD_SIZE,
-					BOOTMEM_DEFAULT);
-			initrd_start = INITRD_START;
-			initrd_end = initrd_start + INITRD_SIZE;
-		} else {
-			pr_err("initrd extends beyond end of "
-			       "memory (0x%08lx > 0x%08lx) "
-			       "disabling initrd\n",
-			       initrd_start + INITRD_SIZE, memory_end);
-			initrd_start = initrd_end = 0;
-		}
-	}
-#endif
+	/* Only cosmetics */
+	memblock_enforce_memory_limit(memblock_end_of_DRAM());
 }
 
 /*
@@ -989,23 +835,46 @@ void __init setup_arch(char **cmdline_p)
 
 	ROOT_DEV = Root_RAM0;
 
+	/* Is init_mm really needed? */
 	init_mm.start_code = PAGE_OFFSET;
 	init_mm.end_code = (unsigned long) &_etext;
 	init_mm.end_data = (unsigned long) &_edata;
 	init_mm.brk = (unsigned long) &_end;
 
 	parse_early_param();
-	detect_memory_layout(memory_chunk, memory_end);
 	os_info_init();
 	setup_ipl();
+
+	/* Do some memory reservations *before* memory is added to memblock */
+	reserve_memory_end();
 	reserve_oldmem();
+	reserve_kernel();
+	reserve_initrd();
+	reserve_elfcorehdr();
+	memblock_allow_resize();
+
+	/* Get information about *all* installed memory */
+	detect_memory_memblock();
+
+	remove_oldmem();
+
+	/*
+	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
+	 * extra checks that HOLES_IN_ZONE would require.
+	 *
+	 * Is this still required?
+	 */
+	memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT));
+
 	setup_memory_end();
-	reserve_crashkernel();
 	setup_memory();
+
+	check_initrd();
+	reserve_crashkernel();
+
 	setup_resources();
 	setup_vmcoreinfo();
 	setup_lowcore();
-
 	smp_fill_possible_mask();
 	cpu_init();
 	s390_init_cpu_topology();
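
The pattern this commit repeats throughout the file: code that used to index the fixed memory_chunk[MEMORY_CHUNKS] array, skip empty slots, and switch on the chunk type now simply walks memblock's region list with for_each_memblock(memory, reg). Below is a minimal user-space sketch of the converted setup_resources() loop, with a hypothetical struct region and sample data standing in for struct memblock_region and the for_each_memblock() iterator; it illustrates only the shape of the iteration, not the kernel API itself.

#include <stdio.h>

/* Hypothetical stand-in for struct memblock_region (base/size only). */
struct region {
	unsigned long base;
	unsigned long size;
};

/* Sample layout: two RAM regions with a hole in between. */
static const struct region memory[] = {
	{ 0x00000000UL, 0x40000000UL },	/* 1 GiB at address 0 */
	{ 0x80000000UL, 0x20000000UL },	/* 512 MiB at 2 GiB */
};

int main(void)
{
	/*
	 * Like the converted setup_resources(): every present region is
	 * reported as "System RAM" with an inclusive end address; there is
	 * no empty-slot check and no CHUNK_READ_WRITE/CHUNK_READ_ONLY
	 * switch, because memblock only tracks regions that exist.
	 */
	for (size_t i = 0; i < sizeof(memory) / sizeof(memory[0]); i++) {
		const struct region *reg = &memory[i];

		printf("System RAM: 0x%08lx-0x%08lx\n",
		       reg->base, reg->base + reg->size - 1);
	}
	return 0;
}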