Diffstat (limited to 'arch/s390/kernel/setup.c')
-rw-r--r-- | arch/s390/kernel/setup.c | 275
1 files changed, 244 insertions, 31 deletions
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7b371c37061d..8ac6bfa2786c 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -42,6 +42,9 @@
 #include <linux/reboot.h>
 #include <linux/topology.h>
 #include <linux/ftrace.h>
+#include <linux/kexec.h>
+#include <linux/crash_dump.h>
+#include <linux/memory.h>
 
 #include <asm/ipl.h>
 #include <asm/uaccess.h>
@@ -57,12 +60,13 @@
 #include <asm/ebcdic.h>
 #include <asm/compat.h>
 #include <asm/kvm_virtio.h>
+#include <asm/diag.h>
 
-long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
-			PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
-long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
-			PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
-			PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
+long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
+			PSW_MASK_EA | PSW_MASK_BA;
+long psw_user_bits = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT |
+			PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK |
+			PSW_MASK_PSTATE | PSW_ASC_HOME;
 
 /*
  * User copy operations.
@@ -274,22 +278,14 @@ early_param("mem", early_parse_mem);
 unsigned int user_mode = HOME_SPACE_MODE;
 EXPORT_SYMBOL_GPL(user_mode);
 
-static int set_amode_and_uaccess(unsigned long user_amode,
-				 unsigned long user32_amode)
+static int set_amode_primary(void)
 {
-	psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
-			PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
-			PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
+	psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME;
+	psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY;
 #ifdef CONFIG_COMPAT
-	psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode |
-			  PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
-			  PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
-	psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
-			  PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
-			  PSW32_MASK_PSTATE;
+	psw32_user_bits =
+		(psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY;
 #endif
-	psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
-			  PSW_MASK_MCHECK | PSW_DEFAULT_KEY;
 
 	if (MACHINE_HAS_MVCOS) {
 		memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
@@ -325,7 +321,7 @@ early_param("user_mode", early_parse_user_mode);
 static void setup_addressing_mode(void)
 {
 	if (user_mode == PRIMARY_SPACE_MODE) {
-		if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
+		if (set_amode_primary())
 			pr_info("Address spaces switched, "
 				"mvcos available\n");
 		else
@@ -344,24 +340,25 @@ setup_lowcore(void)
 	 */
 	BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
 	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
-	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
+	lc->restart_psw.mask = psw_kernel_bits;
 	lc->restart_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
-	if (user_mode != HOME_SPACE_MODE)
-		lc->restart_psw.mask |= PSW_ASC_HOME;
-	lc->external_new_psw.mask = psw_kernel_bits;
+	lc->external_new_psw.mask = psw_kernel_bits |
+		PSW_MASK_DAT | PSW_MASK_MCHECK;
 	lc->external_new_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
-	lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
+	lc->svc_new_psw.mask = psw_kernel_bits |
+		PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
 	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
-	lc->program_new_psw.mask = psw_kernel_bits;
+	lc->program_new_psw.mask = psw_kernel_bits |
+		PSW_MASK_DAT | PSW_MASK_MCHECK;
 	lc->program_new_psw.addr =
-		PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
-	lc->mcck_new_psw.mask =
-		psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
+		PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
+	lc->mcck_new_psw.mask = psw_kernel_bits;
 	lc->mcck_new_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
-	lc->io_new_psw.mask = psw_kernel_bits;
+	lc->io_new_psw.mask = psw_kernel_bits |
+		PSW_MASK_DAT | PSW_MASK_MCHECK;
 	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
 	lc->clock_comparator = -1ULL;
 	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
@@ -435,10 +432,14 @@ static void __init setup_resources(void)
 	for (i = 0; i < MEMORY_CHUNKS; i++) {
 		if (!memory_chunk[i].size)
 			continue;
+		if (memory_chunk[i].type == CHUNK_OLDMEM ||
+		    memory_chunk[i].type == CHUNK_CRASHK)
+			continue;
 		res = alloc_bootmem_low(sizeof(*res));
 		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
 		switch (memory_chunk[i].type) {
 		case CHUNK_READ_WRITE:
+		case CHUNK_CRASHK:
 			res->name = "System RAM";
 			break;
 		case CHUNK_READ_ONLY:
@@ -479,6 +480,7 @@ static void __init setup_memory_end(void)
 	unsigned long max_mem;
 	int i;
 
+
 #ifdef CONFIG_ZFCPDUMP
 	if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
 		memory_end = ZFCPDUMP_HSA_SIZE;
@@ -545,11 +547,201 @@ static void __init setup_restart_psw(void)
  * Setup restart PSW for absolute zero lowcore. This is necesary
  * if PSW restart is done on an offline CPU that has lowcore zero
  */
-	psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
+	psw.mask = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
 	psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
 	copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
 }
 
+static void __init setup_vmcoreinfo(void)
+{
+#ifdef CONFIG_KEXEC
+	unsigned long ptr = paddr_vmcoreinfo_note();
+
+	copy_to_absolute_zero(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr));
+#endif
+}
+
+#ifdef CONFIG_CRASH_DUMP
+
+/*
+ * Find suitable location for crashkernel memory
+ */
+static unsigned long __init find_crash_base(unsigned long crash_size,
+					    char **msg)
+{
+	unsigned long crash_base;
+	struct mem_chunk *chunk;
+	int i;
+
+	if (memory_chunk[0].size < crash_size) {
+		*msg = "first memory chunk must be at least crashkernel size";
+		return 0;
+	}
+	if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE))
+		return OLDMEM_BASE;
+
+	for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
+		chunk = &memory_chunk[i];
+		if (chunk->size == 0)
+			continue;
+		if (chunk->type != CHUNK_READ_WRITE)
+			continue;
+		if (chunk->size < crash_size)
+			continue;
+		crash_base = (chunk->addr + chunk->size) - crash_size;
+		if (crash_base < crash_size)
+			continue;
+		if (crash_base < ZFCPDUMP_HSA_SIZE_MAX)
+			continue;
+		if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE)
+			continue;
+		return crash_base;
+	}
+	*msg = "no suitable area found";
+	return 0;
+}
+
+/*
+ * Check if crash_base and crash_size is valid
+ */
+static int __init verify_crash_base(unsigned long crash_base,
+				    unsigned long crash_size,
+				    char **msg)
+{
+	struct mem_chunk *chunk;
+	int i;
+
+	/*
+	 * Because we do the swap to zero, we must have at least 'crash_size'
+	 * bytes free space before crash_base
+	 */
+	if (crash_size > crash_base) {
+		*msg = "crashkernel offset must be greater than size";
+		return -EINVAL;
+	}
+
+	/* First memory chunk must be at least crash_size */
+	if (memory_chunk[0].size < crash_size) {
+		*msg = "first memory chunk must be at least crashkernel size";
+		return -EINVAL;
+	}
+	/* Check if we fit into the respective memory chunk */
+	for (i = 0; i < MEMORY_CHUNKS; i++) {
+		chunk = &memory_chunk[i];
+		if (chunk->size == 0)
+			continue;
+		if (crash_base < chunk->addr)
+			continue;
+		if (crash_base >= chunk->addr + chunk->size)
+			continue;
+		/* we have found the memory chunk */
+		if (crash_base + crash_size > chunk->addr + chunk->size) {
+			*msg = "selected memory chunk is too small for "
+			       "crashkernel memory";
+			return -EINVAL;
+		}
+		return 0;
+	}
+	*msg = "invalid memory range specified";
+	return -EINVAL;
+}
+
+/*
+ * Reserve kdump memory by creating a memory hole in the mem_chunk array
+ */
+static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size,
+					 int type)
+{
+
+	create_mem_hole(memory_chunk, addr, size, type);
+}
+
+/*
+ * When kdump is enabled, we have to ensure that no memory from
+ * the area [0 - crashkernel memory size] and
+ * [crashk_res.start - crashk_res.end] is set offline.
+ */
+static int kdump_mem_notifier(struct notifier_block *nb,
+			      unsigned long action, void *data)
+{
+	struct memory_notify *arg = data;
+
+	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
+		return NOTIFY_BAD;
+	if (arg->start_pfn > PFN_DOWN(crashk_res.end))
+		return NOTIFY_OK;
+	if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
+		return NOTIFY_OK;
+	return NOTIFY_BAD;
+}
+
+static struct notifier_block kdump_mem_nb = {
+	.notifier_call = kdump_mem_notifier,
+};
+
+#endif
+
+/*
+ * Make sure that oldmem, where the dump is stored, is protected
+ */
+static void reserve_oldmem(void)
+{
+#ifdef CONFIG_CRASH_DUMP
+	if (!OLDMEM_BASE)
+		return;
+
+	reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM);
+	reserve_kdump_bootmem(OLDMEM_SIZE, memory_end - OLDMEM_SIZE,
+			      CHUNK_OLDMEM);
+	if (OLDMEM_BASE + OLDMEM_SIZE == real_memory_size)
+		saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1;
+	else
+		saved_max_pfn = PFN_DOWN(real_memory_size) - 1;
+#endif
+}
+
+/*
+ * Reserve memory for kdump kernel to be loaded with kexec
+ */
+static void __init reserve_crashkernel(void)
+{
+#ifdef CONFIG_CRASH_DUMP
+	unsigned long long crash_base, crash_size;
+	char *msg;
+	int rc;
+
+	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
+			       &crash_base);
+	if (rc || crash_size == 0)
+		return;
+	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
+	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
+	if (register_memory_notifier(&kdump_mem_nb))
+		return;
+	if (!crash_base)
+		crash_base = find_crash_base(crash_size, &msg);
+	if (!crash_base) {
+		pr_info("crashkernel reservation failed: %s\n", msg);
+		unregister_memory_notifier(&kdump_mem_nb);
+		return;
+	}
+	if (verify_crash_base(crash_base, crash_size, &msg)) {
+		pr_info("crashkernel reservation failed: %s\n", msg);
+		unregister_memory_notifier(&kdump_mem_nb);
+		return;
+	}
+	if (!OLDMEM_BASE && MACHINE_IS_VM)
+		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
+	crashk_res.start = crash_base;
+	crashk_res.end = crash_base + crash_size - 1;
+	insert_resource(&iomem_resource, &crashk_res);
+	reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK);
+	pr_info("Reserving %lluMB of memory at %lluMB "
+		"for crashkernel (System RAM: %luMB)\n",
+		crash_size >> 20, crash_base >> 20, memory_end >> 20);
+#endif
+}
+
 static void __init
 setup_memory(void)
 {
@@ -580,6 +772,14 @@ setup_memory(void)
 		if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
 			start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
 
+#ifdef CONFIG_CRASH_DUMP
+			if (OLDMEM_BASE) {
+				/* Move initrd behind kdump oldmem */
+				if (start + INITRD_SIZE > OLDMEM_BASE &&
+				    start < OLDMEM_BASE + OLDMEM_SIZE)
+					start = OLDMEM_BASE + OLDMEM_SIZE;
+			}
+#endif
 			if (start + INITRD_SIZE > memory_end) {
 				pr_err("initrd extends beyond end of "
 				       "memory (0x%08lx > 0x%08lx) "
@@ -610,7 +810,8 @@ setup_memory(void)
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
 		unsigned long start_chunk, end_chunk, pfn;
 
-		if (memory_chunk[i].type != CHUNK_READ_WRITE)
+		if (memory_chunk[i].type != CHUNK_READ_WRITE &&
+		    memory_chunk[i].type != CHUNK_CRASHK)
 			continue;
 		start_chunk = PFN_DOWN(memory_chunk[i].addr);
 		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
@@ -644,6 +845,15 @@ setup_memory(void)
 	reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
 			BOOTMEM_DEFAULT);
 
+#ifdef CONFIG_CRASH_DUMP
+	if (crashk_res.start)
+		reserve_bootmem(crashk_res.start,
+				crashk_res.end - crashk_res.start + 1,
+				BOOTMEM_DEFAULT);
+	if (is_kdump_kernel())
+		reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE,
+				PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT);
+#endif
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (INITRD_START && INITRD_SIZE) {
 		if (INITRD_START + INITRD_SIZE <= memory_end) {
@@ -812,8 +1022,11 @@ setup_arch(char **cmdline_p)
 	setup_ipl();
 	setup_memory_end();
 	setup_addressing_mode();
+	reserve_oldmem();
+	reserve_crashkernel();
 	setup_memory();
 	setup_resources();
+	setup_vmcoreinfo();
 	setup_restart_psw();
 	setup_lowcore();
 