author      Michael Holzheu <holzheu@linux.vnet.ibm.com>    2011-10-30 10:16:40 -0400
committer   Martin Schwidefsky <schwidefsky@de.ibm.com>     2011-10-30 10:16:42 -0400
commit      60a0c68df2632feaa4a986af084650d1165d89c5
tree        f55907defeab43de02a5a3127c8d5a694a21b3a2 /arch/s390/kernel/setup.c
parent      7f0bf656c66e4292e965c95fd9de55c72b6578bb
[S390] kdump backend code
This patch provides the architecture-specific part of the s390 kdump support.
Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
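
As background for the code added below: find_crash_base() scans the memory chunk array from the top down and picks the highest read-write chunk that can hold the crashkernel, rejecting any candidate base that would overlap the low crash_size bytes, the zfcpdump HSA, or the initrd. The standalone userspace sketch that follows models only that selection policy; the chunk layout, constants, and helper names are invented for illustration and are not part of the patch.

#include <stdio.h>

#define MEMORY_CHUNKS    4
#define CHUNK_READ_WRITE 0
#define CHUNK_READ_ONLY  1

/* Stand-ins for the kernel-side limits checked by the real code. */
#define HSA_SIZE_MAX   (32UL << 20)   /* models ZFCPDUMP_HSA_SIZE_MAX */
#define INITRD_END     (64UL << 20)   /* models INITRD_START + INITRD_SIZE */

struct mem_chunk {
        unsigned long addr;
        unsigned long size;
        int type;
};

/* Invented example layout: two read-write chunks and one read-only chunk. */
static struct mem_chunk memory_chunk[MEMORY_CHUNKS] = {
        { 0x00000000UL, 512UL << 20, CHUNK_READ_WRITE },
        { 0x20000000UL, 512UL << 20, CHUNK_READ_WRITE },
        { 0x40000000UL, 256UL << 20, CHUNK_READ_ONLY },
        { 0, 0, 0 },                            /* unused slot */
};

/* Return the highest suitable crashkernel base, or 0 if none fits. */
static unsigned long find_crash_base(unsigned long crash_size)
{
        unsigned long crash_base;
        int i;

        /* Mirrors the "first memory chunk must be at least crashkernel size" check. */
        if (memory_chunk[0].size < crash_size)
                return 0;
        for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
                const struct mem_chunk *chunk = &memory_chunk[i];

                if (chunk->size == 0 || chunk->type != CHUNK_READ_WRITE)
                        continue;
                if (chunk->size < crash_size)
                        continue;
                crash_base = chunk->addr + chunk->size - crash_size;
                if (crash_base < crash_size)    /* keep the low crash_size bytes free */
                        continue;
                if (crash_base < HSA_SIZE_MAX)  /* stay above the HSA area */
                        continue;
                if (crash_base < INITRD_END)    /* do not overlap the initrd */
                        continue;
                return crash_base;
        }
        return 0;
}

int main(void)
{
        /* Ask for a 128 MiB crashkernel area. */
        printf("crashkernel base: %#lx\n", find_crash_base(128UL << 20));
        return 0;
}

The real function additionally returns OLDMEM_BASE directly when running as a kdump kernel with a matching crashkernel size, as can be seen in the hunks below.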
Diffstat (limited to 'arch/s390/kernel/setup.c')
 -rw-r--r--   arch/s390/kernel/setup.c   208
 1 file changed, 208 insertions, 0 deletions
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7b371c37061d..b5a30412b2e5 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -42,6 +42,9 @@
 #include <linux/reboot.h>
 #include <linux/topology.h>
 #include <linux/ftrace.h>
+#include <linux/kexec.h>
+#include <linux/crash_dump.h>
+#include <linux/memory.h>
 
 #include <asm/ipl.h>
 #include <asm/uaccess.h>
@@ -57,6 +60,7 @@
 #include <asm/ebcdic.h>
 #include <asm/compat.h>
 #include <asm/kvm_virtio.h>
+#include <asm/diag.h>
 
 long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
                         PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
@@ -435,6 +439,9 @@ static void __init setup_resources(void)
         for (i = 0; i < MEMORY_CHUNKS; i++) {
                 if (!memory_chunk[i].size)
                         continue;
+                if (memory_chunk[i].type == CHUNK_OLDMEM ||
+                    memory_chunk[i].type == CHUNK_CRASHK)
+                        continue;
                 res = alloc_bootmem_low(sizeof(*res));
                 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
                 switch (memory_chunk[i].type) {
@@ -479,6 +486,7 @@ static void __init setup_memory_end(void)
         unsigned long max_mem;
         int i;
 
+
 #ifdef CONFIG_ZFCPDUMP
         if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
                 memory_end = ZFCPDUMP_HSA_SIZE;
@@ -550,6 +558,187 @@ static void __init setup_restart_psw(void)
         copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
 }
 
+#ifdef CONFIG_CRASH_DUMP
+
+/*
+ * Find suitable location for crashkernel memory
+ */
+static unsigned long __init find_crash_base(unsigned long crash_size,
+                                            char **msg)
+{
+        unsigned long crash_base;
+        struct mem_chunk *chunk;
+        int i;
+
+        if (memory_chunk[0].size < crash_size) {
+                *msg = "first memory chunk must be at least crashkernel size";
+                return 0;
+        }
+        if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE))
+                return OLDMEM_BASE;
+
+        for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
+                chunk = &memory_chunk[i];
+                if (chunk->size == 0)
+                        continue;
+                if (chunk->type != CHUNK_READ_WRITE)
+                        continue;
+                if (chunk->size < crash_size)
+                        continue;
+                crash_base = (chunk->addr + chunk->size) - crash_size;
+                if (crash_base < crash_size)
+                        continue;
+                if (crash_base < ZFCPDUMP_HSA_SIZE_MAX)
+                        continue;
+                if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE)
+                        continue;
+                return crash_base;
+        }
+        *msg = "no suitable area found";
+        return 0;
+}
+
+/*
+ * Check if crash_base and crash_size is valid
+ */
+static int __init verify_crash_base(unsigned long crash_base,
+                                    unsigned long crash_size,
+                                    char **msg)
+{
+        struct mem_chunk *chunk;
+        int i;
+
+        /*
+         * Because we do the swap to zero, we must have at least 'crash_size'
+         * bytes free space before crash_base
+         */
+        if (crash_size > crash_base) {
+                *msg = "crashkernel offset must be greater than size";
+                return -EINVAL;
+        }
+
+        /* First memory chunk must be at least crash_size */
+        if (memory_chunk[0].size < crash_size) {
+                *msg = "first memory chunk must be at least crashkernel size";
+                return -EINVAL;
+        }
+        /* Check if we fit into the respective memory chunk */
+        for (i = 0; i < MEMORY_CHUNKS; i++) {
+                chunk = &memory_chunk[i];
+                if (chunk->size == 0)
+                        continue;
+                if (crash_base < chunk->addr)
+                        continue;
+                if (crash_base >= chunk->addr + chunk->size)
+                        continue;
+                /* we have found the memory chunk */
+                if (crash_base + crash_size > chunk->addr + chunk->size) {
+                        *msg = "selected memory chunk is too small for "
+                                "crashkernel memory";
+                        return -EINVAL;
+                }
+                return 0;
+        }
+        *msg = "invalid memory range specified";
+        return -EINVAL;
+}
+
+/*
+ * Reserve kdump memory by creating a memory hole in the mem_chunk array
+ */
+static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size,
+                                         int type)
+{
+
+        create_mem_hole(memory_chunk, addr, size, type);
+}
+
+/*
+ * When kdump is enabled, we have to ensure that no memory from
+ * the area [0 - crashkernel memory size] and
+ * [crashk_res.start - crashk_res.end] is set offline.
+ */
+static int kdump_mem_notifier(struct notifier_block *nb,
+                              unsigned long action, void *data)
+{
+        struct memory_notify *arg = data;
+
+        if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
+                return NOTIFY_BAD;
+        if (arg->start_pfn > PFN_DOWN(crashk_res.end))
+                return NOTIFY_OK;
+        if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
+                return NOTIFY_OK;
+        return NOTIFY_BAD;
+}
+
+static struct notifier_block kdump_mem_nb = {
+        .notifier_call = kdump_mem_notifier,
+};
+
+#endif
+
+/*
+ * Make sure that oldmem, where the dump is stored, is protected
+ */
+static void reserve_oldmem(void)
+{
+#ifdef CONFIG_CRASH_DUMP
+        if (!OLDMEM_BASE)
+                return;
+
+        reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM);
+        reserve_kdump_bootmem(OLDMEM_SIZE, memory_end - OLDMEM_SIZE,
+                              CHUNK_OLDMEM);
+        if (OLDMEM_BASE + OLDMEM_SIZE == real_memory_size)
+                saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1;
+        else
+                saved_max_pfn = PFN_DOWN(real_memory_size) - 1;
+#endif
+}
+
+/*
+ * Reserve memory for kdump kernel to be loaded with kexec
+ */
+static void __init reserve_crashkernel(void)
+{
+#ifdef CONFIG_CRASH_DUMP
+        unsigned long long crash_base, crash_size;
+        char *msg;
+        int rc;
+
+        rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
+                               &crash_base);
+        if (rc || crash_size == 0)
+                return;
+        crash_base = PAGE_ALIGN(crash_base);
+        crash_size = PAGE_ALIGN(crash_size);
+        if (register_memory_notifier(&kdump_mem_nb))
+                return;
+        if (!crash_base)
+                crash_base = find_crash_base(crash_size, &msg);
+        if (!crash_base) {
+                pr_info("crashkernel reservation failed: %s\n", msg);
+                unregister_memory_notifier(&kdump_mem_nb);
+                return;
+        }
+        if (verify_crash_base(crash_base, crash_size, &msg)) {
+                pr_info("crashkernel reservation failed: %s\n", msg);
+                unregister_memory_notifier(&kdump_mem_nb);
+                return;
+        }
+        if (!OLDMEM_BASE && MACHINE_IS_VM)
+                diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
+        crashk_res.start = crash_base;
+        crashk_res.end = crash_base + crash_size - 1;
+        insert_resource(&iomem_resource, &crashk_res);
+        reserve_kdump_bootmem(crash_base, crash_size, CHUNK_READ_WRITE);
+        pr_info("Reserving %lluMB of memory at %lluMB "
+                "for crashkernel (System RAM: %luMB)\n",
+                crash_size >> 20, crash_base >> 20, memory_end >> 20);
+#endif
+}
+
 static void __init
 setup_memory(void)
 {
@@ -580,6 +769,14 @@ setup_memory(void)
                 if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
                         start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
 
+#ifdef CONFIG_CRASH_DUMP
+                        if (OLDMEM_BASE) {
+                                /* Move initrd behind kdump oldmem */
+                                if (start + INITRD_SIZE > OLDMEM_BASE &&
+                                    start < OLDMEM_BASE + OLDMEM_SIZE)
+                                        start = OLDMEM_BASE + OLDMEM_SIZE;
+                        }
+#endif
                         if (start + INITRD_SIZE > memory_end) {
                                 pr_err("initrd extends beyond end of "
                                        "memory (0x%08lx > 0x%08lx) "
@@ -644,6 +841,15 @@ setup_memory(void)
         reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
                         BOOTMEM_DEFAULT);
 
+#ifdef CONFIG_CRASH_DUMP
+        if (crashk_res.start)
+                reserve_bootmem(crashk_res.start,
+                                crashk_res.end - crashk_res.start + 1,
+                                BOOTMEM_DEFAULT);
+        if (is_kdump_kernel())
+                reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE,
+                                PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT);
+#endif
 #ifdef CONFIG_BLK_DEV_INITRD
         if (INITRD_START && INITRD_SIZE) {
                 if (INITRD_START + INITRD_SIZE <= memory_end) {
@@ -812,6 +1018,8 @@ setup_arch(char **cmdline_p)
         setup_ipl();
         setup_memory_end();
         setup_addressing_mode();
+        reserve_oldmem();
+        reserve_crashkernel();
         setup_memory();
         setup_resources();
         setup_restart_psw();