Diffstat (limited to 'arch/s390/kernel/setup.c')
-rw-r--r--  arch/s390/kernel/setup.c  279
1 file changed, 247 insertions(+), 32 deletions(-)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7b371c37061d..e58a462949b1 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -42,6 +42,9 @@
 #include <linux/reboot.h>
 #include <linux/topology.h>
 #include <linux/ftrace.h>
+#include <linux/kexec.h>
+#include <linux/crash_dump.h>
+#include <linux/memory.h>
 
 #include <asm/ipl.h>
 #include <asm/uaccess.h>
@@ -57,12 +60,13 @@
 #include <asm/ebcdic.h>
 #include <asm/compat.h>
 #include <asm/kvm_virtio.h>
+#include <asm/diag.h>
 
-long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
-			PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
-long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
-		      PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
-		      PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
+long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
+		       PSW_MASK_EA | PSW_MASK_BA;
+long psw_user_bits = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT |
+		     PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK |
+		     PSW_MASK_PSTATE | PSW_ASC_HOME;
 
 /*
  * User copy operations.
@@ -207,6 +211,8 @@ static void __init setup_zfcpdump(unsigned int console_devno)
 
 	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
 		return;
+	if (OLDMEM_BASE)
+		return;
 	if (console_devno != -1)
 		sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
 			ipl_info.data.fcp.dev_id.devno, console_devno);
@@ -274,22 +280,14 @@ early_param("mem", early_parse_mem);
 unsigned int user_mode = HOME_SPACE_MODE;
 EXPORT_SYMBOL_GPL(user_mode);
 
-static int set_amode_and_uaccess(unsigned long user_amode,
-				 unsigned long user32_amode)
+static int set_amode_primary(void)
 {
-	psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
-			PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
-			PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
+	psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME;
+	psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY;
 #ifdef CONFIG_COMPAT
-	psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode |
-			  PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
-			  PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
-	psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
-			  PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
-			  PSW32_MASK_PSTATE;
+	psw32_user_bits =
+		(psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY;
 #endif
-	psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
-			  PSW_MASK_MCHECK | PSW_DEFAULT_KEY;
 
 	if (MACHINE_HAS_MVCOS) {
 		memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
@@ -325,7 +323,7 @@ early_param("user_mode", early_parse_user_mode);
 static void setup_addressing_mode(void)
 {
 	if (user_mode == PRIMARY_SPACE_MODE) {
-		if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
+		if (set_amode_primary())
 			pr_info("Address spaces switched, "
 				"mvcos available\n");
 		else
@@ -344,24 +342,25 @@ setup_lowcore(void)
 	 */
 	BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
 	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
-	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
+	lc->restart_psw.mask = psw_kernel_bits;
 	lc->restart_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
-	if (user_mode != HOME_SPACE_MODE)
-		lc->restart_psw.mask |= PSW_ASC_HOME;
-	lc->external_new_psw.mask = psw_kernel_bits;
+	lc->external_new_psw.mask = psw_kernel_bits |
+		PSW_MASK_DAT | PSW_MASK_MCHECK;
 	lc->external_new_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
-	lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
+	lc->svc_new_psw.mask = psw_kernel_bits |
+		PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
 	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
-	lc->program_new_psw.mask = psw_kernel_bits;
+	lc->program_new_psw.mask = psw_kernel_bits |
+		PSW_MASK_DAT | PSW_MASK_MCHECK;
 	lc->program_new_psw.addr =
-		PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
-	lc->mcck_new_psw.mask =
-		psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
+		PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
+	lc->mcck_new_psw.mask = psw_kernel_bits;
 	lc->mcck_new_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
-	lc->io_new_psw.mask = psw_kernel_bits;
+	lc->io_new_psw.mask = psw_kernel_bits |
+		PSW_MASK_DAT | PSW_MASK_MCHECK;
 	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
 	lc->clock_comparator = -1ULL;
 	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
@@ -435,10 +434,14 @@ static void __init setup_resources(void)
 	for (i = 0; i < MEMORY_CHUNKS; i++) {
 		if (!memory_chunk[i].size)
 			continue;
+		if (memory_chunk[i].type == CHUNK_OLDMEM ||
+		    memory_chunk[i].type == CHUNK_CRASHK)
+			continue;
 		res = alloc_bootmem_low(sizeof(*res));
 		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
 		switch (memory_chunk[i].type) {
 		case CHUNK_READ_WRITE:
+		case CHUNK_CRASHK:
 			res->name = "System RAM";
 			break;
 		case CHUNK_READ_ONLY:
@@ -479,8 +482,9 @@ static void __init setup_memory_end(void)
 	unsigned long max_mem;
 	int i;
 
+
 #ifdef CONFIG_ZFCPDUMP
-	if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
+	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) {
 		memory_end = ZFCPDUMP_HSA_SIZE;
 		memory_end_set = 1;
 	}
@@ -545,11 +549,201 @@ static void __init setup_restart_psw(void)
 	 * Setup restart PSW for absolute zero lowcore. This is necesary
 	 * if PSW restart is done on an offline CPU that has lowcore zero
 	 */
-	psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
+	psw.mask = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
 	psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
 	copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
 }
 
+static void __init setup_vmcoreinfo(void)
+{
+#ifdef CONFIG_KEXEC
+	unsigned long ptr = paddr_vmcoreinfo_note();
+
+	copy_to_absolute_zero(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr));
+#endif
+}
+
+#ifdef CONFIG_CRASH_DUMP
+
+/*
+ * Find suitable location for crashkernel memory
+ */
+static unsigned long __init find_crash_base(unsigned long crash_size,
+					    char **msg)
+{
+	unsigned long crash_base;
+	struct mem_chunk *chunk;
+	int i;
+
+	if (memory_chunk[0].size < crash_size) {
+		*msg = "first memory chunk must be at least crashkernel size";
+		return 0;
+	}
+	if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE))
+		return OLDMEM_BASE;
+
+	for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
+		chunk = &memory_chunk[i];
+		if (chunk->size == 0)
+			continue;
+		if (chunk->type != CHUNK_READ_WRITE)
+			continue;
+		if (chunk->size < crash_size)
+			continue;
+		crash_base = (chunk->addr + chunk->size) - crash_size;
+		if (crash_base < crash_size)
+			continue;
+		if (crash_base < ZFCPDUMP_HSA_SIZE_MAX)
+			continue;
+		if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE)
+			continue;
+		return crash_base;
+	}
+	*msg = "no suitable area found";
+	return 0;
+}
+
+/*
+ * Check if crash_base and crash_size is valid
+ */
+static int __init verify_crash_base(unsigned long crash_base,
+				    unsigned long crash_size,
+				    char **msg)
+{
+	struct mem_chunk *chunk;
+	int i;
+
+	/*
+	 * Because we do the swap to zero, we must have at least 'crash_size'
+	 * bytes free space before crash_base
+	 */
+	if (crash_size > crash_base) {
+		*msg = "crashkernel offset must be greater than size";
+		return -EINVAL;
+	}
+
+	/* First memory chunk must be at least crash_size */
+	if (memory_chunk[0].size < crash_size) {
+		*msg = "first memory chunk must be at least crashkernel size";
+		return -EINVAL;
+	}
+	/* Check if we fit into the respective memory chunk */
+	for (i = 0; i < MEMORY_CHUNKS; i++) {
+		chunk = &memory_chunk[i];
+		if (chunk->size == 0)
+			continue;
+		if (crash_base < chunk->addr)
+			continue;
+		if (crash_base >= chunk->addr + chunk->size)
+			continue;
+		/* we have found the memory chunk */
+		if (crash_base + crash_size > chunk->addr + chunk->size) {
+			*msg = "selected memory chunk is too small for "
+				"crashkernel memory";
+			return -EINVAL;
+		}
+		return 0;
+	}
+	*msg = "invalid memory range specified";
+	return -EINVAL;
+}
+
+/*
+ * Reserve kdump memory by creating a memory hole in the mem_chunk array
+ */
+static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size,
+					 int type)
+{
+
+	create_mem_hole(memory_chunk, addr, size, type);
+}
+
+/*
+ * When kdump is enabled, we have to ensure that no memory from
+ * the area [0 - crashkernel memory size] and
+ * [crashk_res.start - crashk_res.end] is set offline.
+ */
+static int kdump_mem_notifier(struct notifier_block *nb,
+			      unsigned long action, void *data)
+{
+	struct memory_notify *arg = data;
+
+	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
+		return NOTIFY_BAD;
+	if (arg->start_pfn > PFN_DOWN(crashk_res.end))
+		return NOTIFY_OK;
+	if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
+		return NOTIFY_OK;
+	return NOTIFY_BAD;
+}
+
+static struct notifier_block kdump_mem_nb = {
+	.notifier_call = kdump_mem_notifier,
+};
+
+#endif
+
+/*
+ * Make sure that oldmem, where the dump is stored, is protected
+ */
+static void reserve_oldmem(void)
+{
+#ifdef CONFIG_CRASH_DUMP
+	if (!OLDMEM_BASE)
+		return;
+
+	reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM);
+	reserve_kdump_bootmem(OLDMEM_SIZE, memory_end - OLDMEM_SIZE,
+			      CHUNK_OLDMEM);
+	if (OLDMEM_BASE + OLDMEM_SIZE == real_memory_size)
+		saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1;
+	else
+		saved_max_pfn = PFN_DOWN(real_memory_size) - 1;
+#endif
+}
+
+/*
+ * Reserve memory for kdump kernel to be loaded with kexec
+ */
+static void __init reserve_crashkernel(void)
+{
+#ifdef CONFIG_CRASH_DUMP
+	unsigned long long crash_base, crash_size;
+	char *msg;
+	int rc;
+
+	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
+			       &crash_base);
+	if (rc || crash_size == 0)
+		return;
+	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
+	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
+	if (register_memory_notifier(&kdump_mem_nb))
+		return;
+	if (!crash_base)
+		crash_base = find_crash_base(crash_size, &msg);
+	if (!crash_base) {
+		pr_info("crashkernel reservation failed: %s\n", msg);
+		unregister_memory_notifier(&kdump_mem_nb);
+		return;
+	}
+	if (verify_crash_base(crash_base, crash_size, &msg)) {
+		pr_info("crashkernel reservation failed: %s\n", msg);
+		unregister_memory_notifier(&kdump_mem_nb);
+		return;
+	}
+	if (!OLDMEM_BASE && MACHINE_IS_VM)
+		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
+	crashk_res.start = crash_base;
+	crashk_res.end = crash_base + crash_size - 1;
+	insert_resource(&iomem_resource, &crashk_res);
+	reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK);
+	pr_info("Reserving %lluMB of memory at %lluMB "
+		"for crashkernel (System RAM: %luMB)\n",
+		crash_size >> 20, crash_base >> 20, memory_end >> 20);
+#endif
+}
+
 static void __init
 setup_memory(void)
 {
@@ -580,6 +774,14 @@ setup_memory(void)
 	if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
 		start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
 
+#ifdef CONFIG_CRASH_DUMP
+		if (OLDMEM_BASE) {
+			/* Move initrd behind kdump oldmem */
+			if (start + INITRD_SIZE > OLDMEM_BASE &&
+			    start < OLDMEM_BASE + OLDMEM_SIZE)
+				start = OLDMEM_BASE + OLDMEM_SIZE;
+		}
+#endif
 		if (start + INITRD_SIZE > memory_end) {
 			pr_err("initrd extends beyond end of "
 			       "memory (0x%08lx > 0x%08lx) "
@@ -610,7 +812,8 @@ setup_memory(void)
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
 		unsigned long start_chunk, end_chunk, pfn;
 
-		if (memory_chunk[i].type != CHUNK_READ_WRITE)
+		if (memory_chunk[i].type != CHUNK_READ_WRITE &&
+		    memory_chunk[i].type != CHUNK_CRASHK)
 			continue;
 		start_chunk = PFN_DOWN(memory_chunk[i].addr);
 		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
@@ -644,6 +847,15 @@ setup_memory(void)
 	reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
 			BOOTMEM_DEFAULT);
 
+#ifdef CONFIG_CRASH_DUMP
+	if (crashk_res.start)
+		reserve_bootmem(crashk_res.start,
+				crashk_res.end - crashk_res.start + 1,
+				BOOTMEM_DEFAULT);
+	if (is_kdump_kernel())
+		reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE,
+				PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT);
+#endif
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (INITRD_START && INITRD_SIZE) {
 		if (INITRD_START + INITRD_SIZE <= memory_end) {
@@ -812,8 +1024,11 @@ setup_arch(char **cmdline_p)
 	setup_ipl();
 	setup_memory_end();
 	setup_addressing_mode();
+	reserve_oldmem();
+	reserve_crashkernel();
 	setup_memory();
 	setup_resources();
+	setup_vmcoreinfo();
 	setup_restart_psw();
 	setup_lowcore();
 