-rw-r--r--  arch/s390/Kconfig              |   3
-rw-r--r--  arch/s390/include/asm/setup.h  |  16
-rw-r--r--  arch/s390/kernel/crash_dump.c  |  83
-rw-r--r--  arch/s390/kernel/early.c       |   6
-rw-r--r--  arch/s390/kernel/head31.S      |   1
-rw-r--r--  arch/s390/kernel/setup.c       | 451
-rw-r--r--  arch/s390/kernel/topology.c    |   4
-rw-r--r--  arch/s390/mm/mem_detect.c      | 130
-rw-r--r--  arch/s390/mm/vmem.c            |  30
-rw-r--r--  drivers/s390/char/zcore.c      |  44
10 files changed, 270 insertions(+), 498 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index d68fe34799b0..d239e6afb923 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -60,7 +60,6 @@ config PCI_QUIRKS
 
 config S390
 	def_bool y
-	select ARCH_DISCARD_MEMBLOCK
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -130,6 +129,7 @@ config S390
 	select HAVE_KVM if 64BIT
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
+	select HAVE_MEMBLOCK_PHYS_MAP
 	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_OPROFILE
 	select HAVE_PERF_EVENTS
@@ -139,6 +139,7 @@ config S390
 	select HAVE_VIRT_CPU_ACCOUNTING
 	select KTIME_SCALAR if 32BIT
 	select MODULES_USE_ELF_RELA
+	select NO_BOOTMEM
 	select OLD_SIGACTION
 	select OLD_SIGSUSPEND3
 	select SYSCTL_EXCEPTION_TRACE
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index b31b22dba948..089a49814c50 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -9,7 +9,6 @@
 
 
 #define PARMAREA		0x10400
-#define MEMORY_CHUNKS		256
 
 #ifndef __ASSEMBLY__
 
@@ -31,22 +30,11 @@
 #endif /* CONFIG_64BIT */
 #define COMMAND_LINE	((char *) (0x10480))
 
-#define CHUNK_READ_WRITE 0
-#define CHUNK_READ_ONLY 1
-
-struct mem_chunk {
-	unsigned long addr;
-	unsigned long size;
-	int type;
-};
-
-extern struct mem_chunk memory_chunk[];
 extern int memory_end_set;
 extern unsigned long memory_end;
+extern unsigned long max_physmem_end;
 
-void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
-void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
-		     unsigned long size);
+extern void detect_memory_memblock(void);
 
 /*
  * Machine features detected in head.S
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index d7658c4b2ed5..a3b9150e6802 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/bootmem.h>
 #include <linux/elf.h>
+#include <linux/memblock.h>
 #include <asm/os_info.h>
 #include <asm/elf.h>
 #include <asm/ipl.h>
@@ -22,6 +23,24 @@
 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
 #define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
 
+static struct memblock_region oldmem_region;
+
+static struct memblock_type oldmem_type = {
+	.cnt = 1,
+	.max = 1,
+	.total_size = 0,
+	.regions = &oldmem_region,
+};
+
+#define for_each_dump_mem_range(i, nid, p_start, p_end, p_nid)		\
+	for (i = 0, __next_mem_range(&i, nid, &memblock.physmem,	\
+				     &oldmem_type, p_start,		\
+				     p_end, p_nid);			\
+	     i != (u64)ULLONG_MAX;					\
+	     __next_mem_range(&i, nid, &memblock.physmem,		\
+			      &oldmem_type,				\
+			      p_start, p_end, p_nid))
+
 struct dump_save_areas dump_save_areas;
 
 /*
@@ -264,19 +283,6 @@ static void *kzalloc_panic(int len)
 }
 
 /*
- * Get memory layout and create hole for oldmem
- */
-static struct mem_chunk *get_memory_layout(void)
-{
-	struct mem_chunk *chunk_array;
-
-	chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
-	detect_memory_layout(chunk_array, 0);
-	create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE);
-	return chunk_array;
-}
-
-/*
  * Initialize ELF note
  */
 static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len,
@@ -490,52 +496,33 @@ static int get_cpu_cnt(void)
  */
 static int get_mem_chunk_cnt(void)
 {
-	struct mem_chunk *chunk_array, *mem_chunk;
-	int i, cnt = 0;
+	int cnt = 0;
+	u64 idx;
 
-	chunk_array = get_memory_layout();
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		mem_chunk = &chunk_array[i];
-		if (chunk_array[i].type != CHUNK_READ_WRITE &&
-		    chunk_array[i].type != CHUNK_READ_ONLY)
-			continue;
-		if (mem_chunk->size == 0)
-			continue;
+	for_each_dump_mem_range(idx, NUMA_NO_NODE, NULL, NULL, NULL)
 		cnt++;
-	}
-	kfree(chunk_array);
 	return cnt;
 }
 
 /*
  * Initialize ELF loads (new kernel)
  */
-static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
+static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
 {
-	struct mem_chunk *chunk_array, *mem_chunk;
-	int i;
+	phys_addr_t start, end;
+	u64 idx;
 
-	chunk_array = get_memory_layout();
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		mem_chunk = &chunk_array[i];
-		if (mem_chunk->size == 0)
-			continue;
-		if (chunk_array[i].type != CHUNK_READ_WRITE &&
-		    chunk_array[i].type != CHUNK_READ_ONLY)
-			continue;
-		else
-			phdr->p_filesz = mem_chunk->size;
+	for_each_dump_mem_range(idx, NUMA_NO_NODE, &start, &end, NULL) {
+		phdr->p_filesz = end - start;
 		phdr->p_type = PT_LOAD;
-		phdr->p_offset = mem_chunk->addr;
-		phdr->p_vaddr = mem_chunk->addr;
-		phdr->p_paddr = mem_chunk->addr;
-		phdr->p_memsz = mem_chunk->size;
+		phdr->p_offset = start;
+		phdr->p_vaddr = start;
+		phdr->p_paddr = start;
+		phdr->p_memsz = end - start;
 		phdr->p_flags = PF_R | PF_W | PF_X;
 		phdr->p_align = PAGE_SIZE;
 		phdr++;
 	}
-	kfree(chunk_array);
-	return i;
 }
 
 /*
@@ -584,6 +571,14 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 	/* If we cannot get HSA size for zfcpdump return error */
 	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp_get_hsa_size())
 		return -ENODEV;
+
+	/* For kdump, exclude previous crashkernel memory */
+	if (OLDMEM_BASE) {
+		oldmem_region.base = OLDMEM_BASE;
+		oldmem_region.size = OLDMEM_SIZE;
+		oldmem_type.total_size = OLDMEM_SIZE;
+	}
+
 	mem_chunk_cnt = get_mem_chunk_cnt();
 
 	alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index a734f3585ceb..0dff972a169c 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -258,13 +258,19 @@ static __init void setup_topology(void)
 static void early_pgm_check_handler(void)
 {
 	const struct exception_table_entry *fixup;
+	unsigned long cr0, cr0_new;
 	unsigned long addr;
 
 	addr = S390_lowcore.program_old_psw.addr;
 	fixup = search_exception_tables(addr & PSW_ADDR_INSN);
 	if (!fixup)
 		disabled_wait(0);
+	/* Disable low address protection before storing into lowcore. */
+	__ctl_store(cr0, 0, 0);
+	cr0_new = cr0 & ~(1UL << 28);
+	__ctl_load(cr0_new, 0, 0);
 	S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE;
+	__ctl_load(cr0, 0, 0);
 }
 
 static noinline __init void setup_lowcore_early(void)
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 9a99856df1c9..6dbe80983a24 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -59,7 +59,6 @@ ENTRY(startup_continue)
 	.long	0		# cr13: home space segment table
 	.long	0xc0000000	# cr14: machine check handling off
 	.long	0		# cr15: linkage stack operations
-.Lmchunk:.long	memory_chunk
 .Lbss_bgn:  .long __bss_start
 .Lbss_end:  .long _end
 .Lparmaddr: .long PARMAREA
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 88d1ca81e2dd..1f5536c2fd02 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -78,10 +78,9 @@ EXPORT_SYMBOL(console_irq);
 unsigned long elf_hwcap = 0;
 char elf_platform[ELF_PLATFORM_SIZE];
 
-struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
-
 int __initdata memory_end_set;
 unsigned long __initdata memory_end;
+unsigned long __initdata max_physmem_end;
 
 unsigned long VMALLOC_START;
 EXPORT_SYMBOL(VMALLOC_START);
@@ -273,6 +272,7 @@ EXPORT_SYMBOL_GPL(pm_power_off);
 static int __init early_parse_mem(char *p)
 {
 	memory_end = memparse(p, &p);
+	memory_end &= PAGE_MASK;
 	memory_end_set = 1;
 	return 0;
 }
@@ -401,7 +401,8 @@ static struct resource __initdata *standard_resources[] = {
 static void __init setup_resources(void)
 {
 	struct resource *res, *std_res, *sub_res;
-	int i, j;
+	struct memblock_region *reg;
+	int j;
 
 	code_resource.start = (unsigned long) &_text;
 	code_resource.end = (unsigned long) &_etext - 1;
@@ -410,24 +411,13 @@ static void __init setup_resources(void)
 	bss_resource.start = (unsigned long) &__bss_start;
 	bss_resource.end = (unsigned long) &__bss_stop - 1;
 
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		if (!memory_chunk[i].size)
-			continue;
+	for_each_memblock(memory, reg) {
 		res = alloc_bootmem_low(sizeof(*res));
 		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
-		switch (memory_chunk[i].type) {
-		case CHUNK_READ_WRITE:
-			res->name = "System RAM";
-			break;
-		case CHUNK_READ_ONLY:
-			res->name = "System ROM";
-			res->flags |= IORESOURCE_READONLY;
-			break;
-		default:
-			res->name = "reserved";
-		}
-		res->start = memory_chunk[i].addr;
-		res->end = res->start + memory_chunk[i].size - 1;
+
+		res->name = "System RAM";
+		res->start = reg->base;
+		res->end = reg->base + reg->size - 1;
 		request_resource(&iomem_resource, res);
 
 		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
@@ -451,48 +441,11 @@ static void __init setup_resources(void)
 static void __init setup_memory_end(void)
 {
 	unsigned long vmax, vmalloc_size, tmp;
-	unsigned long real_memory_size = 0;
-	int i;
-
-
-#ifdef CONFIG_ZFCPDUMP
-	if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
-	    !OLDMEM_BASE && sclp_get_hsa_size()) {
-		memory_end = sclp_get_hsa_size();
-		memory_end_set = 1;
-	}
-#endif
-	memory_end &= PAGE_MASK;
-
-	/*
-	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
-	 * extra checks that HOLES_IN_ZONE would require.
-	 */
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		unsigned long start, end;
-		struct mem_chunk *chunk;
-		unsigned long align;
-
-		chunk = &memory_chunk[i];
-		if (!chunk->size)
-			continue;
-		align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
-		start = (chunk->addr + align - 1) & ~(align - 1);
-		end = (chunk->addr + chunk->size) & ~(align - 1);
-		if (start >= end)
-			memset(chunk, 0, sizeof(*chunk));
-		else {
-			chunk->addr = start;
-			chunk->size = end - start;
-		}
-		real_memory_size = max(real_memory_size,
-				       chunk->addr + chunk->size);
-	}
 
 	/* Choose kernel address space layout: 2, 3, or 4 levels. */
 #ifdef CONFIG_64BIT
 	vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
-	tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
+	tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
 	tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
 	if (tmp <= (1UL << 42))
 		vmax = 1UL << 42;	/* 3-level kernel page table */
@@ -520,21 +473,11 @@ static void __init setup_memory_end(void)
 	vmemmap = (struct page *) tmp;
 
 	/* Take care that memory_end is set and <= vmemmap */
-	memory_end = min(memory_end ?: real_memory_size, tmp);
-
-	/* Fixup memory chunk array to fit into 0..memory_end */
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		struct mem_chunk *chunk = &memory_chunk[i];
+	memory_end = min(memory_end ?: max_physmem_end, tmp);
+	max_pfn = max_low_pfn = PFN_DOWN(memory_end);
+	memblock_remove(memory_end, ULONG_MAX);
 
-		if (!chunk->size)
-			continue;
-		if (chunk->addr >= memory_end) {
-			memset(chunk, 0, sizeof(*chunk));
-			continue;
-		}
-		if (chunk->addr + chunk->size > memory_end)
-			chunk->size = memory_end - chunk->addr;
-	}
+	pr_notice("Max memory size: %luMB\n", memory_end >> 20);
 }
 
 static void __init setup_vmcoreinfo(void)
@@ -545,89 +488,6 @@ static void __init setup_vmcoreinfo(void)
 #ifdef CONFIG_CRASH_DUMP
 
 /*
- * Find suitable location for crashkernel memory
- */
-static unsigned long __init find_crash_base(unsigned long crash_size,
-					    char **msg)
-{
-	unsigned long crash_base;
-	struct mem_chunk *chunk;
-	int i;
-
-	if (memory_chunk[0].size < crash_size) {
-		*msg = "first memory chunk must be at least crashkernel size";
-		return 0;
-	}
-	if (OLDMEM_BASE && crash_size == OLDMEM_SIZE)
-		return OLDMEM_BASE;
-
-	for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
-		chunk = &memory_chunk[i];
-		if (chunk->size == 0)
-			continue;
-		if (chunk->type != CHUNK_READ_WRITE)
-			continue;
-		if (chunk->size < crash_size)
-			continue;
-		crash_base = (chunk->addr + chunk->size) - crash_size;
-		if (crash_base < crash_size)
-			continue;
-		if (crash_base < sclp_get_hsa_size())
-			continue;
-		if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE)
-			continue;
-		return crash_base;
-	}
-	*msg = "no suitable area found";
-	return 0;
-}
-
-/*
- * Check if crash_base and crash_size is valid
- */
-static int __init verify_crash_base(unsigned long crash_base,
-				    unsigned long crash_size,
-				    char **msg)
-{
-	struct mem_chunk *chunk;
-	int i;
-
-	/*
-	 * Because we do the swap to zero, we must have at least 'crash_size'
-	 * bytes free space before crash_base
-	 */
-	if (crash_size > crash_base) {
-		*msg = "crashkernel offset must be greater than size";
-		return -EINVAL;
-	}
-
-	/* First memory chunk must be at least crash_size */
-	if (memory_chunk[0].size < crash_size) {
-		*msg = "first memory chunk must be at least crashkernel size";
-		return -EINVAL;
-	}
-	/* Check if we fit into the respective memory chunk */
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		chunk = &memory_chunk[i];
-		if (chunk->size == 0)
-			continue;
-		if (crash_base < chunk->addr)
-			continue;
-		if (crash_base >= chunk->addr + chunk->size)
-			continue;
-		/* we have found the memory chunk */
-		if (crash_base + crash_size > chunk->addr + chunk->size) {
-			*msg = "selected memory chunk is too small for "
-				"crashkernel memory";
-			return -EINVAL;
-		}
-		return 0;
-	}
-	*msg = "invalid memory range specified";
-	return -EINVAL;
-}
-
-/*
  * When kdump is enabled, we have to ensure that no memory from
  * the area [0 - crashkernel memory size] and
  * [crashk_res.start - crashk_res.end] is set offline.
@@ -653,23 +513,44 @@ static struct notifier_block kdump_mem_nb = {
 #endif
 
 /*
+ * Make sure that the area behind memory_end is protected
+ */
+static void reserve_memory_end(void)
+{
+#ifdef CONFIG_ZFCPDUMP
+	if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
+	    !OLDMEM_BASE && sclp_get_hsa_size()) {
+		memory_end = sclp_get_hsa_size();
+		memory_end &= PAGE_MASK;
+		memory_end_set = 1;
+	}
+#endif
+	if (!memory_end_set)
+		return;
+	memblock_reserve(memory_end, ULONG_MAX);
+}
+
+/*
  * Make sure that oldmem, where the dump is stored, is protected
  */
 static void reserve_oldmem(void)
 {
 #ifdef CONFIG_CRASH_DUMP
-	unsigned long real_size = 0;
-	int i;
-
-	if (!OLDMEM_BASE)
-		return;
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		struct mem_chunk *chunk = &memory_chunk[i];
+	if (OLDMEM_BASE)
+		/* Forget all memory above the running kdump system */
+		memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
+#endif
+}
 
-		real_size = max(real_size, chunk->addr + chunk->size);
-	}
-	create_mem_hole(memory_chunk, OLDMEM_BASE, OLDMEM_SIZE);
-	create_mem_hole(memory_chunk, OLDMEM_SIZE, real_size - OLDMEM_SIZE);
+/*
+ * Make sure that oldmem, where the dump is stored, is protected
+ */
+static void remove_oldmem(void)
+{
+#ifdef CONFIG_CRASH_DUMP
+	if (OLDMEM_BASE)
+		/* Forget all memory above the running kdump system */
+		memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
 #endif
 }
 
@@ -680,167 +561,132 @@ static void __init reserve_crashkernel(void)
 {
 #ifdef CONFIG_CRASH_DUMP
 	unsigned long long crash_base, crash_size;
-	char *msg = NULL;
+	phys_addr_t low, high;
 	int rc;
 
 	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
 			       &crash_base);
-	if (rc || crash_size == 0)
-		return;
+
 	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
 	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
-	if (register_memory_notifier(&kdump_mem_nb))
+	if (rc || crash_size == 0)
 		return;
-	if (!crash_base)
-		crash_base = find_crash_base(crash_size, &msg);
-	if (!crash_base) {
-		pr_info("crashkernel reservation failed: %s\n", msg);
-		unregister_memory_notifier(&kdump_mem_nb);
+
+	if (memblock.memory.regions[0].size < crash_size) {
+		pr_info("crashkernel reservation failed: %s\n",
+			"first memory chunk must be at least crashkernel size");
 		return;
 	}
-	if (verify_crash_base(crash_base, crash_size, &msg)) {
-		pr_info("crashkernel reservation failed: %s\n", msg);
-		unregister_memory_notifier(&kdump_mem_nb);
+
+	low = crash_base ?: OLDMEM_BASE;
+	high = low + crash_size;
+	if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) {
+		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
+		crash_base = low;
+	} else {
+		/* Find suitable area in free memory */
+		low = max_t(unsigned long, crash_size, sclp_get_hsa_size());
+		high = crash_base ? crash_base + crash_size : ULONG_MAX;
+
+		if (crash_base && crash_base < low) {
+			pr_info("crashkernel reservation failed: %s\n",
+				"crash_base too low");
+			return;
+		}
+		low = crash_base ?: low;
+		crash_base = memblock_find_in_range(low, high, crash_size,
+						    KEXEC_CRASH_MEM_ALIGN);
+	}
+
+	if (!crash_base) {
+		pr_info("crashkernel reservation failed: %s\n",
+			"no suitable area found");
 		return;
 	}
+
+	if (register_memory_notifier(&kdump_mem_nb))
+		return;
+
 	if (!OLDMEM_BASE && MACHINE_IS_VM)
 		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
 	crashk_res.start = crash_base;
 	crashk_res.end = crash_base + crash_size - 1;
 	insert_resource(&iomem_resource, &crashk_res);
-	create_mem_hole(memory_chunk, crash_base, crash_size);
+	memblock_remove(crash_base, crash_size);
 	pr_info("Reserving %lluMB of memory at %lluMB "
 		"for crashkernel (System RAM: %luMB)\n",
-		crash_size >> 20, crash_base >> 20, memory_end >> 20);
+		crash_size >> 20, crash_base >> 20,
+		(unsigned long)memblock.memory.total_size >> 20);
 	os_info_crashkernel_add(crash_base, crash_size);
 #endif
 }
 
-static void __init setup_memory(void)
+/*
+ * Reserve the initrd from being used by memblock
+ */
+static void __init reserve_initrd(void)
 {
-	unsigned long bootmap_size;
-	unsigned long start_pfn, end_pfn;
-	int i;
+#ifdef CONFIG_BLK_DEV_INITRD
+	initrd_start = INITRD_START;
+	initrd_end = initrd_start + INITRD_SIZE;
+	memblock_reserve(INITRD_START, INITRD_SIZE);
+#endif
+}
 
-	/*
-	 * partially used pages are not usable - thus
-	 * we are rounding upwards:
-	 */
+/*
+ * Check for initrd being in usable memory
+ */
+static void __init check_initrd(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (INITRD_START && INITRD_SIZE &&
+	    !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
+		pr_err("initrd does not fit memory.\n");
+		memblock_free(INITRD_START, INITRD_SIZE);
+		initrd_start = initrd_end = 0;
+	}
+#endif
+}
+
+/*
+ * Reserve all kernel text
+ */
+static void __init reserve_kernel(void)
+{
+	unsigned long start_pfn;
 	start_pfn = PFN_UP(__pa(&_end));
-	end_pfn = max_pfn = PFN_DOWN(memory_end);
 
-#ifdef CONFIG_BLK_DEV_INITRD
 	/*
-	 * Move the initrd in case the bitmap of the bootmem allocater
-	 * would overwrite it.
+	 * Reserve memory used for lowcore/command line/kernel image.
 	 */
+	memblock_reserve(0, (unsigned long)_ehead);
+	memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
+			 - (unsigned long)_stext);
+}
 
-	if (INITRD_START && INITRD_SIZE) {
-		unsigned long bmap_size;
-		unsigned long start;
-
-		bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
-		bmap_size = PFN_PHYS(bmap_size);
-
-		if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
-			start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
-
+static void __init reserve_elfcorehdr(void)
+{
 #ifdef CONFIG_CRASH_DUMP
-			if (OLDMEM_BASE) {
-				/* Move initrd behind kdump oldmem */
-				if (start + INITRD_SIZE > OLDMEM_BASE &&
-				    start < OLDMEM_BASE + OLDMEM_SIZE)
-					start = OLDMEM_BASE + OLDMEM_SIZE;
-			}
-#endif
-			if (start + INITRD_SIZE > memory_end) {
-				pr_err("initrd extends beyond end of "
-				       "memory (0x%08lx > 0x%08lx) "
-				       "disabling initrd\n",
-				       start + INITRD_SIZE, memory_end);
-				INITRD_START = INITRD_SIZE = 0;
-			} else {
-				pr_info("Moving initrd (0x%08lx -> "
-					"0x%08lx, size: %ld)\n",
-					INITRD_START, start, INITRD_SIZE);
-				memmove((void *) start, (void *) INITRD_START,
-					INITRD_SIZE);
-				INITRD_START = start;
-			}
-		}
-	}
+	if (is_kdump_kernel())
+		memblock_reserve(elfcorehdr_addr - OLDMEM_BASE,
+				 PAGE_ALIGN(elfcorehdr_size));
 #endif
+}
 
-	/*
-	 * Initialize the boot-time allocator
-	 */
-	bootmap_size = init_bootmem(start_pfn, end_pfn);
+static void __init setup_memory(void)
+{
+	struct memblock_region *reg;
 
 	/*
-	 * Register RAM areas with the bootmem allocator.
+	 * Init storage key for present memory
 	 */
-
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		unsigned long start_chunk, end_chunk, pfn;
-
-		if (!memory_chunk[i].size)
-			continue;
-		start_chunk = PFN_DOWN(memory_chunk[i].addr);
-		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
-		end_chunk = min(end_chunk, end_pfn);
-		if (start_chunk >= end_chunk)
-			continue;
-		memblock_add_node(PFN_PHYS(start_chunk),
-				  PFN_PHYS(end_chunk - start_chunk), 0);
-		pfn = max(start_chunk, start_pfn);
-		storage_key_init_range(PFN_PHYS(pfn), PFN_PHYS(end_chunk));
+	for_each_memblock(memory, reg) {
+		storage_key_init_range(reg->base, reg->base + reg->size);
 	}
-
 	psw_set_key(PAGE_DEFAULT_KEY);
 
-	free_bootmem_with_active_regions(0, max_pfn);
-
-	/*
-	 * Reserve memory used for lowcore/command line/kernel image.
-	 */
-	reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT);
-	reserve_bootmem((unsigned long)_stext,
-			PFN_PHYS(start_pfn) - (unsigned long)_stext,
-			BOOTMEM_DEFAULT);
-	/*
-	 * Reserve the bootmem bitmap itself as well. We do this in two
-	 * steps (first step was init_bootmem()) because this catches
-	 * the (very unlikely) case of us accidentally initializing the
-	 * bootmem allocator with an invalid RAM area.
-	 */
-	reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
-			BOOTMEM_DEFAULT);
-
-#ifdef CONFIG_CRASH_DUMP
-	if (crashk_res.start)
-		reserve_bootmem(crashk_res.start,
-				crashk_res.end - crashk_res.start + 1,
-				BOOTMEM_DEFAULT);
-	if (is_kdump_kernel())
-		reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE,
-				PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT);
-#endif
-#ifdef CONFIG_BLK_DEV_INITRD
-	if (INITRD_START && INITRD_SIZE) {
-		if (INITRD_START + INITRD_SIZE <= memory_end) {
-			reserve_bootmem(INITRD_START, INITRD_SIZE,
-					BOOTMEM_DEFAULT);
-			initrd_start = INITRD_START;
-			initrd_end = initrd_start + INITRD_SIZE;
-		} else {
-			pr_err("initrd extends beyond end of "
-			       "memory (0x%08lx > 0x%08lx) "
-			       "disabling initrd\n",
-			       initrd_start + INITRD_SIZE, memory_end);
-			initrd_start = initrd_end = 0;
-		}
-	}
-#endif
+	/* Only cosmetics */
+	memblock_enforce_memory_limit(memblock_end_of_DRAM());
 }
 
 /*
@@ -989,23 +835,46 @@ void __init setup_arch(char **cmdline_p)
 
 	ROOT_DEV = Root_RAM0;
 
+	/* Is init_mm really needed? */
 	init_mm.start_code = PAGE_OFFSET;
 	init_mm.end_code = (unsigned long) &_etext;
 	init_mm.end_data = (unsigned long) &_edata;
 	init_mm.brk = (unsigned long) &_end;
 
 	parse_early_param();
-	detect_memory_layout(memory_chunk, memory_end);
 	os_info_init();
 	setup_ipl();
+
+	/* Do some memory reservations *before* memory is added to memblock */
+	reserve_memory_end();
 	reserve_oldmem();
+	reserve_kernel();
+	reserve_initrd();
+	reserve_elfcorehdr();
+	memblock_allow_resize();
+
+	/* Get information about *all* installed memory */
+	detect_memory_memblock();
+
+	remove_oldmem();
+
+	/*
+	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
+	 * extra checks that HOLES_IN_ZONE would require.
+	 *
+	 * Is this still required?
+	 */
+	memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT));
+
 	setup_memory_end();
-	reserve_crashkernel();
 	setup_memory();
+
+	check_initrd();
+	reserve_crashkernel();
+
 	setup_resources();
 	setup_vmcoreinfo();
 	setup_lowcore();
-
 	smp_fill_possible_mask();
 	cpu_init();
 	s390_init_cpu_topology();
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 6298fed11ced..fa3b8cdaadac 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -333,7 +333,9 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
 		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
 	nr_masks = max(nr_masks, 1);
 	for (i = 0; i < nr_masks; i++) {
-		mask->next = alloc_bootmem(sizeof(struct mask_info));
+		mask->next = alloc_bootmem_align(
+			roundup_pow_of_two(sizeof(struct mask_info)),
+			roundup_pow_of_two(sizeof(struct mask_info)));
 		mask = mask->next;
 	}
 }
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
index cca388253a39..5535cfe0ee11 100644
--- a/arch/s390/mm/mem_detect.c
+++ b/arch/s390/mm/mem_detect.c
@@ -6,130 +6,60 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/memblock.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
 #include <asm/ipl.h>
 #include <asm/sclp.h>
 #include <asm/setup.h>
 
 #define ADDR2G (1ULL << 31)
 
-static void find_memory_chunks(struct mem_chunk chunk[], unsigned long maxsize)
+#define CHUNK_READ_WRITE 0
+#define CHUNK_READ_ONLY 1
+
+static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size)
+{
+	memblock_add_range(&memblock.memory, start, size, 0, 0);
+	memblock_add_range(&memblock.physmem, start, size, 0, 0);
+}
+
+void __init detect_memory_memblock(void)
 {
 	unsigned long long memsize, rnmax, rzm;
-	unsigned long addr = 0, size;
-	int i = 0, type;
+	unsigned long addr, size;
+	int type;
 
 	rzm = sclp_get_rzm();
 	rnmax = sclp_get_rnmax();
 	memsize = rzm * rnmax;
 	if (!rzm)
 		rzm = 1ULL << 17;
-	if (sizeof(long) == 4) {
+	if (IS_ENABLED(CONFIG_32BIT)) {
 		rzm = min(ADDR2G, rzm);
-		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
+		memsize = min(ADDR2G, memsize);
 	}
-	if (maxsize)
-		memsize = memsize ? min((unsigned long)memsize, maxsize) : maxsize;
+	max_physmem_end = memsize;
+	addr = 0;
+	/* keep memblock lists close to the kernel */
+	memblock_set_bottom_up(true);
 	do {
 		size = 0;
 		type = tprot(addr);
 		do {
 			size += rzm;
-			if (memsize && addr + size >= memsize)
+			if (max_physmem_end && addr + size >= max_physmem_end)
 				break;
 		} while (type == tprot(addr + size));
 		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
-			if (memsize && (addr + size > memsize))
-				size = memsize - addr;
-			chunk[i].addr = addr;
-			chunk[i].size = size;
-			chunk[i].type = type;
-			i++;
+			if (max_physmem_end && (addr + size > max_physmem_end))
+				size = max_physmem_end - addr;
+			memblock_physmem_add(addr, size);
 		}
 		addr += size;
-	} while (addr < memsize && i < MEMORY_CHUNKS);
-}
-
-/**
- * detect_memory_layout - fill mem_chunk array with memory layout data
- * @chunk: mem_chunk array to be filled
- * @maxsize: maximum address where memory detection should stop
- *
- * Fills the passed in memory chunk array with the memory layout of the
- * machine. The array must have a size of at least MEMORY_CHUNKS and will
- * be fully initialized afterwards.
- * If the maxsize paramater has a value > 0 memory detection will stop at
- * that address. It is guaranteed that all chunks have an ending address
- * that is smaller than maxsize.
- * If maxsize is 0 all memory will be detected.
- */
-void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize)
-{
-	unsigned long flags, flags_dat, cr0;
-
-	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
-	/*
-	 * Disable IRQs, DAT and low address protection so tprot does the
-	 * right thing and we don't get scheduled away with low address
-	 * protection disabled.
-	 */
-	local_irq_save(flags);
-	flags_dat = __arch_local_irq_stnsm(0xfb);
-	/*
-	 * In case DAT was enabled, make sure chunk doesn't reside in vmalloc
-	 * space. We have disabled DAT and any access to vmalloc area will
-	 * cause an exception.
-	 * If DAT was disabled we are called from early ipl code.
-	 */
-	if (test_bit(5, &flags_dat)) {
-		if (WARN_ON_ONCE(is_vmalloc_or_module_addr(chunk)))
-			goto out;
-	}
-	__ctl_store(cr0, 0, 0);
-	__ctl_clear_bit(0, 28);
-	find_memory_chunks(chunk, maxsize);
-	__ctl_load(cr0, 0, 0);
-out:
-	__arch_local_irq_ssm(flags_dat);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(detect_memory_layout);
-
-/*
- * Create memory hole with given address and size.
- */
-void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
-		     unsigned long size)
-{
-	int i;
-
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		struct mem_chunk *chunk = &mem_chunk[i];
-
-		if (chunk->size == 0)
-			continue;
-		if (addr > chunk->addr + chunk->size)
-			continue;
-		if (addr + size <= chunk->addr)
-			continue;
-		/* Split */
-		if ((addr > chunk->addr) &&
-		    (addr + size < chunk->addr + chunk->size)) {
-			struct mem_chunk *new = chunk + 1;
-
-			memmove(new, chunk, (MEMORY_CHUNKS-i-1) * sizeof(*new));
-			new->addr = addr + size;
-			new->size = chunk->addr + chunk->size - new->addr;
-			chunk->size = addr - chunk->addr;
-			continue;
-		} else if ((addr <= chunk->addr) &&
-			   (addr + size >= chunk->addr + chunk->size)) {
-			memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk));
-			memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk));
-		} else if (addr + size < chunk->addr + chunk->size) {
-			chunk->size = chunk->addr + chunk->size - addr - size;
-			chunk->addr = addr + size;
-		} else if (addr > chunk->addr) {
-			chunk->size = addr - chunk->addr;
-		}
-	}
+	} while (addr < max_physmem_end);
+	memblock_set_bottom_up(false);
+	if (!max_physmem_end)
+		max_physmem_end = memblock_end_of_DRAM();
 }
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 72b04de18283..fe9012a49aa5 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -10,6 +10,7 @@
 #include <linux/list.h>
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
+#include <linux/memblock.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/setup.h>
@@ -66,7 +67,8 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
 	if (slab_is_available())
 		pte = (pte_t *) page_table_alloc(&init_mm, address);
 	else
-		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
+		pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
+					  PTRS_PER_PTE * sizeof(pte_t));
 	if (!pte)
 		return NULL;
 	clear_table((unsigned long *) pte, _PAGE_INVALID,
@@ -371,16 +373,14 @@ out:
 void __init vmem_map_init(void)
 {
 	unsigned long ro_start, ro_end;
-	unsigned long start, end;
-	int i;
+	struct memblock_region *reg;
+	phys_addr_t start, end;
 
 	ro_start = PFN_ALIGN((unsigned long)&_stext);
 	ro_end = (unsigned long)&_eshared & PAGE_MASK;
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		if (!memory_chunk[i].size)
-			continue;
-		start = memory_chunk[i].addr;
-		end = memory_chunk[i].addr + memory_chunk[i].size;
+	for_each_memblock(memory, reg) {
+		start = reg->base;
+		end = reg->base + reg->size - 1;
 		if (start >= ro_end || end <= ro_start)
 			vmem_add_mem(start, end - start, 0);
 		else if (start >= ro_start && end <= ro_end)
@@ -400,23 +400,21 @@ void __init vmem_map_init(void)
 }
 
 /*
- * Convert memory chunk array to a memory segment list so there is a single
- * list that contains both r/w memory and shared memory segments.
+ * Convert memblock.memory to a memory segment list so there is a single
+ * list that contains all memory segments.
  */
 static int __init vmem_convert_memory_chunk(void)
 {
+	struct memblock_region *reg;
 	struct memory_segment *seg;
-	int i;
 
 	mutex_lock(&vmem_mutex);
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		if (!memory_chunk[i].size)
-			continue;
+	for_each_memblock(memory, reg) {
 		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
 		if (!seg)
 			panic("Out of memory...\n");
-		seg->start = memory_chunk[i].addr;
-		seg->size = memory_chunk[i].size;
+		seg->start = reg->base;
+		seg->size = reg->size;
 		insert_memory_segment(seg);
 	}
 	mutex_unlock(&vmem_mutex);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 3d8e4d63f514..1884653e4472 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -17,6 +17,8 @@
 #include <linux/miscdevice.h>
 #include <linux/debugfs.h>
 #include <linux/module.h>
+#include <linux/memblock.h>
+
 #include <asm/asm-offsets.h>
 #include <asm/ipl.h>
 #include <asm/sclp.h>
@@ -411,33 +413,24 @@ static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
 			       size_t count, loff_t *ppos)
 {
 	return simple_read_from_buffer(buf, count, ppos, filp->private_data,
-				       MEMORY_CHUNKS * CHUNK_INFO_SIZE);
+				       memblock.memory.cnt * CHUNK_INFO_SIZE);
 }
 
 static int zcore_memmap_open(struct inode *inode, struct file *filp)
 {
-	int i;
+	struct memblock_region *reg;
 	char *buf;
-	struct mem_chunk *chunk_array;
+	int i = 0;
 
-	chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
-			      GFP_KERNEL);
-	if (!chunk_array)
-		return -ENOMEM;
-	detect_memory_layout(chunk_array, 0);
-	buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL);
+	buf = kzalloc(memblock.memory.cnt * CHUNK_INFO_SIZE, GFP_KERNEL);
 	if (!buf) {
-		kfree(chunk_array);
 		return -ENOMEM;
 	}
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		sprintf(buf + (i * CHUNK_INFO_SIZE), "%016llx %016llx ",
-			(unsigned long long) chunk_array[i].addr,
-			(unsigned long long) chunk_array[i].size);
-		if (chunk_array[i].size == 0)
-			break;
+	for_each_memblock(memory, reg) {
+		sprintf(buf + (i++ * CHUNK_INFO_SIZE), "%016llx %016llx ",
+			(unsigned long long) reg->base,
+			(unsigned long long) reg->size);
 	}
-	kfree(chunk_array);
 	filp->private_data = buf;
 	return nonseekable_open(inode, filp);
 }
@@ -593,21 +586,12 @@ static int __init check_sdias(void)
 
 static int __init get_mem_info(unsigned long *mem, unsigned long *end)
 {
-	int i;
-	struct mem_chunk *chunk_array;
+	struct memblock_region *reg;
 
-	chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
-			      GFP_KERNEL);
-	if (!chunk_array)
-		return -ENOMEM;
-	detect_memory_layout(chunk_array, 0);
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		if (chunk_array[i].size == 0)
-			break;
-		*mem += chunk_array[i].size;
-		*end = max(*end, chunk_array[i].addr + chunk_array[i].size);
+	for_each_memblock(memory, reg) {
+		*mem += reg->size;
+		*end = max_t(unsigned long, *end, reg->base + reg->size);
 	}
-	kfree(chunk_array);
 	return 0;
 }
 