aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/kernel/setup.c
diff options
context:
space:
mode:
authorMartin Schwidefsky <schwidefsky@de.ibm.com>2011-12-27 05:27:07 -0500
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2011-12-27 05:27:10 -0500
commit14045ebf1e1156d966a796cacad91028e01797e5 (patch)
tree75fd97e935a9c337e95dc5202bafbf2634cf170b /arch/s390/kernel/setup.c
parent4999023aa95a00507d3f100ea75510c5c7270f74 (diff)
[S390] add support for physical memory > 4TB
The kernel address space of a 64 bit kernel currently uses a three level page table and the vmemmap array has a fixed address and a fixed maximum size. A three level page table is good enough for systems with less than 3.8TB of memory, for bigger systems four page table levels need to be used. Each page table level costs a bit of performance, use 3 levels for normal systems and 4 levels only for the really big systems.

To avoid bloating sparse.o too much set MAX_PHYSMEM_BITS to 46 for a maximum of 64TB of memory.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/kernel/setup.c')
-rw-r--r--arch/s390/kernel/setup.c67
1 files changed, 53 insertions, 14 deletions
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 26b601c2b137..66903eed36e6 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -94,6 +94,15 @@ struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
 int __initdata memory_end_set;
 unsigned long __initdata memory_end;
 
+unsigned long VMALLOC_START;
+EXPORT_SYMBOL(VMALLOC_START);
+
+unsigned long VMALLOC_END;
+EXPORT_SYMBOL(VMALLOC_END);
+
+struct page *vmemmap;
+EXPORT_SYMBOL(vmemmap);
+
 /* An array with a pointer to the lowcore of every CPU. */
 struct _lowcore *lowcore_ptr[NR_CPUS];
 EXPORT_SYMBOL(lowcore_ptr);
@@ -277,6 +286,15 @@ static int __init early_parse_mem(char *p)
 }
 early_param("mem", early_parse_mem);
 
+static int __init parse_vmalloc(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+	VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
+	return 0;
+}
+early_param("vmalloc", parse_vmalloc);
+
 unsigned int user_mode = HOME_SPACE_MODE;
 EXPORT_SYMBOL_GPL(user_mode);
 
@@ -478,8 +496,7 @@ EXPORT_SYMBOL_GPL(real_memory_size);
 
 static void __init setup_memory_end(void)
 {
-	unsigned long memory_size;
-	unsigned long max_mem;
+	unsigned long vmax, vmalloc_size, tmp;
 	int i;
 
 
@@ -489,12 +506,9 @@ static void __init setup_memory_end(void)
489 memory_end_set = 1; 506 memory_end_set = 1;
490 } 507 }
491#endif 508#endif
492 memory_size = 0; 509 real_memory_size = 0;
493 memory_end &= PAGE_MASK; 510 memory_end &= PAGE_MASK;
494 511
495 max_mem = memory_end ? min(VMEM_MAX_PHYS, memory_end) : VMEM_MAX_PHYS;
496 memory_end = min(max_mem, memory_end);
497
498 /* 512 /*
499 * Make sure all chunks are MAX_ORDER aligned so we don't need the 513 * Make sure all chunks are MAX_ORDER aligned so we don't need the
500 * extra checks that HOLES_IN_ZONE would require. 514 * extra checks that HOLES_IN_ZONE would require.
@@ -514,23 +528,48 @@ static void __init setup_memory_end(void)
 			chunk->addr = start;
 			chunk->size = end - start;
 		}
+		real_memory_size = max(real_memory_size,
+				       chunk->addr + chunk->size);
 	}
 
+	/* Choose kernel address space layout: 2, 3, or 4 levels. */
+#ifdef CONFIG_64BIT
+	vmalloc_size = VMALLOC_END ?: 128UL << 30;
+	tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
+	tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
+	if (tmp <= (1UL << 42))
+		vmax = 1UL << 42;	/* 3-level kernel page table */
+	else
+		vmax = 1UL << 53;	/* 4-level kernel page table */
+#else
+	vmalloc_size = VMALLOC_END ?: 96UL << 20;
+	vmax = 1UL << 31;	/* 2-level kernel page table */
+#endif
+	/* vmalloc area is at the end of the kernel address space. */
+	VMALLOC_END = vmax;
+	VMALLOC_START = vmax - vmalloc_size;
+
+	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
+	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
+	tmp = VMALLOC_START - tmp * sizeof(struct page);
+	tmp &= ~((vmax >> 11) - 1);	/* align to page table level */
+	tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
+	vmemmap = (struct page *) tmp;
+
+	/* Take care that memory_end is set and <= vmemmap */
+	memory_end = min(memory_end ?: real_memory_size, tmp);
+
+	/* Fixup memory chunk array to fit into 0..memory_end */
 	for (i = 0; i < MEMORY_CHUNKS; i++) {
 		struct mem_chunk *chunk = &memory_chunk[i];
 
-		real_memory_size = max(real_memory_size,
-				       chunk->addr + chunk->size);
-		if (chunk->addr >= max_mem) {
+		if (chunk->addr >= memory_end) {
 			memset(chunk, 0, sizeof(*chunk));
 			continue;
 		}
-		if (chunk->addr + chunk->size > max_mem)
-			chunk->size = max_mem - chunk->addr;
-		memory_size = max(memory_size, chunk->addr + chunk->size);
+		if (chunk->addr + chunk->size > memory_end)
+			chunk->size = memory_end - chunk->addr;
 	}
-	if (!memory_end)
-		memory_end = memory_size;
 }
 
 void *restart_stack __attribute__((__section__(".data")));