diff options
author | Christian Borntraeger <borntraeger@de.ibm.com> | 2008-01-26 08:11:00 -0500 |
---|---|---|
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2008-01-26 08:11:12 -0500 |
commit | 5fd9c6e214547a32d3da6ee4284c79004d667bc8 (patch) | |
tree | def2e218393c9867e9db939f1d379c005895a912 /arch/s390 | |
parent | 8ffd74a0924e4e04f6455eb2d2187a9564678d01 (diff) |
[S390] Change vmalloc definitions
Currently the vmalloc area starts at a dynamic address depending on
the memory size. There was also an 8MB security hole after the
physical memory to catch out-of-bounds accesses.
We can simplify the code by putting the vmalloc area explicitly at
the top of the kernel mapping and setting the vmalloc size to a fixed
value of 128MB/128GB for 31bit/64bit systems. Part of the vmalloc
area will be used for the vmem_map. This leaves an area of 96MB/1GB
for normal vmalloc allocations.
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/kernel/setup.c | 6 | ||||
-rw-r--r-- | arch/s390/mm/vmem.c | 20 |
2 files changed, 10 insertions, 16 deletions
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index d68a4025486e..d071a81b62da 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -617,7 +617,7 @@ EXPORT_SYMBOL_GPL(real_memory_size); | |||
617 | static void __init setup_memory_end(void) | 617 | static void __init setup_memory_end(void) |
618 | { | 618 | { |
619 | unsigned long memory_size; | 619 | unsigned long memory_size; |
620 | unsigned long max_mem, max_phys; | 620 | unsigned long max_mem; |
621 | int i; | 621 | int i; |
622 | 622 | ||
623 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) | 623 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) |
@@ -625,10 +625,10 @@ static void __init setup_memory_end(void) | |||
625 | memory_end = ZFCPDUMP_HSA_SIZE; | 625 | memory_end = ZFCPDUMP_HSA_SIZE; |
626 | #endif | 626 | #endif |
627 | memory_size = 0; | 627 | memory_size = 0; |
628 | max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE; | ||
629 | memory_end &= PAGE_MASK; | 628 | memory_end &= PAGE_MASK; |
630 | 629 | ||
631 | max_mem = memory_end ? min(max_phys, memory_end) : max_phys; | 630 | max_mem = memory_end ? min(VMALLOC_START, memory_end) : VMALLOC_START; |
631 | memory_end = min(max_mem, memory_end); | ||
632 | 632 | ||
633 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 633 | for (i = 0; i < MEMORY_CHUNKS; i++) { |
634 | struct mem_chunk *chunk = &memory_chunk[i]; | 634 | struct mem_chunk *chunk = &memory_chunk[i]; |
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index fb9c5a85aa56..ee625c8c3b28 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c | |||
@@ -15,10 +15,6 @@ | |||
15 | #include <asm/setup.h> | 15 | #include <asm/setup.h> |
16 | #include <asm/tlbflush.h> | 16 | #include <asm/tlbflush.h> |
17 | 17 | ||
18 | unsigned long vmalloc_end; | ||
19 | EXPORT_SYMBOL(vmalloc_end); | ||
20 | |||
21 | static struct page *vmem_map; | ||
22 | static DEFINE_MUTEX(vmem_mutex); | 18 | static DEFINE_MUTEX(vmem_mutex); |
23 | 19 | ||
24 | struct memory_segment { | 20 | struct memory_segment { |
@@ -188,8 +184,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size) | |||
188 | pte_t pte; | 184 | pte_t pte; |
189 | int ret = -ENOMEM; | 185 | int ret = -ENOMEM; |
190 | 186 | ||
191 | map_start = vmem_map + PFN_DOWN(start); | 187 | map_start = VMEM_MAP + PFN_DOWN(start); |
192 | map_end = vmem_map + PFN_DOWN(start + size); | 188 | map_end = VMEM_MAP + PFN_DOWN(start + size); |
193 | 189 | ||
194 | start_addr = (unsigned long) map_start & PAGE_MASK; | 190 | start_addr = (unsigned long) map_start & PAGE_MASK; |
195 | end_addr = PFN_ALIGN((unsigned long) map_end); | 191 | end_addr = PFN_ALIGN((unsigned long) map_end); |
@@ -254,7 +250,7 @@ static int insert_memory_segment(struct memory_segment *seg) | |||
254 | { | 250 | { |
255 | struct memory_segment *tmp; | 251 | struct memory_segment *tmp; |
256 | 252 | ||
257 | if (PFN_DOWN(seg->start + seg->size) > max_pfn || | 253 | if (seg->start + seg->size >= VMALLOC_START || |
258 | seg->start + seg->size < seg->start) | 254 | seg->start + seg->size < seg->start) |
259 | return -ERANGE; | 255 | return -ERANGE; |
260 | 256 | ||
@@ -357,17 +353,15 @@ out: | |||
357 | 353 | ||
358 | /* | 354 | /* |
359 | * map whole physical memory to virtual memory (identity mapping) | 355 | * map whole physical memory to virtual memory (identity mapping) |
356 | * we reserve enough space in the vmalloc area for vmemmap to hotplug | ||
357 | * additional memory segments. | ||
360 | */ | 358 | */ |
361 | void __init vmem_map_init(void) | 359 | void __init vmem_map_init(void) |
362 | { | 360 | { |
363 | unsigned long map_size; | ||
364 | int i; | 361 | int i; |
365 | 362 | ||
366 | map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page); | 363 | BUILD_BUG_ON((unsigned long)VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX); |
367 | vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size); | 364 | NODE_DATA(0)->node_mem_map = VMEM_MAP; |
368 | vmem_map = (struct page *) vmalloc_end; | ||
369 | NODE_DATA(0)->node_mem_map = vmem_map; | ||
370 | |||
371 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) | 365 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) |
372 | vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size); | 366 | vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size); |
373 | } | 367 | } |