author		Christian Borntraeger <borntraeger@de.ibm.com>	2008-01-26 08:11:00 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2008-01-26 08:11:12 -0500
commit		5fd9c6e214547a32d3da6ee4284c79004d667bc8 (patch)
tree		def2e218393c9867e9db939f1d379c005895a912 /arch/s390/mm/vmem.c
parent		8ffd74a0924e4e04f6455eb2d2187a9564678d01 (diff)
[S390] Change vmalloc definitions
Currently the vmalloc area starts at a dynamic address that depends on the
memory size, and an 8MB security hole was left after the physical memory to
catch out-of-bounds accesses. We can simplify the code by placing the vmalloc
area explicitly at the top of the kernel mapping and setting the vmalloc size
to a fixed value of 128MB/128GB for 31-bit/64-bit systems. Part of the vmalloc
area is used for the vmem_map, which leaves 96MB/1GB for normal vmalloc
allocations.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
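For orientation, the fixed layout the message describes can be sketched with
the macro names that appear in the diff below. The concrete addresses are
illustrative 31-bit values derived from the 128MB-total/96MB-vmalloc figures
above (assuming the kernel mapping tops out at 2GB); the authoritative
definitions live in include/asm-s390/pgtable.h, not in this sketch:

	/*
	 * Illustrative sketch only -- a 31-bit layout consistent with
	 * the figures in the commit message, not the literal pgtable.h
	 * definitions. The vmalloc region sits at the very top of the
	 * kernel mapping; its upper part holds the vmem_map array.
	 */
	#define VMEM_MAP_MAX	0x80000000UL	/* top of 31-bit kernel mapping (2GB) */
	#define VMALLOC_END	(VMEM_MAP_MAX - (32UL << 20))	/* 0x7e000000 */
	#define VMALLOC_START	(VMALLOC_END - (96UL << 20))	/* 0x78000000 */

	/* assumption: the struct page array starts right at VMALLOC_END
	 * and may grow up to VMEM_MAP_MAX, which is the invariant the
	 * BUILD_BUG_ON added below enforces */
	#define VMEM_MAP	((struct page *) VMALLOC_END)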
Diffstat (limited to 'arch/s390/mm/vmem.c')
-rw-r--r--	arch/s390/mm/vmem.c	20
1 file changed, 7 insertions, 13 deletions
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index fb9c5a85aa56..ee625c8c3b28 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -15,10 +15,6 @@
 #include <asm/setup.h>
 #include <asm/tlbflush.h>
 
-unsigned long vmalloc_end;
-EXPORT_SYMBOL(vmalloc_end);
-
-static struct page *vmem_map;
 static DEFINE_MUTEX(vmem_mutex);
 
 struct memory_segment {
@@ -188,8 +184,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
 	pte_t pte;
 	int ret = -ENOMEM;
 
-	map_start = vmem_map + PFN_DOWN(start);
-	map_end = vmem_map + PFN_DOWN(start + size);
+	map_start = VMEM_MAP + PFN_DOWN(start);
+	map_end = VMEM_MAP + PFN_DOWN(start + size);
 
 	start_addr = (unsigned long) map_start & PAGE_MASK;
 	end_addr = PFN_ALIGN((unsigned long) map_end);
@@ -254,7 +250,7 @@ static int insert_memory_segment(struct memory_segment *seg)
 {
 	struct memory_segment *tmp;
 
-	if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
+	if (seg->start + seg->size >= VMALLOC_START ||
 	    seg->start + seg->size < seg->start)
 		return -ERANGE;
 
@@ -357,17 +353,15 @@ out:
 
 /*
  * map whole physical memory to virtual memory (identity mapping)
+ * we reserve enough space in the vmalloc area for vmemmap to hotplug
+ * additional memory segments.
  */
 void __init vmem_map_init(void)
 {
-	unsigned long map_size;
 	int i;
 
-	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
-	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
-	vmem_map = (struct page *) vmalloc_end;
-	NODE_DATA(0)->node_mem_map = vmem_map;
-
+	BUILD_BUG_ON((unsigned long)VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX);
+	NODE_DATA(0)->node_mem_map = VMEM_MAP;
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
 		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
 }
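The new BUILD_BUG_ON turns the sizing into a compile-time invariant: since
insert_memory_segment() now rejects any segment reaching VMALLOC_START, the
mem_map array only ever has to describe pfns below VMALLOC_START, and that
array must fit between VMEM_MAP and VMEM_MAP_MAX. A minimal userspace sketch
of the same arithmetic, with illustrative addresses and a stand-in struct
page size (both assumptions, not kernel values):

	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define VMALLOC_START	0x78000000UL	/* illustrative 31-bit values */
	#define VMALLOC_END	0x7e000000UL
	#define VMEM_MAP_MAX	0x80000000UL
	#define VMEM_MAP	VMALLOC_END	/* assumed base of the struct page array */

	struct page_stub { unsigned long flags, pad[3]; };	/* 32-byte stand-in */

	int main(void)
	{
		/* memory segments must end below VMALLOC_START, so at most
		 * VMALLOC_START / PAGE_SIZE pages ever need a struct page */
		unsigned long vmem_map_size =
			(VMALLOC_START / PAGE_SIZE) * sizeof(struct page_stub);

		printf("vmem_map needs %lu MB of the %lu MB window\n",
		       vmem_map_size >> 20, (VMEM_MAP_MAX - VMEM_MAP) >> 20);

		/* mirrors: BUILD_BUG_ON(VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX) */
		assert(VMEM_MAP + vmem_map_size <= VMEM_MAP_MAX);
		return 0;
	}

With the 32-byte stand-in, the array needs about 15MB of the 32MB window
between VMALLOC_END and VMEM_MAP_MAX, so the check passes; a larger struct
page or a higher VMALLOC_START is what would trip the BUILD_BUG_ON at
compile time.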