author     Christian Borntraeger <borntraeger@de.ibm.com>  2008-01-26 08:11:00 -0500
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>     2008-01-26 08:11:12 -0500
commit     5fd9c6e214547a32d3da6ee4284c79004d667bc8 (patch)
tree       def2e218393c9867e9db939f1d379c005895a912
parent     8ffd74a0924e4e04f6455eb2d2187a9564678d01 (diff)
[S390] Change vmalloc definitions
Currently the vmalloc area starts at a dynamic address depending on the
memory size. There was also an 8MB security hole after the physical
memory to catch out-of-bounds accesses.

We can simplify the code by putting the vmalloc area explicitly at the
top of the kernel mapping and setting the vmalloc size to a fixed value
of 128MB/128GB for 31bit/64bit systems. Part of the vmalloc area will
be used for the vmem_map. This leaves an area of 96MB/1GB for normal
vmalloc allocations.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
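A quick sanity check of the new constants (a host-side sketch, not part
of the patch; the values are the VMALLOC_START / VMALLOC_END /
VMEM_MAP_MAX definitions from the pgtable.h hunk below):

#include <stdio.h>

int main(void)
{
        /* 31 bit layout constants from the pgtable.h hunk */
        unsigned long long start31 = 0x78000000ULL;      /* VMALLOC_START */
        unsigned long long end31   = 0x7e000000ULL;      /* VMALLOC_END   */
        unsigned long long max31   = 0x80000000ULL;      /* VMEM_MAP_MAX  */
        /* 64 bit layout constants */
        unsigned long long start64 = 0x3e000000000ULL;
        unsigned long long end64   = 0x3e040000000ULL;
        unsigned long long max64   = 0x40000000000ULL;

        /* 96MB vmalloc + 32MB vmem_map = 128MB reserved on 31 bit */
        printf("31bit: vmalloc %lluMB, vmem_map %lluMB, total %lluMB\n",
               (end31 - start31) >> 20, (max31 - end31) >> 20,
               (max31 - start31) >> 20);
        /* 1GB vmalloc + 127GB vmem_map = 128GB reserved on 64 bit */
        printf("64bit: vmalloc %lluGB, vmem_map %lluGB, total %lluGB\n",
               (end64 - start64) >> 30, (max64 - end64) >> 30,
               (max64 - start64) >> 30);
        return 0;
}

The printed totals match the 128MB/128GB reservation and the 96MB/1GB
usable vmalloc figures quoted in the message above.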
-rw-r--r--  arch/s390/kernel/setup.c     6
-rw-r--r--  arch/s390/mm/vmem.c         20
-rw-r--r--  include/asm-s390/pgtable.h  46
3 files changed, 26 insertions, 46 deletions
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index d68a4025486e..d071a81b62da 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -617,7 +617,7 @@ EXPORT_SYMBOL_GPL(real_memory_size);
 static void __init setup_memory_end(void)
 {
 	unsigned long memory_size;
-	unsigned long max_mem, max_phys;
+	unsigned long max_mem;
 	int i;
 
 #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
@@ -625,10 +625,10 @@ static void __init setup_memory_end(void)
 		memory_end = ZFCPDUMP_HSA_SIZE;
 #endif
 	memory_size = 0;
-	max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE;
 	memory_end &= PAGE_MASK;
 
-	max_mem = memory_end ? min(max_phys, memory_end) : max_phys;
+	max_mem = memory_end ? min(VMALLOC_START, memory_end) : VMALLOC_START;
+	memory_end = min(max_mem, memory_end);
 
 	for (i = 0; i < MEMORY_CHUNKS; i++) {
 		struct mem_chunk *chunk = &memory_chunk[i];
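
Restating the new clamp outside kernel context (a sketch with an
illustrative helper name, not kernel API): memory_end may never reach
into the now-fixed vmalloc area, and a memory_end of 0 (no mem= limit)
stays 0.

/* Hypothetical helper mirroring the hunk above, with min() expanded. */
static unsigned long clamp_memory_end(unsigned long memory_end,
                                      unsigned long vmalloc_start)
{
        unsigned long max_mem;

        /* max_mem = memory_end ? min(VMALLOC_START, memory_end)
         *                      : VMALLOC_START; */
        max_mem = memory_end && memory_end < vmalloc_start ?
                  memory_end : vmalloc_start;
        /* memory_end = min(max_mem, memory_end) -- keeps 0 at 0 */
        return memory_end < max_mem ? memory_end : max_mem;
}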
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index fb9c5a85aa56..ee625c8c3b28 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -15,10 +15,6 @@
 #include <asm/setup.h>
 #include <asm/tlbflush.h>
 
-unsigned long vmalloc_end;
-EXPORT_SYMBOL(vmalloc_end);
-
-static struct page *vmem_map;
 static DEFINE_MUTEX(vmem_mutex);
 
 struct memory_segment {
@@ -188,8 +184,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
 	pte_t pte;
 	int ret = -ENOMEM;
 
-	map_start = vmem_map + PFN_DOWN(start);
-	map_end = vmem_map + PFN_DOWN(start + size);
+	map_start = VMEM_MAP + PFN_DOWN(start);
+	map_end = VMEM_MAP + PFN_DOWN(start + size);
 
 	start_addr = (unsigned long) map_start & PAGE_MASK;
 	end_addr = PFN_ALIGN((unsigned long) map_end);
@@ -254,7 +250,7 @@ static int insert_memory_segment(struct memory_segment *seg)
 {
 	struct memory_segment *tmp;
 
-	if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
+	if (seg->start + seg->size >= VMALLOC_START ||
 	    seg->start + seg->size < seg->start)
 		return -ERANGE;
 
@@ -357,17 +353,15 @@ out:
 
 /*
  * map whole physical memory to virtual memory (identity mapping)
+ * we reserve enough space in the vmalloc area for vmemmap to hotplug
+ * additional memory segments.
  */
 void __init vmem_map_init(void)
 {
-	unsigned long map_size;
 	int i;
 
-	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
-	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
-	vmem_map = (struct page *) vmalloc_end;
-	NODE_DATA(0)->node_mem_map = vmem_map;
-
+	BUILD_BUG_ON((unsigned long)VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX);
+	NODE_DATA(0)->node_mem_map = VMEM_MAP;
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
 		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
 }
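
The BUILD_BUG_ON() added above turns the layout constraint into a build
failure instead of a boot-time surprise. A minimal stand-in for the
kernel macro (a sketch; the real one lives in include/linux/kernel.h,
and sizeof(struct page) == 32 below is an assumed 31 bit figure):

/* Negative-array-size trick: the build fails iff cond is true. */
#define STATIC_CHECK(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

static inline void vmem_layout_check(void)
{
        /* VMALLOC_END + VMEM_MAP_SIZE must stay below VMEM_MAP_MAX:
         * 0x7e000000 + (0x78000000 / 4096) * 32 = 0x7ef00000 < 0x80000000 */
        STATIC_CHECK(0x7e000000UL +
                     (0x78000000UL / 4096) * 32 > 0x80000000UL);
}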
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 1f530f8a6280..79b9eab1a0c7 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -104,41 +104,27 @@ extern char empty_zero_page[PAGE_SIZE];
 
 #ifndef __ASSEMBLY__
 /*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- * vmalloc area starts at 4GB to prevent syscall table entry exchanging
- * from modules.
- */
-extern unsigned long vmalloc_end;
-
-#ifdef CONFIG_64BIT
-#define VMALLOC_ADDR	(max(0x100000000UL, (unsigned long) high_memory))
-#else
-#define VMALLOC_ADDR	((unsigned long) high_memory)
-#endif
-#define VMALLOC_OFFSET	(8*1024*1024)
-#define VMALLOC_START	((VMALLOC_ADDR + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_END	vmalloc_end
-
-/*
- * We need some free virtual space to be able to do vmalloc.
- * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc
- * area. On a machine with 2GB memory we make sure that we
- * have at least 128MB free space for vmalloc. On a machine
- * with 4TB we make sure we have at least 128GB.
- */
+ * The vmalloc area will always be on the topmost area of the kernel
+ * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc,
+ * which should be enough for any sane case.
+ * By putting vmalloc at the top, we maximise the gap between physical
+ * memory and vmalloc to catch misplaced memory accesses. As a side
+ * effect, this also makes sure that 64 bit module code cannot be used
+ * as system call address.
+ */
 #ifndef __s390x__
-#define VMALLOC_MIN_SIZE	0x8000000UL
-#define VMALLOC_END_INIT	0x80000000UL
+#define VMALLOC_START	0x78000000UL
+#define VMALLOC_END	0x7e000000UL
+#define VMEM_MAP_MAX	0x80000000UL
 #else /* __s390x__ */
-#define VMALLOC_MIN_SIZE	0x2000000000UL
-#define VMALLOC_END_INIT	0x40000000000UL
+#define VMALLOC_START	0x3e000000000UL
+#define VMALLOC_END	0x3e040000000UL
+#define VMEM_MAP_MAX	0x40000000000UL
 #endif /* __s390x__ */
 
+#define VMEM_MAP	((struct page *) VMALLOC_END)
+#define VMEM_MAP_SIZE	((VMALLOC_START / PAGE_SIZE) * sizeof(struct page))
+
 /*
  * A 31 bit pagetable entry of S390 has following format:
  *  |   PFRA          |    |  OS  |
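
VMEM_MAP_SIZE allocates one struct page per physical frame that can
exist below VMALLOC_START, so the hole between VMALLOC_END and
VMEM_MAP_MAX must be large enough to hold that array. A worked check
for the 64 bit values (host-side sketch; sizeof(struct page) == 56 is
an assumption, PAGE_SIZE on s390 is 4KB):

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned long long page_size     = 4096;   /* PAGE_SIZE */
        unsigned long long sizeof_page   = 56;     /* assumed sizeof(struct page) */
        unsigned long long vmalloc_start = 0x3e000000000ULL;
        unsigned long long vmalloc_end   = 0x3e040000000ULL;  /* == VMEM_MAP */
        unsigned long long vmem_map_max  = 0x40000000000ULL;

        /* VMEM_MAP_SIZE = (VMALLOC_START / PAGE_SIZE) * sizeof(struct page) */
        unsigned long long map_size = (vmalloc_start / page_size) * sizeof_page;

        printf("vmem_map needs %lluGB of the %lluGB hole\n",
               map_size >> 30, (vmem_map_max - vmalloc_end) >> 30);
        /* mirrors the BUILD_BUG_ON in vmem_map_init() */
        assert(vmalloc_end + map_size <= vmem_map_max);
        return 0;
}

With these numbers the array takes about 54GB of the 127GB hole, so the
compile-time check in vmem_map_init() holds with room to spare.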