Diffstat (limited to 'arch/s390/mm/vmem.c')
 arch/s390/mm/vmem.c | 26 ++++++++++----------------
 1 file changed, 10 insertions(+), 16 deletions(-)
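In short (as read from the diff below): the boot-time computed vmem_map pointer and the exported vmalloc_end variable are replaced by the compile-time constant VMEM_MAP, bounded by VMEM_MAP_SIZE and VMEM_MAP_MAX via a BUILD_BUG_ON() in vmem_map_init(). vmem_add_mem() now populates the memory map before creating the identity mapping, insert_memory_segment() rejects segments that would reach into the vmalloc area (seg->start + seg->size >= VMALLOC_START) instead of comparing against max_pfn, and vmem_convert_memory_chunk() scans all MEMORY_CHUNKS entries instead of stopping at the first zero-sized chunk.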
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index fb9c5a85aa56..79d13a166a3d 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -15,10 +15,6 @@
 #include <asm/setup.h>
 #include <asm/tlbflush.h>
 
-unsigned long vmalloc_end;
-EXPORT_SYMBOL(vmalloc_end);
-
-static struct page *vmem_map;
 static DEFINE_MUTEX(vmem_mutex);
 
 struct memory_segment {
@@ -188,8 +184,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
 	pte_t pte;
 	int ret = -ENOMEM;
 
-	map_start = vmem_map + PFN_DOWN(start);
-	map_end = vmem_map + PFN_DOWN(start + size);
+	map_start = VMEM_MAP + PFN_DOWN(start);
+	map_end = VMEM_MAP + PFN_DOWN(start + size);
 
 	start_addr = (unsigned long) map_start & PAGE_MASK;
 	end_addr = PFN_ALIGN((unsigned long) map_end);
@@ -240,10 +236,10 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
 {
 	int ret;
 
-	ret = vmem_add_range(start, size);
+	ret = vmem_add_mem_map(start, size);
 	if (ret)
 		return ret;
-	return vmem_add_mem_map(start, size);
+	return vmem_add_range(start, size);
 }
 
 /*
@@ -254,7 +250,7 @@ static int insert_memory_segment(struct memory_segment *seg)
 {
 	struct memory_segment *tmp;
 
-	if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
+	if (seg->start + seg->size >= VMALLOC_START ||
 	    seg->start + seg->size < seg->start)
 		return -ERANGE;
 
@@ -357,17 +353,15 @@ out:
 
 /*
  * map whole physical memory to virtual memory (identity mapping)
+ * we reserve enough space in the vmalloc area for vmemmap to hotplug
+ * additional memory segments.
  */
 void __init vmem_map_init(void)
 {
-	unsigned long map_size;
 	int i;
 
-	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
-	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
-	vmem_map = (struct page *) vmalloc_end;
-	NODE_DATA(0)->node_mem_map = vmem_map;
-
+	BUILD_BUG_ON((unsigned long)VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX);
+	NODE_DATA(0)->node_mem_map = VMEM_MAP;
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
 		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
 }
@@ -382,7 +376,7 @@ static int __init vmem_convert_memory_chunk(void)
 	int i;
 
 	mutex_lock(&vmem_mutex);
-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+	for (i = 0; i < MEMORY_CHUNKS; i++) {
 		if (!memory_chunk[i].size)
 			continue;
 		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
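The VMEM_MAP, VMEM_MAP_SIZE and VMEM_MAP_MAX constants used above are defined outside this file, in the s390 page table headers. A minimal sketch of the layout they describe, with placeholder addresses rather than the real s390 values:

/*
 * Sketch only -- placeholder values, not the actual s390 definitions.
 * The virtual memory map (one struct page per physical page frame) sits
 * at a fixed address directly above the vmalloc area, with room reserved
 * up to VMEM_MAP_MAX so that hotplugged memory segments can extend it
 * without any runtime layout decisions.
 */
#define VMALLOC_START	0x78000000UL	/* placeholder: top of identity-mapped memory */
#define VMALLOC_END	0x7e000000UL	/* placeholder: end of the vmalloc area */
#define VMEM_MAP_MAX	0x80000000UL	/* placeholder: hard ceiling for the map */

/* enough struct pages to cover every page frame below VMALLOC_START */
#define VMEM_MAP_SIZE	((VMALLOC_START / PAGE_SIZE) * sizeof(struct page))
#define VMEM_MAP	((struct page *) VMALLOC_END)

Under such a layout the BUILD_BUG_ON() in vmem_map_init() proves at compile time that the map fits between VMALLOC_END and VMEM_MAP_MAX, and the seg->start + seg->size >= VMALLOC_START check in insert_memory_segment() keeps hotplugged segments from growing into the reserved area.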