about summary refs log tree commit diff stats
path: root/arch/s390
diff options
context:
space:
mode:
author    Heiko Carstens <heiko.carstens@de.ibm.com>    2006-12-08 09:56:10 -0500
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2006-12-08 09:56:10 -0500
commit39b742f957a287a7514a8a35c9f516cdf30b9ff5 (patch)
tree4b4e9c0747bb047e906fed6c04919c00ccfa2b35 /arch/s390
parentf4eb07c17df2e6cf9bd58bfcd9cc9e05e9489d07 (diff)
[S390] Use add_active_range() and free_area_init_nodes().
Size zones and holes in an architecture independent manner for s390.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig        |  3
-rw-r--r--  arch/s390/kernel/setup.c | 53
-rw-r--r--  arch/s390/mm/init.c      | 23
3 files changed, 21 insertions(+), 58 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f12ca8fba71b..04f5a0230298 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -233,6 +233,9 @@ config WARN_STACK_SIZE
 	  This allows you to specify the maximum frame size a function may
 	  have without the compiler complaining about it.
 
+config ARCH_POPULATES_NODE_MAP
+	def_bool y
+
 source "mm/Kconfig"
 
 config HOLES_IN_ZONE
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index b8a1ce215142..49ef206ec880 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -66,7 +66,6 @@ unsigned long machine_flags = 0;
 
 struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
-unsigned long __initdata zholes_size[MAX_NR_ZONES];
 static unsigned long __initdata memory_end;
 
 /*
@@ -354,21 +353,6 @@ void machine_power_off(void)
  */
 void (*pm_power_off)(void) = machine_power_off;
 
-static void __init
-add_memory_hole(unsigned long start, unsigned long end)
-{
-	unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
-
-	if (end <= dma_pfn)
-		zholes_size[ZONE_DMA] += end - start + 1;
-	else if (start > dma_pfn)
-		zholes_size[ZONE_NORMAL] += end - start + 1;
-	else {
-		zholes_size[ZONE_DMA] += dma_pfn - start + 1;
-		zholes_size[ZONE_NORMAL] += end - dma_pfn;
-	}
-}
-
 static int __init early_parse_mem(char *p)
 {
 	memory_end = memparse(p, &p);
@@ -521,7 +505,6 @@ setup_memory(void)
 {
 	unsigned long bootmap_size;
 	unsigned long start_pfn, end_pfn, init_pfn;
-	unsigned long last_rw_end;
 	int i;
 
 	/*
@@ -577,39 +560,27 @@ setup_memory(void)
 	/*
 	 * Register RAM areas with the bootmem allocator.
 	 */
-	last_rw_end = start_pfn;
 
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		unsigned long start_chunk, end_chunk;
+		unsigned long start_chunk, end_chunk, pfn;
 
 		if (memory_chunk[i].type != CHUNK_READ_WRITE)
 			continue;
-		start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
-		start_chunk >>= PAGE_SHIFT;
-		end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
-		end_chunk >>= PAGE_SHIFT;
-		if (start_chunk < start_pfn)
-			start_chunk = start_pfn;
-		if (end_chunk > end_pfn)
-			end_chunk = end_pfn;
-		if (start_chunk < end_chunk) {
-			/* Initialize storage key for RAM pages */
-			for (init_pfn = start_chunk ; init_pfn < end_chunk;
-			     init_pfn++)
-				page_set_storage_key(init_pfn << PAGE_SHIFT,
-						     PAGE_DEFAULT_KEY);
-			free_bootmem(start_chunk << PAGE_SHIFT,
-				     (end_chunk - start_chunk) << PAGE_SHIFT);
-			if (last_rw_end < start_chunk)
-				add_memory_hole(last_rw_end, start_chunk - 1);
-			last_rw_end = end_chunk;
-		}
+		start_chunk = PFN_DOWN(memory_chunk[i].addr);
+		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
+		end_chunk = min(end_chunk, end_pfn);
+		if (start_chunk >= end_chunk)
+			continue;
+		add_active_range(0, start_chunk, end_chunk);
+		pfn = max(start_chunk, start_pfn);
+		for (; pfn <= end_chunk; pfn++)
+			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
 	}
 
 	psw_set_key(PAGE_DEFAULT_KEY);
 
-	if (last_rw_end < end_pfn - 1)
-		add_memory_hole(last_rw_end, end_pfn - 1);
+	free_bootmem_with_active_regions(0, max_pfn);
+	reserve_bootmem(0, PFN_PHYS(start_pfn));
 
 	/*
 	 * Reserve the bootmem bitmap itself as well. We do this in two
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 5ea12a573cad..aa39591ca130 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -106,8 +106,8 @@ static void __init setup_ro_region(void)
 	}
 }
 
-extern unsigned long __initdata zholes_size[];
 extern void vmem_map_init(void);
+
 /*
  * paging_init() sets up the page tables
  */
@@ -117,8 +117,7 @@ void __init paging_init(void)
 	int i;
 	unsigned long pgdir_k;
 	static const int ssm_mask = 0x04000000L;
-	unsigned long zones_size[MAX_NR_ZONES];
-	unsigned long dma_pfn, high_pfn;
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 	pg_dir = swapper_pg_dir;
 
@@ -142,20 +141,10 @@ void __init paging_init(void)
 	__ctl_load(pgdir_k, 13, 13);
 	__raw_local_irq_ssm(ssm_mask);
 
-	memset(zones_size, 0, sizeof(zones_size));
-	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
-	high_pfn = max_low_pfn;
-
-	if (dma_pfn > high_pfn)
-		zones_size[ZONE_DMA] = high_pfn;
-	else {
-		zones_size[ZONE_DMA] = dma_pfn;
-		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
-	}
-
-	/* Initialize mem_map[]. */
-	free_area_init_node(0, &contig_page_data, zones_size,
-		__pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+	free_area_init_nodes(max_zone_pfns);
 }
 
 void __init mem_init(void)