author	Heiko Carstens <heiko.carstens@de.ibm.com>	2006-10-04 14:02:19 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-10-04 14:02:19 -0400
commit	7676bef9c183fd573822cac9992927ef596d584c (patch)
tree	1bf18f9cfda947b23e69d47b84500614a59ba95e /arch
parent	cb601d41c175b7419efc91075a714d6a157bb0ac (diff)
[S390] Have s390 use add_active_range() and free_area_init_nodes.
Size zones and holes in an architecture independent manner for s390.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
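For context, the pattern this patch adopts can be sketched as follows. This is an illustrative sketch only, not part of the patch: sketch_setup_memory() and sketch_paging_init() are hypothetical helpers, and start_pfn/end_pfn/max_pfn/max_low_pfn stand in for the usual bootmem variables. The architecture registers each usable PFN range with add_active_range() and hands the highest PFN of each zone to free_area_init_nodes(), which then derives zone sizes and holes itself (the actual call sites are in arch/s390/kernel/setup.c and arch/s390/mm/init.c below).

/* Illustrative sketch only -- hypothetical helpers, not part of the patch. */
static void __init sketch_setup_memory(unsigned long start_pfn,
				       unsigned long end_pfn)
{
	/* register one contiguous range of usable page frames for node 0 */
	add_active_range(0, start_pfn, end_pfn);

	/* release all registered ranges to the bootmem allocator */
	free_bootmem_with_active_regions(0, max_pfn);
}

static void __init sketch_paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	/* zone sizes and holes are computed from the active ranges */
	free_area_init_nodes(max_zone_pfns);
}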
Diffstat (limited to 'arch')
-rw-r--r--	arch/s390/Kconfig	3
-rw-r--r--	arch/s390/defconfig	1
-rw-r--r--	arch/s390/kernel/setup.c	55
-rw-r--r--	arch/s390/mm/init.c	35
4 files changed, 27 insertions, 67 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f900a516f099..51c2dfe89c62 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -233,6 +233,9 @@ config WARN_STACK_SIZE
 	  This allows you to specify the maximum frame size a function may
 	  have without the compiler complaining about it.
 
+config ARCH_POPULATES_NODE_MAP
+	def_bool y
+
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 62950f53ef5b..b6cad75fd1f4 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -118,6 +118,7 @@ CONFIG_PACK_STACK=y
 CONFIG_CHECK_STACK=y
 CONFIG_STACK_GUARD=256
 # CONFIG_WARN_STACK is not set
+CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_SELECT_MEMORY_MODEL=y
 CONFIG_FLATMEM_MANUAL=y
 # CONFIG_DISCONTIGMEM_MANUAL is not set
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index a21cfbb9d97e..49f2b68e32b1 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -70,7 +70,6 @@ struct {
 #define CHUNK_READ_WRITE 0
 #define CHUNK_READ_ONLY 1
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
-unsigned long __initdata zholes_size[MAX_NR_ZONES];
 static unsigned long __initdata memory_end;
 
76/* 75/*
@@ -358,21 +357,6 @@ void machine_power_off(void)
  */
 void (*pm_power_off)(void) = machine_power_off;
 
-static void __init
-add_memory_hole(unsigned long start, unsigned long end)
-{
-	unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
-
-	if (end <= dma_pfn)
-		zholes_size[ZONE_DMA] += end - start + 1;
-	else if (start > dma_pfn)
-		zholes_size[ZONE_NORMAL] += end - start + 1;
-	else {
-		zholes_size[ZONE_DMA] += dma_pfn - start + 1;
-		zholes_size[ZONE_NORMAL] += end - dma_pfn;
-	}
-}
-
 static int __init early_parse_mem(char *p)
 {
 	memory_end = memparse(p, &p);
@@ -494,7 +478,6 @@ setup_memory(void)
 {
 	unsigned long bootmap_size;
 	unsigned long start_pfn, end_pfn, init_pfn;
-	unsigned long last_rw_end;
 	int i;
 
 	/*
@@ -543,46 +526,34 @@ setup_memory(void)
 #endif
 
 	/*
-	 * Initialize the boot-time allocator (with low memory only):
+	 * Initialize the boot-time allocator
 	 */
 	bootmap_size = init_bootmem(start_pfn, end_pfn);
 
 	/*
 	 * Register RAM areas with the bootmem allocator.
 	 */
-	last_rw_end = start_pfn;
 
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		unsigned long start_chunk, end_chunk;
+		unsigned long start_chunk, end_chunk, pfn;
 
 		if (memory_chunk[i].type != CHUNK_READ_WRITE)
 			continue;
-		start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
-		start_chunk >>= PAGE_SHIFT;
-		end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
-		end_chunk >>= PAGE_SHIFT;
-		if (start_chunk < start_pfn)
-			start_chunk = start_pfn;
-		if (end_chunk > end_pfn)
-			end_chunk = end_pfn;
-		if (start_chunk < end_chunk) {
-			/* Initialize storage key for RAM pages */
-			for (init_pfn = start_chunk ; init_pfn < end_chunk;
-			     init_pfn++)
-				page_set_storage_key(init_pfn << PAGE_SHIFT,
-						     PAGE_DEFAULT_KEY);
-			free_bootmem(start_chunk << PAGE_SHIFT,
-				     (end_chunk - start_chunk) << PAGE_SHIFT);
-			if (last_rw_end < start_chunk)
-				add_memory_hole(last_rw_end, start_chunk - 1);
-			last_rw_end = end_chunk;
-		}
+		start_chunk = PFN_DOWN(memory_chunk[i].addr);
+		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
+		end_chunk = min(end_chunk, end_pfn);
+		if (start_chunk >= end_chunk)
+			continue;
+		add_active_range(0, start_chunk, end_chunk);
+		pfn = max(start_chunk, start_pfn);
+		for (; pfn <= end_chunk; pfn++)
+			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
 	}
 
 	psw_set_key(PAGE_DEFAULT_KEY);
 
-	if (last_rw_end < end_pfn - 1)
-		add_memory_hole(last_rw_end, end_pfn - 1);
+	free_bootmem_with_active_regions(0, max_pfn);
+	reserve_bootmem(0, PFN_PHYS(start_pfn));
 
 	/*
 	 * Reserve the bootmem bitmap itself as well. We do this in two
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 127044e1707c..c302508ae31e 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -82,7 +82,6 @@ void show_mem(void)
 	printk("%d pages swap cached\n",cached);
 }
 
-extern unsigned long __initdata zholes_size[];
 /*
  * paging_init() sets up the page tables
  */
@@ -99,16 +98,15 @@ void __init paging_init(void)
 	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
 	static const int ssm_mask = 0x04000000L;
 	unsigned long ro_start_pfn, ro_end_pfn;
-	unsigned long zones_size[MAX_NR_ZONES];
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
 	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-	memset(zones_size, 0, sizeof(zones_size));
-	zones_size[ZONE_DMA] = max_low_pfn;
-	free_area_init_node(0, &contig_page_data, zones_size,
-			    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
-			    zholes_size);
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+	max_zone_pfns[ZONE_DMA] = max_low_pfn;
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+	free_area_init_nodes(max_zone_pfns);
 
 	/* unmap whole virtual address space */
 
@@ -153,7 +151,6 @@ void __init paging_init(void)
 	__raw_local_irq_ssm(ssm_mask);
 
 	local_flush_tlb();
-	return;
 }
 
 #else /* CONFIG_64BIT */
@@ -169,26 +166,16 @@ void __init paging_init(void)
 	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
 		_KERN_REGION_TABLE;
 	static const int ssm_mask = 0x04000000L;
-	unsigned long zones_size[MAX_NR_ZONES];
-	unsigned long dma_pfn, high_pfn;
 	unsigned long ro_start_pfn, ro_end_pfn;
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
-	memset(zones_size, 0, sizeof(zones_size));
-	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
-	high_pfn = max_low_pfn;
 	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
 	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-	if (dma_pfn > high_pfn)
-		zones_size[ZONE_DMA] = high_pfn;
-	else {
-		zones_size[ZONE_DMA] = dma_pfn;
-		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
-	}
-
-	/* Initialize mem_map[]. */
-	free_area_init_node(0, &contig_page_data, zones_size,
-			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+	free_area_init_nodes(max_zone_pfns);
 
 	/*
 	 * map whole physical memory to virtual memory (identity mapping)
@@ -237,8 +224,6 @@ void __init paging_init(void)
 	__raw_local_irq_ssm(ssm_mask);
 
 	local_flush_tlb();
-
-	return;
 }
 #endif /* CONFIG_64BIT */
 