author		Heiko Carstens <heiko.carstens@de.ibm.com>	2006-10-04 14:02:19 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-10-04 14:02:19 -0400
commit		7676bef9c183fd573822cac9992927ef596d584c (patch)
tree		1bf18f9cfda947b23e69d47b84500614a59ba95e /arch/s390/mm
parent		cb601d41c175b7419efc91075a714d6a157bb0ac (diff)
[S390] Have s390 use add_active_range() and free_area_init_nodes.
Size zones and holes in an architecture independent manner for s390.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
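For context: this commit moves s390 onto the arch-independent zone sizing infrastructure (CONFIG_ARCH_POPULATES_NODE_MAP, 2.6.19-era). The architecture first registers its usable PFN ranges with add_active_range(), then hands only per-zone maximum PFNs to free_area_init_nodes(), which derives zone sizes and holes itself. Below is a minimal sketch of that two-step flow; the mem_chunk type and register_active_ranges() are illustrative assumptions, since the actual s390 registration happens in arch/s390/kernel/setup.c and is outside this diffstat.

/*
 * Sketch of the add_active_range()/free_area_init_nodes() flow
 * (2.6.19-era API). mem_chunk and register_active_ranges() are
 * hypothetical helpers for illustration only.
 */
#include <linux/mm.h>		/* add_active_range(), free_area_init_nodes() */
#include <linux/bootmem.h>	/* max_low_pfn */
#include <linux/string.h>	/* memset() */
#include <linux/pfn.h>		/* PFN_DOWN() */
#include <asm/dma.h>		/* MAX_DMA_ADDRESS */

struct mem_chunk {		/* hypothetical range descriptor */
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static void __init register_active_ranges(struct mem_chunk *chunk, int nr)
{
	int i;

	/*
	 * Step 1: report every usable PFN range to the core. Holes are
	 * whatever falls between the registered ranges, so no explicit
	 * zholes_size[] bookkeeping is needed anymore.
	 */
	for (i = 0; i < nr; i++)
		add_active_range(0 /* nid */, chunk[i].start_pfn,
				 chunk[i].end_pfn);
}

static void __init size_zones(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	/*
	 * Step 2: pass only the per-zone upper bounds; the core
	 * intersects them with the active ranges registered above to
	 * size each zone and its holes.
	 */
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}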
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/init.c	35 ++++++++++-------------------------
1 file changed, 10 insertions(+), 25 deletions(-)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 127044e1707c..c302508ae31e 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -82,7 +82,6 @@ void show_mem(void)
         printk("%d pages swap cached\n",cached);
 }
 
-extern unsigned long __initdata zholes_size[];
 /*
  * paging_init() sets up the page tables
  */
@@ -99,16 +98,15 @@ void __init paging_init(void)
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
         static const int ssm_mask = 0x04000000L;
         unsigned long ro_start_pfn, ro_end_pfn;
-        unsigned long zones_size[MAX_NR_ZONES];
+        unsigned long max_zone_pfns[MAX_NR_ZONES];
 
         ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
         ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-        memset(zones_size, 0, sizeof(zones_size));
-        zones_size[ZONE_DMA] = max_low_pfn;
-        free_area_init_node(0, &contig_page_data, zones_size,
-                            __pa(PAGE_OFFSET) >> PAGE_SHIFT,
-                            zholes_size);
+        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+        max_zone_pfns[ZONE_DMA] = max_low_pfn;
+        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+        free_area_init_nodes(max_zone_pfns);
 
         /* unmap whole virtual address space */
 
@@ -153,7 +151,6 @@ void __init paging_init(void)
         __raw_local_irq_ssm(ssm_mask);
 
         local_flush_tlb();
-        return;
 }
 
 #else /* CONFIG_64BIT */
@@ -169,26 +166,16 @@ void __init paging_init(void)
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
                 _KERN_REGION_TABLE;
         static const int ssm_mask = 0x04000000L;
-        unsigned long zones_size[MAX_NR_ZONES];
-        unsigned long dma_pfn, high_pfn;
         unsigned long ro_start_pfn, ro_end_pfn;
+        unsigned long max_zone_pfns[MAX_NR_ZONES];
 
-        memset(zones_size, 0, sizeof(zones_size));
-        dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
-        high_pfn = max_low_pfn;
         ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
         ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-        if (dma_pfn > high_pfn)
-                zones_size[ZONE_DMA] = high_pfn;
-        else {
-                zones_size[ZONE_DMA] = dma_pfn;
-                zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
-        }
-
-        /* Initialize mem_map[]. */
-        free_area_init_node(0, &contig_page_data, zones_size,
-                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
+        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+        max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
+        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+        free_area_init_nodes(max_zone_pfns);
 
         /*
          * map whole physical memory to virtual memory (identity mapping)
@@ -237,8 +224,6 @@ void __init paging_init(void)
         __raw_local_irq_ssm(ssm_mask);
 
         local_flush_tlb();
-
-        return;
 }
 #endif /* CONFIG_64BIT */
 
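A note on the new bounds, as I read the diff: on the 31-bit side both ZONE_DMA and ZONE_NORMAL are capped at max_low_pfn, which leaves ZONE_NORMAL empty since all memory addressable there is DMA-capable; on the 64-bit side ZONE_DMA ends at PFN_DOWN(MAX_DMA_ADDRESS) (2 GB on s390) and ZONE_NORMAL covers the remainder up to max_low_pfn. The open-coded dma_pfn/high_pfn comparison, the zholes_size[] array, and the trailing return statements all drop out because free_area_init_nodes() computes zone sizes and holes from the ranges registered via add_active_range().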