author	Heiko Carstens <heiko.carstens@de.ibm.com>	2006-11-06 04:49:00 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-11-06 04:49:00 -0500
commit	bcc8bcb1f0cc51c0042497d5de2d79743050e3bb (patch)
tree	b60d66fb534589a2770f69265ccc768977711993 /arch/s390/mm/init.c
parent	d1ed6a3ea10aa7b199c434f6ffd1b6761896567a (diff)
[S390] revert add_active_range() usage patch.
Commit 7676bef9c183fd573822cac9992927ef596d584c breaks DCSS support on
s390. DCSS needs initialized struct pages to work. With the usage of
add_active_range() only the struct pages for physically present pages
are initialized.

This could be fixed if the DCSS driver would initialize the struct
pages itself, but this doesn't work either. This is because the mem_map
array does not include holes after the last present memory area and
therefore there is nothing that could be initialized.

To fix this and to avoid some dirty hacks revert this patch for now.
Will be added later when we move to a virtual mem_map.

Cc: Carsten Otte <cotte@de.ibm.com>
Cc: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
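The spanned-versus-present distinction in the message can be made concrete with a small model. The following is an illustrative userspace C sketch, not kernel code: every name and pfn value in it is invented, and it only mimics how far mem_map reaches under each scheme when a DCSS segment is mapped into a hole behind the last present memory area.

/* Illustrative model only -- all values here are invented. It mimics
 * the commit message: with the reverted code, mem_map spans the node
 * up to max_low_pfn (holes included), so a DCSS segment in a hole
 * finds initialized struct pages; with add_active_range() the array
 * effectively ends at the last present pfn. */
#include <stdio.h>

int main(void)
{
	unsigned long present_end = 0x40000;	/* last present pfn */
	unsigned long max_low_pfn = 0x60000;	/* span end; DCSS maps into the hole */

	unsigned long spanned = max_low_pfn;	/* revert: struct pages cover the hole */
	unsigned long covered = present_end;	/* add_active_range(): hole missing */

	printf("struct pages with the revert: %lu\n", spanned);
	printf("struct pages without it:      %lu\n", covered);
	return 0;
}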
Diffstat (limited to 'arch/s390/mm/init.c')
-rw-r--r--	arch/s390/mm/init.c	32
1 file changed, 22 insertions(+), 10 deletions(-)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index d99891718709..e1881c31b1cb 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -84,6 +84,7 @@ void show_mem(void)
84 printk("%d pages swap cached\n",cached); 84 printk("%d pages swap cached\n",cached);
85} 85}
86 86
87extern unsigned long __initdata zholes_size[];
87/* 88/*
88 * paging_init() sets up the page tables 89 * paging_init() sets up the page tables
89 */ 90 */
@@ -100,15 +101,16 @@ void __init paging_init(void)
 	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
 	static const int ssm_mask = 0x04000000L;
 	unsigned long ro_start_pfn, ro_end_pfn;
-	unsigned long max_zone_pfns[MAX_NR_ZONES];
+	unsigned long zones_size[MAX_NR_ZONES];
 
 	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
 	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-	max_zone_pfns[ZONE_DMA] = max_low_pfn;
-	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-	free_area_init_nodes(max_zone_pfns);
+	memset(zones_size, 0, sizeof(zones_size));
+	zones_size[ZONE_DMA] = max_low_pfn;
+	free_area_init_node(0, &contig_page_data, zones_size,
+			    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
+			    zholes_size);
 
 	/* unmap whole virtual address space */
 
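A side note on the 31-bit hunk above: every page a 31-bit machine can address lies below the 2 GiB DMA limit, which is why the restored code puts all of memory into ZONE_DMA and leaves ZONE_NORMAL empty. A minimal standalone sketch, assuming 4 KiB pages and a 1 GiB machine (both assumptions, not taken from the diff):

/* Standalone sketch of the 31-bit path: max_low_pfn can never exceed
 * the 2 GiB DMA limit, so ZONE_DMA takes everything. Page size and
 * machine size are assumed values. */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: 4 KiB pages */

int main(void)
{
	unsigned long zones_size[2] = { 0, 0 };	/* [0] DMA, [1] NORMAL */
	unsigned long max_low_pfn = (1UL << 30) >> PAGE_SHIFT;	/* 1 GiB */

	zones_size[0] = max_low_pfn;	/* all of memory goes to ZONE_DMA */
	printf("ZONE_DMA=%lu ZONE_NORMAL=%lu\n", zones_size[0], zones_size[1]);
	return 0;
}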
@@ -168,16 +170,26 @@ void __init paging_init(void)
 	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
 		_KERN_REGION_TABLE;
 	static const int ssm_mask = 0x04000000L;
+	unsigned long zones_size[MAX_NR_ZONES];
+	unsigned long dma_pfn, high_pfn;
 	unsigned long ro_start_pfn, ro_end_pfn;
-	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
+	memset(zones_size, 0, sizeof(zones_size));
+	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
+	high_pfn = max_low_pfn;
 	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
 	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
-	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-	free_area_init_nodes(max_zone_pfns);
+	if (dma_pfn > high_pfn)
+		zones_size[ZONE_DMA] = high_pfn;
+	else {
+		zones_size[ZONE_DMA] = dma_pfn;
+		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
+	}
+
+	/* Initialize mem_map[]. */
+	free_area_init_node(0, &contig_page_data, zones_size,
+			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
 
 	/*
 	 * map whole physical memory to virtual memory (identity mapping)
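The zone split in the 64-bit hunk is plain pfn arithmetic and can be checked in isolation. In the sketch below, the 2 GiB MAX_DMA_ADDRESS, the 4 KiB page size, and the 8 GiB memory size are all assumptions for illustration; only the if/else mirrors the restored kernel logic:

/* Standalone check of the 64-bit zone split above. The DMA boundary,
 * page size, and memory size are assumed values, not from the diff. */
#include <stdio.h>

#define PAGE_SHIFT	12		/* assumed: 4 KiB pages */
#define MAX_DMA_ADDRESS	0x80000000UL	/* assumed: 2 GiB DMA boundary */

int main(void)
{
	unsigned long zones_size[2] = { 0, 0 };	/* [0] DMA, [1] NORMAL */
	unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
	unsigned long high_pfn = 8UL << (30 - PAGE_SHIFT);	/* 8 GiB box */

	if (dma_pfn > high_pfn)
		zones_size[0] = high_pfn;	/* everything is DMA-capable */
	else {
		zones_size[0] = dma_pfn;		/* 524288 pages */
		zones_size[1] = high_pfn - dma_pfn;	/* 1572864 pages */
	}
	printf("ZONE_DMA=%lu ZONE_NORMAL=%lu\n", zones_size[0], zones_size[1]);
	return 0;
}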