path: root/arch/s390/mm/init.c
Diffstat (limited to 'arch/s390/mm/init.c')
-rw-r--r--	arch/s390/mm/init.c	184
1 file changed, 47 insertions(+), 137 deletions(-)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index e1881c31b1cb..4bb21be3b007 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -24,6 +24,7 @@
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
 #include <linux/pfn.h>
+#include <linux/poison.h>
 
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -69,6 +70,8 @@ void show_mem(void)
 	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 	i = max_mapnr;
 	while (i-- > 0) {
+		if (!pfn_valid(i))
+			continue;
 		page = pfn_to_page(i);
 		total++;
 		if (PageReserved(page))
@@ -84,150 +87,52 @@ void show_mem(void)
 	printk("%d pages swap cached\n",cached);
 }
 
-extern unsigned long __initdata zholes_size[];
-/*
- * paging_init() sets up the page tables
- */
-
-#ifndef CONFIG_64BIT
-void __init paging_init(void)
+static void __init setup_ro_region(void)
 {
-	pgd_t * pg_dir;
-	pte_t * pg_table;
-	pte_t pte;
-	int i;
-	unsigned long tmp;
-	unsigned long pfn = 0;
-	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
-	static const int ssm_mask = 0x04000000L;
-	unsigned long ro_start_pfn, ro_end_pfn;
-	unsigned long zones_size[MAX_NR_ZONES];
-
-	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
-	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
-
-	memset(zones_size, 0, sizeof(zones_size));
-	zones_size[ZONE_DMA] = max_low_pfn;
-	free_area_init_node(0, &contig_page_data, zones_size,
-			    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
-			    zholes_size);
-
-	/* unmap whole virtual address space */
-
-	pg_dir = swapper_pg_dir;
-
-	for (i = 0; i < PTRS_PER_PGD; i++)
-		pmd_clear((pmd_t *) pg_dir++);
-
-	/*
-	 * map whole physical memory to virtual memory (identity mapping)
-	 */
-
-	pg_dir = swapper_pg_dir;
-
-	while (pfn < max_low_pfn) {
-		/*
-		 * pg_table is physical at this point
-		 */
-		pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
-
-		pmd_populate_kernel(&init_mm, (pmd_t *) pg_dir, pg_table);
-		pg_dir++;
-
-		for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
-			if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
-				pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
-			else
-				pte = pfn_pte(pfn, PAGE_KERNEL);
-			if (pfn >= max_low_pfn)
-				pte_val(pte) = _PAGE_TYPE_EMPTY;
-			set_pte(pg_table, pte);
-			pfn++;
-		}
-	}
-
-	S390_lowcore.kernel_asce = pgdir_k;
-
-	/* enable virtual mapping in kernel mode */
-	__ctl_load(pgdir_k, 1, 1);
-	__ctl_load(pgdir_k, 7, 7);
-	__ctl_load(pgdir_k, 13, 13);
-	__raw_local_irq_ssm(ssm_mask);
-
-	local_flush_tlb();
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	pte_t new_pte;
+	unsigned long address, end;
+
+	address = ((unsigned long)&__start_rodata) & PAGE_MASK;
+	end = PFN_ALIGN((unsigned long)&__end_rodata);
+
+	for (; address < end; address += PAGE_SIZE) {
+		pgd = pgd_offset_k(address);
+		pmd = pmd_offset(pgd, address);
+		pte = pte_offset_kernel(pmd, address);
+		new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
+		set_pte(pte, new_pte);
+	}
 }
 
-#else /* CONFIG_64BIT */
+extern void vmem_map_init(void);
 
+/*
+ * paging_init() sets up the page tables
+ */
 void __init paging_init(void)
 {
-	pgd_t * pg_dir;
-	pmd_t * pm_dir;
-	pte_t * pt_dir;
-	pte_t pte;
-	int i,j,k;
-	unsigned long pfn = 0;
-	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
-		_KERN_REGION_TABLE;
+	pgd_t *pg_dir;
+	int i;
+	unsigned long pgdir_k;
 	static const int ssm_mask = 0x04000000L;
-	unsigned long zones_size[MAX_NR_ZONES];
-	unsigned long dma_pfn, high_pfn;
-	unsigned long ro_start_pfn, ro_end_pfn;
-
-	memset(zones_size, 0, sizeof(zones_size));
-	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
-	high_pfn = max_low_pfn;
-	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
-	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
-
-	if (dma_pfn > high_pfn)
-		zones_size[ZONE_DMA] = high_pfn;
-	else {
-		zones_size[ZONE_DMA] = dma_pfn;
-		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
-	}
-
-	/* Initialize mem_map[]. */
-	free_area_init_node(0, &contig_page_data, zones_size,
-			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
-	/*
-	 * map whole physical memory to virtual memory (identity mapping)
-	 */
-
-	pg_dir = swapper_pg_dir;
-
-	for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) {
+	pg_dir = swapper_pg_dir;
 
-		if (pfn >= max_low_pfn) {
-			pgd_clear(pg_dir);
-			continue;
-		}
-
-		pm_dir = (pmd_t *) alloc_bootmem_pages(PAGE_SIZE * 4);
-		pgd_populate(&init_mm, pg_dir, pm_dir);
-
-		for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) {
-			if (pfn >= max_low_pfn) {
-				pmd_clear(pm_dir);
-				continue;
-			}
-
-			pt_dir = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
-			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
-
-			for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
-				if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
-					pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
-				else
-					pte = pfn_pte(pfn, PAGE_KERNEL);
-				if (pfn >= max_low_pfn)
-					pte_val(pte) = _PAGE_TYPE_EMPTY;
-				set_pte(pt_dir, pte);
-				pfn++;
-			}
-		}
-	}
+#ifdef CONFIG_64BIT
+	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
+	for (i = 0; i < PTRS_PER_PGD; i++)
+		pgd_clear(pg_dir + i);
+#else
+	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
+	for (i = 0; i < PTRS_PER_PGD; i++)
+		pmd_clear((pmd_t *)(pg_dir + i));
+#endif
+	vmem_map_init();
+	setup_ro_region();
 
 	S390_lowcore.kernel_asce = pgdir_k;
 
@@ -237,9 +142,11 @@ void __init paging_init(void)
 	__ctl_load(pgdir_k, 13, 13);
 	__raw_local_irq_ssm(ssm_mask);
 
-	local_flush_tlb();
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+	free_area_init_nodes(max_zone_pfns);
 }
-#endif /* CONFIG_64BIT */
 
 void __init mem_init(void)
 {
@@ -269,6 +176,8 @@ void __init mem_init(void)
 	printk("Write protected kernel read-only data: %#lx - %#lx\n",
 	       (unsigned long)&__start_rodata,
 	       PFN_ALIGN((unsigned long)&__end_rodata) - 1);
+	printk("Virtual memmap size: %ldk\n",
+	       (max_pfn * sizeof(struct page)) >> 10);
 }
 
 void free_initmem(void)
@@ -279,6 +188,7 @@ void free_initmem(void)
 	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
+		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
 		free_page(addr);
 		totalram_pages++;
 	}