Diffstat (limited to 'arch/s390/mm/vmem.c')
-rw-r--r--  arch/s390/mm/vmem.c | 81
1 file changed, 6 insertions(+), 75 deletions(-)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 97bce6c97574..beccacf907f3 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -27,43 +27,6 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
-			   unsigned long start_pfn)
-{
-	struct page *start, *end;
-	struct page *map_start, *map_end;
-	int i;
-
-	start = pfn_to_page(start_pfn);
-	end = start + size;
-
-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		unsigned long cstart, cend;
-
-		cstart = PFN_DOWN(memory_chunk[i].addr);
-		cend = cstart + PFN_DOWN(memory_chunk[i].size);
-
-		map_start = mem_map + cstart;
-		map_end = mem_map + cend;
-
-		if (map_start < start)
-			map_start = start;
-		if (map_end > end)
-			map_end = end;
-
-		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
-			/ sizeof(struct page);
-		map_end += ((PFN_ALIGN((unsigned long) map_end)
-			     - (unsigned long) map_end)
-			    / sizeof(struct page));
-
-		if (map_start < map_end)
-			memmap_init_zone((unsigned long)(map_end - map_start),
-					 nid, zone, page_to_pfn(map_start),
-					 MEMMAP_EARLY);
-	}
-}
-
 static void __ref *vmem_alloc_pages(unsigned int order)
 {
 	if (slab_is_available())
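
Note: with the arch override gone, s390 falls back to the generic memmap initializer. For context (not part of this patch, and assuming the mm/page_alloc.c of this kernel generation), the fallback is just a macro:

/* Generic fallback used when an architecture does not define
 * __HAVE_ARCH_MEMMAP_INIT: initialize the whole pfn range in one go.
 * This is safe here because the memmap is now virtually contiguous,
 * so the chunk-by-chunk clipping done above is no longer needed. */
#ifndef __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(size, nid, zone, start_pfn) \
	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
#endif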
@@ -115,7 +78,7 @@ static pte_t __init_refok *vmem_pte_alloc(void)
 /*
  * Add a physical memory range to the 1:1 mapping.
  */
-static int vmem_add_range(unsigned long start, unsigned long size, int ro)
+static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 {
 	unsigned long address;
 	pgd_t *pg_dir;
@@ -209,10 +172,9 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 /*
  * Add a backed mem_map array to the virtual mem_map array.
  */
-static int vmem_add_mem_map(unsigned long start, unsigned long size)
+int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 {
 	unsigned long address, start_addr, end_addr;
-	struct page *map_start, *map_end;
 	pgd_t *pg_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
@@ -220,11 +182,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
 	pte_t pte;
 	int ret = -ENOMEM;
 
-	map_start = VMEM_MAP + PFN_DOWN(start);
-	map_end = VMEM_MAP + PFN_DOWN(start + size);
-
-	start_addr = (unsigned long) map_start & PAGE_MASK;
-	end_addr = PFN_ALIGN((unsigned long) map_end);
+	start_addr = (unsigned long) start;
+	end_addr = (unsigned long) (start + nr);
 
 	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
 		pg_dir = pgd_offset_k(address);
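
Note: vmemmap_populate() is the hook the common SPARSEMEM_VMEMMAP code calls to map the struct page array itself. A sketch of the generic caller, modeled on mm/sparse-vmemmap.c of this generation (the function name and constants come from the generic code, not from this patch):

/* Each memory section asks the architecture to populate the virtually
 * contiguous memmap covering its pfn range. */
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);

	if (vmemmap_populate(map, PAGES_PER_SECTION, nid))
		return NULL;
	return map;
}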
@@ -268,16 +227,6 @@ out:
 	return ret;
 }
 
-static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
-{
-	int ret;
-
-	ret = vmem_add_mem_map(start, size);
-	if (ret)
-		return ret;
-	return vmem_add_range(start, size, ro);
-}
-
 /*
  * Add memory segment to the segment list if it doesn't overlap with
  * an already present segment.
@@ -315,7 +264,7 @@ static void __remove_shared_memory(struct memory_segment *seg)
 	vmem_remove_range(seg->start, seg->size);
 }
 
-int remove_shared_memory(unsigned long start, unsigned long size)
+int vmem_remove_mapping(unsigned long start, unsigned long size)
 {
 	struct memory_segment *seg;
 	int ret;
@@ -339,11 +288,9 @@ out:
 	return ret;
 }
 
-int add_shared_memory(unsigned long start, unsigned long size)
+int vmem_add_mapping(unsigned long start, unsigned long size)
 {
 	struct memory_segment *seg;
-	struct page *page;
-	unsigned long pfn, num_pfn, end_pfn;
 	int ret;
 
 	mutex_lock(&vmem_mutex);
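
Note: the renames give external users a neutral vmem_add_mapping()/vmem_remove_mapping() pair. A hypothetical caller, loosely modeled on the DCSS code in arch/s390/mm/extmem.c (function and variable names here are illustrative only):

/* Illustrative only: map a saved segment into the kernel 1:1 mapping,
 * use it, then tear the mapping down again. */
static int use_segment(unsigned long seg_start, unsigned long seg_size)
{
	int rc;

	rc = vmem_add_mapping(seg_start, seg_size);
	if (rc)
		return rc;
	/* ... access the segment through the 1:1 mapping ... */
	vmem_remove_mapping(seg_start, seg_size);
	return 0;
}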
@@ -361,21 +308,6 @@ int add_shared_memory(unsigned long start, unsigned long size)
 	ret = vmem_add_mem(start, size, 0);
 	if (ret)
 		goto out_remove;
-
-	pfn = PFN_DOWN(start);
-	num_pfn = PFN_DOWN(size);
-	end_pfn = pfn + num_pfn;
-
-	page = pfn_to_page(pfn);
-	memset(page, 0, num_pfn * sizeof(struct page));
-
-	for (; pfn < end_pfn; pfn++) {
-		page = pfn_to_page(pfn);
-		init_page_count(page);
-		reset_page_mapcount(page);
-		SetPageReserved(page);
-		INIT_LIST_HEAD(&page->lru);
-	}
 	goto out;
 
 out_remove:
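
Note: the hand-rolled struct page setup can go, presumably because the generic memmap code performs equivalent per-page initialization. Abridged from memmap_init_zone() in mm/page_alloc.c of this generation (shown only to illustrate the overlap with the deleted loop):

/* Same init_page_count / reset_page_mapcount / SetPageReserved
 * sequence the deleted loop performed by hand, applied to every
 * page in the range. */
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
	struct page *page = pfn_to_page(pfn);

	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	reset_page_mapcount(page);
	SetPageReserved(page);
	INIT_LIST_HEAD(&page->lru);
}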
@@ -401,7 +333,6 @@ void __init vmem_map_init(void)
 	INIT_LIST_HEAD(&init_mm.context.crst_list);
 	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
 	init_mm.context.noexec = 0;
-	NODE_DATA(0)->node_mem_map = VMEM_MAP;
 	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
 	ro_end = PFN_ALIGN((unsigned long)&_eshared);
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {